Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs fixes from Al Viro:
"Overlayfs fixes from Miklos, assorted fixes from me.

Stable fodder of varying severity, all sat in -next for a while"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
ovl: ignore permissions on underlying lookup
vfs: add lookup_hash() helper
vfs: rename: check backing inode being equal
vfs: add vfs_select_inode() helper
get_rock_ridge_filename(): handle malformed NM entries
ecryptfs: fix handling of directory opening
atomic_open(): fix the handling of create_error
fix the copy vs. map logics in blk_rq_map_user_iov()
do_splice_to(): cap the size before passing to ->splice_read()

+152 -91
+8 -39
block/blk-map.c
··· 9 9 10 10 #include "blk.h" 11 11 12 - static bool iovec_gap_to_prv(struct request_queue *q, 13 - struct iovec *prv, struct iovec *cur) 14 - { 15 - unsigned long prev_end; 16 - 17 - if (!queue_virt_boundary(q)) 18 - return false; 19 - 20 - if (prv->iov_base == NULL && prv->iov_len == 0) 21 - /* prv is not set - don't check */ 22 - return false; 23 - 24 - prev_end = (unsigned long)(prv->iov_base + prv->iov_len); 25 - 26 - return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) || 27 - prev_end & queue_virt_boundary(q)); 28 - } 29 - 30 12 int blk_rq_append_bio(struct request_queue *q, struct request *rq, 31 13 struct bio *bio) 32 14 { ··· 107 125 struct rq_map_data *map_data, 108 126 const struct iov_iter *iter, gfp_t gfp_mask) 109 127 { 110 - struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; 111 - bool copy = (q->dma_pad_mask & iter->count) || map_data; 128 + bool copy = false; 129 + unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); 112 130 struct bio *bio = NULL; 113 131 struct iov_iter i; 114 132 int ret; 115 133 116 - if (!iter || !iter->count) 117 - return -EINVAL; 118 - 119 - iov_for_each(iov, i, *iter) { 120 - unsigned long uaddr = (unsigned long) iov.iov_base; 121 - 122 - if (!iov.iov_len) 123 - return -EINVAL; 124 - 125 - /* 126 - * Keep going so we check length of all segments 127 - */ 128 - if ((uaddr & queue_dma_alignment(q)) || 129 - iovec_gap_to_prv(q, &prv, &iov)) 130 - copy = true; 131 - 132 - prv.iov_base = iov.iov_base; 133 - prv.iov_len = iov.iov_len; 134 - } 134 + if (map_data) 135 + copy = true; 136 + else if (iov_iter_alignment(iter) & align) 137 + copy = true; 138 + else if (queue_virt_boundary(q)) 139 + copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); 135 140 136 141 i = *iter; 137 142 do {
+55 -16
fs/ecryptfs/file.c
··· 112 112 .sb = inode->i_sb, 113 113 }; 114 114 lower_file = ecryptfs_file_to_lower(file); 115 - lower_file->f_pos = ctx->pos; 116 115 rc = iterate_dir(lower_file, &buf.ctx); 117 116 ctx->pos = buf.ctx.pos; 118 117 if (rc < 0) ··· 222 223 } 223 224 ecryptfs_set_file_lower( 224 225 file, ecryptfs_inode_to_private(inode)->lower_file); 225 - if (d_is_dir(ecryptfs_dentry)) { 226 - ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); 227 - mutex_lock(&crypt_stat->cs_mutex); 228 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 229 - mutex_unlock(&crypt_stat->cs_mutex); 230 - rc = 0; 231 - goto out; 232 - } 233 226 rc = read_or_initialize_metadata(ecryptfs_dentry); 234 227 if (rc) 235 228 goto out_put; ··· 236 245 ecryptfs_file_to_private(file)); 237 246 out: 238 247 return rc; 248 + } 249 + 250 + /** 251 + * ecryptfs_dir_open 252 + * @inode: inode speciying file to open 253 + * @file: Structure to return filled in 254 + * 255 + * Opens the file specified by inode. 256 + * 257 + * Returns zero on success; non-zero otherwise 258 + */ 259 + static int ecryptfs_dir_open(struct inode *inode, struct file *file) 260 + { 261 + struct dentry *ecryptfs_dentry = file->f_path.dentry; 262 + /* Private value of ecryptfs_dentry allocated in 263 + * ecryptfs_lookup() */ 264 + struct ecryptfs_file_info *file_info; 265 + struct file *lower_file; 266 + 267 + /* Released in ecryptfs_release or end of function if failure */ 268 + file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); 269 + ecryptfs_set_file_private(file, file_info); 270 + if (unlikely(!file_info)) { 271 + ecryptfs_printk(KERN_ERR, 272 + "Error attempting to allocate memory\n"); 273 + return -ENOMEM; 274 + } 275 + lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry), 276 + file->f_flags, current_cred()); 277 + if (IS_ERR(lower_file)) { 278 + printk(KERN_ERR "%s: Error attempting to initialize " 279 + "the lower file for the dentry with name " 280 + "[%pd]; rc = [%ld]\n", __func__, 281 + 
ecryptfs_dentry, PTR_ERR(lower_file)); 282 + kmem_cache_free(ecryptfs_file_info_cache, file_info); 283 + return PTR_ERR(lower_file); 284 + } 285 + ecryptfs_set_file_lower(file, lower_file); 286 + return 0; 239 287 } 240 288 241 289 static int ecryptfs_flush(struct file *file, fl_owner_t td) ··· 295 265 kmem_cache_free(ecryptfs_file_info_cache, 296 266 ecryptfs_file_to_private(file)); 297 267 return 0; 268 + } 269 + 270 + static int ecryptfs_dir_release(struct inode *inode, struct file *file) 271 + { 272 + fput(ecryptfs_file_to_lower(file)); 273 + kmem_cache_free(ecryptfs_file_info_cache, 274 + ecryptfs_file_to_private(file)); 275 + return 0; 276 + } 277 + 278 + static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence) 279 + { 280 + return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence); 298 281 } 299 282 300 283 static int ··· 389 346 #ifdef CONFIG_COMPAT 390 347 .compat_ioctl = ecryptfs_compat_ioctl, 391 348 #endif 392 - .open = ecryptfs_open, 393 - .flush = ecryptfs_flush, 394 - .release = ecryptfs_release, 349 + .open = ecryptfs_dir_open, 350 + .release = ecryptfs_dir_release, 395 351 .fsync = ecryptfs_fsync, 396 - .fasync = ecryptfs_fasync, 397 - .splice_read = generic_file_splice_read, 398 - .llseek = default_llseek, 352 + .llseek = ecryptfs_dir_llseek, 399 353 }; 400 354 401 355 const struct file_operations ecryptfs_main_fops = { 402 356 .llseek = generic_file_llseek, 403 357 .read_iter = ecryptfs_read_update_atime, 404 358 .write_iter = generic_file_write_iter, 405 - .iterate = ecryptfs_readdir, 406 359 .unlocked_ioctl = ecryptfs_unlocked_ioctl, 407 360 #ifdef CONFIG_COMPAT 408 361 .compat_ioctl = ecryptfs_compat_ioctl,
+10 -3
fs/isofs/rock.c
··· 203 203 int retnamlen = 0; 204 204 int truncate = 0; 205 205 int ret = 0; 206 + char *p; 207 + int len; 206 208 207 209 if (!ISOFS_SB(inode->i_sb)->s_rock) 208 210 return 0; ··· 269 267 rr->u.NM.flags); 270 268 break; 271 269 } 272 - if ((strlen(retname) + rr->len - 5) >= 254) { 270 + len = rr->len - 5; 271 + if (retnamlen + len >= 254) { 273 272 truncate = 1; 274 273 break; 275 274 } 276 - strncat(retname, rr->u.NM.name, rr->len - 5); 277 - retnamlen += rr->len - 5; 275 + p = memchr(rr->u.NM.name, '\0', len); 276 + if (unlikely(p)) 277 + len = p - rr->u.NM.name; 278 + memcpy(retname + retnamlen, rr->u.NM.name, len); 279 + retnamlen += len; 280 + retname[retnamlen] = '\0'; 278 281 break; 279 282 case SIG('R', 'E'): 280 283 kfree(rs.buffer);
+37 -22
fs/namei.c
··· 2267 2267 EXPORT_SYMBOL(vfs_path_lookup); 2268 2268 2269 2269 /** 2270 + * lookup_hash - lookup single pathname component on already hashed name 2271 + * @name: name and hash to lookup 2272 + * @base: base directory to lookup from 2273 + * 2274 + * The name must have been verified and hashed (see lookup_one_len()). Using 2275 + * this after just full_name_hash() is unsafe. 2276 + * 2277 + * This function also doesn't check for search permission on base directory. 2278 + * 2279 + * Use lookup_one_len_unlocked() instead, unless you really know what you are 2280 + * doing. 2281 + * 2282 + * Do not hold i_mutex; this helper takes i_mutex if necessary. 2283 + */ 2284 + struct dentry *lookup_hash(const struct qstr *name, struct dentry *base) 2285 + { 2286 + struct dentry *ret; 2287 + 2288 + ret = lookup_dcache(name, base, 0); 2289 + if (!ret) 2290 + ret = lookup_slow(name, base, 0); 2291 + 2292 + return ret; 2293 + } 2294 + EXPORT_SYMBOL(lookup_hash); 2295 + 2296 + /** 2270 2297 * lookup_one_len - filesystem helper to lookup single pathname component 2271 2298 * @name: pathname component to lookup 2272 2299 * @base: base directory to lookup from ··· 2364 2337 struct qstr this; 2365 2338 unsigned int c; 2366 2339 int err; 2367 - struct dentry *ret; 2368 2340 2369 2341 this.name = name; 2370 2342 this.len = len; ··· 2395 2369 if (err) 2396 2370 return ERR_PTR(err); 2397 2371 2398 - ret = lookup_dcache(&this, base, 0); 2399 - if (!ret) 2400 - ret = lookup_slow(&this, base, 0); 2401 - return ret; 2372 + return lookup_hash(&this, base); 2402 2373 } 2403 2374 EXPORT_SYMBOL(lookup_one_len_unlocked); 2404 2375 ··· 2965 2942 dentry = lookup_real(dir, dentry, nd->flags); 2966 2943 if (IS_ERR(dentry)) 2967 2944 return PTR_ERR(dentry); 2968 - 2969 - if (create_error) { 2970 - int open_flag = op->open_flag; 2971 - 2972 - error = create_error; 2973 - if ((open_flag & O_EXCL)) { 2974 - if (!dentry->d_inode) 2975 - goto out; 2976 - } else if (!dentry->d_inode) { 2977 - goto out; 
2978 - } else if ((open_flag & O_TRUNC) && 2979 - d_is_reg(dentry)) { 2980 - goto out; 2981 - } 2982 - /* will fail later, go on to get the right error */ 2983 - } 2945 + } 2946 + if (create_error && !dentry->d_inode) { 2947 + error = create_error; 2948 + goto out; 2984 2949 } 2985 2950 looked_up: 2986 2951 path->dentry = dentry; ··· 4224 4213 bool new_is_dir = false; 4225 4214 unsigned max_links = new_dir->i_sb->s_max_links; 4226 4215 4227 - if (source == target) 4216 + /* 4217 + * Check source == target. 4218 + * On overlayfs need to look at underlying inodes. 4219 + */ 4220 + if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0)) 4228 4221 return 0; 4229 4222 4230 4223 error = may_delete(old_dir, old_dentry, is_dir);
+4 -8
fs/open.c
··· 840 840 int vfs_open(const struct path *path, struct file *file, 841 841 const struct cred *cred) 842 842 { 843 - struct dentry *dentry = path->dentry; 844 - struct inode *inode = dentry->d_inode; 843 + struct inode *inode = vfs_select_inode(path->dentry, file->f_flags); 844 + 845 + if (IS_ERR(inode)) 846 + return PTR_ERR(inode); 845 847 846 848 file->f_path = *path; 847 - if (dentry->d_flags & DCACHE_OP_SELECT_INODE) { 848 - inode = dentry->d_op->d_select_inode(dentry, file->f_flags); 849 - if (IS_ERR(inode)) 850 - return PTR_ERR(inode); 851 - } 852 - 853 849 return do_dentry_open(file, inode, NULL, cred); 854 850 } 855 851
+1 -3
fs/overlayfs/super.c
··· 411 411 { 412 412 struct dentry *dentry; 413 413 414 - inode_lock(dir->d_inode); 415 - dentry = lookup_one_len(name->name, dir, name->len); 416 - inode_unlock(dir->d_inode); 414 + dentry = lookup_hash(name, dir); 417 415 418 416 if (IS_ERR(dentry)) { 419 417 if (PTR_ERR(dentry) == -ENOENT)
+3
fs/splice.c
··· 1143 1143 if (unlikely(ret < 0)) 1144 1144 return ret; 1145 1145 1146 + if (unlikely(len > MAX_RW_COUNT)) 1147 + len = MAX_RW_COUNT; 1148 + 1146 1149 if (in->f_op->splice_read) 1147 1150 splice_read = in->f_op->splice_read; 1148 1151 else
+12
include/linux/dcache.h
··· 565 565 return dentry; 566 566 } 567 567 568 + static inline struct inode *vfs_select_inode(struct dentry *dentry, 569 + unsigned open_flags) 570 + { 571 + struct inode *inode = d_inode(dentry); 572 + 573 + if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE)) 574 + inode = dentry->d_op->d_select_inode(dentry, open_flags); 575 + 576 + return inode; 577 + } 578 + 579 + 568 580 #endif /* __LINUX_DCACHE_H */
+2
include/linux/namei.h
··· 79 79 80 80 extern struct dentry *lookup_one_len(const char *, struct dentry *, int); 81 81 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); 82 + struct qstr; 83 + extern struct dentry *lookup_hash(const struct qstr *, struct dentry *); 82 84 83 85 extern int follow_down_one(struct path *); 84 86 extern int follow_down(struct path *);
+1
include/linux/uio.h
··· 87 87 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); 88 88 size_t iov_iter_zero(size_t bytes, struct iov_iter *); 89 89 unsigned long iov_iter_alignment(const struct iov_iter *i); 90 + unsigned long iov_iter_gap_alignment(const struct iov_iter *i); 90 91 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, 91 92 unsigned long nr_segs, size_t count); 92 93 void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
+19
lib/iov_iter.c
··· 569 569 } 570 570 EXPORT_SYMBOL(iov_iter_alignment); 571 571 572 + unsigned long iov_iter_gap_alignment(const struct iov_iter *i) 573 + { 574 + unsigned long res = 0; 575 + size_t size = i->count; 576 + if (!size) 577 + return 0; 578 + 579 + iterate_all_kinds(i, size, v, 580 + (res |= (!res ? 0 : (unsigned long)v.iov_base) | 581 + (size != v.iov_len ? size : 0), 0), 582 + (res |= (!res ? 0 : (unsigned long)v.bv_offset) | 583 + (size != v.bv_len ? size : 0)), 584 + (res |= (!res ? 0 : (unsigned long)v.iov_base) | 585 + (size != v.iov_len ? size : 0)) 586 + ); 587 + return res; 588 + } 589 + EXPORT_SYMBOL(iov_iter_gap_alignment); 590 + 572 591 ssize_t iov_iter_get_pages(struct iov_iter *i, 573 592 struct page **pages, size_t maxsize, unsigned maxpages, 574 593 size_t *start)