Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs fixes from Al Viro:

- untangle sys_close() abuses in xt_bpf

- deal with register_shrinker() failures in sget()

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
fix "netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'"
sget(): handle failures of register_shrinker()
mm,vmscan: Make unregister_shrinker() no-op if register_shrinker() failed.

+60 -15
+5 -1
fs/super.c
··· 517 517 hlist_add_head(&s->s_instances, &type->fs_supers); 518 518 spin_unlock(&sb_lock); 519 519 get_filesystem(type); 520 - register_shrinker(&s->s_shrink); 520 + err = register_shrinker(&s->s_shrink); 521 + if (err) { 522 + deactivate_locked_super(s); 523 + s = ERR_PTR(err); 524 + } 521 525 return s; 522 526 } 523 527
+10
include/linux/bpf.h
··· 419 419 attr->numa_node : NUMA_NO_NODE; 420 420 } 421 421 422 + struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 423 + 422 424 #else /* !CONFIG_BPF_SYSCALL */ 423 425 static inline struct bpf_prog *bpf_prog_get(u32 ufd) 424 426 { ··· 508 506 { 509 507 return 0; 510 508 } 509 + 510 + static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 511 + enum bpf_prog_type type) 512 + { 513 + return ERR_PTR(-EOPNOTSUPP); 514 + } 511 515 #endif /* CONFIG_BPF_SYSCALL */ 512 516 513 517 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, ··· 521 513 { 522 514 return bpf_prog_get_type_dev(ufd, type, false); 523 515 } 516 + 517 + bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 524 518 525 519 int bpf_prog_offload_compile(struct bpf_prog *prog); 526 520 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+39 -1
kernel/bpf/inode.c
/*
 * Look up a BPF program pinned at @inode and take a reference to it.
 *
 * Checks that the caller has read+write permission on the inode, that the
 * inode actually holds a program (not a map), runs the LSM hook, and
 * verifies the program's type against the requested @type via
 * bpf_prog_get_ok() (attach_drv == false: no device-bound programs).
 *
 * Returns the program with its refcount incremented, or an ERR_PTR:
 *   -EINVAL  inode holds a map, or the type check failed
 *   -EACCES  inode is not a bpf-fs program inode
 *   other    negative errno from inode_permission()/security_bpf_prog()
 */
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
	if (ret)
		return ERR_PTR(ret);

	/* A pinned map lives behind bpf_map_iops; reject it explicitly so
	 * the caller gets -EINVAL rather than the generic -EACCES below. */
	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	/* Success path: hand back a new reference; caller must bpf_prog_put(). */
	return bpf_prog_inc(prog);
}

/*
 * Resolve a bpf-fs path @name to a pinned program of type @type.
 *
 * In-kernel replacement for going through bpf_obj_get_user() with a
 * temporary fd (the old xt_bpf set_fs(KERNEL_DS) + sys_close() dance).
 * Follows symlinks, updates the pin's atime on success, and always drops
 * the path reference. Returns a referenced program or ERR_PTR(-errno).
 */
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
+1 -1
kernel/bpf/syscall.c
··· 1057 1057 } 1058 1058 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1059 1059 1060 - static bool bpf_prog_get_ok(struct bpf_prog *prog, 1060 + bool bpf_prog_get_ok(struct bpf_prog *prog, 1061 1061 enum bpf_prog_type *attach_type, bool attach_drv) 1062 1062 { 1063 1063 /* not an attachment, just a refcount inc, always allow */
+3
mm/vmscan.c
/*
 * Remove a shrinker from the global shrinker list.
 *
 * Tolerates (as a no-op) a shrinker that was never successfully
 * registered: register_shrinker() can now fail (e.g. allocation of
 * nr_deferred), and callers such as sget()'s error path may unwind
 * through deactivate_locked_super() -> unregister_shrinker() regardless.
 * nr_deferred == NULL is the "not registered" marker, and is reset to
 * NULL after kfree() so a double unregister is also safe.
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	/* Never registered (or already unregistered) -- nothing to undo. */
	if (!shrinker->nr_deferred)
		return;
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	/* Mark as unregistered so a repeated call is a harmless no-op. */
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);
+2 -12
net/netfilter/xt_bpf.c
/*
 * Load a pinned SOCKET_FILTER program for xt_bpf by filesystem path.
 *
 * @path: NUL-terminated bpf-fs pin path, at most XT_BPF_PATH_MAX - 1
 *        characters (an unterminated/overlong buffer is rejected).
 * @ret:  out parameter; on success holds a referenced bpf_prog.
 *
 * Uses the in-kernel bpf_prog_get_type_path() helper instead of the old
 * bpf_obj_get_user() + set_fs(KERNEL_DS) + sys_close() sequence, which
 * abused userspace fd plumbing from kernel context.
 *
 * Returns 0 on success or a negative errno extracted from *ret.
 */
static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
{
	if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
		return -EINVAL;

	/* *ret is either a valid program or an ERR_PTR; PTR_ERR_OR_ZERO()
	 * collapses that into the 0/-errno convention the caller expects. */
	*ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
	return PTR_ERR_OR_ZERO(*ret);
}