Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'vfs-6.19-rc1.fd_prepare.fs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull fd prepare updates from Christian Brauner:
"This adds the FD_ADD() and FD_PREPARE() primitives. They simplify the
common pattern of get_unused_fd_flags() + create file + fd_install()
that is used extensively throughout the kernel and currently requires
cumbersome cleanup paths.

FD_ADD() - For simple cases where a file is installed immediately:

fd = FD_ADD(O_CLOEXEC, vfio_device_open_file(device));
if (fd < 0)
vfio_device_put_registration(device);
return fd;

FD_PREPARE() - For cases requiring access to the fd or file, or
additional work before publishing:

FD_PREPARE(fdf, O_CLOEXEC, sync_file->file);
if (fdf.err) {
fput(sync_file->file);
return fdf.err;
}

data.fence = fd_prepare_fd(fdf);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;

return fd_publish(fdf);

The primitives are centered around struct fd_prepare. FD_PREPARE()
encapsulates all allocation and cleanup logic and must be followed by
a call to fd_publish() which associates the fd with the file and
installs it into the caller's fdtable. If fd_publish() isn't called,
both are deallocated automatically. FD_ADD() is a shorthand that does
fd_publish() immediately and never exposes the struct to the caller.

I've implemented this in a way that it's compatible with the cleanup
infrastructure while also being usable separately. IOW, it's centered
around struct fd_prepare which is aliased to class_fd_prepare_t and so
we can make use of all the basic guard infrastructure"

* tag 'vfs-6.19-rc1.fd_prepare.fs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (42 commits)
io_uring: convert io_create_mock_file() to FD_PREPARE()
file: convert replace_fd() to FD_PREPARE()
vfio: convert vfio_group_ioctl_get_device_fd() to FD_ADD()
tty: convert ptm_open_peer() to FD_ADD()
ntsync: convert ntsync_obj_get_fd() to FD_PREPARE()
media: convert media_request_alloc() to FD_PREPARE()
hv: convert mshv_ioctl_create_partition() to FD_ADD()
gpio: convert linehandle_create() to FD_PREPARE()
pseries: port papr_rtas_setup_file_interface() to FD_ADD()
pseries: convert papr_platform_dump_create_handle() to FD_ADD()
spufs: convert spufs_gang_open() to FD_PREPARE()
papr-hvpipe: convert papr_hvpipe_dev_create_handle() to FD_PREPARE()
spufs: convert spufs_context_open() to FD_PREPARE()
net/socket: convert __sys_accept4_file() to FD_ADD()
net/socket: convert sock_map_fd() to FD_ADD()
net/kcm: convert kcm_ioctl() to FD_PREPARE()
net/handshake: convert handshake_nl_accept_doit() to FD_PREPARE()
secretmem: convert memfd_secret() to FD_ADD()
memfd: convert memfd_create() to FD_ADD()
bpf: convert bpf_token_create() to FD_PREPARE()
...

+506 -873
+10 -32
arch/powerpc/platforms/cell/spufs/inode.c
··· 267 267 268 268 static int spufs_context_open(const struct path *path) 269 269 { 270 - int ret; 271 - struct file *filp; 272 - 273 - ret = get_unused_fd_flags(0); 274 - if (ret < 0) 275 - return ret; 276 - 277 - filp = dentry_open(path, O_RDONLY, current_cred()); 278 - if (IS_ERR(filp)) { 279 - put_unused_fd(ret); 280 - return PTR_ERR(filp); 281 - } 282 - 283 - filp->f_op = &spufs_context_fops; 284 - fd_install(ret, filp); 285 - return ret; 270 + FD_PREPARE(fdf, 0, dentry_open(path, O_RDONLY, current_cred())); 271 + if (fdf.err) 272 + return fdf.err; 273 + fd_prepare_file(fdf)->f_op = &spufs_context_fops; 274 + return fd_publish(fdf); 286 275 } 287 276 288 277 static struct spu_context * ··· 497 508 498 509 static int spufs_gang_open(const struct path *path) 499 510 { 500 - int ret; 501 - struct file *filp; 502 - 503 - ret = get_unused_fd_flags(0); 504 - if (ret < 0) 505 - return ret; 506 - 507 511 /* 508 512 * get references for dget and mntget, will be released 509 513 * in error path of *_open(). 510 514 */ 511 - filp = dentry_open(path, O_RDONLY, current_cred()); 512 - if (IS_ERR(filp)) { 513 - put_unused_fd(ret); 514 - return PTR_ERR(filp); 515 - } 516 - 517 - filp->f_op = &spufs_gang_fops; 518 - fd_install(ret, filp); 519 - return ret; 515 + FD_PREPARE(fdf, 0, dentry_open(path, O_RDONLY, current_cred())); 516 + if (fdf.err) 517 + return fdf.err; 518 + fd_prepare_file(fdf)->f_op = &spufs_gang_fops; 519 + return fd_publish(fdf); 520 520 } 521 521 522 522 static int spufs_create_gang(struct inode *inode,
+9 -30
arch/powerpc/platforms/pseries/papr-hvpipe.c
··· 479 479 480 480 static int papr_hvpipe_dev_create_handle(u32 srcID) 481 481 { 482 - struct hvpipe_source_info *src_info; 483 - struct file *file; 484 - long err; 485 - int fd; 482 + struct hvpipe_source_info *src_info __free(kfree) = NULL; 486 483 487 484 spin_lock(&hvpipe_src_list_lock); 488 485 /* ··· 503 506 src_info->tsk = current; 504 507 init_waitqueue_head(&src_info->recv_wqh); 505 508 506 - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 507 - if (fd < 0) { 508 - err = fd; 509 - goto free_buf; 510 - } 509 + FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC, 510 + anon_inode_getfile("[papr-hvpipe]", &papr_hvpipe_handle_ops, 511 + (void *)src_info, O_RDWR)); 512 + if (fdf.err) 513 + return fdf.err; 511 514 512 - file = anon_inode_getfile("[papr-hvpipe]", 513 - &papr_hvpipe_handle_ops, (void *)src_info, 514 - O_RDWR); 515 - if (IS_ERR(file)) { 516 - err = PTR_ERR(file); 517 - goto free_fd; 518 - } 519 - 515 + retain_and_null_ptr(src_info); 520 516 spin_lock(&hvpipe_src_list_lock); 521 517 /* 522 518 * If two processes are executing ioctl() for the same ··· 518 528 */ 519 529 if (hvpipe_find_source(srcID)) { 520 530 spin_unlock(&hvpipe_src_list_lock); 521 - err = -EALREADY; 522 - goto free_file; 531 + return -EALREADY; 523 532 } 524 533 list_add(&src_info->list, &hvpipe_src_list); 525 534 spin_unlock(&hvpipe_src_list_lock); 526 - 527 - fd_install(fd, file); 528 - return fd; 529 - 530 - free_file: 531 - fput(file); 532 - free_fd: 533 - put_unused_fd(fd); 534 - free_buf: 535 - kfree(src_info); 536 - return err; 535 + return fd_publish(fdf); 537 536 } 538 537 539 538 /*
+8 -22
arch/powerpc/platforms/pseries/papr-platform-dump.c
··· 303 303 { 304 304 struct ibm_platform_dump_params *params; 305 305 u64 param_dump_tag; 306 - struct file *file; 307 - long err; 308 306 int fd; 309 307 310 308 /* ··· 332 334 params->dump_tag_lo = (u32)(dump_tag & 0x00000000ffffffffULL); 333 335 params->status = RTAS_IBM_PLATFORM_DUMP_START; 334 336 335 - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 337 + fd = FD_ADD(O_RDONLY | O_CLOEXEC, 338 + anon_inode_getfile_fmode("[papr-platform-dump]", 339 + &papr_platform_dump_handle_ops, 340 + (void *)params, O_RDONLY, 341 + FMODE_LSEEK | FMODE_PREAD)); 336 342 if (fd < 0) { 337 - err = fd; 338 - goto free_area; 343 + rtas_work_area_free(params->work_area); 344 + kfree(params); 345 + return fd; 339 346 } 340 - 341 - file = anon_inode_getfile_fmode("[papr-platform-dump]", 342 - &papr_platform_dump_handle_ops, 343 - (void *)params, O_RDONLY, 344 - FMODE_LSEEK | FMODE_PREAD); 345 - if (IS_ERR(file)) { 346 - err = PTR_ERR(file); 347 - goto put_fd; 348 - } 349 - 350 - fd_install(fd, file); 351 347 352 348 list_add(&params->list, &platform_dump_list); 353 349 354 350 pr_info("%s (%d) initiated platform dump for dump tag %llu\n", 355 351 current->comm, current->pid, dump_tag); 356 352 return fd; 357 - put_fd: 358 - put_unused_fd(fd); 359 - free_area: 360 - rtas_work_area_free(params->work_area); 361 - kfree(params); 362 - return err; 363 353 } 364 354 365 355 /*
+5 -22
arch/powerpc/platforms/pseries/papr-rtas-common.c
··· 205 205 char *name) 206 206 { 207 207 const struct papr_rtas_blob *blob; 208 - struct file *file; 209 - long ret; 210 208 int fd; 211 209 212 210 blob = papr_rtas_retrieve(seq); 213 211 if (IS_ERR(blob)) 214 212 return PTR_ERR(blob); 215 213 216 - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 217 - if (fd < 0) { 218 - ret = fd; 219 - goto free_blob; 220 - } 221 - 222 - file = anon_inode_getfile_fmode(name, fops, (void *)blob, 223 - O_RDONLY, FMODE_LSEEK | FMODE_PREAD); 224 - if (IS_ERR(file)) { 225 - ret = PTR_ERR(file); 226 - goto put_fd; 227 - } 228 - 229 - fd_install(fd, file); 214 + fd = FD_ADD(O_RDONLY | O_CLOEXEC, 215 + anon_inode_getfile_fmode(name, fops, (void *)blob, O_RDONLY, 216 + FMODE_LSEEK | FMODE_PREAD)); 217 + if (fd < 0) 218 + papr_rtas_blob_free(blob); 230 219 return fd; 231 - 232 - put_fd: 233 - put_unused_fd(fd); 234 - free_blob: 235 - papr_rtas_blob_free(blob); 236 - return ret; 237 220 } 238 221 239 222 /*
+1 -9
drivers/dma-buf/dma-buf.c
··· 768 768 */ 769 769 int dma_buf_fd(struct dma_buf *dmabuf, int flags) 770 770 { 771 - int fd; 772 - 773 771 if (!dmabuf || !dmabuf->file) 774 772 return -EINVAL; 775 773 776 - fd = get_unused_fd_flags(flags); 777 - if (fd < 0) 778 - return fd; 779 - 780 - fd_install(fd, dmabuf->file); 781 - 782 - return fd; 774 + return FD_ADD(flags, dmabuf->file); 783 775 } 784 776 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF"); 785 777
+21 -45
drivers/gpio/gpiolib-cdev.c
··· 298 298 #endif 299 299 }; 300 300 301 + DEFINE_FREE(linehandle_free, struct linehandle_state *, if (!IS_ERR_OR_NULL(_T)) linehandle_free(_T)) 302 + 301 303 static int linehandle_create(struct gpio_device *gdev, void __user *ip) 302 304 { 303 305 struct gpiohandle_request handlereq; 304 - struct linehandle_state *lh; 305 - struct file *file; 306 - int fd, i, ret; 306 + struct linehandle_state *lh __free(linehandle_free) = NULL; 307 + int i, ret; 307 308 u32 lflags; 308 309 309 310 if (copy_from_user(&handlereq, ip, sizeof(handlereq))) ··· 328 327 lh->label = kstrndup(handlereq.consumer_label, 329 328 sizeof(handlereq.consumer_label) - 1, 330 329 GFP_KERNEL); 331 - if (!lh->label) { 332 - ret = -ENOMEM; 333 - goto out_free_lh; 334 - } 330 + if (!lh->label) 331 + return -ENOMEM; 335 332 } 336 333 337 334 lh->num_descs = handlereq.lines; ··· 339 340 u32 offset = handlereq.lineoffsets[i]; 340 341 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset); 341 342 342 - if (IS_ERR(desc)) { 343 - ret = PTR_ERR(desc); 344 - goto out_free_lh; 345 - } 343 + if (IS_ERR(desc)) 344 + return PTR_ERR(desc); 346 345 347 346 ret = gpiod_request_user(desc, lh->label); 348 347 if (ret) 349 - goto out_free_lh; 348 + return ret; 350 349 lh->descs[i] = desc; 351 350 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags); 352 351 353 352 ret = gpiod_set_transitory(desc, false); 354 353 if (ret < 0) 355 - goto out_free_lh; 354 + return ret; 356 355 357 356 /* 358 357 * Lines have to be requested explicitly for input ··· 361 364 362 365 ret = gpiod_direction_output_nonotify(desc, val); 363 366 if (ret) 364 - goto out_free_lh; 367 + return ret; 365 368 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { 366 369 ret = gpiod_direction_input_nonotify(desc); 367 370 if (ret) 368 - goto out_free_lh; 371 + return ret; 369 372 } 370 373 371 374 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); ··· 374 377 offset); 375 378 } 376 379 377 - fd = get_unused_fd_flags(O_RDONLY | 
O_CLOEXEC); 378 - if (fd < 0) { 379 - ret = fd; 380 - goto out_free_lh; 381 - } 380 + FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC, 381 + anon_inode_getfile("gpio-linehandle", &linehandle_fileops, 382 + lh, O_RDONLY | O_CLOEXEC)); 383 + if (fdf.err) 384 + return fdf.err; 385 + retain_and_null_ptr(lh); 382 386 383 - file = anon_inode_getfile("gpio-linehandle", 384 - &linehandle_fileops, 385 - lh, 386 - O_RDONLY | O_CLOEXEC); 387 - if (IS_ERR(file)) { 388 - ret = PTR_ERR(file); 389 - goto out_put_unused_fd; 390 - } 391 - 392 - handlereq.fd = fd; 393 - if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { 394 - /* 395 - * fput() will trigger the release() callback, so do not go onto 396 - * the regular error cleanup path here. 397 - */ 398 - fput(file); 399 - put_unused_fd(fd); 387 + handlereq.fd = fd_prepare_fd(fdf); 388 + if (copy_to_user(ip, &handlereq, sizeof(handlereq))) 400 389 return -EFAULT; 401 - } 402 390 403 - fd_install(fd, file); 391 + fd_publish(fdf); 404 392 405 393 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 406 394 lh->num_descs); 407 395 408 396 return 0; 409 - 410 - out_put_unused_fd: 411 - put_unused_fd(fd); 412 - out_free_lh: 413 - linehandle_free(lh); 414 - return ret; 415 397 } 416 398 #endif /* CONFIG_GPIO_CDEV_V1 */ 417 399
+6 -24
drivers/hv/mshv_root_main.c
··· 1870 1870 struct hv_partition_creation_properties creation_properties = {}; 1871 1871 union hv_partition_isolation_properties isolation_properties = {}; 1872 1872 struct mshv_partition *partition; 1873 - struct file *file; 1874 - int fd; 1875 1873 long ret; 1876 1874 1877 1875 if (copy_from_user(&args, user_arg, sizeof(args))) ··· 1936 1938 goto delete_partition; 1937 1939 1938 1940 ret = mshv_init_async_handler(partition); 1939 - if (ret) 1940 - goto remove_partition; 1941 - 1942 - fd = get_unused_fd_flags(O_CLOEXEC); 1943 - if (fd < 0) { 1944 - ret = fd; 1945 - goto remove_partition; 1941 + if (!ret) { 1942 + ret = FD_ADD(O_CLOEXEC, anon_inode_getfile("mshv_partition", 1943 + &mshv_partition_fops, 1944 + partition, O_RDWR)); 1945 + if (ret >= 0) 1946 + return ret; 1946 1947 } 1947 - 1948 - file = anon_inode_getfile("mshv_partition", &mshv_partition_fops, 1949 - partition, O_RDWR); 1950 - if (IS_ERR(file)) { 1951 - ret = PTR_ERR(file); 1952 - goto put_fd; 1953 - } 1954 - 1955 - fd_install(fd, file); 1956 - 1957 - return fd; 1958 - 1959 - put_fd: 1960 - put_unused_fd(fd); 1961 - remove_partition: 1962 1948 remove_partition(partition); 1963 1949 delete_partition: 1964 1950 hv_call_delete_partition(partition->pt_id);
+12 -22
drivers/media/mc/mc-request.c
··· 282 282 int media_request_alloc(struct media_device *mdev, int *alloc_fd) 283 283 { 284 284 struct media_request *req; 285 - struct file *filp; 286 - int fd; 287 285 int ret; 288 286 289 287 /* Either both are NULL or both are non-NULL */ ··· 295 297 if (!req) 296 298 return -ENOMEM; 297 299 298 - fd = get_unused_fd_flags(O_CLOEXEC); 299 - if (fd < 0) { 300 - ret = fd; 301 - goto err_free_req; 302 - } 303 - 304 - filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC); 305 - if (IS_ERR(filp)) { 306 - ret = PTR_ERR(filp); 307 - goto err_put_fd; 308 - } 309 - 310 - filp->private_data = req; 311 300 req->mdev = mdev; 312 301 req->state = MEDIA_REQUEST_STATE_IDLE; 313 302 req->num_incomplete_objects = 0; ··· 305 320 req->updating_count = 0; 306 321 req->access_count = 0; 307 322 308 - *alloc_fd = fd; 323 + FD_PREPARE(fdf, O_CLOEXEC, 324 + anon_inode_getfile("request", &request_fops, NULL, 325 + O_CLOEXEC)); 326 + if (fdf.err) { 327 + ret = fdf.err; 328 + goto err_free_req; 329 + } 330 + 331 + fd_prepare_file(fdf)->private_data = req; 332 + 333 + *alloc_fd = fd_publish(fdf); 309 334 310 335 snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d", 311 - atomic_inc_return(&mdev->request_id), fd); 336 + atomic_inc_return(&mdev->request_id), *alloc_fd); 312 337 dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str); 313 338 314 - fd_install(fd, filp); 315 - 316 339 return 0; 317 - 318 - err_put_fd: 319 - put_unused_fd(fd); 320 340 321 341 err_free_req: 322 342 if (mdev->ops->req_free)
+6 -15
drivers/misc/ntsync.c
··· 721 721 722 722 static int ntsync_obj_get_fd(struct ntsync_obj *obj) 723 723 { 724 - struct file *file; 725 - int fd; 726 - 727 - fd = get_unused_fd_flags(O_CLOEXEC); 728 - if (fd < 0) 729 - return fd; 730 - file = anon_inode_getfile("ntsync", &ntsync_obj_fops, obj, O_RDWR); 731 - if (IS_ERR(file)) { 732 - put_unused_fd(fd); 733 - return PTR_ERR(file); 734 - } 735 - obj->file = file; 736 - fd_install(fd, file); 737 - 738 - return fd; 724 + FD_PREPARE(fdf, O_CLOEXEC, 725 + anon_inode_getfile("ntsync", &ntsync_obj_fops, obj, O_RDWR)); 726 + if (fdf.err) 727 + return fdf.err; 728 + obj->file = fd_prepare_file(fdf); 729 + return fd_publish(fdf); 739 730 } 740 731 741 732 static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
+18 -33
drivers/tty/pty.c
··· 589 589 #ifdef CONFIG_UNIX98_PTYS 590 590 static struct cdev ptmx_cdev; 591 591 592 + static struct file *ptm_open_peer_file(struct file *master, 593 + struct tty_struct *tty, int flags) 594 + { 595 + struct path path; 596 + struct file *file; 597 + 598 + /* Compute the slave's path */ 599 + path.mnt = devpts_mntget(master, tty->driver_data); 600 + if (IS_ERR(path.mnt)) 601 + return ERR_CAST(path.mnt); 602 + path.dentry = tty->link->driver_data; 603 + 604 + file = dentry_open(&path, flags, current_cred()); 605 + mntput(path.mnt); 606 + return file; 607 + } 608 + 592 609 /** 593 610 * ptm_open_peer - open the peer of a pty 594 611 * @master: the open struct file of the ptmx device node ··· 618 601 */ 619 602 int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) 620 603 { 621 - int fd; 622 - struct file *filp; 623 - int retval = -EINVAL; 624 - struct path path; 625 - 626 604 if (tty->driver != ptm_driver) 627 605 return -EIO; 628 606 629 - fd = get_unused_fd_flags(flags); 630 - if (fd < 0) { 631 - retval = fd; 632 - goto err; 633 - } 634 - 635 - /* Compute the slave's path */ 636 - path.mnt = devpts_mntget(master, tty->driver_data); 637 - if (IS_ERR(path.mnt)) { 638 - retval = PTR_ERR(path.mnt); 639 - goto err_put; 640 - } 641 - path.dentry = tty->link->driver_data; 642 - 643 - filp = dentry_open(&path, flags, current_cred()); 644 - mntput(path.mnt); 645 - if (IS_ERR(filp)) { 646 - retval = PTR_ERR(filp); 647 - goto err_put; 648 - } 649 - 650 - fd_install(fd, filp); 651 - return fd; 652 - 653 - err_put: 654 - put_unused_fd(fd); 655 - err: 656 - return retval; 607 + return FD_ADD(flags, ptm_open_peer_file(master, tty, flags)); 657 608 } 658 609 659 610 static int pty_unix98_ioctl(struct tty_struct *tty,
+5 -23
drivers/vfio/group.c
··· 299 299 char __user *arg) 300 300 { 301 301 struct vfio_device *device; 302 - struct file *filep; 303 302 char *buf; 304 - int fdno; 305 - int ret; 303 + int fd; 306 304 307 305 buf = strndup_user(arg, PAGE_SIZE); 308 306 if (IS_ERR(buf)) ··· 311 313 if (IS_ERR(device)) 312 314 return PTR_ERR(device); 313 315 314 - fdno = get_unused_fd_flags(O_CLOEXEC); 315 - if (fdno < 0) { 316 - ret = fdno; 317 - goto err_put_device; 318 - } 319 - 320 - filep = vfio_device_open_file(device); 321 - if (IS_ERR(filep)) { 322 - ret = PTR_ERR(filep); 323 - goto err_put_fdno; 324 - } 325 - 326 - fd_install(fdno, filep); 327 - return fdno; 328 - 329 - err_put_fdno: 330 - put_unused_fd(fdno); 331 - err_put_device: 332 - vfio_device_put_registration(device); 333 - return ret; 316 + fd = FD_ADD(O_CLOEXEC, vfio_device_open_file(device)); 317 + if (fd < 0) 318 + vfio_device_put_registration(device); 319 + return fd; 334 320 } 335 321 336 322 static int vfio_group_ioctl_get_status(struct vfio_group *group,
+2 -21
fs/anon_inodes.c
··· 280 280 const struct inode *context_inode, 281 281 bool make_inode) 282 282 { 283 - int error, fd; 284 - struct file *file; 285 - 286 - error = get_unused_fd_flags(flags); 287 - if (error < 0) 288 - return error; 289 - fd = error; 290 - 291 - file = __anon_inode_getfile(name, fops, priv, flags, context_inode, 292 - make_inode); 293 - if (IS_ERR(file)) { 294 - error = PTR_ERR(file); 295 - goto err_put_unused_fd; 296 - } 297 - fd_install(fd, file); 298 - 299 - return fd; 300 - 301 - err_put_unused_fd: 302 - put_unused_fd(fd); 303 - return error; 283 + return FD_ADD(flags, __anon_inode_getfile(name, fops, priv, flags, 284 + context_inode, make_inode)); 304 285 } 305 286 306 287 /**
+6 -24
fs/autofs/dev-ioctl.c
··· 231 231 */ 232 232 static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid) 233 233 { 234 - int err, fd; 234 + struct path path __free(path_put) = {}; 235 + int err; 235 236 236 - fd = get_unused_fd_flags(O_CLOEXEC); 237 - if (likely(fd >= 0)) { 238 - struct file *filp; 239 - struct path path; 237 + err = find_autofs_mount(name, &path, test_by_dev, &devid); 238 + if (err) 239 + return err; 240 240 241 - err = find_autofs_mount(name, &path, test_by_dev, &devid); 242 - if (err) 243 - goto out; 244 - 245 - filp = dentry_open(&path, O_RDONLY, current_cred()); 246 - path_put(&path); 247 - if (IS_ERR(filp)) { 248 - err = PTR_ERR(filp); 249 - goto out; 250 - } 251 - 252 - fd_install(fd, filp); 253 - } 254 - 255 - return fd; 256 - 257 - out: 258 - put_unused_fd(fd); 259 - return err; 241 + return FD_ADD(O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred())); 260 242 } 261 243 262 244 /* Open a file descriptor on an autofs mount point */
+10 -19
fs/eventfd.c
··· 378 378 379 379 static int do_eventfd(unsigned int count, int flags) 380 380 { 381 - struct eventfd_ctx *ctx; 382 - struct file *file; 383 - int fd; 381 + struct eventfd_ctx *ctx __free(kfree) = NULL; 384 382 385 383 /* Check the EFD_* constants for consistency. */ 386 384 BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC); ··· 396 398 init_waitqueue_head(&ctx->wqh); 397 399 ctx->count = count; 398 400 ctx->flags = flags; 399 - ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL); 400 401 401 402 flags &= EFD_SHARED_FCNTL_FLAGS; 402 403 flags |= O_RDWR; 403 - fd = get_unused_fd_flags(flags); 404 - if (fd < 0) 405 - goto err; 406 404 407 - file = anon_inode_getfile_fmode("[eventfd]", &eventfd_fops, 408 - ctx, flags, FMODE_NOWAIT); 409 - if (IS_ERR(file)) { 410 - put_unused_fd(fd); 411 - fd = PTR_ERR(file); 412 - goto err; 413 - } 414 - fd_install(fd, file); 415 - return fd; 416 - err: 417 - eventfd_free_ctx(ctx); 418 - return fd; 405 + FD_PREPARE(fdf, flags, 406 + anon_inode_getfile_fmode("[eventfd]", &eventfd_fops, ctx, 407 + flags, FMODE_NOWAIT)); 408 + if (fdf.err) 409 + return fdf.err; 410 + 411 + ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL); 412 + retain_and_null_ptr(ctx); 413 + return fd_publish(fdf); 419 414 } 420 415 421 416 SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
+10 -22
fs/eventpoll.c
··· 2165 2165 */ 2166 2166 static int do_epoll_create(int flags) 2167 2167 { 2168 - int error, fd; 2169 - struct eventpoll *ep = NULL; 2170 - struct file *file; 2168 + int error; 2169 + struct eventpoll *ep; 2171 2170 2172 2171 /* Check the EPOLL_* constant for consistency. */ 2173 2172 BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); ··· 2183 2184 * Creates all the items needed to setup an eventpoll file. That is, 2184 2185 * a file structure and a free file descriptor. 2185 2186 */ 2186 - fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC)); 2187 - if (fd < 0) { 2188 - error = fd; 2189 - goto out_free_ep; 2187 + FD_PREPARE(fdf, O_RDWR | (flags & O_CLOEXEC), 2188 + anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep, 2189 + O_RDWR | (flags & O_CLOEXEC))); 2190 + if (fdf.err) { 2191 + ep_clear_and_put(ep); 2192 + return fdf.err; 2190 2193 } 2191 - file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep, 2192 - O_RDWR | (flags & O_CLOEXEC)); 2193 - if (IS_ERR(file)) { 2194 - error = PTR_ERR(file); 2195 - goto out_free_fd; 2196 - } 2197 - ep->file = file; 2198 - fd_install(fd, file); 2199 - return fd; 2200 - 2201 - out_free_fd: 2202 - put_unused_fd(fd); 2203 - out_free_ep: 2204 - ep_clear_and_put(ep); 2205 - return error; 2194 + ep->file = fd_prepare_file(fdf); 2195 + return fd_publish(fdf); 2206 2196 } 2207 2197 2208 2198 SYSCALL_DEFINE1(epoll_create1, int, flags)
+1 -2
fs/exec.c
··· 1280 1280 1281 1281 /* Pass the opened binary to the interpreter. */ 1282 1282 if (bprm->have_execfd) { 1283 - retval = get_unused_fd_flags(0); 1283 + retval = FD_ADD(0, bprm->executable); 1284 1284 if (retval < 0) 1285 1285 goto out_unlock; 1286 - fd_install(retval, bprm->executable); 1287 1286 bprm->executable = NULL; 1288 1287 bprm->execfd = retval; 1289 1288 }
+13 -17
fs/fhandle.c
··· 404 404 return retval; 405 405 } 406 406 407 + static struct file *file_open_handle(struct path *path, int open_flag) 408 + { 409 + const struct export_operations *eops; 410 + 411 + eops = path->mnt->mnt_sb->s_export_op; 412 + if (eops->open) 413 + return eops->open(path, open_flag); 414 + 415 + return file_open_root(path, "", open_flag, 0); 416 + } 417 + 407 418 static long do_handle_open(int mountdirfd, struct file_handle __user *ufh, 408 419 int open_flag) 409 420 { 410 - long retval = 0; 421 + long retval; 411 422 struct path path __free(path_put) = {}; 412 - struct file *file; 413 - const struct export_operations *eops; 414 423 415 424 retval = handle_to_path(mountdirfd, ufh, &path, open_flag); 416 425 if (retval) 417 426 return retval; 418 427 419 - CLASS(get_unused_fd, fd)(open_flag); 420 - if (fd < 0) 421 - return fd; 422 - 423 - eops = path.mnt->mnt_sb->s_export_op; 424 - if (eops->open) 425 - file = eops->open(&path, open_flag); 426 - else 427 - file = file_open_root(&path, "", open_flag, 0); 428 - if (IS_ERR(file)) 429 - return PTR_ERR(file); 430 - 431 - fd_install(fd, file); 432 - return take_fd(fd); 428 + return FD_ADD(open_flag, file_open_handle(&path, open_flag)); 433 429 } 434 430 435 431 /**
+8 -11
fs/file.c
··· 1380 1380 */ 1381 1381 int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) 1382 1382 { 1383 - int new_fd; 1384 1383 int error; 1385 1384 1386 1385 error = security_file_receive(file); 1387 1386 if (error) 1388 1387 return error; 1389 1388 1390 - new_fd = get_unused_fd_flags(o_flags); 1391 - if (new_fd < 0) 1392 - return new_fd; 1389 + FD_PREPARE(fdf, o_flags, file); 1390 + if (fdf.err) 1391 + return fdf.err; 1392 + get_file(file); 1393 1393 1394 1394 if (ufd) { 1395 - error = put_user(new_fd, ufd); 1396 - if (error) { 1397 - put_unused_fd(new_fd); 1395 + error = put_user(fd_prepare_fd(fdf), ufd); 1396 + if (error) 1398 1397 return error; 1399 - } 1400 1398 } 1401 1399 1402 - fd_install(new_fd, get_file(file)); 1403 - __receive_sock(file); 1404 - return new_fd; 1400 + __receive_sock(fd_prepare_file(fdf)); 1401 + return fd_publish(fdf); 1405 1402 } 1406 1403 EXPORT_SYMBOL_GPL(receive_fd); 1407 1404
+37 -66
fs/namespace.c
··· 3100 3100 3101 3101 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) 3102 3102 { 3103 - int fd; 3104 - struct file *file __free(fput) = NULL; 3105 - 3106 - file = vfs_open_tree(dfd, filename, flags); 3107 - if (IS_ERR(file)) 3108 - return PTR_ERR(file); 3109 - 3110 - fd = get_unused_fd_flags(flags & O_CLOEXEC); 3111 - if (fd < 0) 3112 - return fd; 3113 - 3114 - fd_install(fd, no_free_ptr(file)); 3115 - return fd; 3103 + return FD_ADD(flags, vfs_open_tree(dfd, filename, flags)); 3116 3104 } 3117 3105 3118 3106 /* ··· 4269 4281 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, 4270 4282 unsigned int, attr_flags) 4271 4283 { 4284 + struct path new_path __free(path_put) = {}; 4272 4285 struct mnt_namespace *ns; 4273 4286 struct fs_context *fc; 4274 - struct file *file; 4275 - struct path newmount; 4287 + struct vfsmount *new_mnt; 4276 4288 struct mount *mnt; 4277 4289 unsigned int mnt_flags = 0; 4278 4290 long ret; ··· 4310 4322 4311 4323 fc = fd_file(f)->private_data; 4312 4324 4313 - ret = mutex_lock_interruptible(&fc->uapi_mutex); 4314 - if (ret < 0) 4325 + ACQUIRE(mutex_intr, uapi_mutex)(&fc->uapi_mutex); 4326 + ret = ACQUIRE_ERR(mutex_intr, &uapi_mutex); 4327 + if (ret) 4315 4328 return ret; 4316 4329 4317 4330 /* There must be a valid superblock or we can't mount it */ 4318 4331 ret = -EINVAL; 4319 4332 if (!fc->root) 4320 - goto err_unlock; 4333 + return ret; 4321 4334 4322 4335 ret = -EPERM; 4323 4336 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) { 4324 4337 errorfcp(fc, "VFS", "Mount too revealing"); 4325 - goto err_unlock; 4338 + return ret; 4326 4339 } 4327 4340 4328 4341 ret = -EBUSY; 4329 4342 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) 4330 - goto err_unlock; 4343 + return ret; 4331 4344 4332 4345 if (fc->sb_flags & SB_MANDLOCK) 4333 4346 warn_mandlock(); 4334 4347 4335 - newmount.mnt = vfs_create_mount(fc); 4336 - if (IS_ERR(newmount.mnt)) { 4337 - ret = PTR_ERR(newmount.mnt); 4338 - goto err_unlock; 
4339 - } 4340 - newmount.dentry = dget(fc->root); 4341 - newmount.mnt->mnt_flags = mnt_flags; 4348 + new_mnt = vfs_create_mount(fc); 4349 + if (IS_ERR(new_mnt)) 4350 + return PTR_ERR(new_mnt); 4351 + new_mnt->mnt_flags = mnt_flags; 4352 + 4353 + new_path.dentry = dget(fc->root); 4354 + new_path.mnt = new_mnt; 4342 4355 4343 4356 /* We've done the mount bit - now move the file context into more or 4344 4357 * less the same state as if we'd done an fspick(). We don't want to ··· 4349 4360 vfs_clean_context(fc); 4350 4361 4351 4362 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); 4352 - if (IS_ERR(ns)) { 4353 - ret = PTR_ERR(ns); 4354 - goto err_path; 4355 - } 4356 - mnt = real_mount(newmount.mnt); 4363 + if (IS_ERR(ns)) 4364 + return PTR_ERR(ns); 4365 + mnt = real_mount(new_path.mnt); 4357 4366 ns->root = mnt; 4358 4367 ns->nr_mounts = 1; 4359 4368 mnt_add_to_ns(ns, mnt); 4360 - mntget(newmount.mnt); 4369 + mntget(new_path.mnt); 4361 4370 4362 - /* Attach to an apparent O_PATH fd with a note that we need to unmount 4363 - * it, not just simply put it. 4364 - */ 4365 - file = dentry_open(&newmount, O_PATH, fc->cred); 4366 - if (IS_ERR(file)) { 4367 - dissolve_on_fput(newmount.mnt); 4368 - ret = PTR_ERR(file); 4369 - goto err_path; 4371 + FD_PREPARE(fdf, (flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0, 4372 + dentry_open(&new_path, O_PATH, fc->cred)); 4373 + if (fdf.err) { 4374 + dissolve_on_fput(new_path.mnt); 4375 + return fdf.err; 4370 4376 } 4371 - file->f_mode |= FMODE_NEED_UNMOUNT; 4372 4377 4373 - ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0); 4374 - if (ret >= 0) 4375 - fd_install(ret, file); 4376 - else 4377 - fput(file); 4378 - 4379 - err_path: 4380 - path_put(&newmount); 4381 - err_unlock: 4382 - mutex_unlock(&fc->uapi_mutex); 4383 - return ret; 4378 + /* 4379 + * Attach to an apparent O_PATH fd with a note that we 4380 + * need to unmount it, not just simply put it. 
4381 + */ 4382 + fd_prepare_file(fdf)->f_mode |= FMODE_NEED_UNMOUNT; 4383 + return fd_publish(fdf); 4384 4384 } 4385 4385 4386 4386 static inline int vfs_move_mount(const struct path *from_path, ··· 5011 5033 unsigned, flags, struct mount_attr __user *, uattr, 5012 5034 size_t, usize) 5013 5035 { 5014 - struct file __free(fput) *file = NULL; 5015 - int fd; 5016 - 5017 5036 if (!uattr && usize) 5018 5037 return -EINVAL; 5019 5038 5020 - file = vfs_open_tree(dfd, filename, flags); 5021 - if (IS_ERR(file)) 5022 - return PTR_ERR(file); 5039 + FD_PREPARE(fdf, flags, vfs_open_tree(dfd, filename, flags)); 5040 + if (fdf.err) 5041 + return fdf.err; 5023 5042 5024 5043 if (uattr) { 5025 - int ret; 5026 5044 struct mount_kattr kattr = {}; 5045 + struct file *file = fd_prepare_file(fdf); 5046 + int ret; 5027 5047 5028 5048 if (flags & OPEN_TREE_CLONE) 5029 5049 kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE; ··· 5037 5061 return ret; 5038 5062 } 5039 5063 5040 - fd = get_unused_fd_flags(flags & O_CLOEXEC); 5041 - if (fd < 0) 5042 - return fd; 5043 - 5044 - fd_install(fd, no_free_ptr(file)); 5045 - return fd; 5064 + return fd_publish(fdf); 5046 5065 } 5047 5066 5048 5067 int show_path(struct seq_file *m, struct dentry *root)
+22 -38
fs/notify/fanotify/fanotify_user.c
··· 1597 1597 return hash; 1598 1598 } 1599 1599 1600 + DEFINE_CLASS(fsnotify_group, 1601 + struct fsnotify_group *, 1602 + if (!IS_ERR_OR_NULL(_T)) fsnotify_destroy_group(_T), 1603 + fsnotify_alloc_group(ops, flags), 1604 + const struct fsnotify_ops *ops, int flags) 1605 + 1600 1606 /* fanotify syscalls */ 1601 1607 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) 1602 1608 { 1603 1609 struct user_namespace *user_ns = current_user_ns(); 1604 - struct fsnotify_group *group; 1605 1610 int f_flags, fd; 1606 1611 unsigned int fid_mode = flags & FANOTIFY_FID_BITS; 1607 1612 unsigned int class = flags & FANOTIFY_CLASS_BITS; 1608 1613 unsigned int internal_flags = 0; 1609 - struct file *file; 1610 1614 1611 1615 pr_debug("%s: flags=%x event_f_flags=%x\n", 1612 1616 __func__, flags, event_f_flags); ··· 1694 1690 if (flags & FAN_NONBLOCK) 1695 1691 f_flags |= O_NONBLOCK; 1696 1692 1697 - /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ 1698 - group = fsnotify_alloc_group(&fanotify_fsnotify_ops, 1693 + CLASS(fsnotify_group, group)(&fanotify_fsnotify_ops, 1699 1694 FSNOTIFY_GROUP_USER); 1700 - if (IS_ERR(group)) { 1695 + /* fsnotify_alloc_group takes a ref. 
Dropped in fanotify_release */ 1696 + if (IS_ERR(group)) 1701 1697 return PTR_ERR(group); 1702 - } 1703 1698 1704 1699 /* Enforce groups limits per user in all containing user ns */ 1705 1700 group->fanotify_data.ucounts = inc_ucount(user_ns, current_euid(), 1706 1701 UCOUNT_FANOTIFY_GROUPS); 1707 - if (!group->fanotify_data.ucounts) { 1708 - fd = -EMFILE; 1709 - goto out_destroy_group; 1710 - } 1702 + if (!group->fanotify_data.ucounts) 1703 + return -EMFILE; 1711 1704 1712 1705 group->fanotify_data.flags = flags | internal_flags; 1713 1706 group->memcg = get_mem_cgroup_from_mm(current->mm); 1714 1707 group->user_ns = get_user_ns(user_ns); 1715 1708 1716 1709 group->fanotify_data.merge_hash = fanotify_alloc_merge_hash(); 1717 - if (!group->fanotify_data.merge_hash) { 1718 - fd = -ENOMEM; 1719 - goto out_destroy_group; 1720 - } 1710 + if (!group->fanotify_data.merge_hash) 1711 + return -ENOMEM; 1721 1712 1722 1713 group->overflow_event = fanotify_alloc_overflow_event(); 1723 - if (unlikely(!group->overflow_event)) { 1724 - fd = -ENOMEM; 1725 - goto out_destroy_group; 1726 - } 1714 + if (unlikely(!group->overflow_event)) 1715 + return -ENOMEM; 1727 1716 1728 1717 if (force_o_largefile()) 1729 1718 event_f_flags |= O_LARGEFILE; ··· 1735 1738 group->priority = FSNOTIFY_PRIO_PRE_CONTENT; 1736 1739 break; 1737 1740 default: 1738 - fd = -EINVAL; 1739 - goto out_destroy_group; 1741 + return -EINVAL; 1740 1742 } 1741 1743 1742 1744 BUILD_BUG_ON(!(FANOTIFY_ADMIN_INIT_FLAGS & FAN_UNLIMITED_QUEUE)); ··· 1746 1750 } 1747 1751 1748 1752 if (flags & FAN_ENABLE_AUDIT) { 1749 - fd = -EPERM; 1750 1753 if (!capable(CAP_AUDIT_WRITE)) 1751 - goto out_destroy_group; 1754 + return -EPERM; 1752 1755 } 1753 1756 1754 - fd = get_unused_fd_flags(f_flags); 1755 - if (fd < 0) 1756 - goto out_destroy_group; 1757 - 1758 - file = anon_inode_getfile_fmode("[fanotify]", &fanotify_fops, group, 1759 - f_flags, FMODE_NONOTIFY); 1760 - if (IS_ERR(file)) { 1761 - put_unused_fd(fd); 1762 - fd = 
PTR_ERR(file); 1763 - goto out_destroy_group; 1764 - } 1765 - fd_install(fd, file); 1766 - return fd; 1767 - 1768 - out_destroy_group: 1769 - fsnotify_destroy_group(group); 1757 + fd = FD_ADD(f_flags, 1758 + anon_inode_getfile_fmode("[fanotify]", &fanotify_fops, 1759 + group, f_flags, FMODE_NONOTIFY)); 1760 + if (fd >= 0) 1761 + retain_and_null_ptr(group); 1770 1762 return fd; 1771 1763 } 1772 1764
+13 -34
fs/nsfs.c
··· 110 110 int open_namespace(struct ns_common *ns) 111 111 { 112 112 struct path path __free(path_put) = {}; 113 - struct file *f; 114 113 int err; 115 114 116 115 /* call first to consume reference */ ··· 117 118 if (err < 0) 118 119 return err; 119 120 120 - CLASS(get_unused_fd, fd)(O_CLOEXEC); 121 - if (fd < 0) 122 - return fd; 123 - 124 - f = dentry_open(&path, O_RDONLY, current_cred()); 125 - if (IS_ERR(f)) 126 - return PTR_ERR(f); 127 - 128 - fd_install(fd, f); 129 - return take_fd(fd); 121 + return FD_ADD(O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred())); 130 122 } 131 123 132 124 int open_related_ns(struct ns_common *ns, ··· 303 313 struct mnt_ns_info kinfo = {}; 304 314 struct mnt_ns_info __user *uinfo = (struct mnt_ns_info __user *)arg; 305 315 struct path path __free(path_put) = {}; 306 - struct file *f __free(fput) = NULL; 307 316 size_t usize = _IOC_SIZE(ioctl); 308 317 309 318 if (ns->ns_type != CLONE_NEWNS) ··· 321 332 if (ret) 322 333 return ret; 323 334 324 - CLASS(get_unused_fd, fd)(O_CLOEXEC); 325 - if (fd < 0) 326 - return fd; 327 - 328 - f = dentry_open(&path, O_RDONLY, current_cred()); 329 - if (IS_ERR(f)) 330 - return PTR_ERR(f); 331 - 332 - if (uinfo) { 333 - /* 334 - * If @uinfo is passed return all information about the 335 - * mount namespace as well. 336 - */ 337 - ret = copy_ns_info_to_user(to_mnt_ns(ns), uinfo, usize, &kinfo); 338 - if (ret) 339 - return ret; 340 - } 341 - 342 - /* Transfer reference of @f to caller's fdtable. */ 343 - fd_install(fd, no_free_ptr(f)); 344 - /* File descriptor is live so hand it off to the caller. */ 345 - return take_fd(fd); 335 + FD_PREPARE(fdf, O_CLOEXEC, dentry_open(&path, O_RDONLY, current_cred())); 336 + if (fdf.err) 337 + return fdf.err; 338 + /* 339 + * If @uinfo is passed return all information about the 340 + * mount namespace as well. 
341 + */ 342 + ret = copy_ns_info_to_user(to_mnt_ns(ns), uinfo, usize, &kinfo); 343 + if (ret) 344 + return ret; 345 + ret = fd_publish(fdf); 346 + break; 346 347 } 347 348 default: 348 349 ret = -ENOTTY;
+3 -14
fs/open.c
··· 1416 1416 struct open_how *how) 1417 1417 { 1418 1418 struct open_flags op; 1419 - struct filename *tmp; 1420 - int err, fd; 1419 + struct filename *tmp __free(putname) = NULL; 1420 + int err; 1421 1421 1422 1422 err = build_open_flags(how, &op); 1423 1423 if (unlikely(err)) ··· 1427 1427 if (IS_ERR(tmp)) 1428 1428 return PTR_ERR(tmp); 1429 1429 1430 - fd = get_unused_fd_flags(how->flags); 1431 - if (likely(fd >= 0)) { 1432 - struct file *f = do_filp_open(dfd, tmp, &op); 1433 - if (IS_ERR(f)) { 1434 - put_unused_fd(fd); 1435 - fd = PTR_ERR(f); 1436 - } else { 1437 - fd_install(fd, f); 1438 - } 1439 - } 1440 - putname(tmp); 1441 - return fd; 1430 + return FD_ADD(how->flags, do_filp_open(dfd, tmp, &op)); 1442 1431 } 1443 1432 1444 1433 int do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+11 -18
fs/signalfd.c
··· 250 250 251 251 static int do_signalfd4(int ufd, sigset_t *mask, int flags) 252 252 { 253 - struct signalfd_ctx *ctx; 254 - 255 253 /* Check the SFD_* constants for consistency. */ 256 254 BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC); 257 255 BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK); ··· 261 263 signotset(mask); 262 264 263 265 if (ufd == -1) { 264 - struct file *file; 266 + int fd; 267 + struct signalfd_ctx *ctx __free(kfree) = NULL; 265 268 266 269 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 267 270 if (!ctx) ··· 270 271 271 272 ctx->sigmask = *mask; 272 273 273 - ufd = get_unused_fd_flags(flags & O_CLOEXEC); 274 - if (ufd < 0) { 275 - kfree(ctx); 276 - return ufd; 277 - } 278 - 279 - file = anon_inode_getfile_fmode("[signalfd]", &signalfd_fops, 280 - ctx, O_RDWR | (flags & O_NONBLOCK), 281 - FMODE_NOWAIT); 282 - if (IS_ERR(file)) { 283 - put_unused_fd(ufd); 284 - kfree(ctx); 285 - return PTR_ERR(file); 286 - } 287 - fd_install(ufd, file); 274 + fd = FD_ADD(flags & O_CLOEXEC, 275 + anon_inode_getfile_fmode( 276 + "[signalfd]", &signalfd_fops, ctx, 277 + O_RDWR | (flags & O_NONBLOCK), FMODE_NOWAIT)); 278 + if (fd >= 0) 279 + retain_and_null_ptr(ctx); 280 + return fd; 288 281 } else { 282 + struct signalfd_ctx *ctx; 283 + 289 284 CLASS(fd, f)(ufd); 290 285 if (fd_empty(f)) 291 286 return -EBADF;
+9 -20
fs/timerfd.c
··· 393 393 394 394 SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) 395 395 { 396 - int ufd; 397 - struct timerfd_ctx *ctx; 398 - struct file *file; 396 + struct timerfd_ctx *ctx __free(kfree) = NULL; 397 + int ret; 399 398 400 399 /* Check the TFD_* constants for consistency. */ 401 400 BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC); ··· 431 432 432 433 ctx->moffs = ktime_mono_to_real(0); 433 434 434 - ufd = get_unused_fd_flags(flags & TFD_SHARED_FCNTL_FLAGS); 435 - if (ufd < 0) { 436 - kfree(ctx); 437 - return ufd; 438 - } 439 - 440 - file = anon_inode_getfile_fmode("[timerfd]", &timerfd_fops, ctx, 441 - O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS), 442 - FMODE_NOWAIT); 443 - if (IS_ERR(file)) { 444 - put_unused_fd(ufd); 445 - kfree(ctx); 446 - return PTR_ERR(file); 447 - } 448 - 449 - fd_install(ufd, file); 450 - return ufd; 435 + ret = FD_ADD(flags & TFD_SHARED_FCNTL_FLAGS, 436 + anon_inode_getfile_fmode("[timerfd]", &timerfd_fops, ctx, 437 + O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS), 438 + FMODE_NOWAIT)); 439 + if (ret >= 0) 440 + retain_and_null_ptr(ctx); 441 + return ret; 451 442 } 452 443 453 444 static int do_timerfd_settime(int ufd, int flags,
+10 -20
fs/userfaultfd.c
··· 2111 2111 2112 2112 static int new_userfaultfd(int flags) 2113 2113 { 2114 - struct userfaultfd_ctx *ctx; 2115 - struct file *file; 2116 - int fd; 2114 + struct userfaultfd_ctx *ctx __free(kfree) = NULL; 2117 2115 2118 2116 VM_WARN_ON_ONCE(!current->mm); 2119 2117 ··· 2133 2135 atomic_set(&ctx->mmap_changing, 0); 2134 2136 ctx->mm = current->mm; 2135 2137 2136 - fd = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS); 2137 - if (fd < 0) 2138 - goto err_out; 2138 + FD_PREPARE(fdf, flags & UFFD_SHARED_FCNTL_FLAGS, 2139 + anon_inode_create_getfile("[userfaultfd]", &userfaultfd_fops, ctx, 2140 + O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), 2141 + NULL)); 2142 + if (fdf.err) 2143 + return fdf.err; 2139 2144 2140 - /* Create a new inode so that the LSM can block the creation. */ 2141 - file = anon_inode_create_getfile("[userfaultfd]", &userfaultfd_fops, ctx, 2142 - O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); 2143 - if (IS_ERR(file)) { 2144 - put_unused_fd(fd); 2145 - fd = PTR_ERR(file); 2146 - goto err_out; 2147 - } 2148 2145 /* prevent the mm struct to be freed */ 2149 2146 mmgrab(ctx->mm); 2150 - file->f_mode |= FMODE_NOWAIT; 2151 - fd_install(fd, file); 2152 - return fd; 2153 - err_out: 2154 - kmem_cache_free(userfaultfd_ctx_cachep, ctx); 2155 - return fd; 2147 + fd_prepare_file(fdf)->f_mode |= FMODE_NOWAIT; 2148 + retain_and_null_ptr(ctx); 2149 + return fd_publish(fdf); 2156 2150 } 2157 2151 2158 2152 static inline bool userfaultfd_syscall_allowed(int flags)
+17 -39
fs/xfs/xfs_handle.c
··· 233 233 xfs_fsop_handlereq_t *hreq) 234 234 { 235 235 const struct cred *cred = current_cred(); 236 - int error; 237 - int fd; 238 236 int permflag; 239 - struct file *filp; 240 237 struct inode *inode; 241 238 struct dentry *dentry; 242 239 fmode_t fmode; 243 - struct path path; 240 + struct path path __free(path_put) = {}; 244 241 245 242 if (!capable(CAP_SYS_ADMIN)) 246 243 return -EPERM; ··· 246 249 if (IS_ERR(dentry)) 247 250 return PTR_ERR(dentry); 248 251 inode = d_inode(dentry); 252 + path.dentry = dentry; 249 253 250 254 /* Restrict xfs_open_by_handle to directories & regular files. */ 251 - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { 252 - error = -EPERM; 253 - goto out_dput; 254 - } 255 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) 256 + return -EPERM; 255 257 256 258 #if BITS_PER_LONG != 32 257 259 hreq->oflags |= O_LARGEFILE; ··· 259 263 permflag = hreq->oflags; 260 264 fmode = OPEN_FMODE(permflag); 261 265 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && 262 - (fmode & FMODE_WRITE) && IS_APPEND(inode)) { 263 - error = -EPERM; 264 - goto out_dput; 265 - } 266 + (fmode & FMODE_WRITE) && IS_APPEND(inode)) 267 + return -EPERM; 266 268 267 - if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) { 268 - error = -EPERM; 269 - goto out_dput; 270 - } 269 + if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) 270 + return -EPERM; 271 271 272 272 /* Can't write directories. 
*/ 273 - if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) { 274 - error = -EISDIR; 275 - goto out_dput; 276 - } 273 + if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) 274 + return -EISDIR; 277 275 278 - fd = get_unused_fd_flags(0); 279 - if (fd < 0) { 280 - error = fd; 281 - goto out_dput; 282 - } 276 + path.mnt = mntget(parfilp->f_path.mnt); 283 277 284 - path.mnt = parfilp->f_path.mnt; 285 - path.dentry = dentry; 286 - filp = dentry_open(&path, hreq->oflags, cred); 287 - dput(dentry); 288 - if (IS_ERR(filp)) { 289 - put_unused_fd(fd); 290 - return PTR_ERR(filp); 291 - } 278 + FD_PREPARE(fdf, 0, dentry_open(&path, hreq->oflags, cred)); 279 + if (fdf.err) 280 + return fdf.err; 292 281 293 282 if (S_ISREG(inode->i_mode)) { 283 + struct file *filp = fd_prepare_file(fdf); 284 + 294 285 filp->f_flags |= O_NOATIME; 295 286 filp->f_mode |= FMODE_NOCMTIME; 296 287 } 297 288 298 - fd_install(fd, filp); 299 - return fd; 300 - 301 - out_dput: 302 - dput(dentry); 303 - return error; 289 + return fd_publish(fdf); 304 290 } 305 291 306 292 int
+7
include/linux/cleanup.h
··· 261 261 * CLASS(name, var)(args...): 262 262 * declare the variable @var as an instance of the named class 263 263 * 264 + * CLASS_INIT(name, var, init_expr): 265 + * declare the variable @var as an instance of the named class with 266 + * custom initialization expression. 267 + * 264 268 * Ex. 265 269 * 266 270 * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd) ··· 293 289 #define CLASS(_name, var) \ 294 290 class_##_name##_t var __cleanup(class_##_name##_destructor) = \ 295 291 class_##_name##_constructor 292 + 293 + #define CLASS_INIT(_name, _var, _init_expr) \ 294 + class_##_name##_t _var __cleanup(class_##_name##_destructor) = (_init_expr) 296 295 297 296 #define __scoped_class(_name, var, _label, args...) \ 298 297 for (CLASS(_name, var)(args); ; ({ goto _label; })) \
+126
include/linux/file.h
··· 127 127 128 128 extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; 129 129 130 + /* 131 + * fd_prepare: Combined fd + file allocation cleanup class. 132 + * @err: Error code to indicate if allocation succeeded. 133 + * @__fd: Allocated fd (may not be accessed directly) 134 + * @__file: Allocated struct file pointer (may not be accessed directly) 135 + * 136 + * Allocates an fd and a file together. On error paths, automatically cleans 137 + * up whichever resource was successfully allocated. Allows flexible file 138 + * allocation with different functions per usage. 139 + * 140 + * Do not use directly. 141 + */ 142 + struct fd_prepare { 143 + s32 err; 144 + s32 __fd; /* do not access directly */ 145 + struct file *__file; /* do not access directly */ 146 + }; 147 + 148 + /* Typedef for fd_prepare cleanup guards. */ 149 + typedef struct fd_prepare class_fd_prepare_t; 150 + 151 + /* 152 + * Accessors for fd_prepare class members. 153 + * _Generic() is used for zero-cost type safety. 154 + */ 155 + #define fd_prepare_fd(_fdf) \ 156 + (_Generic((_fdf), struct fd_prepare: (_fdf).__fd)) 157 + 158 + #define fd_prepare_file(_fdf) \ 159 + (_Generic((_fdf), struct fd_prepare: (_fdf).__file)) 160 + 161 + /* Do not use directly. */ 162 + static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf) 163 + { 164 + if (unlikely(fdf->err)) { 165 + if (likely(fdf->__fd >= 0)) 166 + put_unused_fd(fdf->__fd); 167 + if (unlikely(!IS_ERR_OR_NULL(fdf->__file))) 168 + fput(fdf->__file); 169 + } 170 + } 171 + 172 + /* Do not use directly. 
*/ 173 + static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf) 174 + { 175 + if (unlikely(fdf->err)) 176 + return fdf->err; 177 + if (unlikely(fdf->__fd < 0)) 178 + return fdf->__fd; 179 + if (unlikely(IS_ERR(fdf->__file))) 180 + return PTR_ERR(fdf->__file); 181 + if (unlikely(!fdf->__file)) 182 + return -ENOMEM; 183 + return 0; 184 + } 185 + 186 + /* 187 + * __FD_PREPARE_INIT - Helper to initialize fd_prepare class. 188 + * @_fd_flags: flags for get_unused_fd_flags() 189 + * @_file_owned: expression that returns struct file * 190 + * 191 + * Returns a struct fd_prepare with fd, file, and err set. 192 + * If fd allocation fails, fd will be negative and err will be set. If 193 + * fd succeeds but file_init_expr fails, file will be ERR_PTR and err 194 + * will be set. The err field is the single source of truth for error 195 + * checking. 196 + */ 197 + #define __FD_PREPARE_INIT(_fd_flags, _file_owned) \ 198 + ({ \ 199 + struct fd_prepare fdf = { \ 200 + .__fd = get_unused_fd_flags((_fd_flags)), \ 201 + }; \ 202 + if (likely(fdf.__fd >= 0)) \ 203 + fdf.__file = (_file_owned); \ 204 + fdf.err = ACQUIRE_ERR(fd_prepare, &fdf); \ 205 + fdf; \ 206 + }) 207 + 208 + /* 209 + * FD_PREPARE - Macro to declare and initialize an fd_prepare variable. 210 + * 211 + * Declares and initializes an fd_prepare variable with automatic 212 + * cleanup. No separate scope required - cleanup happens when variable 213 + * goes out of scope. 214 + * 215 + * @_fdf: name of struct fd_prepare variable to define 216 + * @_fd_flags: flags for get_unused_fd_flags() 217 + * @_file_owned: struct file to take ownership of (can be expression) 218 + */ 219 + #define FD_PREPARE(_fdf, _fd_flags, _file_owned) \ 220 + CLASS_INIT(fd_prepare, _fdf, __FD_PREPARE_INIT(_fd_flags, _file_owned)) 221 + 222 + /* 223 + * fd_publish - Publish prepared fd and file to the fd table. 
224 + * @_fdf: struct fd_prepare variable 225 + */ 226 + #define fd_publish(_fdf) \ 227 + ({ \ 228 + struct fd_prepare *fdp = &(_fdf); \ 229 + VFS_WARN_ON_ONCE(fdp->err); \ 230 + VFS_WARN_ON_ONCE(fdp->__fd < 0); \ 231 + VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \ 232 + fd_install(fdp->__fd, fdp->__file); \ 233 + fdp->__fd; \ 234 + }) 235 + 236 + /* Do not use directly. */ 237 + #define __FD_ADD(_fdf, _fd_flags, _file_owned) \ 238 + ({ \ 239 + FD_PREPARE(_fdf, _fd_flags, _file_owned); \ 240 + s32 ret = _fdf.err; \ 241 + if (likely(!ret)) \ 242 + ret = fd_publish(_fdf); \ 243 + ret; \ 244 + }) 245 + 246 + /* 247 + * FD_ADD - Allocate and install an fd and file in one step. 248 + * @_fd_flags: flags for get_unused_fd_flags() 249 + * @_file_owned: struct file to take ownership of 250 + * 251 + * Returns the allocated fd number, or negative error code on failure. 252 + */ 253 + #define FD_ADD(_fd_flags, _file_owned) \ 254 + __FD_ADD(__UNIQUE_ID(fd_prepare), _fd_flags, _file_owned) 255 + 130 256 #endif /* __LINUX_FILE_H */
+15 -28
io_uring/mock_file.c
··· 211 211 const struct file_operations *fops = &io_mock_fops; 212 212 const struct io_uring_sqe *sqe = cmd->sqe; 213 213 struct io_uring_mock_create mc, __user *uarg; 214 - struct io_mock_file *mf = NULL; 215 - struct file *file = NULL; 214 + struct file *file; 215 + struct io_mock_file *mf __free(kfree) = NULL; 216 216 size_t uarg_size; 217 - int fd = -1, ret; 218 217 219 218 /* 220 219 * It's a testing only driver that allows exercising edge cases ··· 245 246 if (!mf) 246 247 return -ENOMEM; 247 248 248 - ret = fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); 249 - if (fd < 0) 250 - goto fail; 251 - 252 249 init_waitqueue_head(&mf->poll_wq); 253 250 mf->size = mc.file_size; 254 251 mf->rw_delay_ns = mc.rw_delay_ns; ··· 253 258 mf->pollable = true; 254 259 } 255 260 256 - file = anon_inode_create_getfile("[io_uring_mock]", fops, 257 - mf, O_RDWR | O_CLOEXEC, NULL); 258 - if (IS_ERR(file)) { 259 - ret = PTR_ERR(file); 260 - goto fail; 261 - } 261 + FD_PREPARE(fdf, O_RDWR | O_CLOEXEC, 262 + anon_inode_create_getfile("[io_uring_mock]", fops, mf, 263 + O_RDWR | O_CLOEXEC, NULL)); 264 + if (fdf.err) 265 + return fdf.err; 262 266 263 - file->f_mode |= FMODE_READ | FMODE_CAN_READ | 264 - FMODE_WRITE | FMODE_CAN_WRITE | 265 - FMODE_LSEEK; 267 + retain_and_null_ptr(mf); 268 + file = fd_prepare_file(fdf); 269 + file->f_mode |= FMODE_READ | FMODE_CAN_READ | FMODE_WRITE | 270 + FMODE_CAN_WRITE | FMODE_LSEEK; 266 271 if (mc.flags & IORING_MOCK_CREATE_F_SUPPORT_NOWAIT) 267 272 file->f_mode |= FMODE_NOWAIT; 268 273 269 - mc.out_fd = fd; 270 - if (copy_to_user(uarg, &mc, uarg_size)) { 271 - fput(file); 272 - ret = -EFAULT; 273 - goto fail; 274 - } 274 + mc.out_fd = fd_prepare_fd(fdf); 275 + if (copy_to_user(uarg, &mc, uarg_size)) 276 + return -EFAULT; 275 277 276 - fd_install(fd, file); 278 + fd_publish(fdf); 277 279 return 0; 278 - fail: 279 - if (fd >= 0) 280 - put_unused_fd(fd); 281 - kfree(mf); 282 - return ret; 283 280 } 284 281 285 282 static int io_probe_mock(struct 
io_uring_cmd *cmd)
+26 -31
ipc/mqueue.c
··· 892 892 return inode_permission(&nop_mnt_idmap, d_inode(dentry), acc); 893 893 } 894 894 895 + static struct file *mqueue_file_open(struct filename *name, 896 + struct vfsmount *mnt, int oflag, bool ro, 897 + umode_t mode, struct mq_attr *attr) 898 + { 899 + struct dentry *dentry; 900 + struct file *file; 901 + int ret; 902 + 903 + dentry = start_creating_noperm(mnt->mnt_root, &QSTR(name->name)); 904 + if (IS_ERR(dentry)) 905 + return ERR_CAST(dentry); 906 + 907 + ret = prepare_open(dentry, oflag, ro, mode, name, attr); 908 + file = ERR_PTR(ret); 909 + if (!ret) { 910 + const struct path path = { .mnt = mnt, .dentry = dentry }; 911 + file = dentry_open(&path, oflag, current_cred()); 912 + } 913 + 914 + end_creating(dentry); 915 + return file; 916 + } 917 + 895 918 static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, 896 919 struct mq_attr *attr) 897 920 { 921 + struct filename *name __free(putname) = NULL;; 898 922 struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt; 899 - struct dentry *root = mnt->mnt_root; 900 - struct filename *name; 901 - struct path path; 902 - int fd, error; 903 - int ro; 923 + int fd, ro; 904 924 905 925 audit_mq_open(oflag, mode, attr); 906 926 ··· 928 908 if (IS_ERR(name)) 929 909 return PTR_ERR(name); 930 910 931 - fd = get_unused_fd_flags(O_CLOEXEC); 932 - if (fd < 0) 933 - goto out_putname; 934 - 935 911 ro = mnt_want_write(mnt); /* we'll drop it in any case */ 936 - path.dentry = start_creating_noperm(root, &QSTR(name->name)); 937 - if (IS_ERR(path.dentry)) { 938 - error = PTR_ERR(path.dentry); 939 - goto out_putfd; 940 - } 941 - path.mnt = mnt; 942 - error = prepare_open(path.dentry, oflag, ro, mode, name, attr); 943 - if (!error) { 944 - struct file *file = dentry_open(&path, oflag, current_cred()); 945 - if (!IS_ERR(file)) 946 - fd_install(fd, file); 947 - else 948 - error = PTR_ERR(file); 949 - } 950 - out_putfd: 951 - if (error) { 952 - put_unused_fd(fd); 953 - fd = error; 954 - } 955 - 
end_creating(path.dentry); 912 + fd = FD_ADD(O_CLOEXEC, mqueue_file_open(name, mnt, oflag, ro, mode, attr)); 956 913 if (!ro) 957 914 mnt_drop_write(mnt); 958 - out_putname: 959 - putname(name); 960 915 return fd; 961 916 } 962 917
+7 -20
kernel/bpf/bpf_iter.c
··· 634 634 int bpf_iter_new_fd(struct bpf_link *link) 635 635 { 636 636 struct bpf_iter_link *iter_link; 637 - struct file *file; 638 637 unsigned int flags; 639 - int err, fd; 638 + int err; 640 639 641 640 if (link->ops != &bpf_iter_link_lops) 642 641 return -EINVAL; 643 642 644 643 flags = O_RDONLY | O_CLOEXEC; 645 - fd = get_unused_fd_flags(flags); 646 - if (fd < 0) 647 - return fd; 648 644 649 - file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags); 650 - if (IS_ERR(file)) { 651 - err = PTR_ERR(file); 652 - goto free_fd; 653 - } 645 + FD_PREPARE(fdf, flags, anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags)); 646 + if (fdf.err) 647 + return fdf.err; 654 648 655 649 iter_link = container_of(link, struct bpf_iter_link, link); 656 - err = prepare_seq_file(file, iter_link); 650 + err = prepare_seq_file(fd_prepare_file(fdf), iter_link); 657 651 if (err) 658 - goto free_file; 652 + return err; /* Automatic cleanup handles fput */ 659 653 660 - fd_install(fd, file); 661 - return fd; 662 - 663 - free_file: 664 - fput(file); 665 - free_fd: 666 - put_unused_fd(fd); 667 - return err; 654 + return fd_publish(fdf); 668 655 } 669 656 670 657 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
+15 -32
kernel/bpf/token.c
··· 110 110 111 111 int bpf_token_create(union bpf_attr *attr) 112 112 { 113 + struct bpf_token *token __free(kfree) = NULL; 113 114 struct bpf_mount_opts *mnt_opts; 114 - struct bpf_token *token = NULL; 115 115 struct user_namespace *userns; 116 116 struct inode *inode; 117 - struct file *file; 118 117 CLASS(fd, f)(attr->token_create.bpffs_fd); 119 118 struct path path; 120 119 struct super_block *sb; 121 120 umode_t mode; 122 - int err, fd; 121 + int err; 123 122 124 123 if (fd_empty(f)) 125 124 return -EBADF; ··· 165 166 inode->i_fop = &bpf_token_fops; 166 167 clear_nlink(inode); /* make sure it is unlinked */ 167 168 168 - file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops); 169 - if (IS_ERR(file)) { 170 - iput(inode); 171 - return PTR_ERR(file); 172 - } 169 + FD_PREPARE(fdf, O_CLOEXEC, 170 + alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, 171 + O_RDWR, &bpf_token_fops)); 172 + if (fdf.err) 173 + return fdf.err; 173 174 174 175 token = kzalloc(sizeof(*token), GFP_USER); 175 - if (!token) { 176 - err = -ENOMEM; 177 - goto out_file; 178 - } 176 + if (!token) 177 + return -ENOMEM; 179 178 180 179 atomic64_set(&token->refcnt, 1); 181 180 182 - /* remember bpffs owning userns for future ns_capable() checks */ 183 - token->userns = get_user_ns(userns); 184 - 181 + /* remember bpffs owning userns for future ns_capable() checks. 
*/ 182 + token->userns = userns; 185 183 token->allowed_cmds = mnt_opts->delegate_cmds; 186 184 token->allowed_maps = mnt_opts->delegate_maps; 187 185 token->allowed_progs = mnt_opts->delegate_progs; ··· 186 190 187 191 err = security_bpf_token_create(token, attr, &path); 188 192 if (err) 189 - goto out_token; 193 + return err; 190 194 191 - fd = get_unused_fd_flags(O_CLOEXEC); 192 - if (fd < 0) { 193 - err = fd; 194 - goto out_token; 195 - } 196 - 197 - file->private_data = token; 198 - fd_install(fd, file); 199 - 200 - return fd; 201 - 202 - out_token: 203 - bpf_token_free(token); 204 - out_file: 205 - fput(file); 206 - return err; 195 + get_user_ns(token->userns); 196 + fd_prepare_file(fdf)->private_data = no_free_ptr(token); 197 + return fd_publish(fdf); 207 198 } 208 199 209 200 int bpf_token_get_info_by_fd(struct bpf_token *token,
+5 -24
mm/memfd.c
··· 497 497 const char __user *, uname, 498 498 unsigned int, flags) 499 499 { 500 - struct file *file; 501 - int fd, error; 502 - char *name; 500 + char *name __free(kfree) = NULL; 501 + unsigned int fd_flags; 502 + int error; 503 503 504 504 error = sanitize_flags(&flags); 505 505 if (error < 0) ··· 509 509 if (IS_ERR(name)) 510 510 return PTR_ERR(name); 511 511 512 - fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0); 513 - if (fd < 0) { 514 - error = fd; 515 - goto err_free_name; 516 - } 517 - 518 - file = alloc_file(name, flags); 519 - if (IS_ERR(file)) { 520 - error = PTR_ERR(file); 521 - goto err_free_fd; 522 - } 523 - 524 - fd_install(fd, file); 525 - kfree(name); 526 - return fd; 527 - 528 - err_free_fd: 529 - put_unused_fd(fd); 530 - err_free_name: 531 - kfree(name); 532 - return error; 512 + fd_flags = (flags & MFD_CLOEXEC) ? O_CLOEXEC : 0; 513 + return FD_ADD(fd_flags, alloc_file(name, flags)); 533 514 }
+1 -19
mm/secretmem.c
··· 224 224 225 225 SYSCALL_DEFINE1(memfd_secret, unsigned int, flags) 226 226 { 227 - struct file *file; 228 - int fd, err; 229 - 230 227 /* make sure local flags do not confict with global fcntl.h */ 231 228 BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC); 232 229 ··· 235 238 if (atomic_read(&secretmem_users) < 0) 236 239 return -ENFILE; 237 240 238 - fd = get_unused_fd_flags(flags & O_CLOEXEC); 239 - if (fd < 0) 240 - return fd; 241 - 242 - file = secretmem_file_create(flags); 243 - if (IS_ERR(file)) { 244 - err = PTR_ERR(file); 245 - goto err_put_fd; 246 - } 247 - 248 - fd_install(fd, file); 249 - return fd; 250 - 251 - err_put_fd: 252 - put_unused_fd(fd); 253 - return err; 241 + return FD_ADD(flags & O_CLOEXEC, secretmem_file_create(flags)); 254 242 } 255 243 256 244 static int secretmem_init_fs_context(struct fs_context *fc)
+17 -19
net/handshake/netlink.c
··· 93 93 struct handshake_net *hn = handshake_pernet(net); 94 94 struct handshake_req *req = NULL; 95 95 struct socket *sock; 96 - int class, fd, err; 96 + int class, err; 97 97 98 98 err = -EOPNOTSUPP; 99 99 if (!hn) ··· 106 106 107 107 err = -EAGAIN; 108 108 req = handshake_req_next(hn, class); 109 - if (!req) 110 - goto out_status; 109 + if (req) { 110 + sock = req->hr_sk->sk_socket; 111 111 112 - sock = req->hr_sk->sk_socket; 113 - fd = get_unused_fd_flags(O_CLOEXEC); 114 - if (fd < 0) { 115 - err = fd; 116 - goto out_complete; 112 + FD_PREPARE(fdf, O_CLOEXEC, sock->file); 113 + if (fdf.err) { 114 + err = fdf.err; 115 + goto out_complete; 116 + } 117 + 118 + get_file(sock->file); /* FD_PREPARE() consumes a reference. */ 119 + err = req->hr_proto->hp_accept(req, info, fd_prepare_fd(fdf)); 120 + if (err) 121 + goto out_complete; /* Automatic cleanup handles fput */ 122 + 123 + trace_handshake_cmd_accept(net, req, req->hr_sk, fd_prepare_fd(fdf)); 124 + fd_publish(fdf); 125 + return 0; 117 126 } 118 - 119 - err = req->hr_proto->hp_accept(req, info, fd); 120 - if (err) { 121 - put_unused_fd(fd); 122 - goto out_complete; 123 - } 124 - 125 - fd_install(fd, get_file(sock->file)); 126 - 127 - trace_handshake_cmd_accept(net, req, req->hr_sk, fd); 128 - return 0; 129 127 130 128 out_complete: 131 129 handshake_complete(req, -EIO, NULL);
+7 -15
net/kcm/kcmsock.c
··· 1560 1560 } 1561 1561 case SIOCKCMCLONE: { 1562 1562 struct kcm_clone info; 1563 - struct file *file; 1564 1563 1565 - info.fd = get_unused_fd_flags(0); 1566 - if (unlikely(info.fd < 0)) 1567 - return info.fd; 1564 + FD_PREPARE(fdf, 0, kcm_clone(sock)); 1565 + if (fdf.err) 1566 + return fdf.err; 1568 1567 1569 - file = kcm_clone(sock); 1570 - if (IS_ERR(file)) { 1571 - put_unused_fd(info.fd); 1572 - return PTR_ERR(file); 1573 - } 1574 - if (copy_to_user((void __user *)arg, &info, 1575 - sizeof(info))) { 1576 - put_unused_fd(info.fd); 1577 - fput(file); 1568 + info.fd = fd_prepare_fd(fdf); 1569 + if (copy_to_user((void __user *)arg, &info, sizeof(info))) 1578 1570 return -EFAULT; 1579 - } 1580 - fd_install(info.fd, file); 1571 + 1572 + fd_publish(fdf); 1581 1573 err = 0; 1582 1574 break; 1583 1575 }
+6 -28
net/socket.c
··· 503 503 504 504 static int sock_map_fd(struct socket *sock, int flags) 505 505 { 506 - struct file *newfile; 507 - int fd = get_unused_fd_flags(flags); 508 - if (unlikely(fd < 0)) { 506 + int fd; 507 + 508 + fd = FD_ADD(flags, sock_alloc_file(sock, flags, NULL)); 509 + if (fd < 0) 509 510 sock_release(sock); 510 - return fd; 511 - } 512 - 513 - newfile = sock_alloc_file(sock, flags, NULL); 514 - if (!IS_ERR(newfile)) { 515 - fd_install(fd, newfile); 516 - return fd; 517 - } 518 - 519 - put_unused_fd(fd); 520 - return PTR_ERR(newfile); 511 + return fd; 521 512 } 522 513 523 514 /** ··· 2003 2012 int __user *upeer_addrlen, int flags) 2004 2013 { 2005 2014 struct proto_accept_arg arg = { }; 2006 - struct file *newfile; 2007 - int newfd; 2008 2015 2009 2016 if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) 2010 2017 return -EINVAL; ··· 2010 2021 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) 2011 2022 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; 2012 2023 2013 - newfd = get_unused_fd_flags(flags); 2014 - if (unlikely(newfd < 0)) 2015 - return newfd; 2016 - 2017 - newfile = do_accept(file, &arg, upeer_sockaddr, upeer_addrlen, 2018 - flags); 2019 - if (IS_ERR(newfile)) { 2020 - put_unused_fd(newfd); 2021 - return PTR_ERR(newfile); 2022 - } 2023 - fd_install(newfd, newfile); 2024 - return newfd; 2024 + return FD_ADD(flags, do_accept(file, &arg, upeer_sockaddr, upeer_addrlen, flags)); 2025 2025 } 2026 2026 2027 2027 /*
+1 -15
net/unix/af_unix.c
··· 3276 3276 3277 3277 static int unix_open_file(struct sock *sk) 3278 3278 { 3279 - struct file *f; 3280 - int fd; 3281 - 3282 3279 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 3283 3280 return -EPERM; 3284 3281 ··· 3285 3288 if (!unix_sk(sk)->path.dentry) 3286 3289 return -ENOENT; 3287 3290 3288 - fd = get_unused_fd_flags(O_CLOEXEC); 3289 - if (fd < 0) 3290 - return fd; 3291 - 3292 - f = dentry_open(&unix_sk(sk)->path, O_PATH, current_cred()); 3293 - if (IS_ERR(f)) { 3294 - put_unused_fd(fd); 3295 - return PTR_ERR(f); 3296 - } 3297 - 3298 - fd_install(fd, f); 3299 - return fd; 3291 + return FD_ADD(O_CLOEXEC, dentry_open(&unix_sk(sk)->path, O_PATH, current_cred())); 3300 3292 } 3301 3293 3302 3294 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)