Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'io_uring-5.7-2020-05-01' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

- Fix for statx not grabbing the file table, which made AT_EMPTY_PATH
  fail (a userspace sketch of that case follows this list)

- Cover a few cases where async poll can handle the retry, eliminating
  the need for an async thread (see the pipe-read sketch after the
  shortlog below)

- Fallback request busy/free fix (Bijan)

- Fix for a syzbot-reported SQPOLL thread exit hang on non-preempt
  kernels (Xiaoguang)

- Fix an extra put of req in sync_file_range() (Pavel)

- Always punt splice to async. We'll improve this for 5.8, but for 5.7
  we wanted to eliminate the inode mutex lock from the non-blocking
  path (Pavel)
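
For reference, the statx fix covers requests like the one below: statx on
a valid fd with an empty path and AT_EMPTY_PATH has to resolve the fd
through the task's file table, which an async punt previously did not
grab. A minimal userspace sketch, assuming liburing is installed; the
file path is arbitrary:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct statx stx;
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	fd = open("/etc/hostname", O_RDONLY);	/* any readable file works */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* empty path + AT_EMPTY_PATH stats the fd itself; resolving a
	 * valid fd needs the task's file table, which is exactly what
	 * the punted request failed to take before this fix */
	io_uring_prep_statx(sqe, fd, "", AT_EMPTY_PATH, STATX_SIZE, &stx);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("statx: res=%d size=%llu\n", cqe->res,
		       (unsigned long long)stx.stx_size);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}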

* tag 'io_uring-5.7-2020-05-01' of git://git.kernel.dk/linux-block:
io_uring: punt splice async because of inode mutex
io_uring: check non-sync defer_list carefully
io_uring: fix extra put in sync_file_range()
io_uring: use cond_resched() in io_ring_ctx_wait_and_kill()
io_uring: use proper references for fallback_req locking
io_uring: only force async punt if poll based retry can't handle it
io_uring: enable poll retry for any file with ->read_iter / ->write_iter
io_uring: statx must grab the file table for valid fd
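
The two poll-retry commits above change what happens when a request
would block: for any file that provides ->read_iter/->write_iter and
supports FMODE_NOWAIT, the kernel now arms its internal poll handler
instead of punting to an async worker thread. From userspace nothing
changes; the completion simply arrives once the fd becomes ready. A
minimal sketch, assuming liburing; a read from an empty pipe is exactly
the kind of request that now retries via poll:

#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[16];
	int fds[2];

	if (pipe(fds) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	io_uring_submit(&ring);		/* pipe is empty: kernel must retry */

	write(fds[1], "hi", 2);		/* makes the read side readable */

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read completed: res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}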

+31 -27
fs/io_uring.c
···
 	REQ_F_OVERFLOW_BIT,
 	REQ_F_POLLED_BIT,
 	REQ_F_BUFFER_SELECTED_BIT,
+	REQ_F_NO_FILE_TABLE_BIT,

 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
···
 	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
 	/* buffer already selected */
 	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
+	/* doesn't need file table for this request */
+	REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
 };

 struct async_poll {
···
 		.needs_file = 1,
 		.fd_non_neg = 1,
 		.needs_fs = 1,
+		.file_table = 1,
 	},
 	[IORING_OP_READ] = {
 		.needs_mm = 1,
···
 	struct io_kiocb *req;

 	req = ctx->fallback_req;
-	if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
 		return req;

 	return NULL;
···
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
 	else
-		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+		clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
 }

 struct req_batch {
···
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file)
+static bool io_file_supports_async(struct file *file, int rw)
 {
 	umode_t mode = file_inode(file)->i_mode;
···
 	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
 		return true;

-	return false;
+	if (!(file->f_mode & FMODE_NOWAIT))
+		return false;
+
+	if (rw == READ)
+		return file->f_op->read_iter != NULL;
+
+	return file->f_op->write_iter != NULL;
 }

 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
···
 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
 	 * we know to async punt it even if it was opened O_NONBLOCK
 	 */
-	if (force_nonblock && !io_file_supports_async(req->file))
+	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;

 	iov_count = iov_iter_count(&iter);
···
 		if (ret)
 			goto out_free;
 		/* any defer here is final, must blocking retry */
-		if (!(req->flags & REQ_F_NOWAIT))
+		if (!(req->flags & REQ_F_NOWAIT) &&
+		    !file_can_poll(req->file))
 			req->flags |= REQ_F_MUST_PUNT;
 		return -EAGAIN;
 	}
···
 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
 	 * we know to async punt it even if it was opened O_NONBLOCK
 	 */
-	if (force_nonblock && !io_file_supports_async(req->file))
+	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
 		goto copy_iov;

 	/* file path doesn't support NOWAIT for non-direct_IO */
···
 		if (ret)
 			goto out_free;
 		/* any defer here is final, must blocking retry */
-		req->flags |= REQ_F_MUST_PUNT;
+		if (!file_can_poll(req->file))
+			req->flags |= REQ_F_MUST_PUNT;
 		return -EAGAIN;
 	}
 }
···
 	return 0;
 }

-static bool io_splice_punt(struct file *file)
-{
-	if (get_pipe_info(file))
-		return false;
-	if (!io_file_supports_async(file))
-		return true;
-	return !(file->f_flags & O_NONBLOCK);
-}
-
 static int io_splice(struct io_kiocb *req, bool force_nonblock)
 {
 	struct io_splice *sp = &req->splice;
···
 	loff_t *poff_in, *poff_out;
 	long ret;

-	if (force_nonblock) {
-		if (io_splice_punt(in) || io_splice_punt(out))
-			return -EAGAIN;
-		flags |= SPLICE_F_NONBLOCK;
-	}
+	if (force_nonblock)
+		return -EAGAIN;

 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
···
 	struct kstat stat;
 	int ret;

-	if (force_nonblock)
+	if (force_nonblock) {
+		/* only need file table for an actual valid fd */
+		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
+			req->flags |= REQ_F_NO_FILE_TABLE;
 		return -EAGAIN;
+	}

 	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
 		return -EINVAL;
···
 	if (io_req_cancelled(req))
 		return;
 	__io_sync_file_range(req);
-	io_put_req(req); /* put submission ref */
+	io_steal_work(req, workptr);
 }

 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
···
 	int ret;

 	/* Still need defer if there is pending req in defer list. */
-	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
+	if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
 		return 0;

 	if (!req->io && io_alloc_async_ctx(req))
···
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;

-	if (req->work.files)
+	if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
 		return 0;
 	if (!ctx->ring_file)
 		return -EBADF;
···
 	 * it could cause shutdown to hang.
 	 */
 	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
-		cpu_relax();
+		cond_resched();

 	io_kill_timeouts(ctx);
 	io_poll_remove_all(ctx);
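
The final hunk swaps cpu_relax() for cond_resched(): on a non-preempt
kernel a bare spin never gives the SQPOLL thread a chance to run and
park itself, so shutdown can hang. Below is a loose userspace analogue
of that pattern, not the kernel code itself; sched_yield() stands in
for cond_resched(), and the worker thread plays the role of the SQPOLL
thread the waiter is watching:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int done;

static void *worker(void *arg)
{
	(void)arg;
	atomic_store(&done, 1);	/* stand-in for the SQPOLL thread parking */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	/* A bare spin here (the cpu_relax() pattern) can starve the
	 * worker when both threads are confined to one CPU under FIFO
	 * scheduling; yielding, like cond_resched() in-kernel, lets
	 * the other side run so the condition can become true. */
	while (!atomic_load(&done))
		sched_yield();
	pthread_join(t, NULL);
	puts("worker observed; shutdown can proceed");
	return 0;
}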