Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'block-5.6-2020-02-16' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"Not a lot here, which is great, basically just three small bcache
fixes from Coly, and four NVMe fixes via Keith"

* tag 'block-5.6-2020-02-16' of git://git.kernel.dk/linux-block:
  nvme: fix the parameter order for nvme_get_log in nvme_get_fw_slot_info
  nvme/pci: move cqe check after device shutdown
  nvme: prevent warning triggered by nvme_stop_keep_alive
  nvme/tcp: fix bug on double requeue when send fails
  bcache: remove macro nr_to_fifo_front()
  bcache: Revert "bcache: shrink btree node cache after bch_btree_check()"
  bcache: ignore pending signals when creating gc and allocator thread

8 files changed: +63 -38

drivers/md/bcache/alloc.c (+16 -2)
@@ -67,6 +67,7 @@
 #include <linux/blkdev.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
+#include <linux/sched/signal.h>
 #include <trace/events/bcache.h>
 
 #define MAX_OPEN_BUCKETS 128
@@ -734,8 +733,21 @@
 
 int bch_cache_allocator_start(struct cache *ca)
 {
-        struct task_struct *k = kthread_run(bch_allocator_thread,
-                                            ca, "bcache_allocator");
+        struct task_struct *k;
+
+        /*
+         * In case previous btree check operation occupies too many
+         * system memory for bcache btree node cache, and the
+         * registering process is selected by OOM killer. Here just
+         * ignore the SIGKILL sent by OOM killer if there is, to
+         * avoid kthread_run() being failed by pending signals. The
+         * bcache registering process will exit after the registration
+         * done.
+         */
+        if (signal_pending(current))
+                flush_signals(current);
+
+        k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
         if (IS_ERR(k))
                 return PTR_ERR(k);
 
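
The identical signal-flush snippet reappears in btree.c below. Were one refactoring (nothing in this pull does), the shared pattern would make a natural helper; a minimal sketch with an invented name:

#include <linux/sched/signal.h>

/*
 * Hypothetical helper, not part of this series: the "drop the OOM
 * killer's pending SIGKILL before kthread_run()" snippet shared by
 * bch_cache_allocator_start() and bch_gc_thread_start(). A pending
 * fatal signal makes kthread_create() give up with ERR_PTR(-EINTR),
 * so the short-lived registration process simply discards it.
 */
static inline void bch_ignore_pending_sigkill(void)
{
        if (signal_pending(current))
                flush_signals(current);
}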

drivers/md/bcache/btree.c (+13)
@@ -34,6 +34,7 @@
 #include <linux/random.h>
 #include <linux/rcupdate.h>
 #include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
 #include <linux/rculist.h>
 #include <linux/delay.h>
 #include <trace/events/bcache.h>
@@ -1914,6 +1913,18 @@
 
 int bch_gc_thread_start(struct cache_set *c)
 {
+        /*
+         * In case previous btree check operation occupies too many
+         * system memory for bcache btree node cache, and the
+         * registering process is selected by OOM killer. Here just
+         * ignore the SIGKILL sent by OOM killer if there is, to
+         * avoid kthread_run() being failed by pending signals. The
+         * bcache registering process will exit after the registration
+         * done.
+         */
+        if (signal_pending(current))
+                flush_signals(current);
+
         c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
         return PTR_ERR_OR_ZERO(c->gc_thread);
 }
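
Both call sites report kthread_run() failures through the kernel's ERR_PTR encoding: PTR_ERR() in alloc.c above, PTR_ERR_OR_ZERO() here. For readers unfamiliar with the idiom, a self-contained userspace re-implementation; the real definitions live in include/linux/err.h:

#include <stdio.h>

/* Sketch of the include/linux/err.h idiom: error codes ride inside
 * pointer return values, mapped into the top 4095 bytes of the
 * address space, which no valid pointer can occupy. */
#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
        return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

int main(void)
{
        void *k = ERR_PTR(-4);  /* like kthread_run() failing with -EINTR */

        if (IS_ERR(k))
                printf("thread creation failed: %ld\n", PTR_ERR(k));
        printf("%ld\n", PTR_ERR_OR_ZERO(k));    /* -4; 0 for a valid pointer */
        return 0;
}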

drivers/md/bcache/journal.c (+2 -5)
@@ -417,8 +417,6 @@
 
 /* Journalling */
 
-#define nr_to_fifo_front(p, front_p, mask)      (((p) - (front_p)) & (mask))
-
 static void btree_flush_write(struct cache_set *c)
 {
         struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
@@ -508,9 +510,8 @@
                  * journal entry can be reclaimed). These selected nodes
                  * will be ignored and skipped in the folowing for-loop.
                  */
-                if (nr_to_fifo_front(btree_current_write(b)->journal,
-                                     fifo_front_p,
-                                     mask) != 0) {
+                if (((btree_current_write(b)->journal - fifo_front_p) &
+                     mask) != 0) {
                         mutex_unlock(&b->write_lock);
                         continue;
                 }
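
The open-coded test relies on the journal fifo having a power-of-two size, so the unsigned index difference, masked, is the entry's true distance from the fifo front, and is zero exactly when the entry sits at the front. A standalone demonstration with invented values:

#include <stdio.h>

int main(void)
{
        unsigned int mask = 8 - 1;      /* fifo of 8 slots, power of two */
        unsigned int front = 6;         /* current fifo front */
        unsigned int idx = 1;           /* an entry that wrapped past slot 7 */

        /* Unsigned wraparound keeps this correct even when idx < front:
         * the same test btree_flush_write() performs after the patch. */
        printf("%u\n", (idx - front) & mask);   /* 3: 6 -> 7 -> 0 -> 1 */
        printf("%u\n", (front - front) & mask); /* 0: at the front */
        return 0;
}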

drivers/md/bcache/super.c (-17)
@@ -1917,23 +1917,6 @@
                 if (bch_btree_check(c))
                         goto err;
 
-                /*
-                 * bch_btree_check() may occupy too much system memory which
-                 * has negative effects to user space application (e.g. data
-                 * base) performance. Shrink the mca cache memory proactively
-                 * here to avoid competing memory with user space workloads..
-                 */
-                if (!c->shrinker_disabled) {
-                        struct shrink_control sc;
-
-                        sc.gfp_mask = GFP_KERNEL;
-                        sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
-                        /* first run to clear b->accessed tag */
-                        c->shrink.scan_objects(&c->shrink, &sc);
-                        /* second run to reap non-accessed nodes */
-                        c->shrink.scan_objects(&c->shrink, &sc);
-                }
-
                 bch_journal_mark(c, &journal);
                 bch_initial_gc_finish(c);
                 pr_debug("btree_check() done");

drivers/nvme/host/core.c (+6 -6)
@@ -66,8 +66,8 @@
  * nvme_reset_wq - hosts nvme reset works
  * nvme_delete_wq - hosts nvme delete works
  *
- * nvme_wq will host works such are scan, aen handling, fw activation,
- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive, periodic reconnects etc. nvme_reset_wq
  * runs reset works which also flush works hosted on nvme_wq for
  * serialization purposes. nvme_delete_wq host controller deletion
  * works which flush reset works for serialization.
@@ -976,7 +976,7 @@
         startka = true;
         spin_unlock_irqrestore(&ctrl->lock, flags);
         if (startka)
-                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+                queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1006,7 +1006,7 @@
                 dev_dbg(ctrl->device,
                         "reschedule traffic based keep-alive timer\n");
                 ctrl->comp_seen = false;
-                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+                queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
                 return;
         }
 
@@ -1023,7 +1023,7 @@
         if (unlikely(ctrl->kato == 0))
                 return;
 
-        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+        queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
@@ -3867,7 +3867,7 @@
         if (!log)
                 return;
 
-        if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
+        if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
                         sizeof(*log), 0))
                 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
         kfree(log);
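
The keep-alive hunks are smaller than they look: schedule_delayed_work() is just queue_delayed_work() hardwired to the shared system_wq, as its definition (paraphrased from include/linux/workqueue.h) shows, so the fix only retargets ka_work at nvme_wq, whose works the reset path already flushes for serialization:

/* Paraphrased from include/linux/workqueue.h: the helper this patch
 * replaces always queues on the global system_wq. */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
                                         unsigned long delay)
{
        return queue_delayed_work(system_wq, dwork, delay);
}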

drivers/nvme/host/pci.c (+18 -5)
@@ -1401,6 +1401,23 @@
                 nvme_poll_irqdisable(nvmeq, -1);
 }
 
+/*
+ * Called only on a device that has been disabled and after all other threads
+ * that can check this device's completion queues have synced. This is the
+ * last chance for the driver to see a natural completion before
+ * nvme_cancel_request() terminates all incomplete requests.
+ */
+static void nvme_reap_pending_cqes(struct nvme_dev *dev)
+{
+        u16 start, end;
+        int i;
+
+        for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+                nvme_process_cq(&dev->queues[i], &start, &end, -1);
+                nvme_complete_cqes(&dev->queues[i], start, end);
+        }
+}
+
 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
                 int entry_size)
 {
@@ -2252,11 +2235,6 @@
         if (timeout == 0)
                 return false;
 
-        /* handle any remaining CQEs */
-        if (opcode == nvme_admin_delete_cq &&
-            !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
-                nvme_poll_irqdisable(nvmeq, -1);
-
         sent--;
         if (nr_queues)
                 goto retry;
@@ -2440,6 +2428,7 @@
         nvme_suspend_io_queues(dev);
         nvme_suspend_queue(&dev->queues[0]);
         nvme_pci_disable(dev);
+        nvme_reap_pending_cqes(dev);
 
         blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
         blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
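
Taken together, the three hunks give the disable path a clean ordering. An abridged sketch of how nvme_dev_disable() reads after the patch, limited to the calls visible above (the real function does more):

static void nvme_dev_disable_sketch(struct nvme_dev *dev)
{
        nvme_suspend_io_queues(dev);            /* quiesce I/O queues */
        nvme_suspend_queue(&dev->queues[0]);    /* quiesce the admin queue */
        nvme_pci_disable(dev);                  /* no new completions after this */
        nvme_reap_pending_cqes(dev);            /* harvest natural completions */

        /* whatever remains is dead and safe to terminate */
        blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
        blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request,
                                &dev->ctrl);
}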

drivers/nvme/host/rdma.c (+1 -1)
@@ -1088,7 +1088,7 @@
         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                 return;
 
-        queue_work(nvme_wq, &ctrl->err_work);
+        queue_work(nvme_reset_wq, &ctrl->err_work);
 }
 
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
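
tcp.c below gets the same one-line retarget. A plausible reading, given the keep-alive change above: err_work synchronously cancels ka_work, which now lives on nvme_wq, so err_work moves one level up to nvme_reset_wq, matching the serialization scheme in the core.c comment (reset works flush works hosted on nvme_wq). All three nvme workqueues carry WQ_MEM_RECLAIM, unlike system_wq, which is what made the old arrangement trip the kernel's flush-dependency warning; their allocation in drivers/nvme/host/core.c (quoted from memory, treat as a sketch):

nvme_wq = alloc_workqueue("nvme-wq",
                WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
                WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
                WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);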

drivers/nvme/host/tcp.c (+7 -2)
@@ -422,7 +422,7 @@
         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
                 return;
 
-        queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
+        queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
 }
 
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
@@ -1054,7 +1054,12 @@
         } else if (unlikely(result < 0)) {
                 dev_err(queue->ctrl->ctrl.device,
                         "failed to send request %d\n", result);
-                if (result != -EPIPE)
+
+                /*
+                 * Fail the request unless peer closed the connection,
+                 * in which case error recovery flow will complete all.
+                 */
+                if ((result != -EPIPE) && (result != -ECONNRESET))
                         nvme_tcp_fail_request(queue->request);
                 nvme_tcp_done_send_req(queue);
                 return;
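
The send-path hunk's new comment carries the reasoning: -EPIPE and -ECONNRESET both mean the peer tore the connection down, in which case error recovery completes every outstanding request, and failing the request here as well produces the double requeue named in the shortlog. A hypothetical predicate (invented name, not in the patch) that makes the branch read directly:

/* Hypothetical helper: true when a send failure is local and must be
 * failed here, rather than a peer disconnect (-EPIPE / -ECONNRESET)
 * that error recovery will complete on its own. */
static inline bool nvme_tcp_send_failure_is_local(int result)
{
        return result != -EPIPE && result != -ECONNRESET;
}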