Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

- Missing CRC32 selections (Arnd)

- Fix for a merge window regression with bdev inode init (Christoph)

- bcache fixes

- rnbd fixes

- NVMe pull request from Christoph:
- fix a race in the nvme-tcp send code (Sagi Grimberg)
- fix a list corruption in an nvme-rdma error path (Israel Rukshin)
- avoid a possible double fetch in nvme-pci (Lalithambika Krishnakumar)
- add the subsystem NQN quirk for a Samsung drive (Gopal Tiwari)
- fix two compiler warnings in nvme-fcloop (James Smart)
- don't call sleeping functions from irq context in nvme-fc (James Smart)
- remove an unused argument (Max Gurtovoy)
- remove unused exports (Minwoo Im)

- Use-after-free fix for partition iteration (Ming)

- Missing blk-mq debugfs flag annotation (John)

- Bdev freeze regression fix (Satya)

- blk-iocost NULL pointer deref fix (Tejun)

* tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block: (26 commits)
bcache: set bcache device into read-only mode for BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET
bcache: introduce BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE for large bucket
bcache: check unsupported feature sets for bcache register
bcache: fix typo from SUUP to SUPP in features.h
bcache: set pdev_set_uuid before scond loop iteration
blk-mq-debugfs: Add decode for BLK_MQ_F_TAG_HCTX_SHARED
block/rnbd-clt: avoid module unload race with close confirmation
block/rnbd: Adding name to the Contributors List
block/rnbd-clt: Fix sg table use after free
block/rnbd-srv: Fix use after free in rnbd_srv_sess_dev_force_close
block/rnbd: Select SG_POOL for RNBD_CLIENT
block: pre-initialize struct block_device in bdev_alloc_inode
fs: Fix freeze_bdev()/thaw_bdev() accounting of bd_fsfreeze_sb
nvme: remove the unused status argument from nvme_trace_bio_complete
nvmet-rdma: Fix list_del corruption on queue establishment failure
nvme: unexport functions with no external caller
nvme: avoid possible double fetch in handling CQE
nvme-tcp: Fix possible race of io_work and direct send
nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
nvme-fcloop: Fix sscanf type and list_first_entry_or_null warnings
...

+174 -61
+4 -4
block/bfq-iosched.c
···
         * limit 'something'.
         */
        /* no more than 50% of tags for async I/O */
-       bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+       bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
        /*
         * no more than 75% of tags for sync writes (25% extra tags
         * w.r.t. async I/O, to prevent async I/O from starving sync
         * writes)
         */
-       bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+       bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);

        /*
         * In-word depths in case some bfq_queue is being weight-
···
         * shortage.
         */
        /* no more than ~18% of tags for async I/O */
-       bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+       bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
        /* no more than ~37% of tags for sync writes (~20% extra tags) */
-       bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+       bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);

        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
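To make the arithmetic above concrete: with a hypothetical total tag depth of 64 (an assumed value, not taken from the patch), the four limits work out to 32, 48, 12 and 24 tags. A minimal userspace sketch, using a plain helper in place of the kernel's max() macro:

#include <stdio.h>

/* Illustration only: mirrors the percentages the bfq change derives from the
 * real tag depth (bt->sb.depth) rather than from the per-word bit count
 * (1U << bt->sb.shift). Not kernel code. */
static unsigned int umax(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int depth = 64;        /* assumed total number of tags */
        unsigned int d[2][2];

        d[0][0] = umax(depth >> 1, 1U);         /* ~50% for async I/O      */
        d[0][1] = umax((depth * 3) >> 2, 1U);   /* ~75% for sync writes    */
        d[1][0] = umax((depth * 3) >> 4, 1U);   /* ~18% when weight-raised */
        d[1][1] = umax((depth * 6) >> 4, 1U);   /* ~37% when weight-raised */

        printf("%u %u %u %u\n", d[0][0], d[0][1], d[1][0], d[1][1]);
        /* prints: 32 48 12 24 */
        return 0;
}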
+11 -5
block/blk-iocost.c
···
        bool use_debt, ioc_locked;
        unsigned long flags;

-       /* bypass IOs if disabled or for root cgroup */
-       if (!ioc->enabled || !iocg->level)
+       /* bypass IOs if disabled, still initializing, or for root cgroup */
+       if (!ioc->enabled || !iocg || !iocg->level)
                return;

        /* calculate the absolute vtime cost */
···
                           struct bio *bio)
{
        struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
-       struct ioc *ioc = iocg->ioc;
+       struct ioc *ioc = rqos_to_ioc(rqos);
        sector_t bio_end = bio_end_sector(bio);
        struct ioc_now now;
        u64 vtime, abs_cost, cost;
        unsigned long flags;

-       /* bypass if disabled or for root cgroup */
-       if (!ioc->enabled || !iocg->level)
+       /* bypass if disabled, still initializing, or for root cgroup */
+       if (!ioc->enabled || !iocg || !iocg->level)
                return;

        abs_cost = calc_vtime_cost(bio, iocg, true);
···
        ioc_refresh_params(ioc, true);
        spin_unlock_irq(&ioc->lock);

+       /*
+        * rqos must be added before activation to allow iocg_pd_init() to
+        * lookup the ioc from q. This means that the rqos methods may get
+        * called before policy activation completion, can't assume that the
+        * target bio has an iocg associated and need to test for NULL iocg.
+        */
        rq_qos_add(q, rqos);
        ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
        if (ret) {
+1
block/blk-mq-debugfs.c
···
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
        HCTX_FLAG_NAME(STACKING),
+       HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

+9 -6
block/genhd.c
···
                part = rcu_dereference(ptbl->part[piter->idx]);
                if (!part)
                        continue;
-               if (!bdev_nr_sectors(part) &&
-                   !(piter->flags & DISK_PITER_INCL_EMPTY) &&
-                   !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
-                     piter->idx == 0))
-                       continue;
-
                piter->part = bdgrab(part);
                if (!piter->part)
                        continue;
+               if (!bdev_nr_sectors(part) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+                     piter->idx == 0)) {
+                       bdput(piter->part);
+                       piter->part = NULL;
+                       continue;
+               }
+
                piter->idx += inc;
                break;
        }
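The use-after-free stemmed from inspecting a partition that had not yet been pinned; the fix above takes the reference first and drops it again when the entry is skipped. A self-contained sketch of that ordering, with a hypothetical refcounted type standing in for the partition's block_device (none of these names are the block layer API):

#include <stdbool.h>
#include <stdio.h>

struct obj {
        int refcount;
        unsigned long nr_sectors;
};

static bool obj_tryget(struct obj *o)
{
        if (o->refcount <= 0)
                return false;   /* already on its way to being freed */
        o->refcount++;
        return true;
}

static void obj_put(struct obj *o)
{
        o->refcount--;
}

/* Pin first, inspect second, unpin when skipping; mirrors the
 * bdgrab()/bdput() ordering in the fix above. */
static struct obj *pick_if_nonempty(struct obj *o)
{
        if (!obj_tryget(o))
                return NULL;
        if (!o->nr_sectors) {
                obj_put(o);
                return NULL;
        }
        return o;
}

int main(void)
{
        struct obj empty = { .refcount = 1, .nr_sectors = 0 };

        printf("%s\n", pick_if_nonempty(&empty) ? "kept" : "skipped");
        return 0;
}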
+1
drivers/block/Kconfig
···
config BLK_DEV_RSXX
        tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
        depends on PCI
+       select CRC32
        help
          Device driver for IBM's high speed PCIe SSD
          storage device: Flash Adapter 900GB Full Height.
+1
drivers/block/rnbd/Kconfig
···
        tristate "RDMA Network Block Device driver client"
        depends on INFINIBAND_RTRS_CLIENT
        select BLK_DEV_RNBD
+       select SG_POOL
        help
          RNBD client is a network block device driver using rdma transport.

+1
drivers/block/rnbd/README
···
Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
Milind Dumbare <Milind.dumbare@gmail.com>
Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
+9 -9
drivers/block/rnbd/rnbd-clt.c
···
        init_waitqueue_head(&iu->comp.wait);
        iu->comp.errno = INT_MAX;

+       if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+               rnbd_put_permit(sess, permit);
+               kfree(iu);
+               return NULL;
+       }
+
        return iu;
}

static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
        if (atomic_dec_and_test(&iu->refcount)) {
+               sg_free_table(&iu->sgt);
                rnbd_put_permit(sess, iu->permit);
                kfree(iu);
        }
···
        iu->buf = NULL;
        iu->dev = dev;

-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
        msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
        msg.device_id = cpu_to_le32(device_id);

···
                err = errno;
        }

-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
}
···
        iu->buf = rsp;
        iu->dev = dev;

-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));

        msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
···
                err = errno;
        }

-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
}
···

        iu->buf = rsp;
        iu->sess = sess;
-
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));

        msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
···
        } else {
                err = errno;
        }
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
}
···
         */

        list_for_each_entry_safe(sess, sn, &sess_list, list) {
-               WARN_ON(!rnbd_clt_get_sess(sess));
+               if (!rnbd_clt_get_sess(sess))
+                       continue;
                close_rtrs(sess);
                list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
                        /*
+5 -3
drivers/block/rnbd/rnbd-srv.c
···

void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
-       mutex_lock(&sess_dev->sess->lock);
-       rnbd_srv_destroy_dev_session_sysfs(sess_dev);
-       mutex_unlock(&sess_dev->sess->lock);
+       struct rnbd_srv_session *sess = sess_dev->sess;
+
        sess_dev->keep_id = true;
+       mutex_lock(&sess->lock);
+       rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+       mutex_unlock(&sess->lock);
}

static int process_msg_close(struct rtrs_srv *rtrs,
+1
drivers/lightnvm/Kconfig
···

config NVM_PBLK
        tristate "Physical Block Device Open-Channel SSD target"
+       select CRC32
        help
          Allows an open-channel SSD to be exposed as a block device to the
          host. The target assumes the device exposes raw flash and must be
+1 -1
drivers/md/bcache/features.c
···
};

static struct feature feature_list[] = {
-       {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+       {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
                "large_bucket"},
        {0, 0, 0 },
};
+25 -5
drivers/md/bcache/features.h
···

/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET       0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET          0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE      0x0002

-#define BCH_FEATURE_COMPAT_SUUP         0
-#define BCH_FEATURE_RO_COMPAT_SUUP      0
-#define BCH_FEATURE_INCOMPAT_SUUP       BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP         0
+#define BCH_FEATURE_RO_COMPAT_SUPP      0
+#define BCH_FEATURE_INCOMPAT_SUPP       (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+                                         BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)

#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
                ((sb)->feature_compat & (mask))
···
                        ~BCH##_FEATURE_INCOMPAT_##flagname; \
}

-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}

int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
+49 -4
drivers/md/bcache/super.c
···
{
        unsigned int bucket_size = le16_to_cpu(s->bucket_size);

-       if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
-           bch_has_feature_large_bucket(sb))
-               bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+       if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+               if (bch_has_feature_large_bucket(sb)) {
+                       unsigned int max, order;
+
+                       max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+                       order = le16_to_cpu(s->bucket_size);
+                       /*
+                        * bcache tool will make sure the overflow won't
+                        * happen, an error message here is enough.
+                        */
+                       if (order > max)
+                               pr_err("Bucket size (1 << %u) overflows\n",
+                                       order);
+                       bucket_size = 1 << order;
+               } else if (bch_has_feature_obso_large_bucket(sb)) {
+                       bucket_size +=
+                               le16_to_cpu(s->obso_bucket_size_hi) << 16;
+               }
+       }

        return bucket_size;
}
···
        sb->feature_compat = le64_to_cpu(s->feature_compat);
        sb->feature_incompat = le64_to_cpu(s->feature_incompat);
        sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+       /* Check incompatible features */
+       err = "Unsupported compatible feature found";
+       if (bch_has_unknown_compat_features(sb))
+               goto err;
+
+       err = "Unsupported read-only compatible feature found";
+       if (bch_has_unknown_ro_compat_features(sb))
+               goto err;
+
+       err = "Unsupported incompatible feature found";
+       if (bch_has_unknown_incompat_features(sb))
+               goto err;
+
        err = read_super_common(sb, bdev, s);
        if (err)
                goto err;
···
        bcache_device_link(&dc->disk, c, "bdev");
        atomic_inc(&c->attached_dev_nr);

+       if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+               pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+               pr_err("Please update to the latest bcache-tools to create the cache device\n");
+               set_disk_ro(dc->disk.disk, 1);
+       }
+
        /* Allow the writeback thread to proceed */
        up_write(&dc->writeback_lock);

···
                goto err;

        bcache_device_link(d, c, "volume");
+
+       if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+               pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+               pr_err("Please update to the latest bcache-tools to create the cache device\n");
+               set_disk_ro(d->disk, 1);
+       }

        return 0;
err:
···
        closure_sync(&cl);
        c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
        bcache_write_super(c);
+
+       if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+               pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");

        list_for_each_entry_safe(dc, t, &uncached_devices, list)
                bch_cached_dev_attach(dc, c, NULL);
···
        }

        list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+               char *pdev_set_uuid = pdev->dc->sb.set_uuid;
                list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-                       char *pdev_set_uuid = pdev->dc->sb.set_uuid;
                        char *set_uuid = c->set_uuid;

                        if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
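The first hunk above decodes the new log-encoded bucket size: with BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE the on-disk 16-bit field holds log2 of the bucket size, so the real size is 1 << order. A hedged userspace sketch of just that decoding step (the function name and the plain fprintf are illustrative, not bcache code):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the decoding introduced above. The overflow handling differs
 * slightly from the diff: the kernel only warns, because bcache-tools is
 * expected to prevent an overflowing value; the sketch bails out instead. */
static unsigned int decode_bucket_size(uint16_t order)
{
        unsigned int max_order = sizeof(unsigned int) * 8 - 1;

        if (order > max_order) {
                fprintf(stderr, "Bucket size (1 << %u) overflows\n", order);
                return 0;
        }
        return 1U << order;
}

int main(void)
{
        /* e.g. an on-disk value of 10 decodes to a bucket size of 1024 */
        printf("%u\n", decode_bucket_size(10));
        return 0;
}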
+3 -5
drivers/nvme/host/core.c
···
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
        int ret;

···

        return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
···
                req->__sector = nvme_lba_to_sect(req->q->queuedata,
                        le64_to_cpu(nvme_req(req)->result.u64));

-       nvme_trace_bio_complete(req, status);
+       nvme_trace_bio_complete(req);
        blk_mq_end_request(req, status);
}
···
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
        struct request *req;
···
        nvme_init_request(req, cmd);
        return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
+14 -1
drivers/nvme/host/fc.c
···
        struct blk_mq_tag_set   admin_tag_set;
        struct blk_mq_tag_set   tag_set;

+       struct work_struct      ioerr_work;
        struct delayed_work     connect_work;

        struct kref             ref;
···
}

static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+               container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+       nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
        struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
···

check_error:
        if (terminate_assoc)
-               nvme_fc_error_recovery(ctrl, "transport detected io error");
+               queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}

static int
···
{
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side. this will block
···

        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+       INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
        spin_lock_init(&ctrl->lock);

        /* io queue count */
···

fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);

+2 -7
drivers/nvme/host/nvme.h
···
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-               struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
···
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

···
        kblockd_schedule_work(&head->requeue_work);
}

-static inline void nvme_trace_bio_complete(struct request *req,
-               blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
        struct nvme_ns *ns = req->q->queuedata;

···
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
-               blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+6 -4
drivers/nvme/host/pci.c
···
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
        struct nvme_completion *cqe = &nvmeq->cqes[idx];
+       __u16 command_id = READ_ONCE(cqe->command_id);
        struct request *req;

        /*
···
         * aborts. We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+       if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
                nvme_complete_async_event(&nvmeq->dev->ctrl,
                                cqe->status, &cqe->result);
                return;
        }

-       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
        if (unlikely(!req)) {
                dev_warn(nvmeq->dev->ctrl.device,
                        "invalid id %d completed on queue %d\n",
-                       cqe->command_id, le16_to_cpu(cqe->sq_id));
+                       command_id, le16_to_cpu(cqe->sq_id));
                return;
        }
···
        { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
-               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
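The nvme_handle_cqe() change above snapshots cqe->command_id exactly once: the completion entry lives in memory the controller writes by DMA, so two reads of the same field can return two different values, and an id validated on the first read could be replaced before the second. A hedged userspace sketch of the pattern (READ_ONCE is modelled with a volatile cast; the struct and names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Illustration of the double-fetch fix above, not driver code. */
#define READ_ONCE_U16(x) (*(volatile const uint16_t *)&(x))

struct fake_cqe {
        uint16_t command_id;    /* device-owned memory: may change under us */
        uint16_t sq_id;
};

static void handle_cqe(const struct fake_cqe *cqe, unsigned int nr_tags)
{
        /* Read once, then validate and use the same snapshot; re-reading
         * cqe->command_id after the check would reopen the race. */
        uint16_t command_id = READ_ONCE_U16(cqe->command_id);

        if (command_id >= nr_tags) {
                fprintf(stderr, "invalid id %u completed on queue %u\n",
                        command_id, cqe->sq_id);
                return;
        }
        printf("completing tag %u\n", command_id);
}

int main(void)
{
        struct fake_cqe cqe = { .command_id = 3, .sq_id = 1 };

        handle_cqe(&cqe, 32);
        return 0;
}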
+11 -1
drivers/nvme/host/tcp.c
···
        }
}

+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+       int ret;
+
+       /* drain the send queue as much as we can... */
+       do {
+               ret = nvme_tcp_try_send(queue);
+       } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
{
···
        if (queue->io_cpu == smp_processor_id() &&
            sync && empty && mutex_trylock(&queue->send_mutex)) {
                queue->more_requests = !last;
-               nvme_tcp_try_send(queue);
+               nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
        } else if (last) {
+4 -3
drivers/nvme/target/fcloop.c
···
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
-       int opcode, starting, amount;
+       unsigned int opcode;
+       int starting, amount;

        if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
                return -EBADRQC;
···

static void __exit fcloop_exit(void)
{
-       struct fcloop_lport *lport;
-       struct fcloop_nport *nport;
+       struct fcloop_lport *lport = NULL;
+       struct fcloop_nport *nport = NULL;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
+10
drivers/nvme/target/rdma.c
···
        spin_lock_irqsave(&queue->state_lock, flags);
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
+               while (!list_empty(&queue->rsp_wait_list)) {
+                       struct nvmet_rdma_rsp *rsp;
+
+                       rsp = list_first_entry(&queue->rsp_wait_list,
+                                              struct nvmet_rdma_rsp,
+                                              wait_list);
+                       list_del(&rsp->wait_list);
+                       nvmet_rdma_put_rsp(rsp);
+               }
+               fallthrough;
        case NVMET_RDMA_Q_LIVE:
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
                disconnect = true;
+5 -2
fs/block_dev.c
···
        error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;
+       else
+               bdev->bd_fsfreeze_sb = NULL;
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
···
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
+
        if (!ei)
                return NULL;
+       memset(&ei->bdev, 0, sizeof(ei->bdev));
+       ei->bdev.bd_bdi = &noop_backing_dev_info;
        return &ei->vfs_inode;
}
···
        mapping_set_gfp_mask(&inode->i_data, GFP_USER);

        bdev = I_BDEV(inode);
-       memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
        bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
-       bdev->bd_bdi = &noop_backing_dev_info;
#ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
+1 -1
include/uapi/linux/bcache.h
···
                __le16          keys;
        };
        __le64                  d[SB_JOURNAL_BUCKETS];  /* journal buckets */
-       __le16                  bucket_size_hi;
+       __le16                  obso_bucket_size_hi;    /* obsoleted */
};

/*