Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'block-7.1-20260430' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- MD pull request via Yu:
- Fix a raid5 UAF on IO across the reshape position
- Avoid failing RAID1/RAID10 devices for invalid IO errors
- Fix RAID10 divide-by-zero when far_copies is zero
- Restore bitmap grow through sysfs
- Use mddev_is_dm() instead of open-coding gendisk checks
- Use ATTRIBUTE_GROUPS() for md default sysfs attributes
- Replace open-coded wait loops with wait_event helpers (see the sketch after this list)

- NVMe pull request via Keith:
- Target data transfer size configuration (Aurelien)
- Enable P2P for RDMA (Shivaji Kant)
- TCP target updates (Maurizio, Alistair, Chaitanya, Shivam Kumar)
- TCP host updates (Alistair, Chaitanya)
- Authentication updates (Alistair, Daniel, Chris Leech)
- Multipath fixes (John Garry)
- New quirks (Alan Cui, Tao Jiang)
- Apple driver fix (Fedor Pchelkin)
- PCI admin doorbell update fix (Keith)

- Properly propagate CDROM read-only state to the block layer
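The wait-loop replacements mentioned in the MD items above are the standard conversion from an open-coded prepare_to_wait()/schedule()/finish_wait() loop to a single wait_event() call. A minimal sketch of the before/after shape, using a made-up wait queue and condition rather than the md_handle_request() internals:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

/* Before: open-coded uninterruptible wait, as md_handle_request() had. */
static void example_wait_open_coded(void)
{
	DEFINE_WAIT(__wait);

	for (;;) {
		prepare_to_wait(&example_wq, &__wait, TASK_UNINTERRUPTIBLE);
		if (example_ready)
			break;
		schedule();
	}
	finish_wait(&example_wq, &__wait);
}

/* After: the same wait collapsed into the wait_event() helper. */
static void example_wait_helper(void)
{
	wait_event(example_wq, example_ready);
}

The raid1 conversion in this series uses wait_event_idle() instead, i.e. TASK_IDLE, so the waiting writer does not count toward load average.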

* tag 'block-7.1-20260430' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux: (35 commits)
md: use ATTRIBUTE_GROUPS() for md default sysfs attributes
md: use mddev_is_dm() instead of open-coding gendisk checks
md/raid1: replace wait loop with wait_event_idle() in raid1_write_request()
md/md-bitmap: add a none backend for bitmap grow
md/md-bitmap: split bitmap sysfs groups
md: factor bitmap creation away from sysfs handling
md: use mddev_lock_nointr() in mddev_suspend_and_lock_nointr()
md: replace wait loop with wait_event() in md_handle_request()
md/raid10: fix divide-by-zero in setup_geo() with zero far_copies
md/raid1,raid10: don't fail devices for invalid IO errors
MAINTAINERS: Add Xiao Ni as md/raid reviewer
md/raid5: Fix UAF on IO across the reshape position
cdrom, scsi: sr: propagate read-only status to block layer via set_disk_ro()
nvme-auth: Hash DH shared secret to create session key
nvme-pci: fix missed admin queue sq doorbell write
nvme-auth: Include SC_C in RVAL controller hash
nvme-tcp: teardown circular locking fixes
nvmet-tcp: Don't clear tls_key when freeing sq
Revert "nvmet-tcp: Don't free SQ on authentication success"
nvme: skip trace completion for host path errors
...

+588 -276
+1
MAINTAINERS
··· 24798 24798 M: Song Liu <song@kernel.org> 24799 24799 M: Yu Kuai <yukuai@fnnas.com> 24800 24800 R: Li Nan <linan122@huawei.com> 24801 + R: Xiao Ni <xiao@kernel.org> 24801 24802 L: linux-raid@vger.kernel.org 24802 24803 S: Supported 24803 24804 Q: https://patchwork.kernel.org/project/linux-raid/list/
+48 -25
drivers/cdrom/cdrom.c
··· 631 631 632 632 WARN_ON(!cdo->generic_packet); 633 633 634 + /* 635 + * Propagate the drive's write support to the block layer so BLKROGET 636 + * reflects actual write capability. Drivers that use GET CONFIGURATION 637 + * features (CDC_MRW_W, CDC_RAM) must have called 638 + * cdrom_probe_write_features() before register_cdrom() so the mask is 639 + * complete here. 640 + */ 641 + set_disk_ro(disk, !CDROM_CAN(CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | 642 + CDC_CD_RW)); 643 + 634 644 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); 635 645 mutex_lock(&cdrom_mutex); 636 646 list_add(&cdi->list, &cdrom_list); ··· 751 741 752 742 return 0; 753 743 } 744 + 745 + /* 746 + * Probe write-related MMC features via GET CONFIGURATION and update 747 + * cdi->mask accordingly. Drivers that populate cdi->mask from the MODE SENSE 748 + * capabilities page (e.g. sr) should call this after those MODE SENSE bits 749 + * have been set but before register_cdrom(), so that the full set of 750 + * write-capability bits is known by the time register_cdrom() decides on the 751 + * initial read-only state of the disk. 752 + */ 753 + void cdrom_probe_write_features(struct cdrom_device_info *cdi) 754 + { 755 + int mrw, mrw_write, ram_write; 756 + 757 + mrw = 0; 758 + if (!cdrom_is_mrw(cdi, &mrw_write)) 759 + mrw = 1; 760 + 761 + if (CDROM_CAN(CDC_MO_DRIVE)) 762 + ram_write = 1; 763 + else 764 + (void) cdrom_is_random_writable(cdi, &ram_write); 765 + 766 + if (mrw) 767 + cdi->mask &= ~CDC_MRW; 768 + else 769 + cdi->mask |= CDC_MRW; 770 + 771 + if (mrw_write) 772 + cdi->mask &= ~CDC_MRW_W; 773 + else 774 + cdi->mask |= CDC_MRW_W; 775 + 776 + if (ram_write) 777 + cdi->mask &= ~CDC_RAM; 778 + else 779 + cdi->mask |= CDC_RAM; 780 + } 781 + EXPORT_SYMBOL(cdrom_probe_write_features); 754 782 755 783 static int cdrom_media_erasable(struct cdrom_device_info *cdi) 756 784 { ··· 942 894 */ 943 895 static int cdrom_open_write(struct cdrom_device_info *cdi) 944 896 { 945 - int mrw, mrw_write, ram_write; 946 897 int ret = 1; 947 - 948 - mrw = 0; 949 - if (!cdrom_is_mrw(cdi, &mrw_write)) 950 - mrw = 1; 951 - 952 - if (CDROM_CAN(CDC_MO_DRIVE)) 953 - ram_write = 1; 954 - else 955 - (void) cdrom_is_random_writable(cdi, &ram_write); 956 - 957 - if (mrw) 958 - cdi->mask &= ~CDC_MRW; 959 - else 960 - cdi->mask |= CDC_MRW; 961 - 962 - if (mrw_write) 963 - cdi->mask &= ~CDC_MRW_W; 964 - else 965 - cdi->mask |= CDC_MRW_W; 966 - 967 - if (ram_write) 968 - cdi->mask &= ~CDC_RAM; 969 - else 970 - cdi->mask |= CDC_RAM; 971 898 972 899 if (CDROM_CAN(CDC_MRW_W)) 973 900 ret = cdrom_mrw_open_write(cdi);
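With the set_disk_ro() call added above, the block layer's read-only flag now tracks the drive's actual write capability, so BLKROGET (referenced in the new comment) reports it. A small userspace sketch, assuming a drive at /dev/sr0; this is only an illustration, not part of the series:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKROGET */

int main(void)
{
	int ro = 0;
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKROGET, &ro) < 0) {
		perror("BLKROGET");
		close(fd);
		return 1;
	}
	/* With this change, 1 means the drive cannot write the medium. */
	printf("/dev/sr0 read-only: %d\n", ro);
	close(fd);
	return 0;
}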
+120 -13
drivers/md/md-bitmap.c
··· 216 216 }; 217 217 218 218 static struct workqueue_struct *md_bitmap_wq; 219 + static struct attribute_group md_bitmap_internal_group; 219 220 220 221 static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks, 221 222 int chunksize, bool init); ··· 2581 2580 return __bitmap_resize(bitmap, blocks, chunksize, false); 2582 2581 } 2583 2582 2583 + static bool bitmap_none_enabled(void *data, bool flush) 2584 + { 2585 + return false; 2586 + } 2587 + 2588 + static int bitmap_none_create(struct mddev *mddev) 2589 + { 2590 + return 0; 2591 + } 2592 + 2593 + static int bitmap_none_load(struct mddev *mddev) 2594 + { 2595 + return 0; 2596 + } 2597 + 2598 + static void bitmap_none_destroy(struct mddev *mddev) 2599 + { 2600 + } 2601 + 2602 + static int bitmap_none_get_stats(void *data, struct md_bitmap_stats *stats) 2603 + { 2604 + return -ENOENT; 2605 + } 2606 + 2584 2607 static ssize_t 2585 2608 location_show(struct mddev *mddev, char *page) 2586 2609 { ··· 2643 2618 goto out; 2644 2619 } 2645 2620 2646 - bitmap_destroy(mddev); 2621 + sysfs_unmerge_group(&mddev->kobj, &md_bitmap_internal_group); 2622 + md_bitmap_destroy_nosysfs(mddev); 2623 + mddev->bitmap_id = ID_BITMAP_NONE; 2624 + if (!mddev_set_bitmap_ops_nosysfs(mddev)) 2625 + goto none_err; 2647 2626 mddev->bitmap_info.offset = 0; 2648 2627 if (mddev->bitmap_info.file) { 2649 2628 struct file *f = mddev->bitmap_info.file; ··· 2683 2654 } 2684 2655 2685 2656 mddev->bitmap_info.offset = offset; 2686 - rv = bitmap_create(mddev); 2687 - if (rv) 2688 - goto out; 2657 + md_bitmap_destroy_nosysfs(mddev); 2658 + mddev->bitmap_id = ID_BITMAP; 2659 + if (!mddev_set_bitmap_ops_nosysfs(mddev)) 2660 + goto bitmap_err; 2689 2661 2690 - rv = bitmap_load(mddev); 2662 + rv = md_bitmap_create_nosysfs(mddev); 2663 + if (rv) 2664 + goto create_err; 2665 + 2666 + rv = mddev->bitmap_ops->load(mddev); 2691 2667 if (rv) { 2692 2668 mddev->bitmap_info.offset = 0; 2693 - bitmap_destroy(mddev); 2694 - goto out; 2669 + goto load_err; 2695 2670 } 2671 + 2672 + rv = sysfs_merge_group(&mddev->kobj, 2673 + &md_bitmap_internal_group); 2674 + if (rv) 2675 + goto merge_err; 2696 2676 } 2697 2677 } 2698 2678 if (!mddev->external) { ··· 2717 2679 if (rv) 2718 2680 return rv; 2719 2681 return len; 2682 + 2683 + merge_err: 2684 + mddev->bitmap_info.offset = 0; 2685 + load_err: 2686 + md_bitmap_destroy_nosysfs(mddev); 2687 + create_err: 2688 + mddev->bitmap_info.offset = 0; 2689 + mddev->bitmap_id = ID_BITMAP_NONE; 2690 + if (!mddev_set_bitmap_ops_nosysfs(mddev)) 2691 + rv = -ENOENT; 2692 + goto out; 2693 + bitmap_err: 2694 + rv = -ENOENT; 2695 + none_err: 2696 + mddev->bitmap_info.offset = 0; 2697 + goto out; 2720 2698 } 2721 2699 2722 2700 static struct md_sysfs_entry bitmap_location = ··· 3009 2955 __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, 3010 2956 behind_writes_used_show, behind_writes_used_reset); 3011 2957 3012 - static struct attribute *md_bitmap_attrs[] = { 2958 + static struct attribute *md_bitmap_common_attrs[] = { 3013 2959 &bitmap_location.attr, 2960 + NULL 2961 + }; 2962 + 2963 + static struct attribute *md_bitmap_internal_attrs[] = { 3014 2964 &bitmap_space.attr, 3015 2965 &bitmap_timeout.attr, 3016 2966 &bitmap_backlog.attr, ··· 3025 2967 NULL 3026 2968 }; 3027 2969 3028 - static struct attribute_group md_bitmap_group = { 2970 + static struct attribute_group md_bitmap_common_group = { 3029 2971 .name = "bitmap", 3030 - .attrs = md_bitmap_attrs, 2972 + .attrs = md_bitmap_common_attrs, 2973 + }; 2974 + 2975 + static struct attribute_group 
md_bitmap_internal_group = { 2976 + .name = "bitmap", 2977 + .attrs = md_bitmap_internal_attrs, 2978 + }; 2979 + 2980 + static const struct attribute_group *bitmap_groups[] = { 2981 + &md_bitmap_common_group, 2982 + &md_bitmap_internal_group, 2983 + NULL, 2984 + }; 2985 + 2986 + static const struct attribute_group *bitmap_none_groups[] = { 2987 + &md_bitmap_common_group, 2988 + NULL, 2989 + }; 2990 + 2991 + static struct bitmap_operations bitmap_none_ops = { 2992 + .head = { 2993 + .type = MD_BITMAP, 2994 + .id = ID_BITMAP_NONE, 2995 + .name = "none", 2996 + }, 2997 + 2998 + .enabled = bitmap_none_enabled, 2999 + .create = bitmap_none_create, 3000 + .load = bitmap_none_load, 3001 + .destroy = bitmap_none_destroy, 3002 + .get_stats = bitmap_none_get_stats, 3003 + 3004 + .groups = bitmap_none_groups, 3031 3005 }; 3032 3006 3033 3007 static struct bitmap_operations bitmap_ops = { ··· 3103 3013 .set_pages = bitmap_set_pages, 3104 3014 .free = md_bitmap_free, 3105 3015 3106 - .group = &md_bitmap_group, 3016 + .groups = bitmap_groups, 3107 3017 }; 3108 3018 3109 3019 int md_bitmap_init(void) 3110 3020 { 3021 + int err; 3022 + 3111 3023 md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND, 3112 3024 0); 3113 3025 if (!md_bitmap_wq) 3114 3026 return -ENOMEM; 3115 3027 3116 - return register_md_submodule(&bitmap_ops.head); 3028 + err = register_md_submodule(&bitmap_none_ops.head); 3029 + if (err) 3030 + goto err_wq; 3031 + 3032 + err = register_md_submodule(&bitmap_ops.head); 3033 + if (err) 3034 + goto err_none; 3035 + 3036 + return 0; 3037 + 3038 + err_none: 3039 + unregister_md_submodule(&bitmap_none_ops.head); 3040 + err_wq: 3041 + destroy_workqueue(md_bitmap_wq); 3042 + return err; 3117 3043 } 3118 3044 3119 3045 void md_bitmap_exit(void) 3120 3046 { 3121 - destroy_workqueue(md_bitmap_wq); 3122 3047 unregister_md_submodule(&bitmap_ops.head); 3048 + unregister_md_submodule(&bitmap_none_ops.head); 3049 + destroy_workqueue(md_bitmap_wq); 3123 3050 }
+1 -1
drivers/md/md-bitmap.h
··· 125 125 void (*set_pages)(void *data, unsigned long pages); 126 126 void (*free)(void *data); 127 127 128 - struct attribute_group *group; 128 + const struct attribute_group **groups; 129 129 }; 130 130 131 131 /* the bitmap API */
+6 -1
drivers/md/md-llbitmap.c
··· 1738 1738 .attrs = md_llbitmap_attrs, 1739 1739 }; 1740 1740 1741 + static const struct attribute_group *md_llbitmap_groups[] = { 1742 + &md_llbitmap_group, 1743 + NULL, 1744 + }; 1745 + 1741 1746 static struct bitmap_operations llbitmap_ops = { 1742 1747 .head = { 1743 1748 .type = MD_BITMAP, ··· 1779 1774 .dirty_bits = llbitmap_dirty_bits, 1780 1775 .write_all = llbitmap_write_all, 1781 1776 1782 - .group = &md_llbitmap_group, 1777 + .groups = md_llbitmap_groups, 1783 1778 }; 1784 1779 1785 1780 int md_llbitmap_init(void)
+102 -80
drivers/md/md.c
··· 396 396 { 397 397 check_suspended: 398 398 if (is_suspended(mddev, bio)) { 399 - DEFINE_WAIT(__wait); 400 399 /* Bail out if REQ_NOWAIT is set for the bio */ 401 400 if (bio->bi_opf & REQ_NOWAIT) { 402 401 bio_wouldblock_error(bio); 403 402 return true; 404 403 } 405 - for (;;) { 406 - prepare_to_wait(&mddev->sb_wait, &__wait, 407 - TASK_UNINTERRUPTIBLE); 408 - if (!is_suspended(mddev, bio)) 409 - break; 410 - schedule(); 411 - } 412 - finish_wait(&mddev->sb_wait, &__wait); 404 + wait_event(mddev->sb_wait, !is_suspended(mddev, bio)); 413 405 } 414 406 if (!percpu_ref_tryget_live(&mddev->active_io)) 415 407 goto check_suspended; 416 408 417 409 if (!mddev->pers->make_request(mddev, bio)) { 418 410 percpu_ref_put(&mddev->active_io); 419 - if (!mddev->gendisk && mddev->pers->prepare_suspend) 411 + if (mddev_is_dm(mddev) && mddev->pers->prepare_suspend) 420 412 return false; 421 413 goto check_suspended; 422 414 } ··· 679 687 680 688 static void no_op(struct percpu_ref *r) {} 681 689 682 - static bool mddev_set_bitmap_ops(struct mddev *mddev) 690 + static void md_bitmap_sysfs_add(struct mddev *mddev) 683 691 { 684 - struct bitmap_operations *old = mddev->bitmap_ops; 692 + if (sysfs_update_groups(&mddev->kobj, mddev->bitmap_ops->groups)) 693 + pr_warn("md: cannot register extra bitmap attributes for %s\n", 694 + mdname(mddev)); 695 + else 696 + /* 697 + * Inform user with KOBJ_CHANGE about new bitmap 698 + * attributes. 699 + */ 700 + kobject_uevent(&mddev->kobj, KOBJ_CHANGE); 701 + } 702 + 703 + static void md_bitmap_sysfs_del(struct mddev *mddev) 704 + { 705 + int nr_groups = 0; 706 + 707 + for (nr_groups = 0; mddev->bitmap_ops->groups[nr_groups]; nr_groups++) 708 + ; 709 + 710 + while (--nr_groups >= 1) 711 + sysfs_unmerge_group(&mddev->kobj, 712 + mddev->bitmap_ops->groups[nr_groups]); 713 + sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->groups[0]); 714 + } 715 + 716 + bool mddev_set_bitmap_ops_nosysfs(struct mddev *mddev) 717 + { 685 718 struct md_submodule_head *head; 686 719 687 - if (mddev->bitmap_id == ID_BITMAP_NONE || 688 - (old && old->head.id == mddev->bitmap_id)) 720 + if (mddev->bitmap_ops && 721 + mddev->bitmap_ops->head.id == mddev->bitmap_id) 689 722 return true; 690 723 691 724 xa_lock(&md_submodule); ··· 728 711 729 712 mddev->bitmap_ops = (void *)head; 730 713 xa_unlock(&md_submodule); 731 - 732 - if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) { 733 - if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group)) 734 - pr_warn("md: cannot register extra bitmap attributes for %s\n", 735 - mdname(mddev)); 736 - else 737 - /* 738 - * Inform user with KOBJ_CHANGE about new bitmap 739 - * attributes. 
740 - */ 741 - kobject_uevent(&mddev->kobj, KOBJ_CHANGE); 742 - } 743 714 return true; 744 715 745 716 err: 746 717 xa_unlock(&md_submodule); 747 718 return false; 748 - } 749 - 750 - static void mddev_clear_bitmap_ops(struct mddev *mddev) 751 - { 752 - if (!mddev_is_dm(mddev) && mddev->bitmap_ops && 753 - mddev->bitmap_ops->group) 754 - sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group); 755 - 756 - mddev->bitmap_ops = NULL; 757 719 } 758 720 759 721 int mddev_init(struct mddev *mddev) ··· 4275 4279 4276 4280 xa_lock(&md_submodule); 4277 4281 xa_for_each(&md_submodule, i, head) { 4278 - if (head->type != MD_BITMAP) 4282 + if (head->type != MD_BITMAP || head->id == ID_BITMAP_NONE) 4279 4283 continue; 4280 4284 4281 4285 if (mddev->bitmap_id == head->id) ··· 6055 6059 &md_logical_block_size.attr, 6056 6060 NULL, 6057 6061 }; 6058 - 6059 - static const struct attribute_group md_default_group = { 6060 - .attrs = md_default_attrs, 6061 - }; 6062 + ATTRIBUTE_GROUPS(md_default); 6062 6063 6063 6064 static struct attribute *md_redundancy_attrs[] = { 6064 6065 &md_scan_mode.attr, ··· 6078 6085 static const struct attribute_group md_redundancy_group = { 6079 6086 .name = NULL, 6080 6087 .attrs = md_redundancy_attrs, 6081 - }; 6082 - 6083 - static const struct attribute_group *md_attr_groups[] = { 6084 - &md_default_group, 6085 - NULL, 6086 6088 }; 6087 6089 6088 6090 static ssize_t ··· 6162 6174 static const struct kobj_type md_ktype = { 6163 6175 .release = md_kobj_release, 6164 6176 .sysfs_ops = &md_sysfs_ops, 6165 - .default_groups = md_attr_groups, 6177 + .default_groups = md_default_groups, 6166 6178 }; 6167 6179 6168 6180 int mdp_major = 0; ··· 6527 6539 return id; 6528 6540 } 6529 6541 6530 - static int md_bitmap_create(struct mddev *mddev) 6542 + int md_bitmap_create_nosysfs(struct mddev *mddev) 6531 6543 { 6532 6544 enum md_submodule_id orig_id = mddev->bitmap_id; 6533 6545 enum md_submodule_id sb_id; ··· 6536 6548 if (mddev->bitmap_id == ID_BITMAP_NONE) 6537 6549 return -EINVAL; 6538 6550 6539 - if (!mddev_set_bitmap_ops(mddev)) 6551 + if (!mddev_set_bitmap_ops_nosysfs(mddev)) { 6552 + mddev->bitmap_id = orig_id; 6540 6553 return -ENOENT; 6554 + } 6541 6555 6542 6556 err = mddev->bitmap_ops->create(mddev); 6543 6557 if (!err) ··· 6550 6560 * doesn't match, and mdadm is not the latest version to set 6551 6561 * bitmap_type, set bitmap_ops based on the disk version. 
6552 6562 */ 6553 - mddev_clear_bitmap_ops(mddev); 6563 + mddev->bitmap_ops = NULL; 6554 6564 6555 6565 sb_id = md_bitmap_get_id_from_sb(mddev); 6556 - if (sb_id == ID_BITMAP_NONE || sb_id == orig_id) 6566 + if (sb_id == ID_BITMAP_NONE || sb_id == orig_id) { 6567 + mddev->bitmap_id = orig_id; 6557 6568 return err; 6569 + } 6558 6570 6559 6571 pr_info("md: %s: bitmap version mismatch, switching from %d to %d\n", 6560 6572 mdname(mddev), orig_id, sb_id); 6561 6573 6562 6574 mddev->bitmap_id = sb_id; 6563 - if (!mddev_set_bitmap_ops(mddev)) { 6575 + if (!mddev_set_bitmap_ops_nosysfs(mddev)) { 6564 6576 mddev->bitmap_id = orig_id; 6565 6577 return -ENOENT; 6566 6578 } 6567 6579 6568 6580 err = mddev->bitmap_ops->create(mddev); 6569 6581 if (err) { 6570 - mddev_clear_bitmap_ops(mddev); 6582 + mddev->bitmap_ops = NULL; 6571 6583 mddev->bitmap_id = orig_id; 6572 6584 } 6573 6585 6574 6586 return err; 6575 6587 } 6576 6588 6577 - static void md_bitmap_destroy(struct mddev *mddev) 6589 + static int md_bitmap_create(struct mddev *mddev) 6590 + { 6591 + int err; 6592 + 6593 + err = md_bitmap_create_nosysfs(mddev); 6594 + if (err) 6595 + return err; 6596 + 6597 + if (!mddev_is_dm(mddev) && mddev->bitmap_ops->groups) 6598 + md_bitmap_sysfs_add(mddev); 6599 + 6600 + return 0; 6601 + } 6602 + 6603 + void md_bitmap_destroy_nosysfs(struct mddev *mddev) 6578 6604 { 6579 6605 if (!md_bitmap_registered(mddev)) 6580 6606 return; 6581 6607 6582 6608 mddev->bitmap_ops->destroy(mddev); 6583 - mddev_clear_bitmap_ops(mddev); 6609 + mddev->bitmap_ops = NULL; 6610 + } 6611 + 6612 + static void md_bitmap_destroy(struct mddev *mddev) 6613 + { 6614 + if (!mddev_is_dm(mddev) && mddev->bitmap_ops && 6615 + mddev->bitmap_ops->groups) 6616 + md_bitmap_sysfs_del(mddev); 6617 + 6618 + md_bitmap_destroy_nosysfs(mddev); 6619 + } 6620 + 6621 + static void md_bitmap_set_none(struct mddev *mddev) 6622 + { 6623 + mddev->bitmap_id = ID_BITMAP_NONE; 6624 + if (!mddev_set_bitmap_ops_nosysfs(mddev)) 6625 + return; 6626 + 6627 + if (!mddev_is_dm(mddev) && mddev->bitmap_ops->groups) 6628 + md_bitmap_sysfs_add(mddev); 6584 6629 } 6585 6630 6586 6631 int md_run(struct mddev *mddev) ··· 6738 6713 } 6739 6714 6740 6715 /* dm-raid expect sync_thread to be frozen until resume */ 6741 - if (mddev->gendisk) 6716 + if (!mddev_is_dm(mddev)) 6742 6717 mddev->recovery = 0; 6743 6718 6744 6719 /* may be over-ridden by personality */ ··· 6826 6801 6827 6802 if (mddev->sb_flags) 6828 6803 md_update_sb(mddev, 0); 6804 + 6805 + if (IS_ENABLED(CONFIG_MD_BITMAP) && !mddev->bitmap_info.file && 6806 + !mddev->bitmap_info.offset) 6807 + md_bitmap_set_none(mddev); 6829 6808 6830 6809 md_new_event(); 6831 6810 return 0; ··· 7776 7747 { 7777 7748 int err = 0; 7778 7749 7779 - if (!md_bitmap_registered(mddev)) 7750 + if (!md_bitmap_registered(mddev) || 7751 + mddev->bitmap_id == ID_BITMAP_NONE) 7780 7752 return -EINVAL; 7781 7753 7782 7754 if (mddev->pers) { ··· 7842 7812 7843 7813 if (err) { 7844 7814 md_bitmap_destroy(mddev); 7815 + md_bitmap_set_none(mddev); 7845 7816 fd = -1; 7846 7817 } 7847 7818 } else if (fd < 0) { 7848 7819 md_bitmap_destroy(mddev); 7820 + md_bitmap_set_none(mddev); 7849 7821 } 7850 7822 } 7851 7823 ··· 8154 8122 mddev->bitmap_info.default_offset; 8155 8123 mddev->bitmap_info.space = 8156 8124 mddev->bitmap_info.default_space; 8125 + mddev->bitmap_id = ID_BITMAP; 8157 8126 rv = md_bitmap_create(mddev); 8158 8127 if (!rv) 8159 8128 rv = mddev->bitmap_ops->load(mddev); 8160 8129 8161 - if (rv) 8130 + if (rv) { 8162 8131 
md_bitmap_destroy(mddev); 8132 + mddev->bitmap_info.offset = 0; 8133 + md_bitmap_set_none(mddev); 8134 + } 8163 8135 } else { 8164 8136 struct md_bitmap_stats stats; 8165 8137 ··· 8191 8155 } 8192 8156 md_bitmap_destroy(mddev); 8193 8157 mddev->bitmap_info.offset = 0; 8158 + md_bitmap_set_none(mddev); 8194 8159 } 8195 8160 } 8196 8161 md_update_sb(mddev, 1); ··· 9378 9341 9379 9342 static void md_end_clone_io(struct bio *bio) 9380 9343 { 9381 - struct md_io_clone *md_io_clone = bio->bi_private; 9344 + struct md_io_clone *md_io_clone = container_of(bio, struct md_io_clone, 9345 + bio_clone); 9382 9346 struct bio *orig_bio = md_io_clone->orig_bio; 9383 9347 struct mddev *mddev = md_io_clone->mddev; 9348 + struct completion *reshape_completion = bio->bi_private; 9384 9349 9385 9350 if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false)) 9386 9351 md_bitmap_end(mddev, md_io_clone); ··· 9394 9355 bio_end_io_acct(orig_bio, md_io_clone->start_time); 9395 9356 9396 9357 bio_put(bio); 9397 - bio_endio(orig_bio); 9358 + if (unlikely(reshape_completion)) 9359 + complete(reshape_completion); 9360 + else 9361 + bio_endio(orig_bio); 9398 9362 percpu_ref_put(&mddev->active_io); 9399 9363 } 9400 9364 ··· 9422 9380 } 9423 9381 9424 9382 clone->bi_end_io = md_end_clone_io; 9425 - clone->bi_private = md_io_clone; 9383 + clone->bi_private = NULL; 9426 9384 *bio = clone; 9427 9385 } 9428 9386 ··· 9432 9390 md_clone_bio(mddev, bio); 9433 9391 } 9434 9392 EXPORT_SYMBOL_GPL(md_account_bio); 9435 - 9436 - void md_free_cloned_bio(struct bio *bio) 9437 - { 9438 - struct md_io_clone *md_io_clone = bio->bi_private; 9439 - struct bio *orig_bio = md_io_clone->orig_bio; 9440 - struct mddev *mddev = md_io_clone->mddev; 9441 - 9442 - if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false)) 9443 - md_bitmap_end(mddev, md_io_clone); 9444 - 9445 - if (bio->bi_status && !orig_bio->bi_status) 9446 - orig_bio->bi_status = bio->bi_status; 9447 - 9448 - if (md_io_clone->start_time) 9449 - bio_end_io_acct(orig_bio, md_io_clone->start_time); 9450 - 9451 - bio_put(bio); 9452 - percpu_ref_put(&mddev->active_io); 9453 - } 9454 - EXPORT_SYMBOL_GPL(md_free_cloned_bio); 9455 9393 9456 9394 /* md_allow_write(mddev) 9457 9395 * Calling this ensures that the array is marked 'active' so that writes
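For reference, the ATTRIBUTE_GROUPS(md_default) conversion above leans on the sysfs helper macro; its effective expansion is roughly the group plus NULL-terminated group array that md.c previously spelled out by hand (a sketch of the effect, not the literal macro text from <linux/sysfs.h>):

/* Approximate effect of ATTRIBUTE_GROUPS(md_default). */
static const struct attribute_group md_default_group = {
	.attrs = md_default_attrs,
};

static const struct attribute_group *md_default_groups[] = {
	&md_default_group,
	NULL,
};

That is why the md_ktype change can point .default_groups at md_default_groups directly.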
+4 -2
drivers/md/md.h
··· 920 920 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, 921 921 struct bio *bio, sector_t start, sector_t size); 922 922 void md_account_bio(struct mddev *mddev, struct bio **bio); 923 - void md_free_cloned_bio(struct bio *bio); 924 923 925 924 extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); 926 925 void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev, ··· 934 935 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); 935 936 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); 936 937 extern int md_check_no_bitmap(struct mddev *mddev); 938 + bool mddev_set_bitmap_ops_nosysfs(struct mddev *mddev); 939 + int md_bitmap_create_nosysfs(struct mddev *mddev); 940 + void md_bitmap_destroy_nosysfs(struct mddev *mddev); 937 941 extern int md_integrity_register(struct mddev *mddev); 938 942 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); 939 943 ··· 1017 1015 static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev) 1018 1016 { 1019 1017 mddev_suspend(mddev, false); 1020 - mutex_lock(&mddev->reconfig_mutex); 1018 + mddev_lock_nointr(mddev); 1021 1019 } 1022 1020 1023 1021 static inline void mddev_unlock_and_resume(struct mddev *mddev)
+6 -1
drivers/md/raid1-10.c
··· 293 293 * bio with REQ_RAHEAD or REQ_NOWAIT can fail at anytime, before such IO is 294 294 * submitted to the underlying disks, hence don't record badblocks or retry 295 295 * in this case. 296 + * 297 + * BLK_STS_INVAL means the bio was not valid for the underlying device. This 298 + * is a user error, not a device failure, so retrying or recording bad blocks 299 + * would be wrong. 296 300 */ 297 301 static inline bool raid1_should_handle_error(struct bio *bio) 298 302 { 299 - return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT)); 303 + return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT)) && 304 + bio->bi_status != BLK_STS_INVAL; 300 305 }
+4 -11
drivers/md/raid1.c
··· 1510 1510 mddev->cluster_ops->area_resyncing(mddev, WRITE, 1511 1511 bio->bi_iter.bi_sector, bio_end_sector(bio))) { 1512 1512 1513 - DEFINE_WAIT(w); 1514 1513 if (bio->bi_opf & REQ_NOWAIT) { 1515 1514 bio_wouldblock_error(bio); 1516 1515 return; 1517 1516 } 1518 - for (;;) { 1519 - prepare_to_wait(&conf->wait_barrier, 1520 - &w, TASK_IDLE); 1521 - if (!mddev->cluster_ops->area_resyncing(mddev, WRITE, 1522 - bio->bi_iter.bi_sector, 1523 - bio_end_sector(bio))) 1524 - break; 1525 - schedule(); 1526 - } 1527 - finish_wait(&conf->wait_barrier, &w); 1517 + wait_event_idle(conf->wait_barrier, 1518 + !mddev->cluster_ops->area_resyncing(mddev, WRITE, 1519 + bio->bi_iter.bi_sector, 1520 + bio_end_sector(bio))); 1528 1521 } 1529 1522 1530 1523 /*
+2
drivers/md/raid10.c
··· 3791 3791 nc = layout & 255; 3792 3792 fc = (layout >> 8) & 255; 3793 3793 fo = layout & (1<<16); 3794 + if (!nc || !fc) 3795 + return -1; 3794 3796 geo->raid_disks = disks; 3795 3797 geo->near_copies = nc; 3796 3798 geo->far_copies = fc;
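As decoded just above, near_copies sits in the low byte of the layout word and far_copies in the next byte, so a layout with that byte cleared used to reach the later geometry math in setup_geo() with far_copies == 0. A throwaway illustration with a hypothetical layout value:

#include <stdio.h>

/* Illustration only: decode a raid10 layout word the way setup_geo() does. */
int main(void)
{
	unsigned int layout = 0x0001;		/* bits 0-7 near_copies, bits 8-15 far_copies */
	unsigned int nc = layout & 255;		/* 1 */
	unsigned int fc = (layout >> 8) & 255;	/* 0: setup_geo() now returns -1 instead */

	printf("near_copies=%u far_copies=%u\n", nc, fc);
	return 0;
}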
+6 -1
drivers/md/raid5.c
··· 6217 6217 6218 6218 mempool_free(ctx, conf->ctx_pool); 6219 6219 if (res == STRIPE_WAIT_RESHAPE) { 6220 - md_free_cloned_bio(bi); 6220 + DECLARE_COMPLETION_ONSTACK(done); 6221 + WRITE_ONCE(bi->bi_private, &done); 6222 + 6223 + bio_endio(bi); 6224 + 6225 + wait_for_completion(&done); 6221 6226 return false; 6222 6227 } 6223 6228
+74 -20
drivers/nvme/common/auth.c
··· 351 351 } 352 352 EXPORT_SYMBOL_GPL(nvme_auth_transform_key); 353 353 354 + /** 355 + * nvme_auth_augmented_challenge() - Compute the augmented DH-HMAC-CHAP challenge 356 + * @hmac_id: Hash algorithm identifier 357 + * @skey: Session key 358 + * @skey_len: Length of @skey 359 + * @challenge: Challenge value 360 + * @aug: Output buffer for the augmented challenge 361 + * @hlen: Hash output length (length of @challenge and @aug) 362 + * 363 + * NVMe base specification 8.3.5.5.4: The augmented challenge is computed 364 + * applying the HMAC function using the hash function H() selected by the 365 + * HashID parameter ... with the hash of the ephemeral DH key ... as HMAC key 366 + * to the challenge C (i.e., Ca = HMAC(H(g^xy mod p), C)). 367 + * 368 + * As the session key skey is already H(g^xy mod p) per section 8.3.5.5.9, use 369 + * it directly as the HMAC key without additional hashing. 370 + * 371 + * Return: 0 on success, negative errno on failure. 372 + */ 354 373 int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len, 355 374 const u8 *challenge, u8 *aug, size_t hlen) 356 375 { 357 - u8 hashed_key[NVME_AUTH_MAX_DIGEST_SIZE]; 358 - int ret; 359 - 360 - ret = nvme_auth_hash(hmac_id, skey, skey_len, hashed_key); 361 - if (ret) 362 - return ret; 363 - ret = nvme_auth_hmac(hmac_id, hashed_key, hlen, challenge, hlen, aug); 364 - memzero_explicit(hashed_key, sizeof(hashed_key)); 365 - return ret; 376 + return nvme_auth_hmac(hmac_id, skey, skey_len, challenge, hlen, aug); 366 377 } 367 378 EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge); 368 379 ··· 414 403 } 415 404 EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey); 416 405 417 - int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, 418 - const u8 *ctrl_key, size_t ctrl_key_len, 419 - u8 *sess_key, size_t sess_key_len) 406 + /** 407 + * nvme_auth_gen_session_key() - Generate an ephemeral session key 408 + * @dh_tfm: Diffie-Hellman transform with local private key already set 409 + * @public_key: Peer's public key 410 + * @public_key_len: Length of @public_key 411 + * @sess_key: Output buffer for the session key 412 + * @sess_key_len: Size of @sess_key buffer 413 + * @hash_id: Hash algorithm identifier 414 + * 415 + * NVMe base specification 8.3.5.5.9: The session key Ks shall be computed from 416 + * the ephemeral DH key (i.e., g^xy mod p) ... by applying the hash function 417 + * H() selected by the HashID parameter ... (i.e., Ks = H(g^xy mod p)). 418 + * 419 + * Return: 0 on success, negative errno on failure. 
420 + */ 421 + int nvme_auth_gen_session_key(struct crypto_kpp *dh_tfm, 422 + const u8 *public_key, size_t public_key_len, 423 + u8 *sess_key, size_t sess_key_len, u8 hash_id) 420 424 { 421 425 struct kpp_request *req; 422 426 struct crypto_wait wait; 423 427 struct scatterlist src, dst; 428 + u8 *dh_secret; 429 + size_t dh_secret_len, hash_len; 424 430 int ret; 425 431 426 - req = kpp_request_alloc(dh_tfm, GFP_KERNEL); 427 - if (!req) 432 + hash_len = nvme_auth_hmac_hash_len(hash_id); 433 + if (!hash_len) { 434 + pr_warn("%s: invalid hash algorithm %d\n", __func__, hash_id); 435 + return -EINVAL; 436 + } 437 + 438 + if (sess_key_len != hash_len) { 439 + pr_warn("%s: sess_key buffer missized (%zu != %zu)\n", 440 + __func__, sess_key_len, hash_len); 441 + return -EINVAL; 442 + } 443 + 444 + dh_secret_len = crypto_kpp_maxsize(dh_tfm); 445 + dh_secret = kzalloc(dh_secret_len, GFP_KERNEL); 446 + if (!dh_secret) 428 447 return -ENOMEM; 429 448 449 + req = kpp_request_alloc(dh_tfm, GFP_KERNEL); 450 + if (!req) { 451 + ret = -ENOMEM; 452 + goto out_free_secret; 453 + } 454 + 430 455 crypto_init_wait(&wait); 431 - sg_init_one(&src, ctrl_key, ctrl_key_len); 432 - kpp_request_set_input(req, &src, ctrl_key_len); 433 - sg_init_one(&dst, sess_key, sess_key_len); 434 - kpp_request_set_output(req, &dst, sess_key_len); 456 + sg_init_one(&src, public_key, public_key_len); 457 + kpp_request_set_input(req, &src, public_key_len); 458 + sg_init_one(&dst, dh_secret, dh_secret_len); 459 + kpp_request_set_output(req, &dst, dh_secret_len); 435 460 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 436 461 crypto_req_done, &wait); 437 462 438 463 ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); 439 - 440 464 kpp_request_free(req); 465 + 466 + if (ret) 467 + goto out_free_secret; 468 + 469 + ret = nvme_auth_hash(hash_id, dh_secret, dh_secret_len, sess_key); 470 + 471 + out_free_secret: 472 + kfree_sensitive(dh_secret); 441 473 return ret; 442 474 } 443 - EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret); 475 + EXPORT_SYMBOL_GPL(nvme_auth_gen_session_key); 444 476 445 477 int nvme_auth_parse_key(const char *secret, struct nvme_dhchap_key **ret_key) 446 478 {
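In spec terms, as the new kernel-doc above quotes, the two helpers now split the work the way NVMe Base 8.3.5.5.9 and 8.3.5.5.4 describe it:

	Ks = H(g^xy mod p)	(computed once in nvme_auth_gen_session_key())
	Ca = HMAC(Ks, C)	(nvme_auth_augmented_challenge() uses Ks directly)

Previously the raw DH output was carried around as the session key and the extra H() was applied only inside nvme_auth_augmented_challenge(); the augmented challenge came out the same, but the stored session key did not match the spec's Ks, which is what the reworked helpers address.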
+1 -5
drivers/nvme/host/apple.c
··· 1267 1267 1268 1268 static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl) 1269 1269 { 1270 - struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl); 1271 - 1272 - if (anv->ctrl.admin_q) 1273 - blk_put_queue(anv->ctrl.admin_q); 1274 - put_device(anv->dev); 1270 + put_device(ctrl->dev); 1275 1271 } 1276 1272 1277 1273 static const struct nvme_ctrl_ops nvme_ctrl_ops = {
+9 -7
drivers/nvme/host/auth.c
··· 535 535 put_unaligned_le16(chap->transaction, buf); 536 536 nvme_auth_hmac_update(&hmac, buf, 2); 537 537 538 - memset(buf, 0, 4); 538 + *buf = chap->sc_c; 539 539 nvme_auth_hmac_update(&hmac, buf, 1); 540 540 nvme_auth_hmac_update(&hmac, "Controller", 10); 541 541 nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn, 542 542 strlen(ctrl->opts->subsysnqn)); 543 + memset(buf, 0, 4); 543 544 nvme_auth_hmac_update(&hmac, buf, 1); 544 545 nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn, 545 546 strlen(ctrl->opts->host->nqn)); ··· 588 587 } 589 588 590 589 gen_sesskey: 591 - chap->sess_key_len = chap->host_key_len; 590 + chap->sess_key_len = chap->hash_len; 592 591 chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL); 593 592 if (!chap->sess_key) { 594 593 chap->sess_key_len = 0; ··· 596 595 return -ENOMEM; 597 596 } 598 597 599 - ret = nvme_auth_gen_shared_secret(chap->dh_tfm, 600 - chap->ctrl_key, chap->ctrl_key_len, 601 - chap->sess_key, chap->sess_key_len); 598 + ret = nvme_auth_gen_session_key(chap->dh_tfm, 599 + chap->ctrl_key, chap->ctrl_key_len, 600 + chap->sess_key, chap->sess_key_len, 601 + chap->hash_id); 602 602 if (ret) { 603 603 dev_dbg(ctrl->device, 604 - "failed to generate shared secret, error %d\n", ret); 604 + "failed to generate session key, error %d\n", ret); 605 605 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 606 606 return ret; 607 607 } 608 - dev_dbg(ctrl->device, "shared secret %*ph\n", 608 + dev_dbg(ctrl->device, "session key %*ph\n", 609 609 (int)chap->sess_key_len, chap->sess_key); 610 610 return 0; 611 611 }
+11 -5
drivers/nvme/host/core.c
··· 454 454 blk_mq_end_request(req, status); 455 455 } 456 456 457 - void nvme_complete_rq(struct request *req) 457 + static void __nvme_complete_rq(struct request *req) 458 458 { 459 459 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 460 460 461 - trace_nvme_complete_rq(req); 462 461 nvme_cleanup_cmd(req); 463 462 464 463 /* ··· 492 493 return; 493 494 } 494 495 } 496 + 497 + void nvme_complete_rq(struct request *req) 498 + { 499 + trace_nvme_complete_rq(req); 500 + __nvme_complete_rq(req); 501 + } 495 502 EXPORT_SYMBOL_GPL(nvme_complete_rq); 496 503 497 504 void nvme_complete_batch_req(struct request *req) ··· 518 513 { 519 514 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; 520 515 blk_mq_set_request_complete(req); 521 - nvme_complete_rq(req); 516 + __nvme_complete_rq(req); 522 517 return BLK_STS_OK; 523 518 } 524 519 EXPORT_SYMBOL_GPL(nvme_host_path_error); ··· 3049 3044 * 3050 3045 * The device is left in a state where it is also not possible 3051 3046 * to use "nvme set-feature" to disable APST, but booting with 3052 - * nvme_core.default_ps_max_latency=0 works. 3047 + * nvme_core.default_ps_max_latency_us=0 works. 3053 3048 */ 3054 3049 .vid = 0x1e0f, 3055 3050 .mn = "KCD6XVUL6T40", ··· 4088 4083 mutex_unlock(&ctrl->subsys->lock); 4089 4084 4090 4085 #ifdef CONFIG_NVME_MULTIPATH 4091 - cancel_delayed_work(&head->remove_work); 4086 + if (cancel_delayed_work(&head->remove_work)) 4087 + module_put(THIS_MODULE); 4092 4088 #endif 4093 4089 return 0; 4094 4090
+1
drivers/nvme/host/fc.c
··· 3968 3968 3969 3969 MODULE_DESCRIPTION("NVMe host FC transport driver"); 3970 3970 MODULE_LICENSE("GPL v2"); 3971 + MODULE_ALIAS("nvme-fc");
-4
drivers/nvme/host/multipath.c
··· 231 231 bool changed = false; 232 232 int node; 233 233 234 - if (!head) 235 - goto out; 236 - 237 234 for_each_node(node) { 238 235 if (ns == rcu_access_pointer(head->current_path[node])) { 239 236 rcu_assign_pointer(head->current_path[node], NULL); 240 237 changed = true; 241 238 } 242 239 } 243 - out: 244 240 return changed; 245 241 } 246 242
+5
drivers/nvme/host/pci.c
··· 2241 2241 static const struct blk_mq_ops nvme_mq_admin_ops = { 2242 2242 .queue_rq = nvme_queue_rq, 2243 2243 .complete = nvme_pci_complete_rq, 2244 + .commit_rqs = nvme_commit_rqs, 2244 2245 .init_hctx = nvme_admin_init_hctx, 2245 2246 .init_request = nvme_pci_init_request, 2246 2247 .timeout = nvme_timeout, ··· 4105 4104 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 4106 4105 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 4107 4106 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 4107 + { PCI_DEVICE(0x1c5f, 0x0555), /* Memblaze Pblaze5 adapter */ 4108 + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, 4109 + { PCI_DEVICE(0x144d, 0xa808), /* Samsung PM981/983 */ 4110 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 4108 4111 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 4109 4112 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 4110 4113 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
+9
drivers/nvme/host/rdma.c
··· 2189 2189 nvme_rdma_reconnect_or_remove(ctrl, ret); 2190 2190 } 2191 2191 2192 + static bool nvme_rdma_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 2193 + { 2194 + struct nvme_rdma_ctrl *r_ctrl = to_rdma_ctrl(ctrl); 2195 + 2196 + return ib_dma_pci_p2p_dma_supported(r_ctrl->device->dev); 2197 + } 2198 + 2192 2199 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { 2193 2200 .name = "rdma", 2194 2201 .module = THIS_MODULE, ··· 2210 2203 .get_address = nvmf_get_address, 2211 2204 .stop_ctrl = nvme_rdma_stop_ctrl, 2212 2205 .get_virt_boundary = nvme_get_virt_boundary, 2206 + .supports_pci_p2pdma = nvme_rdma_supports_pci_p2pdma, 2213 2207 }; 2214 2208 2215 2209 /* ··· 2440 2432 2441 2433 MODULE_DESCRIPTION("NVMe host RDMA transport driver"); 2442 2434 MODULE_LICENSE("GPL v2"); 2435 + MODULE_ALIAS("nvme-rdma");
+19
drivers/nvme/host/sysfs.c
··· 883 883 } 884 884 static DEVICE_ATTR_RO(tls_keyring); 885 885 886 + static ssize_t tls_mode_show(struct device *dev, 887 + struct device_attribute *attr, char *buf) 888 + { 889 + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 890 + const char *mode; 891 + 892 + if (ctrl->opts->tls) 893 + mode = "tls"; 894 + else 895 + mode = "concat"; 896 + 897 + return sysfs_emit(buf, "%s\n", mode); 898 + } 899 + static DEVICE_ATTR_RO(tls_mode); 900 + 886 901 static struct attribute *nvme_tls_attrs[] = { 887 902 &dev_attr_tls_key.attr, 888 903 &dev_attr_tls_configured_key.attr, 889 904 &dev_attr_tls_keyring.attr, 905 + &dev_attr_tls_mode.attr, 890 906 NULL, 891 907 }; 892 908 ··· 923 907 return 0; 924 908 if (a == &dev_attr_tls_keyring.attr && 925 909 !ctrl->opts->keyring) 910 + return 0; 911 + if (a == &dev_attr_tls_mode.attr && 912 + !ctrl->opts->tls && !ctrl->opts->concat) 926 913 return 0; 927 914 928 915 return a->mode;
+22 -7
drivers/nvme/host/tcp.c
··· 1438 1438 { 1439 1439 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1440 1440 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 1441 - unsigned int noreclaim_flag; 1441 + unsigned int noio_flag; 1442 1442 1443 1443 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) 1444 1444 return; 1445 1445 1446 1446 page_frag_cache_drain(&queue->pf_cache); 1447 1447 1448 - noreclaim_flag = memalloc_noreclaim_save(); 1449 - /* ->sock will be released by fput() */ 1450 - fput(queue->sock->file); 1448 + /** 1449 + * Prevent memory reclaim from triggering block I/O during socket 1450 + * teardown. The socket release path fput -> tcp_close -> 1451 + * tcp_disconnect -> tcp_send_active_reset may allocate memory, and 1452 + * allowing reclaim to issue I/O could deadlock if we're being called 1453 + * from block device teardown (e.g., del_gendisk -> elevator cleanup) 1454 + * which holds locks that the I/O completion path needs. 1455 + */ 1456 + noio_flag = memalloc_noio_save(); 1457 + 1458 + /** 1459 + * Release the socket synchronously. During reset in 1460 + * nvme_reset_ctrl_work(), queue teardown is immediately followed by 1461 + * re-allocation. fput() defers socket cleanup to delayed_fput_work 1462 + * in workqueue context, which can race with new queue setup. 1463 + */ 1464 + __fput_sync(queue->sock->file); 1451 1465 queue->sock = NULL; 1452 - memalloc_noreclaim_restore(noreclaim_flag); 1466 + memalloc_noio_restore(noio_flag); 1453 1467 1454 1468 kfree(queue->pdu); 1455 1469 mutex_destroy(&queue->send_mutex); ··· 1915 1901 err_rcv_pdu: 1916 1902 kfree(queue->pdu); 1917 1903 err_sock: 1918 - /* ->sock will be released by fput() */ 1919 - fput(queue->sock->file); 1904 + /* Use sync variant - see nvme_tcp_free_queue() for explanation */ 1905 + __fput_sync(queue->sock->file); 1920 1906 queue->sock = NULL; 1921 1907 err_destroy_mutex: 1922 1908 mutex_destroy(&queue->send_mutex); ··· 3085 3071 3086 3072 MODULE_DESCRIPTION("NVMe host TCP transport driver"); 3087 3073 MODULE_LICENSE("GPL v2"); 3074 + MODULE_ALIAS("nvme-tcp");
+2 -6
drivers/nvme/target/admin-cmd.c
··· 687 687 id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL | 688 688 NVME_CTRL_CMIC_ANA; 689 689 690 - /* Limit MDTS according to transport capability */ 691 - if (ctrl->ops->get_mdts) 692 - id->mdts = ctrl->ops->get_mdts(ctrl); 693 - else 694 - id->mdts = 0; 695 - 690 + /* Limit MDTS according to port config or transport capability */ 691 + id->mdts = nvmet_ctrl_mdts(req); 696 692 id->cntlid = cpu_to_le16(ctrl->cntlid); 697 693 id->ver = cpu_to_le32(ctrl->subsys->ver); 698 694
+10 -11
drivers/nvme/target/auth.c
··· 229 229 void nvmet_auth_sq_free(struct nvmet_sq *sq) 230 230 { 231 231 cancel_delayed_work(&sq->auth_expired_work); 232 - #ifdef CONFIG_NVME_TARGET_TCP_TLS 233 - sq->tls_key = NULL; 234 - #endif 235 232 kfree(sq->dhchap_c1); 236 233 sq->dhchap_c1 = NULL; 237 234 kfree(sq->dhchap_c2); ··· 399 402 put_unaligned_le16(req->sq->dhchap_tid, buf); 400 403 nvme_auth_hmac_update(&hmac, buf, 2); 401 404 402 - memset(buf, 0, 4); 405 + *buf = req->sq->sc_c; 403 406 nvme_auth_hmac_update(&hmac, buf, 1); 404 407 nvme_auth_hmac_update(&hmac, "Controller", 10); 405 408 nvme_auth_hmac_update(&hmac, ctrl->subsys->subsysnqn, 406 409 strlen(ctrl->subsys->subsysnqn)); 410 + memset(buf, 0, 4); 407 411 nvme_auth_hmac_update(&hmac, buf, 1); 408 412 nvme_auth_hmac_update(&hmac, ctrl->hostnqn, strlen(ctrl->hostnqn)); 409 413 nvme_auth_hmac_final(&hmac, response); ··· 447 449 struct nvmet_ctrl *ctrl = req->sq->ctrl; 448 450 int ret; 449 451 450 - req->sq->dhchap_skey_len = ctrl->dh_keysize; 452 + req->sq->dhchap_skey_len = nvme_auth_hmac_hash_len(ctrl->shash_id); 451 453 req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL); 452 454 if (!req->sq->dhchap_skey) 453 455 return -ENOMEM; 454 - ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm, 455 - pkey, pkey_size, 456 - req->sq->dhchap_skey, 457 - req->sq->dhchap_skey_len); 456 + ret = nvme_auth_gen_session_key(ctrl->dh_tfm, 457 + pkey, pkey_size, 458 + req->sq->dhchap_skey, 459 + req->sq->dhchap_skey_len, 460 + ctrl->shash_id); 458 461 if (ret) 459 - pr_debug("failed to compute shared secret, err %d\n", ret); 462 + pr_debug("failed to compute session key, err %d\n", ret); 460 463 else 461 - pr_debug("%s: shared secret %*ph\n", __func__, 464 + pr_debug("%s: session key %*ph\n", __func__, 462 465 (int)req->sq->dhchap_skey_len, 463 466 req->sq->dhchap_skey); 464 467
+27
drivers/nvme/target/configfs.c
··· 301 301 302 302 CONFIGFS_ATTR(nvmet_, param_max_queue_size); 303 303 304 + static ssize_t nvmet_param_mdts_show(struct config_item *item, char *page) 305 + { 306 + struct nvmet_port *port = to_nvmet_port(item); 307 + 308 + return snprintf(page, PAGE_SIZE, "%d\n", port->mdts); 309 + } 310 + 311 + static ssize_t nvmet_param_mdts_store(struct config_item *item, 312 + const char *page, size_t count) 313 + { 314 + struct nvmet_port *port = to_nvmet_port(item); 315 + int ret; 316 + 317 + if (nvmet_is_port_enabled(port, __func__)) 318 + return -EACCES; 319 + ret = kstrtoint(page, 0, &port->mdts); 320 + if (ret) { 321 + pr_err("Invalid value '%s' for mdts\n", page); 322 + return -EINVAL; 323 + } 324 + return count; 325 + } 326 + 327 + CONFIGFS_ATTR(nvmet_, param_mdts); 328 + 304 329 #ifdef CONFIG_BLK_DEV_INTEGRITY 305 330 static ssize_t nvmet_param_pi_enable_show(struct config_item *item, 306 331 char *page) ··· 2020 1995 &nvmet_attr_addr_tsas, 2021 1996 &nvmet_attr_param_inline_data_size, 2022 1997 &nvmet_attr_param_max_queue_size, 1998 + &nvmet_attr_param_mdts, 2023 1999 #ifdef CONFIG_BLK_DEV_INTEGRITY 2024 2000 &nvmet_attr_param_pi_enable, 2025 2001 #endif ··· 2079 2053 INIT_LIST_HEAD(&port->referrals); 2080 2054 port->inline_data_size = -1; /* < 0 == let the transport choose */ 2081 2055 port->max_queue_size = -1; /* < 0 == let the transport choose */ 2056 + port->mdts = -1; /* < 0 == let the transport choose */ 2082 2057 2083 2058 port->disc_addr.trtype = NVMF_TRTYPE_MAX; 2084 2059 port->disc_addr.portid = cpu_to_le16(portid);
+9 -1
drivers/nvme/target/core.c
··· 370 370 NVMET_MIN_QUEUE_SIZE, 371 371 NVMET_MAX_QUEUE_SIZE); 372 372 373 + /* 374 + * If the transport didn't set the mdts properly, then clamp it to the 375 + * target limits. Also set default values in case the transport didn't 376 + * set it at all. 377 + */ 378 + if (port->mdts < 0 || port->mdts > NVMET_MAX_MDTS) 379 + port->mdts = 0; 380 + 373 381 port->enabled = true; 374 382 port->tr_ops = ops; 375 383 return 0; ··· 1751 1743 1752 1744 nvmet_stop_keep_alive_timer(ctrl); 1753 1745 1754 - flush_work(&ctrl->async_event_work); 1746 + cancel_work_sync(&ctrl->async_event_work); 1755 1747 cancel_work_sync(&ctrl->fatal_err_work); 1756 1748 1757 1749 nvmet_destroy_auth(ctrl);
+5 -4
drivers/nvme/target/fabrics-cmd-auth.c
··· 395 395 goto complete; 396 396 } 397 397 /* Final states, clear up variables */ 398 - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { 399 - nvmet_auth_sq_free(req->sq); 398 + nvmet_auth_sq_free(req->sq); 399 + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) 400 400 nvmet_ctrl_fatal_error(ctrl); 401 - } 402 401 403 402 complete: 404 403 nvmet_req_complete(req, status); ··· 573 574 status = nvmet_copy_to_sgl(req, 0, d, al); 574 575 kfree(d); 575 576 done: 576 - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { 577 + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) 578 + nvmet_auth_sq_free(req->sq); 579 + else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { 577 580 nvmet_auth_sq_free(req->sq); 578 581 nvmet_ctrl_fatal_error(ctrl); 579 582 }
+13
drivers/nvme/target/nvmet.h
··· 214 214 bool enabled; 215 215 int inline_data_size; 216 216 int max_queue_size; 217 + int mdts; 217 218 const struct nvmet_fabrics_ops *tr_ops; 218 219 bool pi_enable; 219 220 }; ··· 674 673 #define NVMET_MAX_QUEUE_SIZE 1024 675 674 #define NVMET_NR_QUEUES 128 676 675 #define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1) 676 + #define NVMET_MAX_MDTS 255 677 677 678 678 /* 679 679 * Nice round number that makes a list of nsids fit into a page. ··· 761 759 static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl) 762 760 { 763 761 return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI; 762 + } 763 + 764 + /* Limit MDTS according to port config or transport capability */ 765 + static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req) 766 + { 767 + struct nvmet_ctrl *ctrl = req->sq->ctrl; 768 + u8 mdts = req->port->mdts; 769 + 770 + if (!ctrl->ops->get_mdts) 771 + return mdts; 772 + return min_not_zero(ctrl->ops->get_mdts(ctrl), mdts); 764 773 } 765 774 766 775 #ifdef CONFIG_NVME_TARGET_PASSTHRU
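The new nvmet_ctrl_mdts() helper above resolves the configfs param_mdts against whatever the transport reports through ->get_mdts(), with 0 meaning "no limit" on either side. A standalone sketch of that resolution with hypothetical values, mirroring what min_not_zero() does:

#include <stdio.h>

/* Illustration only: resolve a port-configured mdts against the transport's
 * value; 0 means "no limit", and if the transport has no ->get_mdts() the
 * port value is used as-is. */
static unsigned int resolve_mdts(unsigned int port_mdts, unsigned int transport_mdts)
{
	if (!transport_mdts)			/* transport imposes no limit */
		return port_mdts;
	if (!port_mdts)				/* port imposes no limit */
		return transport_mdts;
	return port_mdts < transport_mdts ? port_mdts : transport_mdts;
}

int main(void)
{
	printf("%u\n", resolve_mdts(0, 9));	/* 9: only the transport limit applies */
	printf("%u\n", resolve_mdts(6, 9));	/* 6: the smaller, port-configured limit wins */
	printf("%u\n", resolve_mdts(6, 0));	/* 6: transport reports no limit */
	return 0;
}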
+64 -53
drivers/nvme/target/tcp.c
··· 349 349 cmd->req.sg = NULL; 350 350 } 351 351 352 - static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue); 353 - 354 - static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) 352 + static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) 355 353 { 356 354 struct bio_vec *iov = cmd->iov; 357 355 struct scatterlist *sg; ··· 362 364 offset = cmd->rbytes_done; 363 365 cmd->sg_idx = offset / PAGE_SIZE; 364 366 sg_offset = offset % PAGE_SIZE; 365 - if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) { 366 - nvmet_tcp_fatal_error(cmd->queue); 367 - return; 368 - } 367 + if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) 368 + return -EPROTO; 369 + 369 370 sg = &cmd->req.sg[cmd->sg_idx]; 370 371 sg_remaining = cmd->req.sg_cnt - cmd->sg_idx; 371 372 372 373 while (length) { 373 - if (!sg_remaining) { 374 - nvmet_tcp_fatal_error(cmd->queue); 375 - return; 376 - } 377 - if (!sg->length || sg->length <= sg_offset) { 378 - nvmet_tcp_fatal_error(cmd->queue); 379 - return; 380 - } 374 + if (!sg_remaining) 375 + return -EPROTO; 376 + 377 + if (!sg->length || sg->length <= sg_offset) 378 + return -EPROTO; 379 + 381 380 u32 iov_len = min_t(u32, length, sg->length - sg_offset); 382 381 383 382 bvec_set_page(iov, sg_page(sg), iov_len, ··· 389 394 390 395 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, 391 396 nr_pages, cmd->pdu_len); 392 - } 393 - 394 - static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) 395 - { 396 - queue->rcv_state = NVMET_TCP_RECV_ERR; 397 - if (queue->nvme_sq.ctrl) 398 - nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 399 - else 400 - kernel_sock_shutdown(queue->sock, SHUT_RDWR); 397 + return 0; 401 398 } 402 399 403 400 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) 404 401 { 402 + /* 403 + * Keep rcv_state at RECV_ERR even for the internal -ESHUTDOWN path. 404 + * nvmet_tcp_handle_icreq() can return -ESHUTDOWN after the ICReq has 405 + * already been consumed and queue teardown has started. 406 + * 407 + * If nvmet_tcp_data_ready() or nvmet_tcp_write_space() queues 408 + * nvmet_tcp_io_work() again before nvmet_tcp_release_queue_work() 409 + * cancels it, the queue must not keep that old receive state. 410 + * Otherwise the next nvmet_tcp_io_work() run can reach 411 + * nvmet_tcp_done_recv_pdu() and try to handle the same ICReq again. 412 + * 413 + * That is why queue->rcv_state needs to be updated before we return. 
414 + */ 405 415 queue->rcv_state = NVMET_TCP_RECV_ERR; 406 - if (status == -EPIPE || status == -ECONNRESET) 416 + if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl) 407 417 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 408 418 else 409 - nvmet_tcp_fatal_error(queue); 419 + nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 410 420 } 411 421 412 422 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) ··· 887 887 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { 888 888 pr_err("bad nvme-tcp pdu length (%d)\n", 889 889 le32_to_cpu(icreq->hdr.plen)); 890 - nvmet_tcp_fatal_error(queue); 891 890 return -EPROTO; 892 891 } 893 892 ··· 921 922 iov.iov_len = sizeof(*icresp); 922 923 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 923 924 if (ret < 0) { 925 + spin_lock_bh(&queue->state_lock); 926 + if (queue->state == NVMET_TCP_Q_DISCONNECTING) { 927 + spin_unlock_bh(&queue->state_lock); 928 + return -ESHUTDOWN; 929 + } 924 930 queue->state = NVMET_TCP_Q_FAILED; 931 + spin_unlock_bh(&queue->state_lock); 925 932 return ret; /* queue removal will cleanup */ 926 933 } 927 934 935 + spin_lock_bh(&queue->state_lock); 936 + if (queue->state == NVMET_TCP_Q_DISCONNECTING) { 937 + spin_unlock_bh(&queue->state_lock); 938 + /* Tell nvmet_tcp_socket_error() teardown is in progress. */ 939 + return -ESHUTDOWN; 940 + } 928 941 queue->state = NVMET_TCP_Q_LIVE; 942 + spin_unlock_bh(&queue->state_lock); 929 943 nvmet_prepare_receive_pdu(queue); 930 944 return 0; 931 945 } 932 946 933 - static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, 947 + static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, 934 948 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) 935 949 { 936 950 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); ··· 959 947 if (!nvme_is_write(cmd->req.cmd) || !data_len || 960 948 data_len > cmd->req.port->inline_data_size) { 961 949 nvmet_prepare_receive_pdu(queue); 962 - return; 950 + return 0; 963 951 } 964 952 965 953 ret = nvmet_tcp_map_data(cmd); 966 954 if (unlikely(ret)) { 967 955 pr_err("queue %d: failed to map data\n", queue->idx); 968 - nvmet_tcp_fatal_error(queue); 969 - return; 956 + return -EPROTO; 970 957 } 971 958 972 959 queue->rcv_state = NVMET_TCP_RECV_DATA; 973 - nvmet_tcp_build_pdu_iovec(cmd); 974 960 cmd->flags |= NVMET_TCP_F_INIT_FAILED; 961 + ret = nvmet_tcp_build_pdu_iovec(cmd); 962 + if (unlikely(ret)) 963 + pr_err("queue %d: failed to build PDU iovec\n", queue->idx); 964 + 965 + return ret; 975 966 } 976 967 977 968 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) ··· 1026 1011 goto err_proto; 1027 1012 } 1028 1013 cmd->pdu_recv = 0; 1029 - nvmet_tcp_build_pdu_iovec(cmd); 1014 + if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) { 1015 + pr_err("queue %d: failed to build PDU iovec\n", queue->idx); 1016 + goto err_proto; 1017 + } 1030 1018 queue->cmd = cmd; 1031 1019 queue->rcv_state = NVMET_TCP_RECV_DATA; 1032 1020 ··· 1037 1019 1038 1020 err_proto: 1039 1021 /* FIXME: use proper transport errors */ 1040 - nvmet_tcp_fatal_error(queue); 1041 1022 return -EPROTO; 1042 1023 } 1043 1024 ··· 1051 1034 if (hdr->type != nvme_tcp_icreq) { 1052 1035 pr_err("unexpected pdu type (%d) before icreq\n", 1053 1036 hdr->type); 1054 - nvmet_tcp_fatal_error(queue); 1055 1037 return -EPROTO; 1056 1038 } 1057 1039 return nvmet_tcp_handle_icreq(queue); ··· 1059 1043 if (unlikely(hdr->type == nvme_tcp_icreq)) { 1060 1044 pr_err("queue %d: received icreq pdu in state %d\n", 1061 
1045 queue->idx, queue->state); 1062 - nvmet_tcp_fatal_error(queue); 1063 1046 return -EPROTO; 1064 1047 } 1065 1048 ··· 1075 1060 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", 1076 1061 queue->idx, queue->nr_cmds, queue->send_list_len, 1077 1062 nvme_cmd->common.opcode); 1078 - nvmet_tcp_fatal_error(queue); 1079 1063 return -ENOMEM; 1080 1064 } 1081 1065 ··· 1088 1074 le32_to_cpu(req->cmd->common.dptr.sgl.length), 1089 1075 le16_to_cpu(req->cqe->status)); 1090 1076 1091 - nvmet_tcp_handle_req_failure(queue, queue->cmd, req); 1092 - return 0; 1077 + return nvmet_tcp_handle_req_failure(queue, queue->cmd, req); 1093 1078 } 1094 1079 1095 1080 ret = nvmet_tcp_map_data(queue->cmd); 1096 1081 if (unlikely(ret)) { 1097 1082 pr_err("queue %d: failed to map data\n", queue->idx); 1098 1083 if (nvmet_tcp_has_inline_data(queue->cmd)) 1099 - nvmet_tcp_fatal_error(queue); 1100 - else 1101 - nvmet_req_complete(req, ret); 1084 + return -EPROTO; 1085 + 1086 + nvmet_req_complete(req, ret); 1102 1087 ret = -EAGAIN; 1103 1088 goto out; 1104 1089 } ··· 1105 1092 if (nvmet_tcp_need_data_in(queue->cmd)) { 1106 1093 if (nvmet_tcp_has_inline_data(queue->cmd)) { 1107 1094 queue->rcv_state = NVMET_TCP_RECV_DATA; 1108 - nvmet_tcp_build_pdu_iovec(queue->cmd); 1109 - return 0; 1095 + ret = nvmet_tcp_build_pdu_iovec(queue->cmd); 1096 + if (unlikely(ret)) 1097 + pr_err("queue %d: failed to build PDU iovec\n", 1098 + queue->idx); 1099 + return ret; 1110 1100 } 1111 1101 /* send back R2T */ 1112 1102 nvmet_tcp_queue_response(&queue->cmd->req); ··· 1220 1204 1221 1205 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { 1222 1206 pr_err("unexpected pdu type %d\n", hdr->type); 1223 - nvmet_tcp_fatal_error(queue); 1224 1207 return -EIO; 1225 1208 } 1226 1209 ··· 1233 1218 } 1234 1219 1235 1220 if (queue->hdr_digest && 1236 - nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { 1237 - nvmet_tcp_fatal_error(queue); /* fatal */ 1221 + nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) 1238 1222 return -EPROTO; 1239 - } 1240 1223 1241 1224 if (queue->data_digest && 1242 - nvmet_tcp_check_ddgst(queue, &queue->pdu)) { 1243 - nvmet_tcp_fatal_error(queue); /* fatal */ 1225 + nvmet_tcp_check_ddgst(queue, &queue->pdu)) 1244 1226 return -EPROTO; 1245 - } 1246 1227 1247 1228 return nvmet_tcp_done_recv_pdu(queue); 1248 1229 } ··· 1321 1310 queue->idx, cmd->req.cmd->common.command_id, 1322 1311 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), 1323 1312 le32_to_cpu(cmd->exp_ddgst)); 1324 - nvmet_req_uninit(&cmd->req); 1313 + if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED)) 1314 + nvmet_req_uninit(&cmd->req); 1325 1315 nvmet_tcp_free_cmd_buffers(cmd); 1326 - nvmet_tcp_fatal_error(queue); 1327 1316 ret = -EPROTO; 1328 1317 goto out; 1329 1318 }
+1 -5
drivers/nvme/target/zns.c
··· 69 69 void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req) 70 70 { 71 71 u8 zasl = req->sq->ctrl->subsys->zasl; 72 - struct nvmet_ctrl *ctrl = req->sq->ctrl; 73 72 struct nvme_id_ctrl_zns *id; 74 73 u16 status; 75 74 ··· 78 79 goto out; 79 80 } 80 81 81 - if (ctrl->ops->get_mdts) 82 - id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl); 83 - else 84 - id->zasl = zasl; 82 + id->zasl = min_not_zero(nvmet_ctrl_mdts(req), zasl); 85 83 86 84 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); 87 85
+2 -9
drivers/scsi/sr.c
··· 395 395 396 396 switch (req_op(rq)) { 397 397 case REQ_OP_WRITE: 398 - if (!cd->writeable) 398 + if (get_disk_ro(cd->disk)) 399 399 goto out; 400 400 SCpnt->cmnd[0] = WRITE_10; 401 401 cd->cdi.media_written = 1; ··· 681 681 error = -ENOMEM; 682 682 if (get_capabilities(cd)) 683 683 goto fail_minor; 684 + cdrom_probe_write_features(&cd->cdi); 684 685 sr_vendor_init(cd); 685 686 686 687 set_capacity(disk, cd->capacity); ··· 899 898 cd->cdi.mask |= CDC_SELECT_DISC; 900 899 /*else I don't think it can close its tray 901 900 cd->cdi.mask |= CDC_CLOSE_TRAY; */ 902 - 903 - /* 904 - * if DVD-RAM, MRW-W or CD-RW, we are randomly writable 905 - */ 906 - if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) != 907 - (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) { 908 - cd->writeable = 1; 909 - } 910 901 911 902 kfree(buffer); 912 903 return 0;
-1
drivers/scsi/sr.h
··· 35 35 struct scsi_device *device; 36 36 unsigned int vendor; /* vendor code, see sr_vendor.c */ 37 37 unsigned long ms_offset; /* for reading multisession-CD's */ 38 - unsigned writeable : 1; 39 38 unsigned use:1; /* is this device still supportable */ 40 39 unsigned xa_flag:1; /* CD has XA sectors ? */ 41 40 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
+1
include/linux/cdrom.h
··· 108 108 extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, 109 109 unsigned int clearing); 110 110 111 + extern void cdrom_probe_write_features(struct cdrom_device_info *cdi); 111 112 extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); 112 113 extern void unregister_cdrom(struct cdrom_device_info *cdi); 113 114
+3 -3
include/linux/nvme-auth.h
··· 49 49 int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid); 50 50 int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, 51 51 u8 *host_key, size_t host_key_len); 52 - int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, 53 - const u8 *ctrl_key, size_t ctrl_key_len, 54 - u8 *sess_key, size_t sess_key_len); 52 + int nvme_auth_gen_session_key(struct crypto_kpp *dh_tfm, 53 + const u8 *public_key, size_t public_key_len, 54 + u8 *sess_key, size_t sess_key_len, u8 hash_id); 55 55 int nvme_auth_generate_psk(u8 hmac_id, const u8 *skey, size_t skey_len, 56 56 const u8 *c1, const u8 *c2, size_t hash_len, 57 57 u8 **ret_psk, size_t *ret_len);