Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'block-5.9-2020-08-14' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"A few fixes on the block side of things:

- Discard granularity fix (Coly)

- rnbd cleanups (Guoqing)

- md error handling fix (Dan)

- md sysfs fix (Junxiao)

- Fix flush request accounting, which caused an IO slowdown for some
configurations (Ming)

- Properly propagate loop flag for partition scanning (Lennart)"

* tag 'block-5.9-2020-08-14' of git://git.kernel.dk/linux-block:
block: fix double account of flush request's driver tag
loop: unset GENHD_FL_NO_PART_SCAN on LOOP_CONFIGURE
rnbd: no need to set bi_end_io in rnbd_bio_map_kern
rnbd: remove rnbd_dev_submit_io
md-cluster: Fix potential error pointer dereference in resize_bitmaps()
block: check queue's limits.discard_granularity in __blkdev_issue_discard()
md: get sysfs entry after redundancy attr group create

+62 -66
+9 -2
block/blk-flush.c
··· 308 308 flush_rq->mq_ctx = first_rq->mq_ctx; 309 309 flush_rq->mq_hctx = first_rq->mq_hctx; 310 310 311 - if (!q->elevator) 311 + if (!q->elevator) { 312 312 flush_rq->tag = first_rq->tag; 313 - else 313 + 314 + /* 315 + * We borrow data request's driver tag, so have to mark 316 + * this flush request as INFLIGHT for avoiding double 317 + * account of this driver tag 318 + */ 319 + flush_rq->rq_flags |= RQF_MQ_INFLIGHT; 320 + } else 314 321 flush_rq->internal_tag = first_rq->internal_tag; 315 322 316 323 flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
+9
block/blk-lib.c
··· 47 47 op = REQ_OP_DISCARD; 48 48 } 49 49 50 + /* In case the discard granularity isn't set by buggy device driver */ 51 + if (WARN_ON_ONCE(!q->limits.discard_granularity)) { 52 + char dev_name[BDEVNAME_SIZE]; 53 + 54 + bdevname(bdev, dev_name); 55 + pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name); 56 + return -EOPNOTSUPP; 57 + } 58 + 50 59 bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; 51 60 if ((sector | nr_sects) & bs_mask) 52 61 return -EINVAL;
+2
drivers/block/loop.c
··· 1171 1171 if (part_shift) 1172 1172 lo->lo_flags |= LO_FLAGS_PARTSCAN; 1173 1173 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; 1174 + if (partscan) 1175 + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; 1174 1176 1175 1177 /* Grab the block_device to prevent its destruction after we 1176 1178 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
+3 -34
drivers/block/rnbd/rnbd-srv-dev.c
··· 45 45 kfree(dev); 46 46 } 47 47 48 - static void rnbd_dev_bi_end_io(struct bio *bio) 48 + void rnbd_dev_bi_end_io(struct bio *bio) 49 49 { 50 50 struct rnbd_dev_blk_io *io = bio->bi_private; 51 51 ··· 63 63 * Map the kernel address into a bio suitable for io to a block 64 64 * device. Returns an error pointer in case of error. 65 65 */ 66 - static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, 67 - unsigned int len, gfp_t gfp_mask) 66 + struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, 67 + unsigned int len, gfp_t gfp_mask) 68 68 { 69 69 unsigned long kaddr = (unsigned long)data; 70 70 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; ··· 99 99 offset = 0; 100 100 } 101 101 102 - bio->bi_end_io = bio_put; 103 102 return bio; 104 - } 105 - 106 - int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data, 107 - size_t len, u32 bi_size, enum rnbd_io_flags flags, 108 - short prio, void *priv) 109 - { 110 - struct rnbd_dev_blk_io *io; 111 - struct bio *bio; 112 - 113 - /* Generate bio with pages pointing to the rdma buffer */ 114 - bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL); 115 - if (IS_ERR(bio)) 116 - return PTR_ERR(bio); 117 - 118 - io = container_of(bio, struct rnbd_dev_blk_io, bio); 119 - 120 - io->dev = dev; 121 - io->priv = priv; 122 - 123 - bio->bi_end_io = rnbd_dev_bi_end_io; 124 - bio->bi_private = io; 125 - bio->bi_opf = rnbd_to_bio_flags(flags); 126 - bio->bi_iter.bi_sector = sector; 127 - bio->bi_iter.bi_size = bi_size; 128 - bio_set_prio(bio, prio); 129 - bio_set_dev(bio, dev->bdev); 130 - 131 - submit_bio(bio); 132 - 133 - return 0; 134 103 }
+5 -14
drivers/block/rnbd/rnbd-srv-dev.h
··· 41 41 42 42 void rnbd_endio(void *priv, int error); 43 43 44 + void rnbd_dev_bi_end_io(struct bio *bio); 45 + 46 + struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs, 47 + unsigned int len, gfp_t gfp_mask); 48 + 44 49 static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev) 45 50 { 46 51 return queue_max_segments(bdev_get_queue(dev->bdev)); ··· 79 74 { 80 75 return bdev_get_queue(dev->bdev)->limits.discard_alignment; 81 76 } 82 - 83 - /** 84 - * rnbd_dev_submit_io() - Submit an I/O to the disk 85 - * @dev: device to that the I/O is submitted 86 - * @sector: address to read/write data to 87 - * @data: I/O data to write or buffer to read I/O date into 88 - * @len: length of @data 89 - * @bi_size: Amount of data that will be read/written 90 - * @prio: IO priority 91 - * @priv: private data passed to @io_fn 92 - */ 93 - int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data, 94 - size_t len, u32 bi_size, enum rnbd_io_flags flags, 95 - short prio, void *priv); 96 77 97 78 #endif /* RNBD_SRV_DEV_H */
+23 -9
drivers/block/rnbd/rnbd-srv.c
··· 124 124 struct rnbd_srv_sess_dev *sess_dev; 125 125 u32 dev_id; 126 126 int err; 127 + struct rnbd_dev_blk_io *io; 128 + struct bio *bio; 129 + short prio; 127 130 128 131 priv = kmalloc(sizeof(*priv), GFP_KERNEL); 129 132 if (!priv) ··· 145 142 priv->sess_dev = sess_dev; 146 143 priv->id = id; 147 144 148 - err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector), 149 - data, datalen, le32_to_cpu(msg->bi_size), 150 - le32_to_cpu(msg->rw), 151 - srv_sess->ver < RNBD_PROTO_VER_MAJOR || 152 - usrlen < sizeof(*msg) ? 153 - 0 : le16_to_cpu(msg->prio), priv); 154 - if (unlikely(err)) { 155 - rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n", 156 - err); 145 + /* Generate bio with pages pointing to the rdma buffer */ 146 + bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL); 147 + if (IS_ERR(bio)) { 148 + rnbd_srv_err(sess_dev, "Failed to generate bio, err: %ld\n", PTR_ERR(bio)); 157 149 goto sess_dev_put; 158 150 } 151 + 152 + io = container_of(bio, struct rnbd_dev_blk_io, bio); 153 + io->dev = sess_dev->rnbd_dev; 154 + io->priv = priv; 155 + 156 + bio->bi_end_io = rnbd_dev_bi_end_io; 157 + bio->bi_private = io; 158 + bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw)); 159 + bio->bi_iter.bi_sector = le64_to_cpu(msg->sector); 160 + bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size); 161 + prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR || 162 + usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio); 163 + bio_set_prio(bio, prio); 164 + bio_set_dev(bio, sess_dev->rnbd_dev->bdev); 165 + 166 + submit_bio(bio); 159 167 160 168 return 0; 161 169
+1
drivers/md/md-cluster.c
··· 1139 1139 bitmap = get_bitmap_from_slot(mddev, i); 1140 1140 if (IS_ERR(bitmap)) { 1141 1141 pr_err("can't get bitmap from slot %d\n", i); 1142 + bitmap = NULL; 1142 1143 goto out; 1143 1144 } 1144 1145 counts = &bitmap->counts;
+10 -7
drivers/md/md.c
··· 850 850 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 851 851 if (mddev->sysfs_action) 852 852 sysfs_put(mddev->sysfs_action); 853 + if (mddev->sysfs_completed) 854 + sysfs_put(mddev->sysfs_completed); 855 + if (mddev->sysfs_degraded) 856 + sysfs_put(mddev->sysfs_degraded); 853 857 mddev->sysfs_action = NULL; 858 + mddev->sysfs_completed = NULL; 859 + mddev->sysfs_degraded = NULL; 854 860 } 855 861 } 856 862 mddev->sysfs_active = 0; ··· 4074 4068 pr_warn("md: cannot register extra attributes for %s\n", 4075 4069 mdname(mddev)); 4076 4070 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4071 + mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 4072 + mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 4077 4073 } 4078 4074 if (oldpers->sync_request != NULL && 4079 4075 pers->sync_request == NULL) { ··· 5590 5582 5591 5583 if (mddev->sysfs_state) 5592 5584 sysfs_put(mddev->sysfs_state); 5593 - if (mddev->sysfs_completed) 5594 - sysfs_put(mddev->sysfs_completed); 5595 - if (mddev->sysfs_degraded) 5596 - sysfs_put(mddev->sysfs_degraded); 5597 5585 if (mddev->sysfs_level) 5598 5586 sysfs_put(mddev->sysfs_level); 5599 - 5600 5587 5601 5588 if (mddev->gendisk) 5602 5589 del_gendisk(mddev->gendisk); ··· 5760 5757 if (!error && mddev->kobj.sd) { 5761 5758 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5762 5759 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5763 - mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 5764 - mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 5765 5760 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); 5766 5761 } 5767 5762 mddev_put(mddev); ··· 6037 6036 pr_warn("md: cannot register extra attributes for %s\n", 6038 6037 mdname(mddev)); 6039 6038 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 6039 + mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 6040 + mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 6040 6041 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 6041 6042 mddev->ro = 0;