Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
"A small collection of fixes for the current series. This contains:

- Two fixes for xen-blkfront, from Bob Liu.

- A bug fix for NVMe, releasing only the specific resources we
requested.

- Fix for a debugfs flags entry for nbd, from Josef.

- Plug fix from Omar, fixing up a case of code being switched between
two functions.

- A missing bio_put() for the new discard callers of
submit_bio_wait(), fixing a regression causing a leak of the bio.
From Shaun.

- Improve dirty limit calculation precision in the writeback code,
fixing a case where setting a limit lower than 1% of memory would
end up being zero. From Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
NVMe: Only release requested regions
xen-blkfront: fix resume issues after a migration
xen-blkfront: don't call talk_to_blkback when already connected to blkback
nbd: pass the nbd pointer for flags debugfs
block: missing bio_put following submit_bio_wait
blk-mq: really fix plug list flushing for nomerge queues
writeback: use higher precision calculation in domain_dirty_limits()

+59 -37
+9 -3
block/blk-lib.c
··· 113 113 ret = submit_bio_wait(type, bio); 114 114 if (ret == -EOPNOTSUPP) 115 115 ret = 0; 116 + bio_put(bio); 116 117 } 117 118 blk_finish_plug(&plug); 118 119 ··· 166 165 } 167 166 } 168 167 169 - if (bio) 168 + if (bio) { 170 169 ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio); 170 + bio_put(bio); 171 + } 171 172 return ret != -EOPNOTSUPP ? ret : 0; 172 173 } 173 174 EXPORT_SYMBOL(blkdev_issue_write_same); ··· 209 206 } 210 207 } 211 208 212 - if (bio) 213 - return submit_bio_wait(WRITE, bio); 209 + if (bio) { 210 + ret = submit_bio_wait(WRITE, bio); 211 + bio_put(bio); 212 + return ret; 213 + } 214 214 return 0; 215 215 } 216 216
+8 -9
block/blk-mq.c
··· 1262 1262 1263 1263 blk_queue_split(q, &bio, q->bio_split); 1264 1264 1265 - if (!is_flush_fua && !blk_queue_nomerges(q)) { 1266 - if (blk_attempt_plug_merge(q, bio, &request_count, 1267 - &same_queue_rq)) 1268 - return BLK_QC_T_NONE; 1269 - } else 1270 - request_count = blk_plug_queued_count(q); 1265 + if (!is_flush_fua && !blk_queue_nomerges(q) && 1266 + blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) 1267 + return BLK_QC_T_NONE; 1271 1268 1272 1269 rq = blk_mq_map_request(q, bio, &data); 1273 1270 if (unlikely(!rq)) ··· 1355 1358 1356 1359 blk_queue_split(q, &bio, q->bio_split); 1357 1360 1358 - if (!is_flush_fua && !blk_queue_nomerges(q) && 1359 - blk_attempt_plug_merge(q, bio, &request_count, NULL)) 1360 - return BLK_QC_T_NONE; 1361 + if (!is_flush_fua && !blk_queue_nomerges(q)) { 1362 + if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) 1363 + return BLK_QC_T_NONE; 1364 + } else 1365 + request_count = blk_plug_queued_count(q); 1361 1366 1362 1367 rq = blk_mq_map_request(q, bio, &data); 1363 1368 if (unlikely(!rq))
+1 -1
drivers/block/nbd.c
··· 941 941 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); 942 942 debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); 943 943 debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); 944 - debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops); 944 + debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); 945 945 946 946 return 0; 947 947 }
+22 -13
drivers/block/xen-blkfront.c
··· 874 874 const struct blk_mq_queue_data *qd) 875 875 { 876 876 unsigned long flags; 877 - struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data; 877 + int qid = hctx->queue_num; 878 + struct blkfront_info *info = hctx->queue->queuedata; 879 + struct blkfront_ring_info *rinfo = NULL; 878 880 881 + BUG_ON(info->nr_rings <= qid); 882 + rinfo = &info->rinfo[qid]; 879 883 blk_mq_start_request(qd->rq); 880 884 spin_lock_irqsave(&rinfo->ring_lock, flags); 881 885 if (RING_FULL(&rinfo->ring)) ··· 905 901 return BLK_MQ_RQ_QUEUE_BUSY; 906 902 } 907 903 908 - static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 909 - unsigned int index) 910 - { 911 - struct blkfront_info *info = (struct blkfront_info *)data; 912 - 913 - BUG_ON(info->nr_rings <= index); 914 - hctx->driver_data = &info->rinfo[index]; 915 - return 0; 916 - } 917 - 918 904 static struct blk_mq_ops blkfront_mq_ops = { 919 905 .queue_rq = blkif_queue_rq, 920 906 .map_queue = blk_mq_map_queue, 921 - .init_hctx = blk_mq_init_hctx, 922 907 }; 923 908 924 909 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, ··· 943 950 return PTR_ERR(rq); 944 951 } 945 952 953 + rq->queuedata = info; 946 954 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); 947 955 948 956 if (info->feature_discard) { ··· 2143 2149 return err; 2144 2150 2145 2151 err = talk_to_blkback(dev, info); 2152 + if (!err) 2153 + blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); 2146 2154 2147 2155 /* 2148 2156 * We have to wait for the backend to switch to ··· 2481 2485 break; 2482 2486 2483 2487 case XenbusStateConnected: 2484 - if (dev->state != XenbusStateInitialised) { 2488 + /* 2489 + * talk_to_blkback sets state to XenbusStateInitialised 2490 + * and blkfront_connect sets it to XenbusStateConnected 2491 + * (if connection went OK). 2492 + * 2493 + * If the backend (or toolstack) decides to poke at backend 2494 + * state (and re-trigger the watch by setting the state repeatedly 2495 + * to XenbusStateConnected (4)) we need to deal with this. 2496 + * This is allowed as this is used to communicate to the guest 2497 + * that the size of disk has changed! 2498 + */ 2499 + if ((dev->state != XenbusStateInitialised) && 2500 + (dev->state != XenbusStateConnected)) { 2485 2501 if (talk_to_blkback(dev, info)) 2486 2502 break; 2487 2503 } 2504 + 2488 2505 blkfront_connect(info); 2489 2506 break; 2490 2507
+7 -2
drivers/nvme/host/pci.c
··· 1679 1679 1680 1680 static void nvme_dev_unmap(struct nvme_dev *dev) 1681 1681 { 1682 + struct pci_dev *pdev = to_pci_dev(dev->dev); 1683 + int bars; 1684 + 1682 1685 if (dev->bar) 1683 1686 iounmap(dev->bar); 1684 - pci_release_regions(to_pci_dev(dev->dev)); 1687 + 1688 + bars = pci_select_bars(pdev, IORESOURCE_MEM); 1689 + pci_release_selected_regions(pdev, bars); 1685 1690 } 1686 1691 1687 1692 static void nvme_pci_disable(struct nvme_dev *dev) ··· 1929 1924 1930 1925 return 0; 1931 1926 release: 1932 - pci_release_regions(pdev); 1927 + pci_release_selected_regions(pdev, bars); 1933 1928 return -ENODEV; 1934 1929 } 1935 1930
+12 -9
mm/page-writeback.c
··· 373 373 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); 374 374 unsigned long bytes = vm_dirty_bytes; 375 375 unsigned long bg_bytes = dirty_background_bytes; 376 - unsigned long ratio = vm_dirty_ratio; 377 - unsigned long bg_ratio = dirty_background_ratio; 376 + /* convert ratios to per-PAGE_SIZE for higher precision */ 377 + unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100; 378 + unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100; 378 379 unsigned long thresh; 379 380 unsigned long bg_thresh; 380 381 struct task_struct *tsk; ··· 387 386 /* 388 387 * The byte settings can't be applied directly to memcg 389 388 * domains. Convert them to ratios by scaling against 390 - * globally available memory. 389 + * globally available memory. As the ratios are in 390 + * per-PAGE_SIZE, they can be obtained by dividing bytes by 391 + * number of pages. 391 392 */ 392 393 if (bytes) 393 - ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / 394 - global_avail, 100UL); 394 + ratio = min(DIV_ROUND_UP(bytes, global_avail), 395 + PAGE_SIZE); 395 396 if (bg_bytes) 396 - bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 / 397 - global_avail, 100UL); 397 + bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail), 398 + PAGE_SIZE); 398 399 bytes = bg_bytes = 0; 399 400 } 400 401 401 402 if (bytes) 402 403 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); 403 404 else 404 - thresh = (ratio * available_memory) / 100; 405 + thresh = (ratio * available_memory) / PAGE_SIZE; 405 406 406 407 if (bg_bytes) 407 408 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); 408 409 else 409 - bg_thresh = (bg_ratio * available_memory) / 100; 410 + bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; 410 411 411 412 if (bg_thresh >= thresh) 412 413 bg_thresh = thresh / 2;