Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'block-5.10-2020-12-12' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"This should be it for 5.10.

Mike and Song looked into the warning case, and thankfully it appears
the fix was pretty trivial - we can just change the md device chunk
type to unsigned int to get rid of it. They cannot currently be < 0,
and nobody is checking for that either.

We're reverting the discard changes as the corruption reports came in
very late, and there's just no time to attempt to deal with it at this
point. Reverting the changes in question is the right call for 5.10"

* tag 'block-5.10-2020-12-12' of git://git.kernel.dk/linux-block:
md: change mddev 'chunk_sectors' from int to unsigned
Revert "md: add md_submit_discard_bio() for submitting discard bio"
Revert "md/raid10: extend r10bio devs to raid disks"
Revert "md/raid10: pull codes that wait for blocked dev into one function"
Revert "md/raid10: improve raid10 discard request"
Revert "md/raid10: improve discard request for far layout"
Revert "dm raid: remove unnecessary discard limits for raid10"

+82 -393
+11
drivers/md/dm-raid.c
···
 
     blk_limits_io_min(limits, chunk_size_bytes);
     blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
+
+    /*
+     * RAID10 personality requires bio splitting,
+     * RAID0/1/4/5/6 don't and process large discard bios properly.
+     */
+    if (rs_is_raid10(rs)) {
+        limits->discard_granularity = max(chunk_size_bytes,
+                          limits->discard_granularity);
+        limits->max_discard_sectors = min_not_zero(rs->md.chunk_sectors,
+                               limits->max_discard_sectors);
+    }
 }
 
 static void raid_postsuspend(struct dm_target *ti)
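The hunk above leans on min_not_zero() semantics: pick the smaller of two values, but never let an unset (zero) limit win. A userspace restatement of that behavior (the helper mirrors the kernel macro's documented semantics; the surrounding values are invented):

/* min_not_zero-sketch.c: illustrative only, not the kernel macro itself. */
#include <stdio.h>

static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
    if (a == 0)
        return b;
    if (b == 0)
        return a;
    return a < b ? a : b;
}

int main(void)
{
    unsigned int chunk_sectors = 1024; /* one RAID10 chunk, in sectors */

    /* As in the hunk: cap max_discard_sectors at one chunk, but a
     * still-unset (zero) limit must not clamp discards to nothing. */
    printf("%u\n", min_not_zero(chunk_sectors, 0));   /* -> 1024 */
    printf("%u\n", min_not_zero(chunk_sectors, 256)); /* -> 256  */
    return 0;
}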
-20
drivers/md/md.c
···
 
 EXPORT_SYMBOL(md_write_end);
 
-/* This is used by raid0 and raid10 */
-void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
-        struct bio *bio, sector_t start, sector_t size)
-{
-    struct bio *discard_bio = NULL;
-
-    if (__blkdev_issue_discard(rdev->bdev, start, size,
-            GFP_NOIO, 0, &discard_bio) || !discard_bio)
-        return;
-
-    bio_chain(discard_bio, bio);
-    bio_clone_blkg_association(discard_bio, bio);
-    if (mddev->gendisk)
-        trace_block_bio_remap(bdev_get_queue(rdev->bdev),
-                discard_bio, disk_devt(mddev->gendisk),
-                bio->bi_iter.bi_sector);
-    submit_bio_noacct(discard_bio);
-}
-EXPORT_SYMBOL(md_submit_discard_bio);
-
 /* md_allow_write(mddev)
  * Calling this ensures that the array is marked 'active' so that writes
  * may proceed without blocking. It is important to call this before
+2 -4
drivers/md/md.h
···
     int             external;   /* metadata is
                                  * managed externally */
     char            metadata_type[17]; /* externally set*/
-    int             chunk_sectors;
+    unsigned int    chunk_sectors;
     time64_t        ctime, utime;
     int             level, layout;
     char            clevel[16];
···
      */
     sector_t        reshape_position;
     int             delta_disks, new_level, new_layout;
-    int             new_chunk_sectors;
+    unsigned int    new_chunk_sectors;
     int             reshape_backwards;
 
     struct md_thread    *thread;    /* management thread */
···
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
 extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_finish_reshape(struct mddev *mddev);
-extern void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
-        struct bio *bio, sector_t start, sector_t size);
 
 extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
+12 -2
drivers/md/raid0.c
···
 
     for (disk = 0; disk < zone->nb_dev; disk++) {
         sector_t dev_start, dev_end;
+        struct bio *discard_bio = NULL;
         struct md_rdev *rdev;
 
         if (disk < start_disk_index)
···
 
         rdev = conf->devlist[(zone - conf->strip_zone) *
             conf->strip_zone[0].nb_dev + disk];
-        md_submit_discard_bio(mddev, rdev, bio,
+        if (__blkdev_issue_discard(rdev->bdev,
             dev_start + zone->dev_start + rdev->data_offset,
-            dev_end - dev_start);
+            dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
+            !discard_bio)
+            continue;
+        bio_chain(discard_bio, bio);
+        bio_clone_blkg_association(discard_bio, bio);
+        if (mddev->gendisk)
+            trace_block_bio_remap(bdev_get_queue(rdev->bdev),
+                discard_bio, disk_devt(mddev->gendisk),
+                bio->bi_iter.bi_sector);
+        submit_bio_noacct(discard_bio);
     }
     bio_endio(bio);
 }
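The restored raid0 path issues one discard bio per member disk and chains each to the parent, so the parent's bio_endio() completes only after the submitter's reference and every child are done. A toy refcount model of that accounting (names invented; the kernel tracks this in bio->__bi_remaining via bio_chain()/bio_endio()):

/* bio-chain-sketch.c: illustrative completion accounting, not kernel code. */
#include <stdio.h>

struct fake_bio {
    int remaining; /* stands in for bio->__bi_remaining */
};

static void chain(struct fake_bio *parent)
{
    parent->remaining++; /* each chained child holds a reference */
}

static void endio(struct fake_bio *parent, const char *who)
{
    if (--parent->remaining == 0)
        printf("parent bio completes after %s\n", who);
}

int main(void)
{
    struct fake_bio parent = { .remaining = 1 }; /* submitter's reference */
    int disk, nb_dev = 3;

    for (disk = 0; disk < nb_dev; disk++)
        chain(&parent);                /* bio_chain(discard_bio, bio) */
    endio(&parent, "the submitter");   /* bio_endio(bio) after the loop */
    for (disk = 0; disk < nb_dev; disk++)
        endio(&parent, "a child discard"); /* per-device completions */
    return 0;
}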
+57 -366
drivers/md/raid10.c
···
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
     struct r10conf *conf = data;
-    int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
+    int size = offsetof(struct r10bio, devs[conf->copies]);
 
     /* allocate a r10bio with room for raid_disks entries in the
      * bios array */
···
 {
     int i;
 
-    for (i = 0; i < conf->geo.raid_disks; i++) {
+    for (i = 0; i < conf->copies; i++) {
         struct bio **bio = & r10_bio->devs[i].bio;
         if (!BIO_SPECIAL(*bio))
             bio_put(*bio);
···
     int slot;
     int repl = 0;
 
-    for (slot = 0; slot < conf->geo.raid_disks; slot++) {
+    for (slot = 0; slot < conf->copies; slot++) {
         if (r10_bio->devs[slot].bio == bio)
             break;
         if (r10_bio->devs[slot].repl_bio == bio) {
···
         }
     }
 
+    BUG_ON(slot == conf->copies);
     update_head_pos(slot, r10_bio);
 
     if (slotp)
···
     }
 }
 
-static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
-{
-    int i;
-    struct r10conf *conf = mddev->private;
-    struct md_rdev *blocked_rdev;
-
-retry_wait:
-    blocked_rdev = NULL;
-    rcu_read_lock();
-    for (i = 0; i < conf->copies; i++) {
-        struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
-        struct md_rdev *rrdev = rcu_dereference(
-            conf->mirrors[i].replacement);
-        if (rdev == rrdev)
-            rrdev = NULL;
-        if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
-            atomic_inc(&rdev->nr_pending);
-            blocked_rdev = rdev;
-            break;
-        }
-        if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
-            atomic_inc(&rrdev->nr_pending);
-            blocked_rdev = rrdev;
-            break;
-        }
-
-        if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
-            sector_t first_bad;
-            sector_t dev_sector = r10_bio->devs[i].addr;
-            int bad_sectors;
-            int is_bad;
-
-            /* Discard request doesn't care the write result
-             * so it doesn't need to wait blocked disk here.
-             */
-            if (!r10_bio->sectors)
-                continue;
-
-            is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
-                         &first_bad, &bad_sectors);
-            if (is_bad < 0) {
-                /* Mustn't write here until the bad block
-                 * is acknowledged
-                 */
-                atomic_inc(&rdev->nr_pending);
-                set_bit(BlockedBadBlocks, &rdev->flags);
-                blocked_rdev = rdev;
-                break;
-            }
-        }
-    }
-    rcu_read_unlock();
-
-    if (unlikely(blocked_rdev)) {
-        /* Have to wait for this device to get unblocked, then retry */
-        allow_barrier(conf);
-        raid10_log(conf->mddev, "%s wait rdev %d blocked",
-                __func__, blocked_rdev->raid_disk);
-        md_wait_for_blocked_rdev(blocked_rdev, mddev);
-        wait_barrier(conf);
-        goto retry_wait;
-    }
-}
-
 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                  struct r10bio *r10_bio)
 {
     struct r10conf *conf = mddev->private;
     int i;
+    struct md_rdev *blocked_rdev;
     sector_t sectors;
     int max_sectors;
 
···
 
     r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
     raid10_find_phys(conf, r10_bio);
-
-    wait_blocked_dev(mddev, r10_bio);
-
+retry_write:
+    blocked_rdev = NULL;
     rcu_read_lock();
     max_sectors = r10_bio->sectors;
 
···
             conf->mirrors[d].replacement);
         if (rdev == rrdev)
             rrdev = NULL;
+        if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+            atomic_inc(&rdev->nr_pending);
+            blocked_rdev = rdev;
+            break;
+        }
+        if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+            atomic_inc(&rrdev->nr_pending);
+            blocked_rdev = rrdev;
+            break;
+        }
         if (rdev && (test_bit(Faulty, &rdev->flags)))
             rdev = NULL;
         if (rrdev && (test_bit(Faulty, &rrdev->flags)))
···
 
             is_bad = is_badblock(rdev, dev_sector, max_sectors,
                          &first_bad, &bad_sectors);
+            if (is_bad < 0) {
+                /* Mustn't write here until the bad block
+                 * is acknowledged
+                 */
+                atomic_inc(&rdev->nr_pending);
+                set_bit(BlockedBadBlocks, &rdev->flags);
+                blocked_rdev = rdev;
+                break;
+            }
             if (is_bad && first_bad <= dev_sector) {
                 /* Cannot write here at all */
                 bad_sectors -= (dev_sector - first_bad);
···
         }
     }
     rcu_read_unlock();
+
+    if (unlikely(blocked_rdev)) {
+        /* Have to wait for this device to get unblocked, then retry */
+        int j;
+        int d;
+
+        for (j = 0; j < i; j++) {
+            if (r10_bio->devs[j].bio) {
+                d = r10_bio->devs[j].devnum;
+                rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+            }
+            if (r10_bio->devs[j].repl_bio) {
+                struct md_rdev *rdev;
+                d = r10_bio->devs[j].devnum;
+                rdev = conf->mirrors[d].replacement;
+                if (!rdev) {
+                    /* Race with remove_disk */
+                    smp_mb();
+                    rdev = conf->mirrors[d].rdev;
+                }
+                rdev_dec_pending(rdev, mddev);
+            }
+        }
+        allow_barrier(conf);
+        raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
+        md_wait_for_blocked_rdev(blocked_rdev, mddev);
+        wait_barrier(conf);
+        goto retry_write;
+    }
 
     if (max_sectors < r10_bio->sectors)
         r10_bio->sectors = max_sectors;
···
     r10_bio->mddev = mddev;
     r10_bio->sector = bio->bi_iter.bi_sector;
     r10_bio->state = 0;
-    memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->geo.raid_disks);
+    memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
 
     if (bio_data_dir(bio) == READ)
         raid10_read_request(mddev, bio, r10_bio);
     else
         raid10_write_request(mddev, bio, r10_bio);
-}
-
-static struct bio *raid10_split_bio(struct r10conf *conf,
-    struct bio *bio, sector_t sectors, bool want_first)
-{
-    struct bio *split;
-
-    split = bio_split(bio, sectors, GFP_NOIO, &conf->bio_split);
-    bio_chain(split, bio);
-    allow_barrier(conf);
-    if (want_first) {
-        submit_bio_noacct(bio);
-        bio = split;
-    } else
-        submit_bio_noacct(split);
-    wait_barrier(conf);
-
-    return bio;
-}
-
-static void raid_end_discard_bio(struct r10bio *r10bio)
-{
-    struct r10conf *conf = r10bio->mddev->private;
-    struct r10bio *first_r10bio;
-
-    while (atomic_dec_and_test(&r10bio->remaining)) {
-
-        allow_barrier(conf);
-
-        if (!test_bit(R10BIO_Discard, &r10bio->state)) {
-            first_r10bio = (struct r10bio *)r10bio->master_bio;
-            free_r10bio(r10bio);
-            r10bio = first_r10bio;
-        } else {
-            md_write_end(r10bio->mddev);
-            bio_endio(r10bio->master_bio);
-            free_r10bio(r10bio);
-            break;
-        }
-    }
-}
-
-static void raid10_end_discard_request(struct bio *bio)
-{
-    struct r10bio *r10_bio = bio->bi_private;
-    struct r10conf *conf = r10_bio->mddev->private;
-    struct md_rdev *rdev = NULL;
-    int dev;
-    int slot, repl;
-
-    /*
-     * We don't care the return value of discard bio
-     */
-    if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
-        set_bit(R10BIO_Uptodate, &r10_bio->state);
-
-    dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
-    if (repl)
-        rdev = conf->mirrors[dev].replacement;
-    if (!rdev) {
-        /* raid10_remove_disk uses smp_mb to make sure rdev is set to
-         * replacement before setting replacement to NULL. It can read
-         * rdev first without barrier protect even replacment is NULL
-         */
-        smp_rmb();
-        rdev = conf->mirrors[dev].rdev;
-    }
-
-    raid_end_discard_bio(r10_bio);
-    rdev_dec_pending(rdev, conf->mddev);
-}
-
-/* There are some limitations to handle discard bio
- * 1st, the discard size is bigger than stripe_size*2.
- * 2st, if the discard bio spans reshape progress, we use the old way to
- * handle discard bio
- */
-static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
-{
-    struct r10conf *conf = mddev->private;
-    struct geom *geo = &conf->geo;
-    struct r10bio *r10_bio, *first_r10bio;
-    int far_copies = geo->far_copies;
-    bool first_copy = true;
-
-    int disk;
-    sector_t chunk;
-    unsigned int stripe_size;
-    sector_t split_size;
-
-    sector_t bio_start, bio_end;
-    sector_t first_stripe_index, last_stripe_index;
-    sector_t start_disk_offset;
-    unsigned int start_disk_index;
-    sector_t end_disk_offset;
-    unsigned int end_disk_index;
-    unsigned int remainder;
-
-    if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-        return -EAGAIN;
-
-    wait_barrier(conf);
-
-    /* Check reshape again to avoid reshape happens after checking
-     * MD_RECOVERY_RESHAPE and before wait_barrier
-     */
-    if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-        goto out;
-
-    stripe_size = geo->raid_disks << geo->chunk_shift;
-    bio_start = bio->bi_iter.bi_sector;
-    bio_end = bio_end_sector(bio);
-
-    /* Maybe one discard bio is smaller than strip size or across one stripe
-     * and discard region is larger than one stripe size. For far offset layout,
-     * if the discard region is not aligned with stripe size, there is hole
-     * when we submit discard bio to member disk. For simplicity, we only
-     * handle discard bio which discard region is bigger than stripe_size*2
-     */
-    if (bio_sectors(bio) < stripe_size*2)
-        goto out;
-
-    /* For far and far offset layout, if bio is not aligned with stripe size,
-     * it splits the part that is not aligned with strip size.
-     */
-    div_u64_rem(bio_start, stripe_size, &remainder);
-    if ((far_copies > 1) && remainder) {
-        split_size = stripe_size - remainder;
-        bio = raid10_split_bio(conf, bio, split_size, false);
-    }
-    div_u64_rem(bio_end, stripe_size, &remainder);
-    if ((far_copies > 1) && remainder) {
-        split_size = bio_sectors(bio) - remainder;
-        bio = raid10_split_bio(conf, bio, split_size, true);
-    }
-
-    bio_start = bio->bi_iter.bi_sector;
-    bio_end = bio_end_sector(bio);
-
-    /* raid10 uses chunk as the unit to store data. It's similar like raid0.
-     * One stripe contains the chunks from all member disk (one chunk from
-     * one disk at the same HBA address). For layout detail, see 'man md 4'
-     */
-    chunk = bio_start >> geo->chunk_shift;
-    chunk *= geo->near_copies;
-    first_stripe_index = chunk;
-    start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
-    if (geo->far_offset)
-        first_stripe_index *= geo->far_copies;
-    start_disk_offset = (bio_start & geo->chunk_mask) +
-        (first_stripe_index << geo->chunk_shift);
-
-    chunk = bio_end >> geo->chunk_shift;
-    chunk *= geo->near_copies;
-    last_stripe_index = chunk;
-    end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
-    if (geo->far_offset)
-        last_stripe_index *= geo->far_copies;
-    end_disk_offset = (bio_end & geo->chunk_mask) +
-        (last_stripe_index << geo->chunk_shift);
-
-retry_discard:
-    r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
-    r10_bio->mddev = mddev;
-    r10_bio->state = 0;
-    r10_bio->sectors = 0;
-    memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
-    wait_blocked_dev(mddev, r10_bio);
-
-    /* For far layout it needs more than one r10bio to cover all regions.
-     * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
-     * to record the discard bio. Other r10bio->master_bio record the first
-     * r10bio. The first r10bio only release after all other r10bios finish.
-     * The discard bio returns only first r10bio finishes
-     */
-    if (first_copy) {
-        r10_bio->master_bio = bio;
-        set_bit(R10BIO_Discard, &r10_bio->state);
-        first_copy = false;
-        first_r10bio = r10_bio;
-    } else
-        r10_bio->master_bio = (struct bio *)first_r10bio;
-
-    rcu_read_lock();
-    for (disk = 0; disk < geo->raid_disks; disk++) {
-        struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
-        struct md_rdev *rrdev = rcu_dereference(
-            conf->mirrors[disk].replacement);
-
-        r10_bio->devs[disk].bio = NULL;
-        r10_bio->devs[disk].repl_bio = NULL;
-
-        if (rdev && (test_bit(Faulty, &rdev->flags)))
-            rdev = NULL;
-        if (rrdev && (test_bit(Faulty, &rrdev->flags)))
-            rrdev = NULL;
-        if (!rdev && !rrdev)
-            continue;
-
-        if (rdev) {
-            r10_bio->devs[disk].bio = bio;
-            atomic_inc(&rdev->nr_pending);
-        }
-        if (rrdev) {
-            r10_bio->devs[disk].repl_bio = bio;
-            atomic_inc(&rrdev->nr_pending);
-        }
-    }
-    rcu_read_unlock();
-
-    atomic_set(&r10_bio->remaining, 1);
-    for (disk = 0; disk < geo->raid_disks; disk++) {
-        sector_t dev_start, dev_end;
-        struct bio *mbio, *rbio = NULL;
-        struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
-        struct md_rdev *rrdev = rcu_dereference(
-            conf->mirrors[disk].replacement);
-
-        /*
-         * Now start to calculate the start and end address for each disk.
-         * The space between dev_start and dev_end is the discard region.
-         *
-         * For dev_start, it needs to consider three conditions:
-         * 1st, the disk is before start_disk, you can imagine the disk in
-         * the next stripe. So the dev_start is the start address of next
-         * stripe.
-         * 2st, the disk is after start_disk, it means the disk is at the
-         * same stripe of first disk
-         * 3st, the first disk itself, we can use start_disk_offset directly
-         */
-        if (disk < start_disk_index)
-            dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
-        else if (disk > start_disk_index)
-            dev_start = first_stripe_index * mddev->chunk_sectors;
-        else
-            dev_start = start_disk_offset;
-
-        if (disk < end_disk_index)
-            dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
-        else if (disk > end_disk_index)
-            dev_end = last_stripe_index * mddev->chunk_sectors;
-        else
-            dev_end = end_disk_offset;
-
-        /* It only handles discard bio which size is >= stripe size, so
-         * dev_end > dev_start all the time
-         */
-        if (r10_bio->devs[disk].bio) {
-            mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-            mbio->bi_end_io = raid10_end_discard_request;
-            mbio->bi_private = r10_bio;
-            r10_bio->devs[disk].bio = mbio;
-            r10_bio->devs[disk].devnum = disk;
-            atomic_inc(&r10_bio->remaining);
-            md_submit_discard_bio(mddev, rdev, mbio,
-                dev_start + choose_data_offset(r10_bio, rdev),
-                dev_end - dev_start);
-            bio_endio(mbio);
-        }
-        if (r10_bio->devs[disk].repl_bio) {
-            rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-            rbio->bi_end_io = raid10_end_discard_request;
-            rbio->bi_private = r10_bio;
-            r10_bio->devs[disk].repl_bio = rbio;
-            r10_bio->devs[disk].devnum = disk;
-            atomic_inc(&r10_bio->remaining);
-            md_submit_discard_bio(mddev, rrdev, rbio,
-                dev_start + choose_data_offset(r10_bio, rrdev),
-                dev_end - dev_start);
-            bio_endio(rbio);
-        }
-    }
-
-    if (!geo->far_offset && --far_copies) {
-        first_stripe_index += geo->stride >> geo->chunk_shift;
-        start_disk_offset += geo->stride;
-        last_stripe_index += geo->stride >> geo->chunk_shift;
-        end_disk_offset += geo->stride;
-        atomic_inc(&first_r10bio->remaining);
-        raid_end_discard_bio(r10_bio);
-        wait_barrier(conf);
-        goto retry_discard;
-    }
-
-    raid_end_discard_bio(r10_bio);
-
-    return 0;
-out:
-    allow_barrier(conf);
-    return -EAGAIN;
 }
 
 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
···
 
     if (!md_write_start(mddev, bio))
         return false;
-
-    if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
-        if (!raid10_handle_discard(mddev, bio))
-            return true;
 
     /*
      * If this request crosses a chunk boundary, we need to split
···
 
     if (mddev->queue) {
         blk_queue_max_discard_sectors(mddev->queue,
-                          UINT_MAX);
+                          mddev->chunk_sectors);
         blk_queue_max_write_same_sectors(mddev->queue, 0);
         blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
         blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
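With the queue limit back at mddev->chunk_sectors, an oversized discard is split by the block layer into at most chunk-sized bios, which the remaining raid10 code maps like any other chunk-aligned request. A rough sketch of that splitting arithmetic (sector numbers invented; real splits also honor discard_granularity alignment):

/* discard-split-sketch.c: illustrative only; not the block layer's code. */
#include <stdio.h>

int main(void)
{
    unsigned long long chunk_sectors = 1024;      /* 512 KiB chunks */
    unsigned long long start = 3000, len = 10000; /* discard range, in sectors */

    while (len) {
        /* cut at the next chunk boundary, as a
         * max_discard_sectors == chunk_sectors limit forces */
        unsigned long long room = chunk_sectors - (start % chunk_sectors);
        unsigned long long n = len < room ? len : room;

        printf("discard [%llu, %llu)\n", start, start + n);
        start += n;
        len -= n;
    }
    return 0;
}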
-1
drivers/md/raid10.h
···
     R10BIO_Previous,
     /* failfast devices did receive failfast requests. */
     R10BIO_FailFast,
-    R10BIO_Discard,
 };
 #endif