Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'block-7.1-20260424' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- Series for zloop, fixing a variety of issues

- t10-pi code cleanup

- Fix for a merge window regression with the bio memory allocation mask

- Fix for a merge window regression in ublk, caused by an issue with
the maple tree iteration code at teardown

- ublk self tests additions

- Zoned device pgmap fixes

- Various little cleanups and fixes

* tag 'block-7.1-20260424' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux: (21 commits)
Revert "floppy: fix reference leak on platform_device_register() failure"
ublk: avoid unpinning pages under maple tree spinlock
ublk: refactor common helper ublk_shmem_remove_ranges()
ublk: fix maple tree lockdep warning in ublk_buf_cleanup
selftests: ublk: add ublk auto integrity test
selftests: ublk: enable test_integrity_02.sh on fio 3.42
selftests: ublk: remove unused argument to _cleanup
block: only restrict bio allocation gfp mask asked to block
block/blk-throttle: Add WQ_PERCPU to alloc_workqueue users
block: Add WQ_PERCPU to alloc_workqueue users
block: relax pgmap check in bio_add_page for compatible zone device pages
block: add pgmap check to biovec_phys_mergeable
floppy: fix reference leak on platform_device_register() failure
ublk: use unchecked copy helpers for bio page data
t10-pi: reduce ref tag code duplication
zloop: remove irq-safe locking
zloop: factor out zloop_mark_{full,empty} helpers
zloop: set RQF_QUIET when completing requests on deleted devices
zloop: improve the unaligned write pointer warning
zloop: use vfs_truncate
...

+347 -187
+1 -1
block/bio-integrity-auto.c
··· 125 125 * Make it highpri CPU intensive wq with max concurrency of 1. 126 126 */ 127 127 kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | 128 - WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); 128 + WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_PERCPU, 1); 129 129 if (!kintegrityd_wq) 130 130 panic("Failed to create kintegrityd\n"); 131 131 return 0;
+3 -3
block/bio-integrity.c
··· 231 231 if (bip->bip_vcnt > 0) { 232 232 struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1]; 233 233 234 - if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) 234 + if (!zone_device_pages_compatible(bv->bv_page, page)) 235 235 return 0; 236 - 237 - if (bvec_try_merge_hw_page(q, bv, page, len, offset)) { 236 + if (zone_device_pages_have_same_pgmap(bv->bv_page, page) && 237 + bvec_try_merge_hw_page(q, bv, page, len, offset)) { 238 238 bip->bip_iter.bi_size += len; 239 239 return len; 240 240 }
+6 -5
block/bio.c
··· 544 544 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) 545 545 return NULL; 546 546 547 - gfp = try_alloc_gfp(gfp); 547 + if (saved_gfp & __GFP_DIRECT_RECLAIM) 548 + gfp = try_alloc_gfp(gfp); 548 549 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { 549 550 /* 550 551 * Set REQ_ALLOC_CACHE even if no cached bio is available to ··· 1049 1048 if (bio->bi_vcnt > 0) { 1050 1049 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; 1051 1050 1052 - if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) 1051 + if (!zone_device_pages_compatible(bv->bv_page, page)) 1053 1052 return 0; 1054 - 1055 - if (bvec_try_merge_page(bv, page, len, offset)) { 1053 + if (zone_device_pages_have_same_pgmap(bv->bv_page, page) && 1054 + bvec_try_merge_page(bv, page, len, offset)) { 1056 1055 bio->bi_iter.bi_size += len; 1057 1056 return len; 1058 1057 } ··· 1959 1958 1960 1959 if (flags & BIOSET_NEED_RESCUER) { 1961 1960 bs->rescue_workqueue = alloc_workqueue("bioset", 1962 - WQ_MEM_RECLAIM, 0); 1961 + WQ_MEM_RECLAIM | WQ_PERCPU, 0); 1963 1962 if (!bs->rescue_workqueue) 1964 1963 goto bad; 1965 1964 }
+1 -1
block/blk-core.c
··· 1282 1282 1283 1283 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 1284 1284 kblockd_workqueue = alloc_workqueue("kblockd", 1285 - WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 1285 + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0); 1286 1286 if (!kblockd_workqueue) 1287 1287 panic("Failed to create kblockd\n"); 1288 1288
+1 -1
block/blk-throttle.c
··· 1839 1839 1840 1840 static int __init throtl_init(void) 1841 1841 { 1842 - kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); 1842 + kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM | WQ_PERCPU, 0); 1843 1843 if (!kthrotld_workqueue) 1844 1844 panic("Failed to create kthrotld\n"); 1845 1845
+21
block/blk.h
··· 127 127 128 128 if (addr1 + vec1->bv_len != addr2) 129 129 return false; 130 + if (!zone_device_pages_have_same_pgmap(vec1->bv_page, vec2->bv_page)) 131 + return false; 130 132 if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page)) 131 133 return false; 132 134 if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask)) 133 135 return false; 136 + return true; 137 + } 138 + 139 + /* 140 + * Check if two pages from potentially different zone device pgmaps can 141 + * coexist as separate bvec entries in the same bio. 142 + * 143 + * The block DMA iterator (blk_dma_map_iter_start) caches the P2PDMA mapping 144 + * state from the first segment and applies it to all subsequent segments, so 145 + * P2PDMA pages from different pgmaps must not be mixed in the same bio. 146 + * 147 + * Other zone device types (FS_DAX, GENERIC) use the same dma_map_phys() path 148 + * as normal RAM. PRIVATE and COHERENT pages never appear in bios. 149 + */ 150 + static inline bool zone_device_pages_compatible(const struct page *a, 151 + const struct page *b) 152 + { 153 + if (is_pci_p2pdma_page(a) || is_pci_p2pdma_page(b)) 154 + return zone_device_pages_have_same_pgmap(a, b); 134 155 return true; 135 156 } 136 157
+80 -43
drivers/block/ublk_drv.c
··· 1319 1319 1320 1320 len = bv->bv_len - *offset; 1321 1321 bv_buf = kmap_local_page(bv->bv_page) + bv->bv_offset + *offset; 1322 + /* 1323 + * Bio pages may originate from slab caches without a usercopy region 1324 + * (e.g. jbd2 frozen metadata buffers). This is the same data that 1325 + * the loop driver writes to its backing file — no exposure risk. 1326 + * The bvec length is always trusted, so the size check in 1327 + * check_copy_size() is not needed either. Use the unchecked 1328 + * helpers to avoid false positives on slab pages. 1329 + */ 1322 1330 if (dir == ITER_DEST) 1323 - copied = copy_to_iter(bv_buf, len, uiter); 1331 + copied = _copy_to_iter(bv_buf, len, uiter); 1324 1332 else 1325 - copied = copy_from_iter(bv_buf, len, uiter); 1333 + copied = _copy_from_iter(bv_buf, len, uiter); 1326 1334 1327 1335 kunmap_local(bv_buf); 1328 1336 ··· 5421 5413 return ret; 5422 5414 } 5423 5415 5424 - static int __ublk_ctrl_unreg_buf(struct ublk_device *ub, int buf_index) 5416 + static void ublk_unpin_range_pages(unsigned long base_pfn, 5417 + unsigned long nr_pages) 5418 + { 5419 + #define UBLK_UNPIN_BATCH 32 5420 + struct page *pages[UBLK_UNPIN_BATCH]; 5421 + unsigned long off; 5422 + 5423 + for (off = 0; off < nr_pages; ) { 5424 + unsigned int batch = min_t(unsigned long, 5425 + nr_pages - off, UBLK_UNPIN_BATCH); 5426 + unsigned int j; 5427 + 5428 + for (j = 0; j < batch; j++) 5429 + pages[j] = pfn_to_page(base_pfn + off + j); 5430 + unpin_user_pages(pages, batch); 5431 + off += batch; 5432 + } 5433 + } 5434 + 5435 + /* 5436 + * Inner loop: erase up to UBLK_REMOVE_BATCH matching ranges under 5437 + * mas_lock, collecting them into an xarray. Then drop the lock and 5438 + * unpin pages + free ranges outside spinlock context. 5439 + * 5440 + * Returns true if the tree walk completed, false if more ranges remain. 5441 + * Xarray key is the base PFN, value encodes nr_pages via xa_mk_value(). 
5442 + */ 5443 + #define UBLK_REMOVE_BATCH 64 5444 + 5445 + static bool __ublk_shmem_remove_ranges(struct ublk_device *ub, 5446 + int buf_index, int *ret) 5425 5447 { 5426 5448 MA_STATE(mas, &ub->buf_tree, 0, ULONG_MAX); 5427 5449 struct ublk_buf_range *range; 5428 - struct page *pages[32]; 5429 - int ret = -ENOENT; 5450 + struct xarray to_unpin; 5451 + unsigned long idx; 5452 + unsigned int count = 0; 5453 + bool done = false; 5454 + void *entry; 5455 + 5456 + xa_init(&to_unpin); 5430 5457 5431 5458 mas_lock(&mas); 5432 5459 mas_for_each(&mas, range, ULONG_MAX) { 5433 - unsigned long base, nr, off; 5460 + unsigned long nr; 5434 5461 5435 - if (range->buf_index != buf_index) 5462 + if (buf_index >= 0 && range->buf_index != buf_index) 5436 5463 continue; 5437 5464 5438 - ret = 0; 5439 - base = mas.index; 5440 - nr = mas.last - base + 1; 5465 + *ret = 0; 5466 + nr = mas.last - mas.index + 1; 5467 + if (xa_err(xa_store(&to_unpin, mas.index, 5468 + xa_mk_value(nr), GFP_ATOMIC))) 5469 + goto unlock; 5441 5470 mas_erase(&mas); 5442 - 5443 - for (off = 0; off < nr; ) { 5444 - unsigned int batch = min_t(unsigned long, 5445 - nr - off, 32); 5446 - unsigned int j; 5447 - 5448 - for (j = 0; j < batch; j++) 5449 - pages[j] = pfn_to_page(base + off + j); 5450 - unpin_user_pages(pages, batch); 5451 - off += batch; 5452 - } 5453 5471 kfree(range); 5472 + if (++count >= UBLK_REMOVE_BATCH) 5473 + goto unlock; 5454 5474 } 5475 + done = true; 5476 + unlock: 5455 5477 mas_unlock(&mas); 5456 5478 5479 + xa_for_each(&to_unpin, idx, entry) 5480 + ublk_unpin_range_pages(idx, xa_to_value(entry)); 5481 + xa_destroy(&to_unpin); 5482 + 5483 + return done; 5484 + } 5485 + 5486 + /* 5487 + * Remove ranges from the maple tree matching buf_index, unpin pages 5488 + * and free range structs. If buf_index < 0, remove all ranges. 5489 + * Processes ranges in batches to avoid holding the maple tree spinlock 5490 + * across potentially expensive page unpinning. 
5491 + */ 5492 + static int ublk_shmem_remove_ranges(struct ublk_device *ub, int buf_index) 5493 + { 5494 + int ret = -ENOENT; 5495 + 5496 + while (!__ublk_shmem_remove_ranges(ub, buf_index, &ret)) 5497 + cond_resched(); 5457 5498 return ret; 5458 5499 } 5459 5500 ··· 5521 5464 5522 5465 memflags = ublk_lock_buf_tree(ub); 5523 5466 5524 - ret = __ublk_ctrl_unreg_buf(ub, index); 5467 + ret = ublk_shmem_remove_ranges(ub, index); 5525 5468 if (!ret) 5526 5469 ida_free(&ub->buf_ida, index); 5527 5470 ··· 5531 5474 5532 5475 static void ublk_buf_cleanup(struct ublk_device *ub) 5533 5476 { 5534 - MA_STATE(mas, &ub->buf_tree, 0, ULONG_MAX); 5535 - struct ublk_buf_range *range; 5536 - struct page *pages[32]; 5537 - 5538 - mas_for_each(&mas, range, ULONG_MAX) { 5539 - unsigned long base = mas.index; 5540 - unsigned long nr = mas.last - base + 1; 5541 - unsigned long off; 5542 - 5543 - for (off = 0; off < nr; ) { 5544 - unsigned int batch = min_t(unsigned long, 5545 - nr - off, 32); 5546 - unsigned int j; 5547 - 5548 - for (j = 0; j < batch; j++) 5549 - pages[j] = pfn_to_page(base + off + j); 5550 - unpin_user_pages(pages, batch); 5551 - off += batch; 5552 - } 5553 - kfree(range); 5554 - } 5477 + ublk_shmem_remove_ranges(ub, -1); 5555 5478 mtree_destroy(&ub->buf_tree); 5556 5479 ida_destroy(&ub->buf_ida); 5557 5480 }
+59 -64
drivers/block/zloop.c
··· 288 288 } 289 289 } 290 290 291 + static void zloop_mark_full(struct zloop_device *zlo, struct zloop_zone *zone) 292 + { 293 + lockdep_assert_held(&zone->wp_lock); 294 + 295 + zloop_lru_remove_open_zone(zlo, zone); 296 + zone->cond = BLK_ZONE_COND_FULL; 297 + zone->wp = ULLONG_MAX; 298 + } 299 + 300 + static void zloop_mark_empty(struct zloop_device *zlo, struct zloop_zone *zone) 301 + { 302 + lockdep_assert_held(&zone->wp_lock); 303 + 304 + zloop_lru_remove_open_zone(zlo, zone); 305 + zone->cond = BLK_ZONE_COND_EMPTY; 306 + zone->wp = zone->start; 307 + } 308 + 291 309 static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no) 292 310 { 293 311 struct zloop_zone *zone = &zlo->zones[zone_no]; 294 312 struct kstat stat; 295 313 sector_t file_sectors; 296 - unsigned long flags; 297 314 int ret; 298 315 299 316 lockdep_assert_held(&zone->lock); ··· 330 313 return -EINVAL; 331 314 } 332 315 333 - if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) { 334 - pr_err("Zone %u file size not aligned to block size %u\n", 335 - zone_no, zlo->block_size); 316 + if (!IS_ALIGNED(stat.size, zlo->block_size)) { 317 + pr_err("Zone %u file size (%llu) not aligned to block size %u\n", 318 + zone_no, stat.size, zlo->block_size); 336 319 return -EINVAL; 337 320 } 338 321 339 - spin_lock_irqsave(&zone->wp_lock, flags); 322 + spin_lock(&zone->wp_lock); 340 323 if (!file_sectors) { 341 - zloop_lru_remove_open_zone(zlo, zone); 342 - zone->cond = BLK_ZONE_COND_EMPTY; 343 - zone->wp = zone->start; 324 + zloop_mark_empty(zlo, zone); 344 325 } else if (file_sectors == zlo->zone_capacity) { 345 - zloop_lru_remove_open_zone(zlo, zone); 346 - zone->cond = BLK_ZONE_COND_FULL; 347 - zone->wp = ULLONG_MAX; 326 + zloop_mark_full(zlo, zone); 348 327 } else { 349 328 if (zone->cond != BLK_ZONE_COND_IMP_OPEN && 350 329 zone->cond != BLK_ZONE_COND_EXP_OPEN) 351 330 zone->cond = BLK_ZONE_COND_CLOSED; 352 331 zone->wp = zone->start + file_sectors; 353 332 } 354 - 
spin_unlock_irqrestore(&zone->wp_lock, flags); 333 + spin_unlock(&zone->wp_lock); 355 334 356 335 return 0; 357 336 } ··· 380 367 static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no) 381 368 { 382 369 struct zloop_zone *zone = &zlo->zones[zone_no]; 383 - unsigned long flags; 384 370 int ret = 0; 385 371 386 372 if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) ··· 398 386 break; 399 387 case BLK_ZONE_COND_IMP_OPEN: 400 388 case BLK_ZONE_COND_EXP_OPEN: 401 - spin_lock_irqsave(&zone->wp_lock, flags); 389 + spin_lock(&zone->wp_lock); 402 390 zloop_lru_remove_open_zone(zlo, zone); 403 391 if (zone->wp == zone->start) 404 392 zone->cond = BLK_ZONE_COND_EMPTY; 405 393 else 406 394 zone->cond = BLK_ZONE_COND_CLOSED; 407 - spin_unlock_irqrestore(&zone->wp_lock, flags); 395 + spin_unlock(&zone->wp_lock); 408 396 break; 409 397 case BLK_ZONE_COND_EMPTY: 410 398 case BLK_ZONE_COND_FULL: ··· 422 410 static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no) 423 411 { 424 412 struct zloop_zone *zone = &zlo->zones[zone_no]; 425 - unsigned long flags; 426 413 int ret = 0; 427 414 428 415 if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) ··· 439 428 goto unlock; 440 429 } 441 430 442 - spin_lock_irqsave(&zone->wp_lock, flags); 443 - zloop_lru_remove_open_zone(zlo, zone); 444 - zone->cond = BLK_ZONE_COND_EMPTY; 445 - zone->wp = zone->start; 431 + spin_lock(&zone->wp_lock); 432 + zloop_mark_empty(zlo, zone); 446 433 clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); 447 - spin_unlock_irqrestore(&zone->wp_lock, flags); 434 + spin_unlock(&zone->wp_lock); 448 435 449 436 unlock: 450 437 mutex_unlock(&zone->lock); ··· 467 458 static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no) 468 459 { 469 460 struct zloop_zone *zone = &zlo->zones[zone_no]; 470 - unsigned long flags; 471 461 int ret = 0; 472 462 473 463 if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) ··· 484 476 goto unlock; 485 477 } 486 478 487 - spin_lock_irqsave(&zone->wp_lock, 
flags); 488 - zloop_lru_remove_open_zone(zlo, zone); 489 - zone->cond = BLK_ZONE_COND_FULL; 490 - zone->wp = ULLONG_MAX; 479 + spin_lock(&zone->wp_lock); 480 + zloop_mark_full(zlo, zone); 491 481 clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); 492 - spin_unlock_irqrestore(&zone->wp_lock, flags); 482 + spin_unlock(&zone->wp_lock); 493 483 494 484 unlock: 495 485 mutex_unlock(&zone->lock); ··· 577 571 bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND; 578 572 struct zloop_zone *zone = &zlo->zones[zone_no]; 579 573 sector_t zone_end = zone->start + zlo->zone_capacity; 580 - unsigned long flags; 581 574 int ret = 0; 582 575 583 - spin_lock_irqsave(&zone->wp_lock, flags); 576 + spin_lock(&zone->wp_lock); 584 577 585 578 /* 586 579 * Zone append operations always go at the current write pointer, but ··· 621 616 */ 622 617 if (!is_append || !zlo->ordered_zone_append) { 623 618 zone->wp += nr_sectors; 624 - if (zone->wp == zone_end) { 625 - zloop_lru_remove_open_zone(zlo, zone); 626 - zone->cond = BLK_ZONE_COND_FULL; 627 - zone->wp = ULLONG_MAX; 628 - } 619 + if (zone->wp == zone_end) 620 + zloop_mark_full(zlo, zone); 629 621 } 630 622 out_unlock: 631 - spin_unlock_irqrestore(&zone->wp_lock, flags); 623 + spin_unlock(&zone->wp_lock); 632 624 return ret; 633 625 } 634 626 ··· 863 861 struct zloop_zone *zone = &zlo->zones[zone_no]; 864 862 sector_t zone_end = zone->start + zlo->zone_capacity; 865 863 sector_t nr_sectors = blk_rq_sectors(rq); 866 - unsigned long flags; 867 864 868 - spin_lock_irqsave(&zone->wp_lock, flags); 865 + spin_lock(&zone->wp_lock); 869 866 870 867 if (zone->cond == BLK_ZONE_COND_FULL || 871 868 zone->wp + nr_sectors > zone_end) { 872 - spin_unlock_irqrestore(&zone->wp_lock, flags); 869 + spin_unlock(&zone->wp_lock); 873 870 return false; 874 871 } 875 872 876 873 rq->__sector = zone->wp; 877 874 zone->wp += blk_rq_sectors(rq); 878 - if (zone->wp >= zone_end) { 879 - zloop_lru_remove_open_zone(zlo, zone); 880 - zone->cond = BLK_ZONE_COND_FULL; 881 - 
zone->wp = ULLONG_MAX; 882 - } 875 + if (zone->wp >= zone_end) 876 + zloop_mark_full(zlo, zone); 883 877 884 - spin_unlock_irqrestore(&zone->wp_lock, flags); 878 + spin_unlock(&zone->wp_lock); 885 879 886 880 return true; 887 881 } ··· 889 891 struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq); 890 892 struct zloop_device *zlo = rq->q->queuedata; 891 893 892 - if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting) 894 + if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting) { 895 + rq->rq_flags |= RQF_QUIET; 893 896 return BLK_STS_IOERR; 897 + } 894 898 895 899 /* 896 900 * If we need to strongly order zone append operations, set the request ··· 938 938 struct zloop_device *zlo = disk->private_data; 939 939 struct blk_zone blkz = {}; 940 940 unsigned int first, i; 941 - unsigned long flags; 942 941 int ret; 943 942 944 943 first = disk_zone_no(disk, sector); ··· 961 962 962 963 blkz.start = zone->start; 963 964 blkz.len = zlo->zone_size; 964 - spin_lock_irqsave(&zone->wp_lock, flags); 965 + spin_lock(&zone->wp_lock); 965 966 blkz.wp = zone->wp; 966 - spin_unlock_irqrestore(&zone->wp_lock, flags); 967 + spin_unlock(&zone->wp_lock); 967 968 blkz.cond = zone->cond; 968 969 if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) { 969 970 blkz.type = BLK_ZONE_TYPE_CONVENTIONAL; ··· 1362 1363 return ret; 1363 1364 } 1364 1365 1365 - static void zloop_truncate(struct file *file, loff_t pos) 1366 - { 1367 - struct mnt_idmap *idmap = file_mnt_idmap(file); 1368 - struct dentry *dentry = file_dentry(file); 1369 - struct iattr newattrs; 1370 - 1371 - newattrs.ia_size = pos; 1372 - newattrs.ia_valid = ATTR_SIZE; 1373 - 1374 - inode_lock(dentry->d_inode); 1375 - notify_change(idmap, dentry, &newattrs, NULL); 1376 - inode_unlock(dentry->d_inode); 1377 - } 1378 - 1379 1366 static void zloop_forget_cache(struct zloop_device *zlo) 1380 1367 { 1381 1368 unsigned int i; ··· 1386 1401 zlo->disk->part0, ret); 1387 1402 continue; 1388 1403 } 1389 - if (old_wp < zone->wp) 1390 - zloop_truncate(file, 
old_wp); 1404 + 1405 + if (old_wp > zone->wp) 1406 + continue; 1407 + /* 1408 + * This should not happen: if we recorded a full zone, it can't 1409 + * be active. 1410 + */ 1411 + if (WARN_ON_ONCE(old_wp == ULLONG_MAX)) 1412 + continue; 1413 + 1414 + vfs_truncate(&file->f_path, 1415 + (old_wp - zone->start) << SECTOR_SHIFT); 1391 1416 } 1392 1417 } 1393 1418
+13 -12
include/linux/t10-pi.h
··· 4 4 5 5 #include <linux/types.h> 6 6 #include <linux/blk-mq.h> 7 + #include <linux/wordpart.h> 7 8 8 9 /* 9 10 * A T10 PI-capable target device can be formatted with different ··· 26 25 T10_PI_TYPE3_PROTECTION = 0x3, 27 26 }; 28 27 28 + static inline u64 full_pi_ref_tag(const struct request *rq) 29 + { 30 + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); 31 + 32 + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 33 + rq->q->limits.integrity.interval_exp) 34 + shift = rq->q->limits.integrity.interval_exp; 35 + return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT); 36 + } 37 + 29 38 /* 30 39 * T10 Protection Information tuple. 31 40 */ ··· 50 39 51 40 static inline u32 t10_pi_ref_tag(struct request *rq) 52 41 { 53 - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); 54 - 55 - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 56 - rq->q->limits.integrity.interval_exp) 57 - shift = rq->q->limits.integrity.interval_exp; 58 - return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; 42 + return lower_32_bits(full_pi_ref_tag(rq)); 59 43 } 60 44 61 45 struct crc64_pi_tuple { ··· 70 64 71 65 static inline u64 ext_pi_ref_tag(struct request *rq) 72 66 { 73 - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); 74 - 75 - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 76 - rq->q->limits.integrity.interval_exp) 77 - shift = rq->q->limits.integrity.interval_exp; 78 - return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); 67 + return lower_48_bits(full_pi_ref_tag(rq)); 79 68 } 80 69 81 70 #endif
+1
tools/testing/selftests/ublk/Makefile
··· 37 37 38 38 TEST_PROGS += test_integrity_01.sh 39 39 TEST_PROGS += test_integrity_02.sh 40 + TEST_PROGS += test_integrity_03.sh 40 41 41 42 TEST_PROGS += test_recover_01.sh 42 43 TEST_PROGS += test_recover_02.sh
+2 -2
tools/testing/selftests/ublk/test_batch_01.sh
··· 18 18 _check_add_dev $TID $? 19 19 20 20 if ! _mkfs_mount_test /dev/ublkb"${dev_id}"; then 21 - _cleanup_test "generic" 21 + _cleanup_test 22 22 _show_result $TID 255 23 23 fi 24 24 ··· 27 27 _mkfs_mount_test /dev/ublkb"${dev_id}" 28 28 ERR_CODE=$? 29 29 30 - _cleanup_test "generic" 30 + _cleanup_test 31 31 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_batch_02.sh
··· 25 25 --iodepth=32 --size=100M --numjobs=4 > /dev/null 2>&1 26 26 ERR_CODE=$? 27 27 28 - _cleanup_test "generic" 28 + _cleanup_test 29 29 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_batch_03.sh
··· 25 25 --iodepth=32 --size=100M --numjobs=4 > /dev/null 2>&1 26 26 ERR_CODE=$? 27 27 28 - _cleanup_test "generic" 28 + _cleanup_test 29 29 _show_result $TID $ERR_CODE
+2 -2
tools/testing/selftests/ublk/test_generic_02.sh
··· 29 29 done 30 30 31 31 if ! kill -0 "$btrace_pid" 2>/dev/null; then 32 - _cleanup_test "null" 32 + _cleanup_test 33 33 exit "$UBLK_SKIP_CODE" 34 34 fi 35 35 ··· 51 51 grep "^out_of_order:" "$UBLK_TMP" 52 52 ERR_CODE=255 53 53 fi 54 - _cleanup_test "null" 54 + _cleanup_test 55 55 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_03.sh
··· 23 23 if [ "$max_segment_size" != "32768" ]; then 24 24 ERR_CODE=255 25 25 fi 26 - _cleanup_test "null" 26 + _cleanup_test 27 27 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_06.sh
··· 36 36 ERR_CODE=255 37 37 fi 38 38 39 - _cleanup_test "fault_inject" 39 + _cleanup_test 40 40 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_07.sh
··· 23 23 ERR_CODE=$? 24 24 fi 25 25 26 - _cleanup_test "generic" 26 + _cleanup_test 27 27 _show_result $TID $ERR_CODE
+2 -2
tools/testing/selftests/ublk/test_generic_08.sh
··· 18 18 _check_add_dev $TID $? 19 19 20 20 if ! _mkfs_mount_test /dev/ublkb"${dev_id}"; then 21 - _cleanup_test "generic" 21 + _cleanup_test 22 22 _show_result $TID 255 23 23 fi 24 24 ··· 27 27 _mkfs_mount_test /dev/ublkb"${dev_id}" 28 28 ERR_CODE=$? 29 29 30 - _cleanup_test "generic" 30 + _cleanup_test 31 31 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_09.sh
··· 22 22 fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 23 23 ERR_CODE=$? 24 24 25 - _cleanup_test "null" 25 + _cleanup_test 26 26 27 27 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_10.sh
··· 25 25 ERR_CODE=255 26 26 fi 27 27 28 - _cleanup_test "null" 28 + _cleanup_test 29 29 _show_result $TID $ERR_CODE
+2 -2
tools/testing/selftests/ublk/test_generic_12.sh
··· 25 25 sleep 2 26 26 27 27 if ! kill -0 "$btrace_pid" > /dev/null 2>&1; then 28 - _cleanup_test "null" 28 + _cleanup_test 29 29 exit "$UBLK_SKIP_CODE" 30 30 fi 31 31 ··· 54 54 ERR_CODE=255 55 55 fi 56 56 57 - _cleanup_test "null" 57 + _cleanup_test 58 58 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_13.sh
··· 15 15 ERR_CODE=255 16 16 fi 17 17 18 - _cleanup_test "null" 18 + _cleanup_test 19 19 _show_result $TID $ERR_CODE
+2 -2
tools/testing/selftests/ublk/test_generic_16.sh
··· 9 9 10 10 # Check if SAFE_STOP_DEV feature is supported 11 11 if ! _have_feature "SAFE_STOP_DEV"; then 12 - _cleanup_test "null" 12 + _cleanup_test 13 13 exit "$UBLK_SKIP_CODE" 14 14 fi 15 15 ··· 52 52 _ublk_del_dev "${dev_id}" 53 53 udevadm settle 54 54 55 - _cleanup_test "null" 55 + _cleanup_test 56 56 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_generic_17.sh
··· 31 31 # time out here 32 32 _ublk_del_dev "${dev_id}" 33 33 34 - _cleanup_test "fault_inject" 34 + _cleanup_test 35 35 _show_result $TID $ERR_CODE
+3 -2
tools/testing/selftests/ublk/test_integrity_02.sh
··· 7 7 exit $UBLK_SKIP_CODE 8 8 fi 9 9 10 + min_fio_version=fio-3.42 10 11 fio_version=$(fio --version) 11 - if [[ "$fio_version" =~ fio-[0-9]+\.[0-9]+$ ]]; then 12 - echo "Requires development fio version with https://github.com/axboe/fio/pull/1992" 12 + if ! sort --version-sort --check=quiet <(printf "%s\n%s\n" "$min_fio_version" "$fio_version"); then 13 + echo "Requires fio version with https://github.com/axboe/fio/pull/1992" 13 14 exit $UBLK_SKIP_CODE 14 15 fi 15 16
+103
tools/testing/selftests/ublk/test_integrity_03.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh 5 + 6 + if ! _have_program fio; then 7 + exit $UBLK_SKIP_CODE 8 + fi 9 + 10 + _test_fill_and_verify() { 11 + fio --name fill --rw randwrite $fio_args > /dev/null 12 + if [ $? != 0 ]; then 13 + echo "fio fill failed" 14 + ERR_CODE=255 15 + return 1 16 + fi 17 + 18 + fio --name verify --rw randread $fio_args > /dev/null 19 + if [ $? != 0 ]; then 20 + echo "fio verify failed" 21 + ERR_CODE=255 22 + return 1 23 + fi 24 + } 25 + 26 + _test_corrupted_reftag() { 27 + local dd_reftag_args="bs=1 seek=58 count=6 oflag=dsync conv=notrunc status=none" 28 + 29 + # Overwrite 6-byte reftag at offset 48 + 10 = 58 30 + dd if=/dev/urandom "of=${UBLK_BACKFILES[1]}" $dd_reftag_args 31 + if [ $? != 0 ]; then 32 + echo "dd corrupted_reftag failed" 33 + ERR_CODE=255 34 + return 1 35 + fi 36 + 37 + if fio --name corrupted_reftag --rw randread $fio_args > /dev/null 2> "$fio_err"; then 38 + echo "fio corrupted_reftag unexpectedly succeeded" 39 + ERR_CODE=255 40 + return 1 41 + fi 42 + 43 + if ! grep -q "$expected_err" "$fio_err"; then 44 + echo "fio corrupted_reftag message not found: $expected_err" 45 + ERR_CODE=255 46 + return 1 47 + fi 48 + 49 + # Reset to 0 50 + dd if=/dev/zero "of=${UBLK_BACKFILES[1]}" $dd_reftag_args 51 + if [ $? != 0 ]; then 52 + echo "dd restore corrupted_reftag failed" 53 + ERR_CODE=255 54 + return 1 55 + fi 56 + } 57 + 58 + _test_corrupted_data() { 59 + local dd_data_args="bs=512 count=1 oflag=direct,dsync conv=notrunc status=none" 60 + 61 + dd if=/dev/zero "of=${UBLK_BACKFILES[0]}" $dd_data_args 62 + if [ $? != 0 ]; then 63 + echo "dd corrupted_data failed" 64 + ERR_CODE=255 65 + return 1 66 + fi 67 + 68 + if fio --name corrupted_data --rw randread $fio_args > /dev/null 2> "$fio_err"; then 69 + echo "fio corrupted_data unexpectedly succeeded" 70 + ERR_CODE=255 71 + return 1 72 + fi 73 + 74 + if ! 
grep -q "$expected_err" "$fio_err"; then 75 + echo "fio corrupted_data message not found: $expected_err" 76 + ERR_CODE=255 77 + return 1 78 + fi 79 + } 80 + 81 + _prep_test "loop" "end-to-end auto integrity" 82 + 83 + _create_backfile 0 256M 84 + _create_backfile 1 32M # 256M * (64 integrity bytes / 512 data bytes) 85 + integrity_params="--integrity_capable --integrity_reftag 86 + --metadata_size 64 --pi_offset 48 --csum_type nvme" 87 + dev_id=$(_add_ublk_dev -t loop -u $integrity_params "${UBLK_BACKFILES[@]}") 88 + _check_add_dev "$TID" $? 89 + 90 + fio_args="--ioengine libaio --direct 1 --bsrange 512-1M --iodepth 32 91 + --filename /dev/ublkb$dev_id" 92 + fio_err=$(mktemp "${UBLK_TEST_DIR}"/fio_err_XXXXX) 93 + ERR_CODE=0 94 + 95 + expected_err="Invalid or incomplete multibyte or wide character: read offset=0" 96 + _test_fill_and_verify && \ 97 + _test_corrupted_reftag && \ 98 + _test_corrupted_data 99 + 100 + rm -f "$fio_err" 101 + 102 + _cleanup_test 103 + _show_result "$TID" $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_01.sh
··· 20 20 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M 21 21 ERR_CODE=$? 22 22 23 - _cleanup_test "loop" 23 + _cleanup_test 24 24 25 25 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_02.sh
··· 14 14 _mkfs_mount_test /dev/ublkb"${dev_id}" 15 15 ERR_CODE=$? 16 16 17 - _cleanup_test "loop" 17 + _cleanup_test 18 18 19 19 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_03.sh
··· 19 19 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M 20 20 ERR_CODE=$? 21 21 22 - _cleanup_test "loop" 22 + _cleanup_test 23 23 24 24 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_04.sh
··· 15 15 _mkfs_mount_test /dev/ublkb"${dev_id}" 16 16 ERR_CODE=$? 17 17 18 - _cleanup_test "loop" 18 + _cleanup_test 19 19 20 20 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_05.sh
··· 20 20 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M 21 21 ERR_CODE=$? 22 22 23 - _cleanup_test "loop" 23 + _cleanup_test 24 24 25 25 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_06.sh
··· 19 19 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M 20 20 ERR_CODE=$? 21 21 22 - _cleanup_test "loop" 22 + _cleanup_test 23 23 24 24 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_loop_07.sh
··· 15 15 _mkfs_mount_test /dev/ublkb"${dev_id}" 16 16 ERR_CODE=$? 17 17 18 - _cleanup_test "loop" 18 + _cleanup_test 19 19 20 20 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_null_01.sh
··· 18 18 fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 19 19 ERR_CODE=$? 20 20 21 - _cleanup_test "null" 21 + _cleanup_test 22 22 23 23 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_null_02.sh
··· 18 18 fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 19 19 ERR_CODE=$? 20 20 21 - _cleanup_test "null" 21 + _cleanup_test 22 22 23 23 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_null_03.sh
··· 18 18 fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 19 19 ERR_CODE=$? 20 20 21 - _cleanup_test "null" 21 + _cleanup_test 22 22 23 23 _show_result $TID $ERR_CODE
+2 -2
tools/testing/selftests/ublk/test_part_01.sh
··· 82 82 _prep_test "generic" "test UBLK_F_NO_AUTO_PART_SCAN" 83 83 84 84 if ! _have_feature "UBLK_F_NO_AUTO_PART_SCAN"; then 85 - _cleanup_test "generic" 85 + _cleanup_test 86 86 exit "$UBLK_SKIP_CODE" 87 87 fi 88 88 ··· 100 100 [ "$ERR_CODE" -eq 0 ] && test_no_auto_part_scan "${UBLK_BACKFILES[0]}" 101 101 [ $? -ne 0 ] && ERR_CODE=255 102 102 103 - _cleanup_test "generic" 103 + _cleanup_test 104 104 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_part_02.sh
··· 63 63 # Test 2: With recovery support - should transition to QUIESCED 64 64 _test_partition_scan_no_hang "yes" "QUIESCED" 65 65 66 - _cleanup_test "partition_scan" 66 + _cleanup_test 67 67 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_recover_01.sh
··· 40 40 ublk_run_recover_test -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 41 41 wait 42 42 43 - _cleanup_test "recover" 43 + _cleanup_test 44 44 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_recover_02.sh
··· 44 44 ublk_run_recover_test -t stripe -q 2 -r 1 -z -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 45 45 wait 46 46 47 - _cleanup_test "recover" 47 + _cleanup_test 48 48 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_recover_03.sh
··· 39 39 ublk_run_quiesce_recover -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 40 40 wait 41 41 42 - _cleanup_test "quiesce" 42 + _cleanup_test 43 43 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_recover_04.sh
··· 35 35 ublk_run_recover_test -t stripe -q 2 -r 1 -u -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 36 36 wait 37 37 38 - _cleanup_test "recover" 38 + _cleanup_test 39 39 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_shmemzc_01.sh
··· 67 67 rmdir "$HTLB_MNT" 68 68 echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages 69 69 70 - _cleanup_test "shmem_zc" 70 + _cleanup_test 71 71 72 72 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_shmemzc_02.sh
··· 63 63 rmdir "$HTLB_MNT" 64 64 echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages 65 65 66 - _cleanup_test "shmem_zc" 66 + _cleanup_test 67 67 68 68 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_shmemzc_03.sh
··· 64 64 rmdir "$HTLB_MNT" 65 65 echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages 66 66 67 - _cleanup_test "shmem_zc" 67 + _cleanup_test 68 68 69 69 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_shmemzc_04.sh
··· 67 67 rmdir "$HTLB_MNT" 68 68 echo "$OLD_NR_HP" > /proc/sys/vm/nr_hugepages 69 69 70 - _cleanup_test "shmem_zc" 70 + _cleanup_test 71 71 72 72 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_01.sh
··· 29 29 ublk_io_and_remove 256M -t stripe -q 4 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 30 30 wait 31 31 32 - _cleanup_test "stress" 32 + _cleanup_test 33 33 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_02.sh
··· 31 31 wait 32 32 done 33 33 34 - _cleanup_test "stress" 34 + _cleanup_test 35 35 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_03.sh
··· 49 49 wait 50 50 fi 51 51 52 - _cleanup_test "stress" 52 + _cleanup_test 53 53 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_04.sh
··· 48 48 wait 49 49 fi 50 50 51 - _cleanup_test "stress" 51 + _cleanup_test 52 52 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_05.sh
··· 79 79 fi 80 80 wait 81 81 82 - _cleanup_test "stress" 82 + _cleanup_test 83 83 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_06.sh
··· 34 34 ublk_io_and_remove 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 35 35 wait 36 36 37 - _cleanup_test "stress" 37 + _cleanup_test 38 38 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_07.sh
··· 34 34 ublk_io_and_kill_daemon 256M -t stripe -q 4 -u --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & 35 35 wait 36 36 37 - _cleanup_test "stress" 37 + _cleanup_test 38 38 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_08.sh
··· 40 40 ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback -b & 41 41 wait 42 42 43 - _cleanup_test "stress" 43 + _cleanup_test 44 44 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stress_09.sh
··· 39 39 ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback -b & 40 40 wait 41 41 42 - _cleanup_test "stress" 42 + _cleanup_test 43 43 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stripe_01.sh
··· 21 21 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M 22 22 ERR_CODE=$? 23 23 24 - _cleanup_test "stripe" 24 + _cleanup_test 25 25 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stripe_02.sh
··· 16 16 _mkfs_mount_test /dev/ublkb"${dev_id}" 17 17 ERR_CODE=$? 18 18 19 - _cleanup_test "stripe" 19 + _cleanup_test 20 20 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stripe_03.sh
··· 21 21 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M 22 22 ERR_CODE=$? 23 23 24 - _cleanup_test "stripe" 24 + _cleanup_test 25 25 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stripe_04.sh
··· 16 16 _mkfs_mount_test /dev/ublkb"${dev_id}" 17 17 ERR_CODE=$? 18 18 19 - _cleanup_test "stripe" 19 + _cleanup_test 20 20 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stripe_05.sh
··· 21 21 _run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M 22 22 ERR_CODE=$? 23 23 24 - _cleanup_test "stripe" 24 + _cleanup_test 25 25 _show_result $TID $ERR_CODE
+1 -1
tools/testing/selftests/ublk/test_stripe_06.sh
··· 16 16 _mkfs_mount_test /dev/ublkb"${dev_id}" 17 17 ERR_CODE=$? 18 18 19 - _cleanup_test "stripe" 19 + _cleanup_test 20 20 _show_result $TID $ERR_CODE