Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

zloop: refactor zloop_rw

Split out two helper functions to make the function more readable and
to avoid conditional locking.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://patch.msgid.link/20260323071156.2940772-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
eff8d165 67807fba

+124 -116
drivers/block/zloop.c
···
         zloop_put_cmd(cmd);
 }
 
-static void zloop_rw(struct zloop_cmd *cmd)
+static int zloop_do_rw(struct zloop_cmd *cmd)
 {
         struct request *rq = blk_mq_rq_from_pdu(cmd);
+        int rw = req_op(rq) == REQ_OP_READ ? ITER_DEST : ITER_SOURCE;
+        unsigned int nr_bvec = blk_rq_nr_bvec(rq);
         struct zloop_device *zlo = rq->q->queuedata;
-        unsigned int zone_no = rq_zone_no(rq);
-        sector_t sector = blk_rq_pos(rq);
-        sector_t nr_sectors = blk_rq_sectors(rq);
-        bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
-        bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
-        int rw = is_write ? ITER_SOURCE : ITER_DEST;
+        struct zloop_zone *zone = &zlo->zones[rq_zone_no(rq)];
         struct req_iterator rq_iter;
-        struct zloop_zone *zone;
         struct iov_iter iter;
-        struct bio_vec tmp;
-        unsigned long flags;
-        sector_t zone_end;
-        unsigned int nr_bvec;
-        int ret;
-
-        atomic_set(&cmd->ref, 2);
-        cmd->sector = sector;
-        cmd->nr_sectors = nr_sectors;
-        cmd->ret = 0;
-
-        if (WARN_ON_ONCE(is_append && !zlo->zone_append)) {
-                ret = -EIO;
-                goto out;
-        }
-
-        /* We should never get an I/O beyond the device capacity. */
-        if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
-                ret = -EIO;
-                goto out;
-        }
-        zone = &zlo->zones[zone_no];
-        zone_end = zone->start + zlo->zone_capacity;
-
-        /*
-         * The block layer should never send requests that are not fully
-         * contained within the zone.
-         */
-        if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
-                ret = -EIO;
-                goto out;
-        }
-
-        if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
-                mutex_lock(&zone->lock);
-                ret = zloop_update_seq_zone(zlo, zone_no);
-                mutex_unlock(&zone->lock);
-                if (ret)
-                        goto out;
-        }
-
-        if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
-                mutex_lock(&zone->lock);
-
-                spin_lock_irqsave(&zone->wp_lock, flags);
-
-                /*
-                 * Zone append operations always go at the current write
-                 * pointer, but regular write operations must already be
-                 * aligned to the write pointer when submitted.
-                 */
-                if (is_append) {
-                        /*
-                         * If ordered zone append is in use, we already checked
-                         * and set the target sector in zloop_queue_rq().
-                         */
-                        if (!zlo->ordered_zone_append) {
-                                if (zone->cond == BLK_ZONE_COND_FULL ||
-                                    zone->wp + nr_sectors > zone_end) {
-                                        spin_unlock_irqrestore(&zone->wp_lock,
-                                                               flags);
-                                        ret = -EIO;
-                                        goto unlock;
-                                }
-                                sector = zone->wp;
-                        }
-                        cmd->sector = sector;
-                } else if (sector != zone->wp) {
-                        spin_unlock_irqrestore(&zone->wp_lock, flags);
-                        pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
-                                zone_no, sector, zone->wp);
-                        ret = -EIO;
-                        goto unlock;
-                }
-
-                /* Implicitly open the target zone. */
-                if (zone->cond == BLK_ZONE_COND_CLOSED ||
-                    zone->cond == BLK_ZONE_COND_EMPTY)
-                        zone->cond = BLK_ZONE_COND_IMP_OPEN;
-
-                /*
-                 * Advance the write pointer, unless ordered zone append is in
-                 * use. If the write fails, the write pointer position will be
-                 * corrected when the next I/O starts execution.
-                 */
-                if (!is_append || !zlo->ordered_zone_append) {
-                        zone->wp += nr_sectors;
-                        if (zone->wp == zone_end) {
-                                zone->cond = BLK_ZONE_COND_FULL;
-                                zone->wp = ULLONG_MAX;
-                        }
-                }
-
-                spin_unlock_irqrestore(&zone->wp_lock, flags);
-        }
-
-        nr_bvec = blk_rq_nr_bvec(rq);
 
         if (rq->bio != rq->biotail) {
-                struct bio_vec *bvec;
+                struct bio_vec tmp, *bvec;
 
                 cmd->bvec = kmalloc_objs(*cmd->bvec, nr_bvec, GFP_NOIO);
-                if (!cmd->bvec) {
-                        ret = -EIO;
-                        goto unlock;
-                }
+                if (!cmd->bvec)
+                        return -EIO;
 
                 /*
                  * The bios of the request may be started from the middle of
···
                 iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
         }
 
-        cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+        cmd->iocb.ki_pos = (cmd->sector - zone->start) << SECTOR_SHIFT;
         cmd->iocb.ki_filp = zone->file;
         cmd->iocb.ki_complete = zloop_rw_complete;
         if (!zlo->buffered_io)
···
         cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 
         if (rw == ITER_SOURCE)
-                ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
-        else
-                ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
-unlock:
-        if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+                return zone->file->f_op->write_iter(&cmd->iocb, &iter);
+        return zone->file->f_op->read_iter(&cmd->iocb, &iter);
+}
+
+static int zloop_seq_write_prep(struct zloop_cmd *cmd)
+{
+        struct request *rq = blk_mq_rq_from_pdu(cmd);
+        struct zloop_device *zlo = rq->q->queuedata;
+        unsigned int zone_no = rq_zone_no(rq);
+        sector_t nr_sectors = blk_rq_sectors(rq);
+        bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+        struct zloop_zone *zone = &zlo->zones[zone_no];
+        sector_t zone_end = zone->start + zlo->zone_capacity;
+        unsigned long flags;
+        int ret = 0;
+
+        spin_lock_irqsave(&zone->wp_lock, flags);
+
+        /*
+         * Zone append operations always go at the current write pointer, but
+         * regular write operations must already be aligned to the write pointer
+         * when submitted.
+         */
+        if (is_append) {
+                /*
+                 * If ordered zone append is in use, we already checked and set
+                 * the target sector in zloop_queue_rq().
+                 */
+                if (!zlo->ordered_zone_append) {
+                        if (zone->cond == BLK_ZONE_COND_FULL ||
+                            zone->wp + nr_sectors > zone_end) {
+                                ret = -EIO;
+                                goto out_unlock;
+                        }
+                        cmd->sector = zone->wp;
+                }
+        } else {
+                if (cmd->sector != zone->wp) {
+                        pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+                                zone_no, cmd->sector, zone->wp);
+                        ret = -EIO;
+                        goto out_unlock;
+                }
+        }
+
+        /* Implicitly open the target zone. */
+        if (zone->cond == BLK_ZONE_COND_CLOSED ||
+            zone->cond == BLK_ZONE_COND_EMPTY)
+                zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+        /*
+         * Advance the write pointer, unless ordered zone append is in use. If
+         * the write fails, the write pointer position will be corrected when
+         * the next I/O starts execution.
+         */
+        if (!is_append || !zlo->ordered_zone_append) {
+                zone->wp += nr_sectors;
+                if (zone->wp == zone_end) {
+                        zone->cond = BLK_ZONE_COND_FULL;
+                        zone->wp = ULLONG_MAX;
+                }
+        }
+out_unlock:
+        spin_unlock_irqrestore(&zone->wp_lock, flags);
+        return ret;
+}
+
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+        struct request *rq = blk_mq_rq_from_pdu(cmd);
+        struct zloop_device *zlo = rq->q->queuedata;
+        unsigned int zone_no = rq_zone_no(rq);
+        sector_t nr_sectors = blk_rq_sectors(rq);
+        bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+        bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+        struct zloop_zone *zone;
+        int ret = -EIO;
+
+        atomic_set(&cmd->ref, 2);
+        cmd->sector = blk_rq_pos(rq);
+        cmd->nr_sectors = nr_sectors;
+        cmd->ret = 0;
+
+        if (WARN_ON_ONCE(is_append && !zlo->zone_append))
+                goto out;
+
+        /* We should never get an I/O beyond the device capacity. */
+        if (WARN_ON_ONCE(zone_no >= zlo->nr_zones))
+                goto out;
+
+        zone = &zlo->zones[zone_no];
+
+        /*
+         * The block layer should never send requests that are not fully
+         * contained within the zone.
+         */
+        if (WARN_ON_ONCE(cmd->sector + nr_sectors >
+                         zone->start + zlo->zone_size))
+                goto out;
+
+        if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+                mutex_lock(&zone->lock);
+                ret = zloop_update_seq_zone(zlo, zone_no);
                 mutex_unlock(&zone->lock);
+                if (ret)
+                        goto out;
+        }
+
+        if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+                mutex_lock(&zone->lock);
+                ret = zloop_seq_write_prep(cmd);
+                if (!ret)
+                        ret = zloop_do_rw(cmd);
+                mutex_unlock(&zone->lock);
+        } else {
+                ret = zloop_do_rw(cmd);
+        }
 out:
         if (ret != -EIOCBQUEUED)
                 zloop_rw_complete(&cmd->iocb, ret);
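Note on the "conditional locking" mentioned in the commit message: in the old zloop_rw(), zone->lock was taken only for sequential-zone writes and released at a shared unlock: label guarded by the same test, so the exit path had to re-evaluate whether the lock was held. The sketch below is not from drivers/block/zloop.c; it is a minimal, standalone illustration with hypothetical names (struct ctx, prep(), do_io()) and a pthread mutex standing in for the kernel mutex, contrasting that shape with the one the patch moves to, where lock and unlock sit in a single branch around the new helpers (cf. zloop_seq_write_prep() and zloop_do_rw()).

/* Illustration only -- hypothetical names, plain C with pthreads. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
        pthread_mutex_t lock;
        int value;
};

static int prep(struct ctx *c)  { c->value += 1;  return 0; }
static int do_io(struct ctx *c) { c->value += 10; return 0; }

/*
 * Before the split: the lock is taken conditionally, so the shared exit
 * label must re-check the same condition before unlocking.
 */
static int rw_before(struct ctx *c, bool seq_write)
{
        int ret;

        if (seq_write) {
                pthread_mutex_lock(&c->lock);   /* conditionally taken ... */
                ret = prep(c);
                if (ret)
                        goto unlock;
        }
        ret = do_io(c);
unlock:
        if (seq_write)
                pthread_mutex_unlock(&c->lock); /* ... so conditionally released */
        return ret;
}

/*
 * After the split: lock and unlock live in one branch, wrapped around
 * calls to small helpers, so no conditional unlock is needed.
 */
static int rw_after(struct ctx *c, bool seq_write)
{
        int ret;

        if (seq_write) {
                pthread_mutex_lock(&c->lock);
                ret = prep(c);          /* cf. zloop_seq_write_prep() */
                if (!ret)
                        ret = do_io(c); /* cf. zloop_do_rw() */
                pthread_mutex_unlock(&c->lock);
        } else {
                ret = do_io(c);
        }
        return ret;
}

int main(void)
{
        struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .value = 0 };

        rw_before(&c, true);
        rw_after(&c, false);
        printf("value = %d\n", c.value);
        return 0;
}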