Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'block-5.15-2021-10-17' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"Bigger than usual for this point in time, the majority is fixing some
issues around BDI lifetimes with the move from the request_queue to
the disk in this release. In detail:

- Series on draining fs IO for del_gendisk() (Christoph)

- NVMe pull request via Christoph:
    - fix the abort command id (Keith Busch)
    - nvme: fix per-namespace chardev deletion (Adam Manzanares)

- brd locking scope fix (Tetsuo)

- BFQ fix (Paolo)"

* tag 'block-5.15-2021-10-17' of git://git.kernel.dk/linux-block:
block, bfq: reset last_bfqq_created on group change
block: warn when putting the final reference on a registered disk
brd: reduce the brd_devices_mutex scope
kyber: avoid q->disk dereferences in trace points
block: keep q_usage_counter in atomic mode after del_gendisk
block: drain file system I/O on del_gendisk
block: split bio_queue_enter from blk_queue_enter
block: factor out a blk_try_enter_queue helper
block: call submit_bio_checks under q_usage_counter
nvme: fix per-namespace chardev deletion
block/rnbd-clt-sysfs: fix a couple uninitialized variable bugs
nvme-pci: Fix abort command id

13 files changed, +171 -120

block/bfq-cgroup.c  +6
···
         bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
         bfqg_and_blkg_put(bfqq_group(bfqq));
 
+        if (entity->parent &&
+            entity->parent->last_bfqq_created == bfqq)
+                entity->parent->last_bfqq_created = NULL;
+        else if (bfqd->last_bfqq_created == bfqq)
+                bfqd->last_bfqq_created = NULL;
+
         entity->parent = bfqg->my_entity;
         entity->sched_data = &bfqg->sched_data;
         /* pin down bfqg and its associated blkg */

block/blk-core.c  +78 -70
···
 #include "blk-mq.h"
 #include "blk-mq-sched.h"
 #include "blk-pm.h"
-#include "blk-rq-qos.h"
 
 struct dentry *blk_debugfs_root;
 
···
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
 {
-        blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-
         /*
          * When queue DYING flag is set, we need to block new req
          * entering queue, so we call blk_freeze_queue_start() to
          * prevent I/O from crossing blk_queue_enter().
          */
         blk_freeze_queue_start(q);
-
         if (queue_is_mq(q))
                 blk_mq_wake_waiters(q);
-
         /* Make blk_queue_enter() reexamine the DYING flag. */
         wake_up_all(&q->mq_freeze_wq);
+}
+
+void blk_set_queue_dying(struct request_queue *q)
+{
+        blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+        blk_queue_start_drain(q);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
···
          */
         blk_freeze_queue(q);
 
-        rq_qos_exit(q);
-
         blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-        /* for synchronous bio-based driver finish in-flight integrity i/o */
-        blk_flush_integrity();
 
         blk_sync_queue(q);
         if (queue_is_mq(q))
···
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+        rcu_read_lock();
+        if (!percpu_ref_tryget_live(&q->q_usage_counter))
+                goto fail;
+
+        /*
+         * The code that increments the pm_only counter must ensure that the
+         * counter is globally visible before the queue is unfrozen.
+         */
+        if (blk_queue_pm_only(q) &&
+            (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+                goto fail_put;
+
+        rcu_read_unlock();
+        return true;
+
+fail_put:
+        percpu_ref_put(&q->q_usage_counter);
+fail:
+        rcu_read_unlock();
+        return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
···
 {
         const bool pm = flags & BLK_MQ_REQ_PM;
 
-        while (true) {
-                bool success = false;
-
-                rcu_read_lock();
-                if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-                        /*
-                         * The code that increments the pm_only counter is
-                         * responsible for ensuring that that counter is
-                         * globally visible before the queue is unfrozen.
-                         */
-                        if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-                            !blk_queue_pm_only(q)) {
-                                success = true;
-                        } else {
-                                percpu_ref_put(&q->q_usage_counter);
-                        }
-                }
-                rcu_read_unlock();
-
-                if (success)
-                        return 0;
-
+        while (!blk_try_enter_queue(q, pm)) {
                 if (flags & BLK_MQ_REQ_NOWAIT)
                         return -EBUSY;
 
                 /*
-                 * read pair of barrier in blk_freeze_queue_start(),
-                 * we need to order reading __PERCPU_REF_DEAD flag of
-                 * .q_usage_counter and reading .mq_freeze_depth or
-                 * queue dying flag, otherwise the following wait may
-                 * never return if the two reads are reordered.
+                 * read pair of barrier in blk_freeze_queue_start(), we need to
+                 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+                 * reading .mq_freeze_depth or queue dying flag, otherwise the
+                 * following wait may never return if the two reads are
+                 * reordered.
                  */
                 smp_rmb();
-
                 wait_event(q->mq_freeze_wq,
                            (!q->mq_freeze_depth &&
                             blk_pm_resume_queue(pm, q)) ||
···
                 if (blk_queue_dying(q))
                         return -ENODEV;
         }
+
+        return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
 {
-        struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-        bool nowait = bio->bi_opf & REQ_NOWAIT;
-        int ret;
+        struct gendisk *disk = bio->bi_bdev->bd_disk;
+        struct request_queue *q = disk->queue;
 
-        ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-        if (unlikely(ret)) {
-                if (nowait && !blk_queue_dying(q))
+        while (!blk_try_enter_queue(q, false)) {
+                if (bio->bi_opf & REQ_NOWAIT) {
+                        if (test_bit(GD_DEAD, &disk->state))
+                                goto dead;
                         bio_wouldblock_error(bio);
-                else
-                        bio_io_error(bio);
+                        return -EBUSY;
+                }
+
+                /*
+                 * read pair of barrier in blk_freeze_queue_start(), we need to
+                 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+                 * reading .mq_freeze_depth or queue dying flag, otherwise the
+                 * following wait may never return if the two reads are
+                 * reordered.
+                 */
+                smp_rmb();
+                wait_event(q->mq_freeze_wq,
+                           (!q->mq_freeze_depth &&
+                            blk_pm_resume_queue(false, q)) ||
+                           test_bit(GD_DEAD, &disk->state));
+                if (test_bit(GD_DEAD, &disk->state))
+                        goto dead;
         }
 
-        return ret;
+        return 0;
+dead:
+        bio_io_error(bio);
+        return -ENODEV;
 }
 
 void blk_queue_exit(struct request_queue *q)
···
         struct gendisk *disk = bio->bi_bdev->bd_disk;
         blk_qc_t ret = BLK_QC_T_NONE;
 
-        if (blk_crypto_bio_prep(&bio)) {
-                if (!disk->fops->submit_bio)
-                        return blk_mq_submit_bio(bio);
+        if (unlikely(bio_queue_enter(bio) != 0))
+                return BLK_QC_T_NONE;
+
+        if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+                goto queue_exit;
+        if (disk->fops->submit_bio) {
                 ret = disk->fops->submit_bio(bio);
+                goto queue_exit;
         }
+        return blk_mq_submit_bio(bio);
+
+queue_exit:
         blk_queue_exit(disk->queue);
         return ret;
 }
···
                 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
                 struct bio_list lower, same;
 
-                if (unlikely(bio_queue_enter(bio) != 0))
-                        continue;
-
                 /*
                  * Create a fresh bio_list for all subordinate requests.
                  */
···
 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 {
         struct bio_list bio_list[2] = { };
-        blk_qc_t ret = BLK_QC_T_NONE;
+        blk_qc_t ret;
 
         current->bio_list = bio_list;
 
         do {
-                struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-                if (unlikely(bio_queue_enter(bio) != 0))
-                        continue;
-
-                if (!blk_crypto_bio_prep(&bio)) {
-                        blk_queue_exit(disk->queue);
-                        ret = BLK_QC_T_NONE;
-                        continue;
-                }
-
-                ret = blk_mq_submit_bio(bio);
+                ret = __submit_bio(bio);
         } while ((bio = bio_list_pop(&bio_list[0])));
 
         current->bio_list = NULL;
···
  */
 blk_qc_t submit_bio_noacct(struct bio *bio)
 {
-        if (!submit_bio_checks(bio))
-                return BLK_QC_T_NONE;
-
         /*
          * We only want one ->submit_bio to be active at a time, else stack
          * usage with stacked devices could be a problem. Use current->bio_list
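
A practical consequence of the reworked bio_queue_enter() above: a submitter that sets REQ_NOWAIT never sleeps waiting for an unfreeze, and can now distinguish a temporarily frozen queue from a disk that del_gendisk() has already marked dead. The sketch below is a hypothetical caller, not part of this series; example_endio() and example_submit_nowait() are invented names for illustration.

/*
 * Hypothetical illustration only: how a REQ_NOWAIT submitter observes the
 * two outcomes of the new bio_queue_enter().
 */
static void example_endio(struct bio *bio)
{
        if (bio->bi_status == BLK_STS_AGAIN)
                pr_info("queue frozen, safe to retry later\n");
        else if (bio->bi_status)
                pr_info("disk is gone (GD_DEAD), give up\n");
        bio_put(bio);
}

static void example_submit_nowait(struct bio *bio)
{
        bio->bi_opf |= REQ_NOWAIT;
        bio->bi_end_io = example_endio;
        submit_bio(bio);        /* never blocks waiting for an unfreeze */
}

While the queue is only frozen, bio_wouldblock_error() completes the bio with BLK_STS_AGAIN; once GD_DEAD is set the bio is failed outright instead of blocking forever.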

block/blk-mq.c  +8 -1
···
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
         mutex_lock(&q->mq_freeze_lock);
+        if (force_atomic)
+                q->q_usage_counter.data->force_atomic = true;
         q->mq_freeze_depth--;
         WARN_ON_ONCE(q->mq_freeze_depth < 0);
         if (!q->mq_freeze_depth) {
···
                 wake_up_all(&q->mq_freeze_wq);
         }
         mutex_unlock(&q->mq_freeze_lock);
+}
+
+void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+        __blk_mq_unfreeze_queue(q, false);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 

block/blk.h  +2
···
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 void blk_freeze_queue(struct request_queue *q);
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+void blk_queue_start_drain(struct request_queue *q);
 
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,

block/genhd.c  +23
···
 #include <linux/badblocks.h>
 
 #include "blk.h"
+#include "blk-rq-qos.h"
 
 static struct kobject *block_depr;
 
···
  */
 void del_gendisk(struct gendisk *disk)
 {
+        struct request_queue *q = disk->queue;
+
         might_sleep();
 
         if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
···
         fsync_bdev(disk->part0);
         __invalidate_device(disk->part0, true);
 
+        /*
+         * Fail any new I/O.
+         */
+        set_bit(GD_DEAD, &disk->state);
         set_capacity(disk, 0);
+
+        /*
+         * Prevent new I/O from crossing bio_queue_enter().
+         */
+        blk_queue_start_drain(q);
+        blk_mq_freeze_queue_wait(q);
+
+        rq_qos_exit(q);
+        blk_sync_queue(q);
+        blk_flush_integrity();
+        /*
+         * Allow using passthrough request again after the queue is torn down.
+         */
+        blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+        __blk_mq_unfreeze_queue(q, true);
 
         if (!(disk->flags & GENHD_FL_HIDDEN)) {
                 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
···
         struct gendisk *disk = dev_to_disk(dev);
 
         might_sleep();
+        WARN_ON_ONCE(disk_live(disk));
 
         disk_release_events(disk);
         kfree(disk->random);
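
With this change del_gendisk() itself fails new I/O (GD_DEAD), starts the drain, and waits for the freeze before returning, so a driver's removal path can assume no file system I/O is in flight afterwards. A hypothetical driver remove routine, mirroring the brd rework further down, would look like the sketch below; example_dev and its helpers are invented for illustration.

/*
 * Hypothetical driver teardown, showing the ordering the reworked
 * del_gendisk() provides.
 */
static void example_remove(struct example_dev *edev)
{
        del_gendisk(edev->disk);        /* fails new I/O, drains fs I/O */
        blk_cleanup_disk(edev->disk);   /* releases the queue and gendisk */
        example_free_resources(edev);   /* no fs I/O can touch these now */
        kfree(edev);
}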

block/kyber-iosched.c  +6 -4
···
 
 struct kyber_queue_data {
         struct request_queue *q;
+        dev_t dev;
 
         /*
          * Each scheduling domain has a limited number of in-flight requests
···
         }
         memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
 
-        trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+        trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
                             kyber_latency_type_names[type], percentile,
                             bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
 
···
         depth = clamp(depth, 1U, kyber_depth[sched_domain]);
         if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
                 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
-                trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+                trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
                                    depth);
         }
 }
···
                 goto err;
 
         kqd->q = q;
+        kqd->dev = disk_devt(q->disk);
 
         kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
                                             GFP_KERNEL | __GFP_ZERO);
···
                         list_del_init(&rq->queuelist);
                         return rq;
                 } else {
-                        trace_kyber_throttled(kqd->q,
+                        trace_kyber_throttled(kqd->dev,
                                               kyber_domain_names[khd->cur_domain]);
                 }
         } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
···
                         list_del_init(&rq->queuelist);
                         return rq;
                 } else {
-                        trace_kyber_throttled(kqd->q,
+                        trace_kyber_throttled(kqd->dev,
                                               kyber_domain_names[khd->cur_domain]);
                 }
         }
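
The kyber fix caches the device number once at scheduler init so the trace points (see the include/trace/events/kyber.h hunks below) no longer dereference q->disk, which is not guaranteed to remain valid for the scheduler's whole lifetime after the del_gendisk() rework. The same idiom in a hypothetical scheduler-private struct, names invented for the sketch:

/* Hypothetical sketch of the "cache dev_t at init" idiom used above. */
struct example_sched_data {
        struct request_queue *q;
        dev_t dev;              /* captured while q->disk is known valid */
};

static int example_init_sched(struct request_queue *q,
                              struct example_sched_data *sd)
{
        sd->q = q;
        sd->dev = disk_devt(q->disk);   /* later tracing uses sd->dev only */
        return 0;
}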

drivers/block/brd.c  +22 -22
···
         struct gendisk *disk;
         char buf[DISK_NAME_LEN];
 
+        mutex_lock(&brd_devices_mutex);
+        list_for_each_entry(brd, &brd_devices, brd_list) {
+                if (brd->brd_number == i) {
+                        mutex_unlock(&brd_devices_mutex);
+                        return -EEXIST;
+                }
+        }
         brd = kzalloc(sizeof(*brd), GFP_KERNEL);
-        if (!brd)
+        if (!brd) {
+                mutex_unlock(&brd_devices_mutex);
                 return -ENOMEM;
+        }
         brd->brd_number = i;
+        list_add_tail(&brd->brd_list, &brd_devices);
+        mutex_unlock(&brd_devices_mutex);
+
         spin_lock_init(&brd->brd_lock);
         INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
 
···
         blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
         add_disk(disk);
-        list_add_tail(&brd->brd_list, &brd_devices);
 
         return 0;
 
 out_free_dev:
+        mutex_lock(&brd_devices_mutex);
+        list_del(&brd->brd_list);
+        mutex_unlock(&brd_devices_mutex);
         kfree(brd);
         return -ENOMEM;
 }
 
 static void brd_probe(dev_t dev)
 {
-        int i = MINOR(dev) / max_part;
-        struct brd_device *brd;
-
-        mutex_lock(&brd_devices_mutex);
-        list_for_each_entry(brd, &brd_devices, brd_list) {
-                if (brd->brd_number == i)
-                        goto out_unlock;
-        }
-
-        brd_alloc(i);
-out_unlock:
-        mutex_unlock(&brd_devices_mutex);
+        brd_alloc(MINOR(dev) / max_part);
 }
 
 static void brd_del_one(struct brd_device *brd)
 {
-        list_del(&brd->brd_list);
         del_gendisk(brd->brd_disk);
         blk_cleanup_disk(brd->brd_disk);
         brd_free_pages(brd);
+        mutex_lock(&brd_devices_mutex);
+        list_del(&brd->brd_list);
+        mutex_unlock(&brd_devices_mutex);
         kfree(brd);
 }
 
···
 
         brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
 
-        mutex_lock(&brd_devices_mutex);
         for (i = 0; i < rd_nr; i++) {
                 err = brd_alloc(i);
                 if (err)
                         goto out_free;
         }
 
-        mutex_unlock(&brd_devices_mutex);
-
         pr_info("brd: module loaded\n");
         return 0;
 
 out_free:
+        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
         debugfs_remove_recursive(brd_debugfs_dir);
 
         list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                 brd_del_one(brd);
-        mutex_unlock(&brd_devices_mutex);
-        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 
         pr_info("brd: module NOT loaded !!!\n");
         return err;
···
 {
         struct brd_device *brd, *next;
 
+        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
         debugfs_remove_recursive(brd_debugfs_dir);
 
         list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                 brd_del_one(brd);
-
-        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 
         pr_info("brd: module unloaded\n");
 }

drivers/block/rnbd/rnbd-clt-sysfs.c  +3 -1
···
         int opt_mask = 0;
         int token;
         int ret = -EINVAL;
-        int i, dest_port, nr_poll_queues;
+        int nr_poll_queues = 0;
+        int dest_port = 0;
         int p_cnt = 0;
+        int i;
 
         options = kstrdup(buf, GFP_KERNEL);
         if (!options)

drivers/nvme/host/core.c  +12 -9
···
         return 0;
 }
 
+static void nvme_cdev_rel(struct device *dev)
+{
+        ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
+
 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
 {
         cdev_device_del(cdev, cdev_device);
-        ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
+        put_device(cdev_device);
 }
 
 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
···
                 return minor;
         cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
         cdev_device->class = nvme_ns_chr_class;
+        cdev_device->release = nvme_cdev_rel;
         device_initialize(cdev_device);
         cdev_init(cdev, fops);
         cdev->owner = owner;
         ret = cdev_device_add(cdev, cdev_device);
-        if (ret) {
+        if (ret)
                 put_device(cdev_device);
-                ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
-        }
+
         return ret;
 }
 
···
                         ns->ctrl->instance, ns->head->instance);
         if (ret)
                 return ret;
-        ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
-                            ns->ctrl->ops->module);
-        if (ret)
-                kfree_const(ns->cdev_device.kobj.name);
-        return ret;
+
+        return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+                             ns->ctrl->ops->module);
 }
 
 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
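
The nvme change moves the minor-number release into a struct device ->release callback, so nvme_cdev_del() only drops its reference and the ida entry is freed when the last user of the char device goes away rather than at a fixed point in teardown. The general idiom, with invented names, looks roughly like this sketch:

/* Hypothetical sketch of the ->release-based cleanup idiom used above. */
static void example_cdev_release(struct device *dev)
{
        /* runs only when the final reference to the device is dropped */
        ida_simple_remove(&example_minor_ida, MINOR(dev->devt));
}

static int example_cdev_init(struct device *dev, int major, int minor)
{
        dev->devt = MKDEV(major, minor);
        dev->release = example_cdev_release;
        device_initialize(dev);         /* refcount 1; freed via put_device() */
        return 0;
}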

drivers/nvme/host/multipath.c  -2
···
                 return ret;
         ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
                             &nvme_ns_head_chr_fops, THIS_MODULE);
-        if (ret)
-                kfree_const(head->cdev_device.kobj.name);
         return ret;
 }
 

drivers/nvme/host/pci.c  +1 -1
···
         iod->aborted = 1;
 
         cmd.abort.opcode = nvme_admin_abort_cmd;
-        cmd.abort.cid = req->tag;
+        cmd.abort.cid = nvme_cid(req);
         cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
         dev_warn(nvmeq->dev->ctrl.device,
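
Background for the one-line fix above: since the command-id validation work in this cycle, the value placed in the submission queue entry is no longer the bare blk-mq tag; a small generation counter is folded into its upper bits, and nvme_cid(req) reconstructs that composed value. An abort command therefore has to carry the same composed id, otherwise the controller is asked to abort an id that was never issued. A rough, hypothetical illustration of the layout (constants invented for the sketch, not the driver's actual ones):

/* Hypothetical layout sketch: generation bits above, blk-mq tag below. */
#define EX_GENCTR_SHIFT 12

static inline u16 example_cid(u8 genctr, u16 tag)
{
        return ((u16)genctr << EX_GENCTR_SHIFT) |
               (tag & ((1 << EX_GENCTR_SHIFT) - 1));
}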

include/linux/genhd.h  +1
···
         unsigned long state;
 #define GD_NEED_PART_SCAN              0
 #define GD_READ_ONLY                   1
+#define GD_DEAD                        2
 
         struct mutex open_mutex;        /* open/close mutex */
         unsigned open_partitions;       /* number of open partitions */

include/trace/events/kyber.h  +9 -10
···
 
 TRACE_EVENT(kyber_latency,
 
-        TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+        TP_PROTO(dev_t dev, const char *domain, const char *type,
                  unsigned int percentile, unsigned int numerator,
                  unsigned int denominator, unsigned int samples),
 
-        TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+        TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
 
         TP_STRUCT__entry(
                 __field( dev_t, dev )
···
         ),
 
         TP_fast_assign(
-                __entry->dev = disk_devt(q->disk);
+                __entry->dev = dev;
                 strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                 strlcpy(__entry->type, type, sizeof(__entry->type));
                 __entry->percentile = percentile;
···
 
 TRACE_EVENT(kyber_adjust,
 
-        TP_PROTO(struct request_queue *q, const char *domain,
-                 unsigned int depth),
+        TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
 
-        TP_ARGS(q, domain, depth),
+        TP_ARGS(dev, domain, depth),
 
         TP_STRUCT__entry(
                 __field( dev_t, dev )
···
         ),
 
         TP_fast_assign(
-                __entry->dev = disk_devt(q->disk);
+                __entry->dev = dev;
                 strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                 __entry->depth = depth;
         ),
···
 
 TRACE_EVENT(kyber_throttled,
 
-        TP_PROTO(struct request_queue *q, const char *domain),
+        TP_PROTO(dev_t dev, const char *domain),
 
-        TP_ARGS(q, domain),
+        TP_ARGS(dev, domain),
 
         TP_STRUCT__entry(
                 __field( dev_t, dev )
···
         ),
 
         TP_fast_assign(
-                __entry->dev = disk_devt(q->disk);
+                __entry->dev = dev;
                 strlcpy(__entry->domain, domain, sizeof(__entry->domain));
         ),
 