Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

scsi: ufs: core: Switch to scsi_get_internal_cmd()

Instead of storing the tag of the reserved command in hba->reserved_slot,
use scsi_get_internal_cmd() and scsi_put_internal_cmd() to allocate the
tag for the reserved command dynamically. Add
ufshcd_queue_reserved_command() for submitting reserved commands. Add
support in ufshcd_abort() for device management commands. Use
blk_execute_rq() for submitting reserved commands. Remove the code and
data structures that became superfluous. This includes
ufshcd_wait_for_dev_cmd(), hba->reserved_slot and ufs_dev_cmd.complete.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://patch.msgid.link/20251031204029.2883185-29-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Bart Van Assche and committed by
Martin K. Petersen
08b12cda a11c015c

+116 -159
+7 -12
drivers/ufs/core/ufs-mcq.c
··· 479 479 mutex_init(&hwq->sq_mutex); 480 480 } 481 481 482 - /* The very first HW queue serves device commands */ 483 - hba->dev_cmd_queue = &hba->uhq[0]; 484 - 485 482 host->host_tagset = 1; 486 483 return 0; 487 484 } ··· 533 536 { 534 537 struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag); 535 538 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 539 + struct request *rq = scsi_cmd_to_rq(cmd); 536 540 struct ufs_hw_queue *hwq; 537 541 void __iomem *reg, *opr_sqd_base; 538 542 u32 nexus, id, val; ··· 542 544 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) 543 545 return -ETIMEDOUT; 544 546 545 - if (task_tag != hba->reserved_slot) { 546 - if (!cmd) 547 - return -EINVAL; 548 - hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); 549 - if (!hwq) 550 - return 0; 551 - } else { 552 - hwq = hba->dev_cmd_queue; 553 - } 547 + if (!cmd) 548 + return -EINVAL; 549 + 550 + hwq = ufshcd_mcq_req_to_hwq(hba, rq); 551 + if (!hwq) 552 + return 0; 554 553 555 554 id = hwq->id; 556 555
+1 -24
drivers/ufs/core/ufshcd-priv.h
··· 369 369 static inline struct scsi_cmnd *ufshcd_tag_to_cmd(struct ufs_hba *hba, u32 tag) 370 370 { 371 371 struct blk_mq_tags *tags = hba->host->tag_set.shared_tags; 372 - struct request *rq; 373 - 374 - /* 375 - * Handle reserved tags differently because the UFS driver does not 376 - * call blk_mq_alloc_request() for allocating reserved requests. 377 - * Allocating reserved tags with blk_mq_alloc_request() would require 378 - * the following: 379 - * - Allocate an additional request queue from &hba->host->tag_set for 380 - * allocating reserved requests from. 381 - * - For that request queue, allocate a SCSI device. 382 - * - Calling blk_mq_alloc_request(hba->dev_mgmt_queue, REQ_OP_DRV_OUT, 383 - * BLK_MQ_REQ_RESERVED) for allocating a reserved request and 384 - * blk_mq_free_request() for freeing reserved requests. 385 - * - Set the .device pointer for these reserved requests. 386 - * - Submit reserved requests with blk_execute_rq(). 387 - * - Modify ufshcd_queuecommand() such that it handles reserved requests 388 - * in another way than SCSI requests. 389 - * - Modify ufshcd_compl_one_cqe() such that it calls scsi_done() for 390 - * device management commands. 391 - * - Modify all callback functions called by blk_mq_tagset_busy_iter() 392 - * calls in the UFS driver and skip device management commands. 393 - */ 394 - rq = tag < UFSHCD_NUM_RESERVED ? tags->static_rqs[tag] : 395 - blk_mq_tag_to_rq(tags, tag); 372 + struct request *rq = blk_mq_tag_to_rq(tags, tag); 396 373 397 374 if (WARN_ON_ONCE(!rq)) 398 375 return NULL;
+108 -117
drivers/ufs/core/ufshcd.c
··· 2350 2350 spin_unlock_irqrestore(hba->host->host_lock, flags); 2351 2351 } 2352 2352 2353 - /* 2354 - * Returns %true for SCSI commands and %false for device management commands. 2355 - * Must not be called for SCSI commands that have not yet been started. 2356 - */ 2353 + /* Returns %true for SCSI commands and %false for device management commands. */ 2357 2354 static bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd) 2358 2355 { 2359 - return blk_mq_request_started(scsi_cmd_to_rq(cmd)); 2356 + return !blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)); 2360 2357 } 2361 2358 2362 2359 /** ··· 2484 2487 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; 2485 2488 hba->nutmrs = 2486 2489 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; 2487 - hba->reserved_slot = 0; 2488 2490 2489 2491 hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1; 2490 2492 ··· 3112 3116 return err; 3113 3117 } 3114 3118 3119 + static int ufshcd_queue_reserved_command(struct Scsi_Host *host, 3120 + struct scsi_cmnd *cmd) 3121 + { 3122 + struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 3123 + struct request *rq = scsi_cmd_to_rq(cmd); 3124 + struct ufs_hba *hba = shost_priv(host); 3125 + struct ufs_hw_queue *hwq = 3126 + hba->mcq_enabled ? ufshcd_mcq_req_to_hwq(hba, rq) : NULL; 3127 + 3128 + ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 3129 + ufshcd_send_command(hba, cmd, hwq); 3130 + return 0; 3131 + } 3132 + 3115 3133 static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd, 3116 3134 enum dev_cmd_type cmd_type, u8 lun, int tag) 3117 3135 { ··· 3255 3245 return err; 3256 3246 } 3257 3247 3258 - /* 3259 - * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3260 - * < 0 if another error occurred. 
3261 - */ 3262 - static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, 3263 - struct ufshcd_lrb *lrbp, int max_timeout) 3264 - { 3265 - struct scsi_cmnd *cmd = (struct scsi_cmnd *)lrbp - 1; 3266 - const int tag = scsi_cmd_to_rq(cmd)->tag; 3267 - unsigned long time_left = msecs_to_jiffies(max_timeout); 3268 - unsigned long flags; 3269 - bool pending; 3270 - int err; 3271 - 3272 - retry: 3273 - time_left = wait_for_completion_timeout(&hba->dev_cmd.complete, 3274 - time_left); 3275 - 3276 - if (likely(time_left)) { 3277 - err = ufshcd_get_tr_ocs(lrbp, NULL); 3278 - } else { 3279 - err = -ETIMEDOUT; 3280 - dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", 3281 - __func__, tag); 3282 - 3283 - /* MCQ mode */ 3284 - if (hba->mcq_enabled) { 3285 - /* successfully cleared the command, retry if needed */ 3286 - if (ufshcd_clear_cmd(hba, tag) == 0) 3287 - err = -EAGAIN; 3288 - return err; 3289 - } 3290 - 3291 - /* SDB mode */ 3292 - if (ufshcd_clear_cmd(hba, tag) == 0) { 3293 - /* successfully cleared the command, retry if needed */ 3294 - err = -EAGAIN; 3295 - /* 3296 - * Since clearing the command succeeded we also need to 3297 - * clear the task tag bit from the outstanding_reqs 3298 - * variable. 3299 - */ 3300 - spin_lock_irqsave(&hba->outstanding_lock, flags); 3301 - pending = test_bit(tag, &hba->outstanding_reqs); 3302 - if (pending) 3303 - __clear_bit(tag, &hba->outstanding_reqs); 3304 - spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3305 - 3306 - if (!pending) { 3307 - /* 3308 - * The completion handler ran while we tried to 3309 - * clear the command. 
3310 - */ 3311 - time_left = 1; 3312 - goto retry; 3313 - } 3314 - } else { 3315 - dev_err(hba->dev, "%s: failed to clear tag %d\n", 3316 - __func__, tag); 3317 - 3318 - spin_lock_irqsave(&hba->outstanding_lock, flags); 3319 - pending = test_bit(tag, &hba->outstanding_reqs); 3320 - spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3321 - 3322 - if (!pending) { 3323 - /* 3324 - * The completion handler ran while we tried to 3325 - * clear the command. 3326 - */ 3327 - time_left = 1; 3328 - goto retry; 3329 - } 3330 - } 3331 - } 3332 - 3333 - return err; 3334 - } 3335 - 3336 3248 static void ufshcd_dev_man_lock(struct ufs_hba *hba) 3337 3249 { 3338 3250 ufshcd_hold(hba); ··· 3269 3337 ufshcd_release(hba); 3270 3338 } 3271 3339 3340 + static struct scsi_cmnd *ufshcd_get_dev_mgmt_cmd(struct ufs_hba *hba) 3341 + { 3342 + /* 3343 + * The caller must hold this lock to guarantee that the NOWAIT 3344 + * allocation will succeed. 3345 + */ 3346 + lockdep_assert_held(&hba->dev_cmd.lock); 3347 + 3348 + return scsi_get_internal_cmd( 3349 + hba->host->pseudo_sdev, DMA_TO_DEVICE, 3350 + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); 3351 + } 3352 + 3353 + static void ufshcd_put_dev_mgmt_cmd(struct scsi_cmnd *cmd) 3354 + { 3355 + scsi_put_internal_cmd(cmd); 3356 + } 3357 + 3272 3358 /* 3273 3359 * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; 3274 3360 * < 0 if another error occurred. ··· 3295 3345 const u32 tag, int timeout) 3296 3346 { 3297 3347 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 3298 - int err; 3348 + struct request *rq = scsi_cmd_to_rq(cmd); 3349 + blk_status_t sts; 3299 3350 3300 - ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 3301 - ufshcd_send_command(hba, cmd, hba->dev_cmd_queue); 3302 - err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 3303 - 3304 - ufshcd_add_query_upiu_trace(hba, err ? 
UFS_QUERY_ERR : UFS_QUERY_COMP, 3305 - (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 3306 - 3307 - return err; 3351 + rq->timeout = timeout; 3352 + sts = blk_execute_rq(rq, true); 3353 + if (sts != BLK_STS_OK) 3354 + return blk_status_to_errno(sts); 3355 + return lrbp->utr_descriptor_ptr->header.ocs; 3308 3356 } 3309 3357 3310 3358 /** ··· 3320 3372 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, 3321 3373 enum dev_cmd_type cmd_type, int timeout) 3322 3374 { 3323 - const u32 tag = hba->reserved_slot; 3324 - struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag); 3375 + struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba); 3325 3376 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 3377 + u32 tag; 3326 3378 int err; 3327 3379 3328 - /* Protects use of hba->reserved_slot. */ 3380 + /* Protects use of hba->dev_cmd. */ 3329 3381 lockdep_assert_held(&hba->dev_cmd.lock); 3382 + 3383 + if (WARN_ON_ONCE(!cmd)) 3384 + return -ENOMEM; 3385 + 3386 + tag = scsi_cmd_to_rq(cmd)->tag; 3330 3387 3331 3388 err = ufshcd_compose_dev_cmd(hba, cmd, cmd_type, tag); 3332 3389 if (unlikely(err)) 3333 - return err; 3390 + goto out; 3334 3391 3335 3392 err = ufshcd_issue_dev_cmd(hba, cmd, tag, timeout); 3336 - if (err) 3337 - return err; 3393 + if (err == 0) 3394 + err = ufshcd_dev_cmd_completion(hba, lrbp); 3338 3395 3339 - return ufshcd_dev_cmd_completion(hba, lrbp); 3396 + out: 3397 + ufshcd_put_dev_mgmt_cmd(cmd); 3398 + 3399 + return err; 3340 3400 } 3341 3401 3342 3402 /** ··· 5614 5658 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 5615 5659 enum utp_ocs ocs; 5616 5660 5661 + if (WARN_ONCE(!cmd, "cqe->command_desc_base_addr = %#llx\n", 5662 + le64_to_cpu(cqe->command_desc_base_addr))) 5663 + return; 5664 + 5617 5665 if (hba->monitor.enabled) { 5618 5666 lrbp->compl_time_stamp = ktime_get(); 5619 5667 lrbp->compl_time_stamp_local_clock = local_clock(); ··· 5628 5668 ufshcd_add_command_trace(hba, cmd, UFS_CMD_COMP); 5629 5669 cmd->result = ufshcd_transfer_rsp_status(hba, cmd, cqe); 5630 5670 
ufshcd_release_scsi_cmd(hba, cmd); 5631 - /* Do not touch lrbp after scsi done */ 5632 - scsi_done(cmd); 5633 5671 } else { 5634 5672 if (cqe) { 5635 5673 ocs = cqe->overall_status & MASK_OCS; 5636 5674 lrbp->utr_descriptor_ptr->header.ocs = ocs; 5675 + } else { 5676 + ocs = lrbp->utr_descriptor_ptr->header.ocs; 5637 5677 } 5638 - complete(&hba->dev_cmd.complete); 5678 + ufshcd_add_query_upiu_trace( 5679 + hba, 5680 + ocs == OCS_SUCCESS ? UFS_QUERY_COMP : UFS_QUERY_ERR, 5681 + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 5682 + cmd->result = 0; 5639 5683 } 5684 + /* Do not touch lrbp after scsi_done() has been called. */ 5685 + scsi_done(cmd); 5640 5686 } 5641 5687 5642 5688 /** ··· 7352 7386 enum dev_cmd_type cmd_type, 7353 7387 enum query_opcode desc_op) 7354 7388 { 7355 - const u32 tag = hba->reserved_slot; 7356 - struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag); 7389 + struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba); 7357 7390 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 7391 + u32 tag; 7358 7392 int err = 0; 7359 7393 u8 upiu_flags; 7360 7394 7361 - /* Protects use of hba->reserved_slot. */ 7395 + /* Protects use of hba->dev_cmd. 
*/ 7362 7396 lockdep_assert_held(&hba->dev_cmd.lock); 7397 + 7398 + if (WARN_ON_ONCE(!cmd)) 7399 + return -ENOMEM; 7400 + 7401 + tag = scsi_cmd_to_rq(cmd)->tag; 7363 7402 7364 7403 ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag); 7365 7404 ··· 7388 7417 7389 7418 err = ufshcd_issue_dev_cmd(hba, cmd, tag, dev_cmd_timeout); 7390 7419 if (err) 7391 - return err; 7420 + goto put_dev_mgmt_cmd; 7392 7421 7393 7422 /* just copy the upiu response as it is */ 7394 7423 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); ··· 7408 7437 err = -EINVAL; 7409 7438 } 7410 7439 } 7440 + 7441 + put_dev_mgmt_cmd: 7442 + ufshcd_put_dev_mgmt_cmd(cmd); 7411 7443 7412 7444 return err; 7413 7445 } ··· 7505 7531 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list, 7506 7532 enum dma_data_direction dir) 7507 7533 { 7508 - const u32 tag = hba->reserved_slot; 7509 - struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag); 7510 - struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 7534 + struct scsi_cmnd *cmd; 7535 + struct ufshcd_lrb *lrbp; 7536 + u32 tag; 7511 7537 int err = 0; 7512 7538 int result; 7513 7539 u8 upiu_flags; ··· 7515 7541 u16 ehs_len; 7516 7542 int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0; 7517 7543 7518 - /* Protects use of hba->reserved_slot. 
*/ 7519 7544 ufshcd_dev_man_lock(hba); 7545 + 7546 + cmd = ufshcd_get_dev_mgmt_cmd(hba); 7547 + 7548 + if (WARN_ON_ONCE(!cmd)) { 7549 + err = -ENOMEM; 7550 + goto unlock; 7551 + } 7552 + 7553 + lrbp = scsi_cmd_priv(cmd); 7554 + tag = scsi_cmd_to_rq(cmd)->tag; 7520 7555 7521 7556 ufshcd_setup_dev_cmd(hba, cmd, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, 7522 7557 tag); ··· 7547 7564 7548 7565 err = ufshcd_issue_dev_cmd(hba, cmd, tag, ADVANCED_RPMB_REQ_TIMEOUT); 7549 7566 if (err) 7550 - return err; 7567 + goto put_dev_mgmt_cmd; 7551 7568 7552 7569 err = ufshcd_dev_cmd_completion(hba, lrbp); 7553 7570 if (!err) { ··· 7573 7590 } 7574 7591 } 7575 7592 7593 + put_dev_mgmt_cmd: 7594 + ufshcd_put_dev_mgmt_cmd(cmd); 7595 + 7596 + unlock: 7576 7597 ufshcd_dev_man_unlock(hba); 7577 7598 7578 7599 return err ? : result; ··· 7747 7760 { 7748 7761 struct Scsi_Host *host = cmd->device->host; 7749 7762 struct ufs_hba *hba = shost_priv(host); 7750 - int tag = scsi_cmd_to_rq(cmd)->tag; 7763 + struct request *rq = scsi_cmd_to_rq(cmd); 7764 + int tag = rq->tag; 7751 7765 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 7752 7766 unsigned long flags; 7753 7767 int err = FAILED; ··· 7778 7790 * to reduce repeated printouts. For other aborted requests only print 7779 7791 * basic details. 
7780 7792 */ 7781 - scsi_print_command(cmd); 7793 + if (ufshcd_is_scsi_cmd(cmd)) 7794 + scsi_print_command(cmd); 7782 7795 if (!hba->req_abort_count) { 7783 7796 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); 7784 7797 ufshcd_print_evt_hist(hba); ··· 7831 7842 goto release; 7832 7843 } 7833 7844 7834 - err = ufshcd_try_to_abort_task(hba, tag); 7845 + if (blk_mq_is_reserved_rq(rq)) 7846 + err = ufshcd_clear_cmd(hba, tag); 7847 + else 7848 + err = ufshcd_try_to_abort_task(hba, tag); 7835 7849 if (err) { 7836 7850 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7837 7851 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); ··· 9204 9212 .cmd_size = sizeof(struct ufshcd_lrb), 9205 9213 .init_cmd_priv = ufshcd_init_cmd_priv, 9206 9214 .queuecommand = ufshcd_queuecommand, 9215 + .queue_reserved_command = ufshcd_queue_reserved_command, 9207 9216 .nr_reserved_cmds = UFSHCD_NUM_RESERVED, 9208 9217 .mq_poll = ufshcd_poll, 9209 9218 .sdev_init = ufshcd_sdev_init, ··· 10756 10763 * drivers can override this setting as needed. 10757 10764 */ 10758 10765 hba->vcc_off_delay_us = 2000; 10759 - 10760 - init_completion(&hba->dev_cmd.complete); 10761 10766 10762 10767 err = ufshcd_hba_init(hba); 10763 10768 if (err)
-6
include/ufs/ufshcd.h
··· 236 236 * struct ufs_dev_cmd - all associated fields with device management commands 237 237 * @type: device management command type - Query, NOP OUT 238 238 * @lock: lock to allow one command at a time 239 - * @complete: internal commands completion 240 239 * @query: Device management query information 241 240 */ 242 241 struct ufs_dev_cmd { 243 242 enum dev_cmd_type type; 244 243 struct mutex lock; 245 - struct completion complete; 246 244 struct ufs_query query; 247 245 }; 248 246 ··· 836 838 * @nutrs: Transfer Request Queue depth supported by controller 837 839 * @nortt - Max outstanding RTTs supported by controller 838 840 * @nutmrs: Task Management Queue depth supported by controller 839 - * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock. 840 841 * @ufs_version: UFS Version to which controller complies 841 842 * @vops: pointer to variant specific operations 842 843 * @vps: pointer to variant specific parameters ··· 926 929 * @res: array of resource info of MCQ registers 927 930 * @mcq_base: Multi circular queue registers base address 928 931 * @uhq: array of supported hardware queues 929 - * @dev_cmd_queue: Queue for issuing device management commands 930 932 * @mcq_opr: MCQ operation and runtime registers 931 933 * @ufs_rtc_update_work: A work for UFS RTC periodic update 932 934 * @pm_qos_req: PM QoS request handle ··· 977 981 int nortt; 978 982 u32 mcq_capabilities; 979 983 int nutmrs; 980 - u32 reserved_slot; 981 984 u32 ufs_version; 982 985 const struct ufs_hba_variant_ops *vops; 983 986 struct ufs_hba_variant_params *vps; ··· 1094 1099 bool mcq_esi_enabled; 1095 1100 void __iomem *mcq_base; 1096 1101 struct ufs_hw_queue *uhq; 1097 - struct ufs_hw_queue *dev_cmd_queue; 1098 1102 struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX]; 1099 1103 1100 1104 struct delayed_work ufs_rtc_update_work;