Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"This is two driver fixes (megaraid_sas and hisi_sas).

The megaraid one is a revert of a previous revert of a cpu hotplug fix
which exposed a bug in the block layer which has been fixed in this
merge window.

The hisi_sas performance enhancement comes from switching to interrupt
managed completion queues, which depended on the addition of
devm_platform_get_irqs_affinity() which is now upstream via the irq
tree in the last merge window"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: hisi_sas: Expose HW queues for v2 hw
Revert "Revert "scsi: megaraid_sas: Added support for shared host tagset for cpuhotplug""

+123 -26
+4
drivers/scsi/hisi_sas/hisi_sas.h
··· 14 14 #include <linux/debugfs.h> 15 15 #include <linux/dmapool.h> 16 16 #include <linux/iopoll.h> 17 + #include <linux/irq.h> 17 18 #include <linux/lcm.h> 18 19 #include <linux/libata.h> 19 20 #include <linux/mfd/syscon.h> ··· 295 294 296 295 struct hisi_sas_hw { 297 296 int (*hw_init)(struct hisi_hba *hisi_hba); 297 + int (*interrupt_preinit)(struct hisi_hba *hisi_hba); 298 298 void (*setup_itct)(struct hisi_hba *hisi_hba, 299 299 struct hisi_sas_device *device); 300 300 int (*slot_index_alloc)(struct hisi_hba *hisi_hba, ··· 394 392 u32 ctrl_clock_ena_reg; 395 393 u32 refclk_frequency_mhz; 396 394 u8 sas_addr[SAS_ADDR_SIZE]; 395 + 396 + int *irq_map; /* v2 hw */ 397 397 398 398 int n_phy; 399 399 spinlock_t lock;
+11
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 2614 2614 return NULL; 2615 2615 } 2616 2616 2617 + static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2618 + { 2619 + if (hisi_hba->hw->interrupt_preinit) 2620 + return hisi_hba->hw->interrupt_preinit(hisi_hba); 2621 + return 0; 2622 + } 2623 + 2617 2624 int hisi_sas_probe(struct platform_device *pdev, 2618 2625 const struct hisi_sas_hw *hw) 2619 2626 { ··· 2677 2670 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2678 2671 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2679 2672 } 2673 + 2674 + rc = hisi_sas_interrupt_preinit(hisi_hba); 2675 + if (rc) 2676 + goto err_out_ha; 2680 2677 2681 2678 rc = scsi_add_host(shost, &pdev->dev); 2682 2679 if (rc)
+53 -13
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
··· 3302 3302 fatal_axi_int_v2_hw 3303 3303 }; 3304 3304 3305 + #define CQ0_IRQ_INDEX (96) 3306 + 3307 + static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba) 3308 + { 3309 + struct platform_device *pdev = hisi_hba->platform_dev; 3310 + struct Scsi_Host *shost = hisi_hba->shost; 3311 + struct irq_affinity desc = { 3312 + .pre_vectors = CQ0_IRQ_INDEX, 3313 + .post_vectors = 16, 3314 + }; 3315 + int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec; 3316 + 3317 + nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128, 3318 + &hisi_hba->irq_map); 3319 + if (nvec < 0) 3320 + return nvec; 3321 + 3322 + shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv; 3323 + 3324 + return 0; 3325 + } 3326 + 3305 3327 /* 3306 3328 * There is a limitation in the hip06 chipset that we need 3307 3329 * to map in all mbigen interrupts, even if they are not used. ··· 3332 3310 { 3333 3311 struct platform_device *pdev = hisi_hba->platform_dev; 3334 3312 struct device *dev = &pdev->dev; 3335 - int irq, rc = 0, irq_map[128]; 3313 + int irq, rc = 0; 3336 3314 int i, phy_no, fatal_no, queue_no; 3337 3315 3338 - for (i = 0; i < 128; i++) 3339 - irq_map[i] = platform_get_irq(pdev, i); 3340 - 3341 3316 for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { 3342 - irq = irq_map[i + 1]; /* Phy up/down is irq1 */ 3317 + irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */ 3343 3318 rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, 3344 3319 DRV_NAME " phy", hisi_hba); 3345 3320 if (rc) { ··· 3350 3331 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 3351 3332 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 3352 3333 3353 - irq = irq_map[phy_no + 72]; 3334 + irq = hisi_hba->irq_map[phy_no + 72]; 3354 3335 rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, 3355 3336 DRV_NAME " sata", phy); 3356 3337 if (rc) { ··· 3362 3343 } 3363 3344 3364 3345 for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) { 3365 - irq = irq_map[fatal_no + 81]; 3366 + irq = hisi_hba->irq_map[fatal_no + 81]; 3366 3347 rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0, 3367 3348 DRV_NAME " fatal", hisi_hba); 3368 3349 if (rc) { ··· 3373 3354 } 3374 3355 } 3375 3356 3376 - for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) { 3357 + for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) { 3377 3358 struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no]; 3378 3359 3379 - cq->irq_no = irq_map[queue_no + 96]; 3360 + cq->irq_no = hisi_hba->irq_map[queue_no + 96]; 3380 3361 rc = devm_request_threaded_irq(dev, cq->irq_no, 3381 3362 cq_interrupt_v2_hw, 3382 3363 cq_thread_v2_hw, IRQF_ONESHOT, 3383 3364 DRV_NAME " cq", cq); 3384 3365 if (rc) { 3385 3366 dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", 3386 - irq, rc); 3367 + cq->irq_no, rc); 3387 3368 rc = -ENOENT; 3388 3369 goto err_out; 3389 3370 } 3371 + cq->irq_mask = irq_get_affinity_mask(cq->irq_no); 3390 3372 } 3391 - 3392 - hisi_hba->cq_nvecs = hisi_hba->queue_count; 3393 - 3394 3373 err_out: 3395 3374 return rc; 3396 3375 } ··· 3546 3529 NULL 3547 3530 }; 3548 3531 3532 + static int map_queues_v2_hw(struct Scsi_Host *shost) 3533 + { 3534 + struct hisi_hba *hisi_hba = shost_priv(shost); 3535 + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3536 + const struct cpumask *mask; 3537 + unsigned int queue, cpu; 3538 + 3539 + for (queue = 0; queue < qmap->nr_queues; queue++) { 3540 + mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]); 3541 + if (!mask) 3542 + continue; 3543 + 3544 + for_each_cpu(cpu, mask) 3545 + qmap->mq_map[cpu] = qmap->queue_offset + queue; 3546 + } 3547 + 3548 + return 0; 3549 + 3550 + } 3551 + 3549 3552 static struct scsi_host_template sht_v2_hw = { 3550 3553 .name = DRV_NAME, 3551 3554 .proc_name = DRV_NAME, ··· 3590 3553 #endif 3591 3554 .shost_attrs = host_attrs_v2_hw, 3592 3555 .host_reset = hisi_sas_host_reset, 3556 + .map_queues = map_queues_v2_hw, 3557 + .host_tagset = 1, 3593 3558 }; 3594 3559 3595 3560 static const struct hisi_sas_hw hisi_sas_v2_hw = { 3596 3561 .hw_init = hisi_sas_v2_init, 3562 + .interrupt_preinit = hisi_sas_v2_interrupt_preinit, 3597 3563 .setup_itct = setup_itct_v2_hw, 3598 3564 .slot_index_alloc = slot_index_alloc_quirk_v2_hw, 3599 3565 .alloc_dev = alloc_dev_quirk_v2_hw,
+39
drivers/scsi/megaraid/megaraid_sas_base.c
··· 37 37 #include <linux/poll.h> 38 38 #include <linux/vmalloc.h> 39 39 #include <linux/irq_poll.h> 40 + #include <linux/blk-mq-pci.h> 40 41 41 42 #include <scsi/scsi.h> 42 43 #include <scsi/scsi_cmnd.h> ··· 113 112 unsigned int enable_sdev_max_qd; 114 113 module_param(enable_sdev_max_qd, int, 0444); 115 114 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); 115 + 116 + int host_tagset_enable = 1; 117 + module_param(host_tagset_enable, int, 0444); 118 + MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); 116 119 117 120 MODULE_LICENSE("GPL"); 118 121 MODULE_VERSION(MEGASAS_VERSION); ··· 3124 3119 return 0; 3125 3120 } 3126 3121 3122 + static int megasas_map_queues(struct Scsi_Host *shost) 3123 + { 3124 + struct megasas_instance *instance; 3125 + 3126 + instance = (struct megasas_instance *)shost->hostdata; 3127 + 3128 + if (shost->nr_hw_queues == 1) 3129 + return 0; 3130 + 3131 + return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 3132 + instance->pdev, instance->low_latency_index_start); 3133 + } 3134 + 3127 3135 static void megasas_aen_polling(struct work_struct *work); 3128 3136 3129 3137 /** ··· 3445 3427 .eh_timed_out = megasas_reset_timer, 3446 3428 .shost_attrs = megaraid_host_attrs, 3447 3429 .bios_param = megasas_bios_param, 3430 + .map_queues = megasas_map_queues, 3448 3431 .change_queue_depth = scsi_change_queue_depth, 3449 3432 .max_segment_size = 0xffffffff, 3450 3433 }; ··· 6827 6808 host->max_lun = MEGASAS_MAX_LUN; 6828 6809 host->max_cmd_len = 16; 6829 6810 6811 + /* Use shared host tagset only for fusion adaptors 6812 + * if there are managed interrupts (smp affinity enabled case). 6813 + * Single msix_vectors in kdump, so shared host tag is also disabled. 6814 + */ 6815 + 6816 + host->host_tagset = 0; 6817 + host->nr_hw_queues = 1; 6818 + 6819 + if ((instance->adapter_type != MFI_SERIES) && 6820 + (instance->msix_vectors > instance->low_latency_index_start) && 6821 + host_tagset_enable && 6822 + instance->smp_affinity_enable) { 6823 + host->host_tagset = 1; 6824 + host->nr_hw_queues = instance->msix_vectors - 6825 + instance->low_latency_index_start; 6826 + } 6827 + 6828 + dev_info(&instance->pdev->dev, 6829 + "Max firmware commands: %d shared with nr_hw_queues = %d\n", 6830 + instance->max_fw_cmds, host->nr_hw_queues); 6830 6831 /* 6831 6832 * Notify the mid-layer about the new controller 6832 6833 */
+16 -13
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 359 359 { 360 360 int sdev_busy; 361 361 362 - /* nr_hw_queue = 1 for MegaRAID */ 363 - struct blk_mq_hw_ctx *hctx = 364 - scmd->device->request_queue->queue_hw_ctx[0]; 365 - 366 - sdev_busy = atomic_read(&hctx->nr_active); 362 + /* TBD - if sml remove device_busy in future, driver 363 + * should track counter in internal structure. 364 + */ 365 + sdev_busy = atomic_read(&scmd->device->device_busy); 367 366 368 367 if (instance->perf_mode == MR_BALANCED_PERF_MODE && 369 - sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) 368 + sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) { 370 369 cmd->request_desc->SCSIIO.MSIxIndex = 371 370 mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / 372 371 MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); 373 - else if (instance->msix_load_balance) 372 + } else if (instance->msix_load_balance) { 374 373 cmd->request_desc->SCSIIO.MSIxIndex = 375 374 (mega_mod64(atomic64_add_return(1, &instance->total_io_count), 376 375 instance->msix_vectors)); 377 - else 376 + } else if (instance->host->nr_hw_queues > 1) { 377 + u32 tag = blk_mq_unique_tag(scmd->request); 378 + 379 + cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) + 380 + instance->low_latency_index_start; 381 + } else { 378 382 cmd->request_desc->SCSIIO.MSIxIndex = 379 383 instance->reply_map[raw_smp_processor_id()]; 384 + } 380 385 } 381 386 382 387 /** ··· 961 956 if (megasas_alloc_cmdlist_fusion(instance)) 962 957 goto fail_exit; 963 958 964 - dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n", 965 - instance->max_fw_cmds); 966 - 967 959 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ 968 960 io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 969 961 io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; ··· 1104 1102 MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing) 1105 1103 instance->perf_mode = MR_BALANCED_PERF_MODE; 1106 1104 1107 - dev_info(&instance->pdev->dev, "Performance mode :%s\n", 1108 - MEGASAS_PERF_MODE_2STR(instance->perf_mode)); 1105 + dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n", 1106 + MEGASAS_PERF_MODE_2STR(instance->perf_mode), 1107 + instance->low_latency_index_start); 1109 1108 1110 1109 instance->fw_sync_cache_support = (scratch_pad_1 & 1111 1110 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;