Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
"Usual driver updates (qla2xxx, mpi3mr, mpt3sas, ufs) plus assorted
cleanups and fixes.

The biggest core change is the massive code motion in the sd driver to
remove forward declarations and the most significant change is to
enumify the queuecommand return"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (78 commits)
scsi: csiostor: Fix dereference of null pointer rn
scsi: buslogic: Reduce stack usage
scsi: ufs: host: mediatek: Require CONFIG_PM
scsi: ufs: mediatek: Fix page faults in ufs_mtk_clk_scale() trace event
scsi: smartpqi: Fix memory leak in pqi_report_phys_luns()
scsi: mpi3mr: Make driver probing asynchronous
scsi: ufs: core: Flush exception handling work when RPM level is zero
scsi: efct: Use IRQF_ONESHOT and default primary handler
scsi: ufs: core: Use a host-wide tagset in SDB mode
scsi: qla2xxx: target: Add WQ_PERCPU to alloc_workqueue() users
scsi: qla2xxx: Add WQ_PERCPU to alloc_workqueue() users
scsi: qla4xxx: Add WQ_PERCPU to alloc_workqueue() users
scsi: mpi3mr: Driver version update to 8.17.0.3.50
scsi: mpi3mr: Fixed the W=1 compilation warning
scsi: mpi3mr: Record and report controller firmware faults
scsi: mpi3mr: Update MPI Headers to revision 39
scsi: mpi3mr: Use negotiated link rate from DevicePage0
scsi: mpi3mr: Avoid redundant diag-fault resets
scsi: mpi3mr: Rename log data save helper to reflect threaded/BH context
scsi: mpi3mr: Add module parameter to control threaded IRQ polling
...
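
To make the "enumify the queuecommand return" item above concrete: the per-driver diffs below switch the .queuecommand hook (and its *_lck and helper variants) from returning int to returning enum scsi_qc_status, keeping the existing values. The following sketch is hypothetical driver code, not part of this merge; the mydrv_* names are invented, and only the return values visible in these diffs (0 for an accepted command, SCSI_MLQUEUE_HOST_BUSY for a busy host) are assumed.

/*
 * Hypothetical low-level driver hook illustrating the enumified return
 * type.  Only the values that appear in the diffs below are used: 0 means
 * the command was accepted, SCSI_MLQUEUE_HOST_BUSY asks the midlayer to
 * retry the command later.  The mydrv_* helpers are invented.
 */
static enum scsi_qc_status mydrv_queuecommand(struct Scsi_Host *shost,
					      struct scsi_cmnd *cmd)
{
	struct mydrv_host *hw = shost_priv(shost);	/* hypothetical private data */

	if (!mydrv_hw_ready(hw))			/* hypothetical readiness check */
		return SCSI_MLQUEUE_HOST_BUSY;		/* midlayer requeues the command */

	mydrv_submit(hw, cmd);				/* hypothetical; completion path calls scsi_done(cmd) */
	return 0;					/* command accepted */
}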

+3616 -767
+56
Documentation/devicetree/bindings/ufs/qcom,sa8255p-ufshc.yaml
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/qcom,sa8255p-ufshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SA8255P UFS Host Controller
+
+maintainers:
+  - Ram Kumar Dwivedi <ram.dwivedi@oss.qualcomm.com>
+
+properties:
+  compatible:
+    const: qcom,sa8255p-ufshc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  iommus:
+    maxItems: 1
+
+  dma-coherent: true
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - power-domains
+  - iommus
+  - dma-coherent
+
+allOf:
+  - $ref: ufs-common.yaml
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    ufshc@1d84000 {
+        compatible = "qcom,sa8255p-ufshc";
+        reg = <0x01d84000 0x3000>;
+        interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+        lanes-per-direction = <2>;
+
+        iommus = <&apps_smmu 0x100 0x0>;
+        power-domains = <&scmi3_pd 0>;
+        dma-coherent;
+    };
+2 -1
Documentation/scsi/scsi_mid_low_api.rst
···
  *
  * Defined in: LLD
  **/
-int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
+enum scsi_qc_status queuecommand(struct Scsi_Host *shost,
+				 struct scsi_cmnd *scp)


 /**
+1 -1
MAINTAINERS
···
 L:	linux-arm-msm@vger.kernel.org
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+F:	Documentation/devicetree/bindings/ufs/qcom*
 F:	drivers/ufs/host/ufs-qcom*

 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER RENESAS HOOKS
+5 -3
drivers/ata/libata-scsi.c
···
 	return NULL;
 }

-int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
+enum scsi_qc_status __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
+					struct ata_device *dev)
 {
 	struct ata_port *ap = dev->link->ap;
 	u8 scsi_op = scmd->cmnd[0];
···
 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 * 0 otherwise.
 */
-int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+enum scsi_qc_status ata_scsi_queuecmd(struct Scsi_Host *shost,
+				      struct scsi_cmnd *cmd)
 {
 	struct ata_port *ap;
 	struct ata_device *dev;
 	struct scsi_device *scsidev = cmd->device;
-	int rc = 0;
+	enum scsi_qc_status rc = 0;
 	unsigned long irq_flags;

 	ap = ata_shost_to_port(shost);
+2 -1
drivers/ata/libata.h
···
 void ata_scsi_sdev_config(struct scsi_device *sdev);
 int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
 			struct ata_device *dev);
-int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
+enum scsi_qc_status __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
+					struct ata_device *dev);

 /* libata-eh.c */
 extern unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
+8
drivers/base/transport_class.c
···
 		goto err_del;
 	}

+	if (tcont->encryption) {
+		error = sysfs_create_group(&classdev->kobj, tcont->encryption);
+		if (error)
+			goto err_del;
+	}
+
 	return 0;

 err_del:
···
 	if (tclass->remove != anon_transport_dummy_function) {
 		if (tcont->statistics)
 			sysfs_remove_group(&classdev->kobj, tcont->statistics);
+		if (tcont->encryption)
+			sysfs_remove_group(&classdev->kobj, tcont->encryption);
 		attribute_container_class_device_del(classdev);


+4 -3
drivers/firewire/sbp2.c
···

 /* SCSI stack integration */

-static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
-				  struct scsi_cmnd *cmd)
+static enum scsi_qc_status sbp2_scsi_queuecommand(struct Scsi_Host *shost,
+						  struct scsi_cmnd *cmd)
 {
 	struct sbp2_logical_unit *lu = cmd->device->hostdata;
 	struct fw_device *device = target_parent_device(lu->tgt);
+	enum scsi_qc_status retval = SCSI_MLQUEUE_HOST_BUSY;
 	struct sbp2_command_orb *orb;
-	int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
+	int generation;

 	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
 	if (orb == NULL)
+2 -1
drivers/infiniband/ulp/srp/ib_srp.c
···
 	target->qp_in_error = true;
 }

-static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+static enum scsi_qc_status srp_queuecommand(struct Scsi_Host *shost,
+					    struct scsi_cmnd *scmnd)
 {
 	struct request *rq = scsi_cmd_to_rq(scmnd);
 	struct srp_target_port *target = host_to_target(shost);
+4 -3
drivers/message/fusion/mptfc.c
···

 static int mptfc_target_alloc(struct scsi_target *starget);
 static int mptfc_sdev_init(struct scsi_device *sdev);
-static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
+static enum scsi_qc_status mptfc_qcmd(struct Scsi_Host *shost,
+				      struct scsi_cmnd *SCpnt);
 static void mptfc_target_destroy(struct scsi_target *starget);
 static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
 static void mptfc_remove(struct pci_dev *pdev);
···
 	return 0;
 }

-static int
-mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status mptfc_qcmd(struct Scsi_Host *shost,
+				      struct scsi_cmnd *SCpnt)
 {
 	struct mptfc_rport_info *ri;
 	struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
+2 -2
drivers/message/fusion/mptsas.c
···
 	return 0;
 }

-static int
-mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status mptsas_qcmd(struct Scsi_Host *shost,
+				       struct scsi_cmnd *SCpnt)
 {
 	MPT_SCSI_HOST *hd;
 	MPT_ADAPTER *ioc;
+1 -2
drivers/message/fusion/mptscsih.c
···
 *
 *	Returns 0. (rtn value discarded by linux scsi mid-layer)
 */
-int
-mptscsih_qcmd(struct scsi_cmnd *SCpnt)
+enum scsi_qc_status mptscsih_qcmd(struct scsi_cmnd *SCpnt)
 {
 	MPT_SCSI_HOST *hd;
 	MPT_FRAME_HDR *mf;
+1 -1
drivers/message/fusion/mptscsih.h
···
 #endif
 extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
-extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
+extern enum scsi_qc_status mptscsih_qcmd(struct scsi_cmnd *SCpnt);
 extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
 	u8 id, u64 lun, int ctx2abort, ulong timeout);
 extern void mptscsih_sdev_destroy(struct scsi_device *device);
+2 -2
drivers/message/fusion/mptspi.c
···
 	return 0;
 }

-static int
-mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status mptspi_qcmd(struct Scsi_Host *shost,
+				       struct scsi_cmnd *SCpnt)
 {
 	struct _MPT_SCSI_HOST *hd = shost_priv(shost);
 	VirtDevice *vdevice = SCpnt->device->hostdata;
+2 -2
drivers/s390/scsi/zfcp_scsi.c
···
 	scsi_done(scpnt);
 }

-static
-int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
+static enum scsi_qc_status zfcp_scsi_queuecommand(struct Scsi_Host *shost,
+						  struct scsi_cmnd *scpnt)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
 	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
+1 -1
drivers/scsi/3w-9xxx.c
···
 } /* End twa_scsi_eh_reset() */

 /* This is the main scsi queue function to handle scsi opcodes */
-static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	int request_id, retval;
+5 -3
drivers/scsi/3w-sas.c
···
 } /* End twl_scsi_eh_reset() */

 /* This is the main scsi queue function to handle scsi opcodes */
-static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status twl_scsi_queue_lck(struct scsi_cmnd *SCpnt)
 {
+	TW_Device_Extension *tw_dev =
+		(TW_Device_Extension *)SCpnt->device->host->hostdata;
 	void (*done)(struct scsi_cmnd *) = scsi_done;
-	int request_id, retval;
-	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+	enum scsi_qc_status retval;
+	int request_id;

 	/* If we are resetting due to timed out ioctl, report as busy */
 	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
+1 -1
drivers/scsi/3w-xxxx.c
···
 } /* End tw_scsiop_test_unit_ready_complete() */

 /* This is the main scsi queue function to handle scsi opcodes */
-static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status tw_scsi_queue_lck(struct scsi_cmnd *SCpnt)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	unsigned char *command = SCpnt->cmnd;
+3 -3
drivers/scsi/53c700.c
···
 /* This is the script */
 #include "53c700_d.h"

-
-STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
+STATIC enum scsi_qc_status NCR_700_queuecommand(struct Scsi_Host *h,
+						struct scsi_cmnd *);
 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
···
 	return IRQ_RETVAL(handled);
 }

-static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
+static enum scsi_qc_status NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
 {
 	struct NCR_700_Host_Parameters *hostdata =
 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
+5 -3
drivers/scsi/BusLogic.c
···
   a particular probe order.
 */

-static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter)
+static noinline_for_stack void __init
+blogic_init_probeinfo_list(struct blogic_adapter *adapter)
 {
 	/*
 	  If a PCI BIOS is present, interrogate it for MultiMaster and
···
   blogic_reportconfig reports the configuration of Host Adapter.
 */

-static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
+static noinline_for_stack bool __init
+blogic_reportconfig(struct blogic_adapter *adapter)
 {
 	unsigned short alltgt_mask = (1 << adapter->maxdev) - 1;
 	unsigned short sync_ok, fast_ok;
···
   Outgoing Mailbox for execution by the associated Host Adapter.
 */

-static int blogic_qcmd_lck(struct scsi_cmnd *command)
+static enum scsi_qc_status blogic_qcmd_lck(struct scsi_cmnd *command)
 {
 	void (*comp_cb)(struct scsi_cmnd *) = scsi_done;
 	struct blogic_adapter *adapter =
+2 -1
drivers/scsi/BusLogic.h
···
 */

 static const char *blogic_drvr_info(struct Scsi_Host *);
-static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
+static enum scsi_qc_status blogic_qcmd(struct Scsi_Host *h,
+				       struct scsi_cmnd *command);
 static int blogic_diskparam(struct scsi_device *, struct gendisk *, sector_t, int *);
 static int blogic_sdev_configure(struct scsi_device *,
 		struct queue_limits *lim);
+2 -2
drivers/scsi/NCR5380.c
···
  * main coroutine is not running, it is restarted.
  */

-static int NCR5380_queue_command(struct Scsi_Host *instance,
-				 struct scsi_cmnd *cmd)
+static enum scsi_qc_status NCR5380_queue_command(struct Scsi_Host *instance,
+						 struct scsi_cmnd *cmd)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
+1 -1
drivers/scsi/a100u2w.c
···
  *	block, build the host specific scb structures and if there is room
  *	queue the command down to the controller
  */
-static int inia100_queue_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status inia100_queue_lck(struct scsi_cmnd *cmd)
 {
 	struct orc_scb *scb;
 	struct orc_host *host;	/* Point to Host adapter control block */
+2 -2
drivers/scsi/aacraid/linit.c
···
  *	TODO: unify with aac_scsi_cmd().
  */

-static int aac_queuecommand(struct Scsi_Host *shost,
-			    struct scsi_cmnd *cmd)
+static enum scsi_qc_status aac_queuecommand(struct Scsi_Host *shost,
+					    struct scsi_cmnd *cmd)
 {
 	aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;

+3 -2
drivers/scsi/advansys.c
···
  * This function always returns 0. Command return status is saved
  * in the 'scp' result field.
  */
-static int advansys_queuecommand_lck(struct scsi_cmnd *scp)
+static enum scsi_qc_status advansys_queuecommand_lck(struct scsi_cmnd *scp)
 {
 	struct Scsi_Host *shost = scp->device->host;
-	int asc_res, result = 0;
+	enum scsi_qc_status result = 0;
+	int asc_res;

 	ASC_STATS(shost, queuecommand);

+4 -4
drivers/scsi/aha152x.c
···
 /*
  *  Queue a command and setup interrupts for a free bus.
  */
-static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
+static enum scsi_qc_status aha152x_internal_queue(struct scsi_cmnd *SCpnt,
 				  struct completion *complete, int phase)
 {
 	struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
···
 	if (acp->phase & (resetting | check_condition)) {
 		if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
 			scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
-			return FAILED;
+			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	} else {
 		SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
 		if(!SCpnt->host_scribble) {
 			scmd_printk(KERN_ERR, SCpnt, "allocation failed\n");
-			return FAILED;
+			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}

···
  * queue a command
  *
  */
-static int aha152x_queue_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status aha152x_queue_lck(struct scsi_cmnd *SCpnt)
 {
 	return aha152x_internal_queue(SCpnt, NULL, 0);
 }
+2 -1
drivers/scsi/aha1542.c
···
 	}
 }

-static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+static enum scsi_qc_status aha1542_queuecommand(struct Scsi_Host *sh,
+						struct scsi_cmnd *cmd)
 {
 	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
 	struct aha1542_hostdata *aha1542 = shost_priv(sh);
+1 -1
drivers/scsi/aha1740.c
···
 	return IRQ_RETVAL(handled);
 }

-static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	unchar direction;
+6 -6
drivers/scsi/aic7xxx/aic79xx_osm.c
···
 static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
 				     struct ahd_devinfo *devinfo);
 static void ahd_linux_device_queue_depth(struct scsi_device *);
-static int ahd_linux_run_command(struct ahd_softc*,
+static enum scsi_qc_status ahd_linux_run_command(struct ahd_softc*,
 				 struct ahd_linux_device *,
 				 struct scsi_cmnd *);
 static void ahd_linux_setup_tag_info_global(char *p);
···
 /*
  * Queue an SCB to the controller.
  */
-static int ahd_linux_queue_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status ahd_linux_queue_lck(struct scsi_cmnd *cmd)
 {
-	struct ahd_softc *ahd;
-	struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
-	int rtn = SCSI_MLQUEUE_HOST_BUSY;
+	struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
+	enum scsi_qc_status rtn = SCSI_MLQUEUE_HOST_BUSY;
+	struct ahd_softc *ahd;

 	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;

···
 	}
 }

-static int
+static enum scsi_qc_status
 ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
 		      struct scsi_cmnd *cmd)
 {
+2 -2
drivers/scsi/aic7xxx/aic7xxx_osm.c
···
 /*
  * Queue an SCB to the controller.
  */
-static int ahc_linux_queue_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status ahc_linux_queue_lck(struct scsi_cmnd *cmd)
 {
 	struct ahc_softc *ahc;
 	struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
-	int rtn = SCSI_MLQUEUE_HOST_BUSY;
+	enum scsi_qc_status rtn = SCSI_MLQUEUE_HOST_BUSY;
 	unsigned long flags;

 	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+3 -2
drivers/scsi/arcmsr/arcmsr_hba.c
···
 static int arcmsr_bus_reset(struct scsi_cmnd *);
 static int arcmsr_bios_param(struct scsi_device *sdev,
 		struct gendisk *disk, sector_t capacity, int *info);
-static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static enum scsi_qc_status arcmsr_queue_command(struct Scsi_Host *h,
+						struct scsi_cmnd *cmd);
 static int arcmsr_probe(struct pci_dev *pdev,
 				const struct pci_device_id *id);
 static int __maybe_unused arcmsr_suspend(struct device *dev);
···
 	}
 }

-static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+1 -1
drivers/scsi/arm/acornscsi.c
···
  * Params  : cmd - SCSI command
  * Returns : 0, or < 0 on error.
  */
-static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
 {
 	struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
 	void (*done)(struct scsi_cmnd *) = scsi_done;
+6 -5
drivers/scsi/arm/fas216.c
···
  * Returns: 0 on success, else error.
  * Notes: io_request_lock is held, interrupts are disabled.
  */
-static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt,
-					 void (*done)(struct scsi_cmnd *))
+static enum scsi_qc_status
+fas216_queue_command_internal(struct scsi_cmnd *SCpnt,
+			      void (*done)(struct scsi_cmnd *))
 {
 	FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
-	int result;
+	enum scsi_qc_status result;

 	fas216_checkmagic(info);

···
 	return result;
 }

-static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status fas216_queue_command_lck(struct scsi_cmnd *SCpnt)
 {
 	return fas216_queue_command_internal(SCpnt, scsi_done);
 }
···
  * Returns: scsi result code.
  * Notes: io_request_lock is held, interrupts are disabled.
  */
-static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt)
+static enum scsi_qc_status fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt)
 {
 	FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;

+7 -4
drivers/scsi/arm/fas216.h
···
  */
 extern int fas216_add (struct Scsi_Host *instance, struct device *dev);

-/* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
+/* Function: enum scsi_qc_status fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
  * Purpose : queue a command for adapter to process.
  * Params  : h - host adapter
  *	   : SCpnt - Command to queue
  * Returns : 0 - success, else error
  */
-extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
+extern enum scsi_qc_status fas216_queue_command(struct Scsi_Host *h,
+						struct scsi_cmnd *SCpnt);

-/* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
+/* Function: enum scsi_qc_status fas216_noqueue_command(struct Scsi_Host *h,
+ *	      struct scsi_cmnd *SCpnt)
  * Purpose : queue a command for adapter to process, and process it to completion.
  * Params  : h - host adapter
  *	   : SCpnt - Command to queue
  * Returns : 0 - success, else error
  */
-extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *);
+extern enum scsi_qc_status fas216_noqueue_command(struct Scsi_Host *h,
+						  struct scsi_cmnd *SCpnt);

 /* Function: irqreturn_t fas216_intr (FAS216_Info *info)
  * Purpose : handle interrupts from the interface to progress a command
+1 -1
drivers/scsi/atp870u.c
···
  *
  *	Queue a command to the ATP queue. Called with the host lock held.
  */
-static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p)
+static enum scsi_qc_status atp870u_queuecommand_lck(struct scsi_cmnd *req_p)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	unsigned char c;
+3 -2
drivers/scsi/bfa/bfad_im.c
···
 struct scsi_transport_template *bfad_im_scsi_transport_template;
 struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
 static void bfad_im_itnim_work_handler(struct work_struct *work);
-static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
+static enum scsi_qc_status bfad_im_queuecommand(struct Scsi_Host *h,
+						struct scsi_cmnd *cmnd);
 static int bfad_im_sdev_init(struct scsi_device *sdev);
 static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
 				struct bfad_itnim_s *itnim);
···
 /*
  * Scsi_Host template entry, queue a SCSI command to the BFAD.
  */
-static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd)
+static enum scsi_qc_status bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	struct bfad_im_port_s *im_port =
+2 -1
drivers/scsi/bnx2fc/bnx2fc.h
···
 struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
 void bnx2fc_cmd_release(struct kref *ref);
-int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+enum scsi_qc_status bnx2fc_queuecommand(struct Scsi_Host *host,
+					struct scsi_cmnd *sc_cmd);
 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+2 -2
drivers/scsi/bnx2fc/bnx2fc_io.c
···
  *
  * This is the IO strategy routine, called by SCSI-ML
  **/
-int bnx2fc_queuecommand(struct Scsi_Host *host,
-			struct scsi_cmnd *sc_cmd)
+enum scsi_qc_status bnx2fc_queuecommand(struct Scsi_Host *host,
+					struct scsi_cmnd *sc_cmd)
 {
 	struct fc_lport *lport = shost_priv(host);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+9 -9
drivers/scsi/ch.c
···

 /* ------------------------------------------------------------------------ */

-static int ch_probe(struct device *dev)
+static int ch_probe(struct scsi_device *sd)
 {
-	struct scsi_device *sd = to_scsi_device(dev);
+	struct device *dev = &sd->sdev_gendev;
 	struct device *class_dev;
 	int ret;
 	scsi_changer *ch;
···
 	return ret;
 }

-static int ch_remove(struct device *dev)
+static void ch_remove(struct scsi_device *sd)
 {
+	struct device *dev = &sd->sdev_gendev;
 	scsi_changer *ch = dev_get_drvdata(dev);

 	spin_lock(&ch_index_lock);
···
 	device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
 	scsi_device_put(ch->device);
 	kref_put(&ch->ref, ch_destroy);
-	return 0;
 }

 static struct scsi_driver ch_template = {
-	.gendrv = {
+	.probe		= ch_probe,
+	.remove		= ch_remove,
+	.gendrv = {
 		.name	= "ch",
 		.owner	= THIS_MODULE,
-		.probe	= ch_probe,
-		.remove	= ch_remove,
 	},
 };

···
 			SCSI_CHANGER_MAJOR);
 		goto fail1;
 	}
-	rc = scsi_register_driver(&ch_template.gendrv);
+	rc = scsi_register_driver(&ch_template);
 	if (rc < 0)
 		goto fail2;
 	return 0;
···

 static void __exit exit_ch_module(void)
 {
-	scsi_unregister_driver(&ch_template.gendrv);
+	scsi_unregister_driver(&ch_template);
 	unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
 	class_unregister(&ch_sysfs_class);
 	idr_destroy(&ch_index_idr);
+4 -3
drivers/scsi/csiostor/csio_scsi.c
···
  * - Kicks off the SCSI state machine for this IO.
  * - Returns busy status on error.
  */
-static int
-csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
+static enum scsi_qc_status csio_queuecommand(struct Scsi_Host *host,
+					     struct scsi_cmnd *cmnd)
 {
 	struct csio_lnode *ln = shost_priv(host);
 	struct csio_hw *hw = csio_lnode_to_hw(ln);
···
 	struct csio_scsi_level_data sld;

 	if (!rn)
-		goto fail;
+		goto fail_ret;

 	csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
 		cmnd->device->lun, rn->flowid, rn->scsi_id);
···
 	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
 fail:
 	CSIO_INC_STATS(rn, n_lun_rst_fail);
+fail_ret:
 	return FAILED;
 }

+1 -1
drivers/scsi/dc395x.c
···
  * and is expected to be held on return.
  *
  */
-static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status dc395x_queue_command_lck(struct scsi_cmnd *cmd)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	struct DeviceCtlBlk *dcb;
+2 -1
drivers/scsi/esas2r/esas2r.h
···
 int esas2r_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg);
 u8 handle_hba_ioctl(struct esas2r_adapter *a,
 		    struct atto_ioctl *ioctl_hba);
-int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
+enum scsi_qc_status esas2r_queuecommand(struct Scsi_Host *host,
+					struct scsi_cmnd *cmd);
 int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
 long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

+2 -1
drivers/scsi/esas2r/esas2r_main.c
···
 	return len;
 }

-int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+enum scsi_qc_status esas2r_queuecommand(struct Scsi_Host *host,
+					struct scsi_cmnd *cmd)
 {
 	struct esas2r_adapter *a =
 		(struct esas2r_adapter *)cmd->device->host->hostdata;
+1 -1
drivers/scsi/esp_scsi.c
···
 		scsi_track_queue_full(dev, lp->num_tagged - 1);
 }

-static int esp_queuecommand_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status esp_queuecommand_lck(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *dev = cmd->device;
 	struct esp *esp = shost_priv(dev->host);
+2 -1
drivers/scsi/fdomain.c
···
 	return IRQ_HANDLED;
 }

-static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+static enum scsi_qc_status fdomain_queue(struct Scsi_Host *sh,
+					 struct scsi_cmnd *cmd)
 {
 	struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
 	struct fdomain *fd = shost_priv(cmd->device->host);
+2 -1
drivers/scsi/fnic/fnic.h
···
 void fnic_flush_tx(struct work_struct *work);
 void fnic_update_mac_locked(struct fnic *, u8 *new);

-int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
+				      struct scsi_cmnd *sc);
 int fnic_abort_cmd(struct scsi_cmnd *);
 int fnic_device_reset(struct scsi_cmnd *);
 int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
+2 -1
drivers/scsi/fnic/fnic_scsi.c
···
 	return 0;
 }

-int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
+				      struct scsi_cmnd *sc)
 {
 	struct request *const rq = scsi_cmd_to_rq(sc);
 	uint32_t mqtag = 0;
+2 -3
drivers/scsi/hosts.c
···
 {
 	int cnt = 0;

-	if (shost->tag_set.ops)
-		blk_mq_tagset_busy_iter(&shost->tag_set,
-					scsi_host_check_in_flight, &cnt);
+	blk_mq_tagset_busy_iter(&shost->tag_set,
+				scsi_host_check_in_flight, &cnt);
 	return cnt;
 }
 EXPORT_SYMBOL(scsi_host_busy);
+4 -2
drivers/scsi/hpsa.c
···
 #define VPD_PAGE (1 << 8)
 #define HPSA_SIMPLE_ERROR_BITS 0x03

-static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static enum scsi_qc_status hpsa_scsi_queue_command(struct Scsi_Host *h,
+						   struct scsi_cmnd *cmd);
 static void hpsa_scan_start(struct Scsi_Host *);
 static int hpsa_scan_finished(struct Scsi_Host *sh,
 	unsigned long elapsed_time);
···
 }

 /* Running in struct Scsi_Host->host_lock less mode */
-static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+static enum scsi_qc_status hpsa_scsi_queue_command(struct Scsi_Host *sh,
+						   struct scsi_cmnd *cmd)
 {
 	struct ctlr_info *h;
 	struct hpsa_scsi_dev_t *dev;
+1 -1
drivers/scsi/hptiop.c
···
 	return 0;
 }

-static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
+static enum scsi_qc_status hptiop_queuecommand_lck(struct scsi_cmnd *scp)
 {
 	struct Scsi_Host *host = scp->device->host;
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+2 -1
drivers/scsi/ibmvscsi/ibmvfc.c
···
  * Returns:
  *	0 on success / other on failure
  **/
-static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+static enum scsi_qc_status ibmvfc_queuecommand(struct Scsi_Host *shost,
+					       struct scsi_cmnd *cmnd)
 {
 	struct ibmvfc_host *vhost = shost_priv(shost);
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+5 -4
drivers/scsi/ibmvscsi/ibmvscsi.c
···
  * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
  * Note that this routine assumes that host_lock is held for synchronization
 */
-static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
-				   struct ibmvscsi_host_data *hostdata,
-				   unsigned long timeout)
+static enum scsi_qc_status
+ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
+			struct ibmvscsi_host_data *hostdata,
+			unsigned long timeout)
 {
 	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
 	int request_status = 0;
···
  * @cmnd:	struct scsi_cmnd to be executed
  * @done:	Callback function to be called when cmd is completed
 */
-static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
+static enum scsi_qc_status ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	struct srp_cmd *srp_cmd;
+1 -1
drivers/scsi/imm.c
···
 	return 0;
 }

-static int imm_queuecommand_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status imm_queuecommand_lck(struct scsi_cmnd *cmd)
 {
 	imm_struct *dev = imm_dev(cmd->device->host);

+1 -1
drivers/scsi/initio.c
···
  *	zero if successful or indicate a host busy condition if not (which
  *	will cause the mid layer to call us again later with the command)
  */
-static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status i91u_queuecommand_lck(struct scsi_cmnd *cmd)
 {
 	struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
 	struct scsi_ctrl_blk *cmnd;
+2 -2
drivers/scsi/ipr.c
···
  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand(struct Scsi_Host *shost,
-			    struct scsi_cmnd *scsi_cmd)
+static enum scsi_qc_status ipr_queuecommand(struct Scsi_Host *shost,
+					    struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_resource_entry *res;
+2 -2
drivers/scsi/ips.c
···
  */
 static int ips_eh_abort(struct scsi_cmnd *);
 static int ips_eh_reset(struct scsi_cmnd *);
-static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
+static enum scsi_qc_status ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
 static const char *ips_info(struct Scsi_Host *);
 static irqreturn_t do_ipsintr(int, void *);
 static int ips_hainit(ips_ha_t *);
···
 /*   Linux obtains io_request_lock before calling this function            */
 /*                                                                          */
 /****************************************************************************/
-static int ips_queue_lck(struct scsi_cmnd *SC)
+static enum scsi_qc_status ips_queue_lck(struct scsi_cmnd *SC)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	ips_ha_t *ha;
+2 -1
drivers/scsi/libfc/fc_fcp.c
···
  *
  * This is the i/o strategy routine, called by the SCSI layer.
  */
-int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
+enum scsi_qc_status fc_queuecommand(struct Scsi_Host *shost,
+				    struct scsi_cmnd *sc_cmd)
 {
 	struct fc_lport *lport = shost_priv(shost);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+2 -1
drivers/scsi/libiscsi.c
···
 	FAILURE_SESSION_NOT_READY,
 };

-int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+enum scsi_qc_status iscsi_queuecommand(struct Scsi_Host *host,
+				       struct scsi_cmnd *sc)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_host *ihost;
+2 -1
drivers/scsi/libsas/sas_scsi_host.c
···
 	return task;
 }

-int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+enum scsi_qc_status sas_queuecommand(struct Scsi_Host *host,
+				     struct scsi_cmnd *cmd)
 {
 	struct sas_internal *i = to_sas_internal(host->transportt);
 	struct domain_device *dev = cmd_to_domain_dev(cmd);
+40
drivers/scsi/lpfc/lpfc_attr.c
···
 	return;
 }

+/**
+ * lpfc_get_enc_info - Return encryption information about the session for
+ * a given remote port.
+ * @rport: ptr to fc_rport from scsi transport fc
+ *
+ * Given an rport object, iterate through the fc_nodes list to find node
+ * corresponding with rport. Pass the encryption information from the node to
+ * rport's encryption attribute for reporting to upper layers. Information is
+ * passed through nlp_enc_info struct which contains encryption status.
+ *
+ * Returns:
+ * - Address of rport's fc_encryption_info struct
+ * - NULL when not found
+ **/
+static struct fc_encryption_info *
+lpfc_get_enc_info(struct fc_rport *rport)
+{
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct fc_encryption_info *ef = NULL;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (ndlp->rport && ndlp->rport == rport) {
+			ef = &rport->enc_info;
+			ef->status = ndlp->nlp_enc_info.status;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
+	return ef;
+}
+
+
 /*
  * The LPFC driver treats linkdown handling as target loss events so there
  * are no sysfs handlers for link_down_tmo.
···
 	.get_fc_host_stats = lpfc_get_stats,
 	.reset_fc_host_stats = lpfc_reset_stats,

+	.get_fc_rport_enc_info = lpfc_get_enc_info,
+
 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
···

 	.get_fc_host_stats = lpfc_get_stats,
 	.reset_fc_host_stats = lpfc_reset_stats,
+
+	.get_fc_rport_enc_info = lpfc_get_enc_info,

 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
 	.show_rport_maxframe_size = 1,
+7
drivers/scsi/lpfc/lpfc_debugfs.c
···
 				ndlp->nlp_rpi);
 		len += scnprintf(buf+len, size-len, "flag:x%08lx ",
 				ndlp->nlp_flag);
+		if (ndlp->nlp_enc_info.status) {
+			len += scnprintf(buf + len,
+					 size - len, "ENCRYPTED");
+			len += scnprintf(buf + len, size - len,
+					 ndlp->nlp_enc_info.level
+					 ? "(CNSA2.0) " : "(CNSA1.0) ");
+		}
 		if (!ndlp->nlp_type)
 			len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
 		if (ndlp->nlp_type & NLP_FC_NODE)
+7
drivers/scsi/lpfc/lpfc_disc.h
···
 	unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
 };

+struct lpfc_enc_info {
+	u8 status;	/* encryption status for session */
+	u8 level;	/* CNSA encryption level */
+};
+
 enum lpfc_fc4_xpt_flags {
 	NLP_XPT_REGD = 0x1,
 	SCSI_XPT_REGD = 0x2,
···
 	u8 nlp_nvme_info;	/* NVME NSLER Support */
 	uint8_t vmid_support;	/* destination VMID support */
 #define NLP_NVME_NSLER 0x1	/* NVME NSLER device */
+
+	struct lpfc_enc_info nlp_enc_info; /* Encryption information struct */

 	struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
 	struct lpfc_hba *phba;
+57
drivers/scsi/lpfc/lpfc_els.c
···
 	lpfc_nlp_put(ndlp);
 	return;
 }
+
+/**
+ * lpfc_check_encryption - Reports an ndlp's encryption information
+ * @phba: pointer to lpfc hba data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmdiocb: pointer to lpfc command iocbq data structure.
+ * @rspiocb: pointer to lpfc response iocbq data structure.
+ *
+ * This routine is called in the completion callback function for issuing
+ * or receiving a Port Login (PLOGI) command. In a PLOGI completion, if FEDIF
+ * is supported, encryption information will be provided in completion status
+ * data. If @phba supports FEDIF, a log message containing encryption
+ * information will be logged. Encryption status is also saved for encryption
+ * reporting with upper layer through the rport encryption attribute.
+ **/
+static void
+lpfc_check_encryption(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		      struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	u32 did = ndlp->nlp_DID;
+	struct lpfc_enc_info *nlp_enc_info = &ndlp->nlp_enc_info;
+	char enc_status[FC_RPORT_ENCRYPTION_STATUS_MAX_LEN] = {0};
+	char enc_level[8] = "N/A";
+	u8 encryption;
+
+	if (phba->sli4_hba.encryption_support &&
+	    ((did & Fabric_DID_MASK) != Fabric_DID_MASK)) {
+		encryption = bf_get(lpfc_wcqe_c_enc,
+				    &rspiocb->wcqe_cmpl);
+		nlp_enc_info->status = encryption;
+
+		strscpy(enc_status, encryption ? "Encrypted" : "Unencrypted",
+			sizeof(enc_status));
+
+		if (encryption) {
+			nlp_enc_info->level = bf_get(lpfc_wcqe_c_enc_lvl,
+						     &rspiocb->wcqe_cmpl);
+			strscpy(enc_level, nlp_enc_info->level ? "CNSA2.0" :
+				"CNSA1.0",
+				sizeof(enc_level));
+		}
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ENCRYPTION,
+				 "0924 DID:x%06x %s Session "
+				 "Established, Encryption Level:%s "
+				 "rpi:x%x\n",
+				 ndlp->nlp_DID, enc_status, enc_level,
+				 ndlp->nlp_rpi);
+	}
+}
+
 /**
  * lpfc_cmpl_els_plogi - Completion callback function for plogi
  * @phba: pointer to lpfc hba data structure.
···
 	if (!lpfc_is_els_acc_rsp(prsp))
 		goto out;
 	ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+
+	lpfc_check_encryption(phba, ndlp, cmdiocb, rspiocb);

 	sp = (struct serv_parm *)((u8 *)prsp->virt +
 				  sizeof(u32));
···
 		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
 		goto out;
 	}
+
+	if (!ulp_status && test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))
+		lpfc_check_encryption(phba, ndlp, cmdiocb, rspiocb);

 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 		"ELS rsp cmpl: status:x%x/x%x did:x%x",
+1
drivers/scsi/lpfc/lpfc_hbadisc.c
···
 		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
 		if (acc_plogi)
 			clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+		memset(&ndlp->nlp_enc_info, 0, sizeof(ndlp->nlp_enc_info));
 		return 1;
 	}
 	clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+10 -1
drivers/scsi/lpfc/lpfc_hw4.h
···
 #define lpfc_wcqe_c_cmf_bw_MASK		0x0FFFFFFF
 #define lpfc_wcqe_c_cmf_bw_WORD		total_data_placed
 	uint32_t parameter;
+#define lpfc_wcqe_c_enc_SHIFT		31
+#define lpfc_wcqe_c_enc_MASK		0x00000001
+#define lpfc_wcqe_c_enc_WORD		parameter
+#define lpfc_wcqe_c_enc_lvl_SHIFT	30
+#define lpfc_wcqe_c_enc_lvl_MASK	0x00000001
+#define lpfc_wcqe_c_enc_lvl_WORD	parameter
 #define lpfc_wcqe_c_bg_edir_SHIFT	5
 #define lpfc_wcqe_c_bg_edir_MASK	0x00000001
 #define lpfc_wcqe_c_bg_edir_WORD	parameter
···
 #define lpfc_mbx_rd_conf_topology_SHIFT	24
 #define lpfc_mbx_rd_conf_topology_MASK	0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD	word2
-	uint32_t rsvd_3;
+	uint32_t word3;
+#define lpfc_mbx_rd_conf_fedif_SHIFT	6
+#define lpfc_mbx_rd_conf_fedif_MASK	0x00000001
+#define lpfc_mbx_rd_conf_fedif_WORD	word3
 	uint32_t word4;
 #define lpfc_mbx_rd_conf_e_d_tov_SHIFT	0
 #define lpfc_mbx_rd_conf_e_d_tov_MASK	0x0000FFFF
+5
drivers/scsi/lpfc/lpfc_init.c
···
 			(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
 	phba->max_vports = phba->max_vpi;

+	if (bf_get(lpfc_mbx_rd_conf_fedif, rd_config))
+		phba->sli4_hba.encryption_support = true;
+	else
+		phba->sli4_hba.encryption_support = false;
+
 	/* Next decide on FPIN or Signal E2E CGN support
 	 * For congestion alarms and warnings valid combination are:
 	 * 1. FPIN alarms / FPIN warnings
+2 -1
drivers/scsi/lpfc/lpfc_logmsg.h
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  * Copyright (C) 2004-2009 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
···
 #define LOG_RSVD1	0x01000000	/* Reserved */
 #define LOG_RSVD2	0x02000000	/* Reserved */
 #define LOG_CGN_MGMT	0x04000000	/* Congestion Mgmt events */
+#define LOG_ENCRYPTION	0x40000000	/* EDIF Encryption events. */
 #define LOG_TRACE_EVENT	0x80000000	/* Dmp the DBG log on this err */
 #define LOG_ALL_MSG	0x7fffffff	/* LOG all messages */

+4 -4
drivers/scsi/lpfc/lpfc_scsi.c
···
  *   0 - Success
  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
  **/
-static int
-lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+static enum scsi_qc_status lpfc_queuecommand(struct Scsi_Host *shost,
+					     struct scsi_cmnd *cmnd)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
···
 	return false;
 }

-static int
-lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+static enum scsi_qc_status lpfc_no_command(struct Scsi_Host *shost,
+					   struct scsi_cmnd *cmnd)
 {
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
+18 -44
drivers/scsi/lpfc/lpfc_sli.c
···
 uint16_t
 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
 {
-	uint16_t next_fcf_index;
+	uint16_t next;

-initial_priority:
-	/* Search start from next bit of currently registered FCF index */
-	next_fcf_index = phba->fcf.current_rec.fcf_indx;
+	do {
+		for_each_set_bit_wrap(next, phba->fcf.fcf_rr_bmask,
+		    LPFC_SLI4_FCF_TBL_INDX_MAX, phba->fcf.current_rec.fcf_indx) {
+			if (next == phba->fcf.current_rec.fcf_indx)
+				continue;

-next_priority:
-	/* Determine the next fcf index to check */
-	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
-	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
-				       LPFC_SLI4_FCF_TBL_INDX_MAX,
-				       next_fcf_index);
+			if (!(phba->fcf.fcf_pri[next].fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				    "2845 Get next roundrobin failover FCF (x%x)\n", next);
+				return next;
+			}

-	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
-	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
-		/*
-		 * If we have wrapped then we need to clear the bits that
-		 * have been tested so that we can detect when we should
-		 * change the priority level.
-		 */
-		next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
-						LPFC_SLI4_FCF_TBL_INDX_MAX);
-	}
+			if (list_is_singular(&phba->fcf.fcf_pri_list))
+				return LPFC_FCOE_FCF_NEXT_NONE;
+		}

-
-	/* Check roundrobin failover list empty condition */
-	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
-	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
 		/*
 		 * If next fcf index is not found check if there are lower
 		 * Priority level fcf's in the fcf_priority list.
 		 * Set up the rr_bmask with all of the avaiable fcf bits
 		 * at that level and continue the selection process.
 		 */
-		if (lpfc_check_next_fcf_pri_level(phba))
-			goto initial_priority;
-		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-				"2844 No roundrobin failover FCF available\n");
+	} while (lpfc_check_next_fcf_pri_level(phba));

-		return LPFC_FCOE_FCF_NEXT_NONE;
-	}
+	lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+			"2844 No roundrobin failover FCF available\n");

-	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
-	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
-		LPFC_FCF_FLOGI_FAILED) {
-		if (list_is_singular(&phba->fcf.fcf_pri_list))
-			return LPFC_FCOE_FCF_NEXT_NONE;
-
-		goto next_priority;
-	}
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2845 Get next roundrobin failover FCF (x%x)\n",
-			next_fcf_index);
-
-	return next_fcf_index;
+	return LPFC_FCOE_FCF_NEXT_NONE;
 }

 /**
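
The lpfc_sli.c hunk above replaces a goto-based round-robin search with the kernel's for_each_set_bit_wrap() iterator, which visits every set bit of a bitmap starting at a given index and wrapping around once. The following is a minimal kernel-style sketch of that iteration pattern only; the names and sizes are invented and unrelated to the lpfc data structures.

/*
 * Sketch of the wrap-around bitmap walk used above; bmask, EXAMPLE_MAX and
 * current_idx are invented for illustration.
 */
#include <linux/bitmap.h>
#include <linux/find.h>

#define EXAMPLE_MAX	64

static int example_pick_next(unsigned long *bmask, unsigned int current_idx)
{
	unsigned int idx;

	/* Visit every set bit, starting at current_idx and wrapping around once. */
	for_each_set_bit_wrap(idx, bmask, EXAMPLE_MAX, current_idx) {
		if (idx == current_idx)		/* skip the entry currently in use */
			continue;
		return idx;			/* first usable candidate */
	}
	return -1;				/* no other bit set in the bitmap */
}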
+4
drivers/scsi/lpfc/lpfc_sli4.h
···
 #define LPFC_FP_EQ_MAX_INTR_SEC	10000

 	uint32_t intr_enable;
+
+	/* Indicates whether SLI Port supports FEDIF */
+	bool encryption_support;
+
 	struct lpfc_bmbx bmbx;
 	struct lpfc_max_cfg_param max_cfg_param;
 	uint16_t extents_in_use; /* must allocate resource extents. */
+1 -1
drivers/scsi/lpfc/lpfc_version.h
···
  * included with this package. *
  *******************************************************************/

-#define LPFC_DRIVER_VERSION "14.4.0.12"
+#define LPFC_DRIVER_VERSION "14.4.0.13"
 #define LPFC_DRIVER_NAME		"lpfc"

 /* Used for SLI 2/3 */
+1 -1
drivers/scsi/mac53c94.c
···
 static void cmd_done(struct fsc_state *, int result);
 static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);

-static int mac53c94_queue_lck(struct scsi_cmnd *cmd)
+static enum scsi_qc_status mac53c94_queue_lck(struct scsi_cmnd *cmd)
 {
 	struct fsc_state *state;

+9 -8
drivers/scsi/megaraid.c
···
  *
  * The command queuing entry point for the mid-layer.
  */
-static int megaraid_queue_lck(struct scsi_cmnd *scmd)
+static enum scsi_qc_status megaraid_queue_lck(struct scsi_cmnd *scmd)
 {
 	adapter_t *adapter;
 	scb_t *scb;
-	int busy=0;
+	enum scsi_qc_status busy = 0;
 	unsigned long flags;

 	adapter = (adapter_t *)scmd->device->host->hostdata;
···
  * boot settings.
  */
 static scb_t *
-mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
+mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd,
+	       enum scsi_qc_status *busy)
 {
 	mega_passthru *pthru;
 	scb_t *scb;
···
 	}

 	if(!(scb = mega_allocate_scb(adapter, cmd))) {
-		*busy = 1;
+		*busy = SCSI_MLQUEUE_HOST_BUSY;
 		return NULL;
 	}

···

 		/* Allocate a SCB and initialize passthru */
 		if(!(scb = mega_allocate_scb(adapter, cmd))) {
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}
 		pthru = scb->pthru;
···

 		/* Allocate a SCB and initialize mailbox */
 		if(!(scb = mega_allocate_scb(adapter, cmd))) {
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}
 		mbox = (mbox_t *)scb->raw_mbox;
···

 		/* Allocate a SCB and initialize mailbox */
 		if(!(scb = mega_allocate_scb(adapter, cmd))) {
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}

···
 	else {
 		/* Allocate a SCB and initialize passthru */
 		if(!(scb = mega_allocate_scb(adapter, cmd))) {
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}

+4 -2
drivers/scsi/megaraid.h
···
 static int issue_scb(adapter_t *, scb_t *);
 static int mega_setup_mailbox(adapter_t *);

-static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *);
-static scb_t * mega_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
+static enum scsi_qc_status megaraid_queue(struct Scsi_Host *,
+					  struct scsi_cmnd *);
+static scb_t *mega_build_cmd(adapter_t *, struct scsi_cmnd *,
+			     enum scsi_qc_status *);
 static void __mega_runpendq(adapter_t *);
 static int issue_scb_block(adapter_t *, u_char *);

+13 -10
drivers/scsi/megaraid/megaraid_mbox.c
···
 static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
 static void megaraid_mbox_setup_device_map(adapter_t *);

-static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
-static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
+static enum scsi_qc_status megaraid_queue_command(struct Scsi_Host *,
+						  struct scsi_cmnd *);
+static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *,
+				      enum scsi_qc_status *);
 static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
 static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
 		struct scsi_cmnd *);
···
  *
  * Queue entry point for mailbox based controllers.
  */
-static int megaraid_queue_command_lck(struct scsi_cmnd *scp)
+static enum scsi_qc_status megaraid_queue_command_lck(struct scsi_cmnd *scp)
 {
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	adapter_t *adapter;
 	scb_t *scb;
-	int if_busy;
+	enum scsi_qc_status if_busy;

 	adapter = SCP2ADAPTER(scp);
 	scp->result = 0;
···
  * firmware. We also complete certain commands without sending them to firmware.
  */
 static scb_t *
-megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
+megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp,
+			enum scsi_qc_status *busy)
 {
 	mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
 	int channel;
···

 		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
 			scp->result = (DID_ERROR << 16);
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}

···
 		/* Allocate a SCB and initialize passthru */
 		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
 			scp->result = (DID_ERROR << 16);
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}

···
 		 */
 		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
 			scp->result = (DID_ERROR << 16);
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}
 		ccb = (mbox_ccb_t *)scb->ccb;
···
 		 */
 		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
 			scp->result = (DID_ERROR << 16);
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}

···
 		// Allocate a SCB and initialize passthru
 		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
 			scp->result = (DID_ERROR << 16);
-			*busy = 1;
+			*busy = SCSI_MLQUEUE_HOST_BUSY;
 			return NULL;
 		}

+2 -2
drivers/scsi/megaraid/megaraid_sas_base.c
··· 1781 1781 * @shost: adapter SCSI host 1782 1782 * @scmd: SCSI command to be queued 1783 1783 */ 1784 - static int 1785 - megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1784 + static enum scsi_qc_status megasas_queue_command(struct Scsi_Host *shost, 1785 + struct scsi_cmnd *scmd) 1786 1786 { 1787 1787 struct megasas_instance *instance; 1788 1788 struct MR_PRIV_DEVICE *mr_device_priv_data;
+1 -1
drivers/scsi/mesh.c
··· 1625 1625 * Called by midlayer with host locked to queue a new 1626 1626 * request 1627 1627 */ 1628 - static int mesh_queue_lck(struct scsi_cmnd *cmd) 1628 + static enum scsi_qc_status mesh_queue_lck(struct scsi_cmnd *cmd) 1629 1629 { 1630 1630 struct mesh_state *ms; 1631 1631
+89 -3
drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2017-2023 Broadcom Inc. All rights reserved. 3 + * Copyright 2017-2026 Broadcom Inc. All rights reserved. 4 4 */ 5 5 #ifndef MPI30_CNFG_H 6 6 #define MPI30_CNFG_H 1 ··· 1037 1037 #define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2) 1038 1038 #define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003) 1039 1039 #define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0) 1040 + #define MPI3_IOUNIT5_DEVICE_SHUTDOWN_HDD_SPINDOWN_ENABLE (0x8000) 1040 1041 #define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c) 1041 1042 #define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00) 1042 1043 #define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04) ··· 1075 1074 u8 current_key_encryption_algo; 1076 1075 u8 key_digest_hash_algo; 1077 1076 union mpi3_version_union current_svn; 1078 - __le32 reserved14; 1077 + __le16 pending_svn_time; 1078 + __le16 reserved16; 1079 1079 __le32 current_key[128]; 1080 1080 union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX]; 1081 1081 }; ··· 1408 1406 }; 1409 1407 1410 1408 #define MPI3_DRIVER1_PAGEVERSION (0x00) 1409 + #define MPI3_DRIVER1_FLAGS_DEVICE_SHUTDOWN_ON_UNLOAD_DISABLE (0x0001) 1411 1410 #ifndef MPI3_DRIVER2_TRIGGER_MAX 1412 1411 #define MPI3_DRIVER2_TRIGGER_MAX (1) 1413 1412 #endif ··· 1564 1561 u8 consumer; 1565 1562 __le16 key_data_size; 1566 1563 __le32 additional_key_data; 1567 - __le32 reserved08[2]; 1564 + u8 library_version; 1565 + u8 reserved09[3]; 1566 + __le32 reserved0c; 1568 1567 union mpi3_security1_key_data key_data; 1569 1568 }; 1570 1569 ··· 1619 1614 u8 reserved9d[3]; 1620 1615 struct mpi3_security2_trusted_root trusted_root[MPI3_SECURITY2_TRUSTED_ROOT_MAX]; 1621 1616 }; 1617 + 1618 + struct mpi3_security_page3 { 1619 + struct mpi3_config_page_header header; 1620 + __le16 key_data_length; 1621 + __le16 reserved0a; 1622 + u8 key_number; 1623 + u8 reserved0d[3]; 1624 + union mpi3_security_mac mac; 1625 + union mpi3_security_nonce nonce; 1626 + __le32 reserved90[12]; 1627 + u8 flags; 1628 + u8 consumer; 1629 + __le16 key_data_size; 1630 + __le32 additional_key_data; 1631 + u8 library_version; 1632 + u8 reserved_c9[3]; 1633 + __le32 reserved_cc; 1634 + u8 key_data[]; 1635 + }; 1636 + 1637 + #define MPI3_SECURITY3_PAGEVERSION (0x00) 1638 + #define MPI3_SECURITY3_FLAGS_TYPE_MASK (0x0f) 1639 + #define MPI3_SECURITY3_FLAGS_TYPE_SHIFT (0) 1640 + #define MPI3_SECURITY3_FLAGS_TYPE_NOT_VALID (0) 1641 + #define MPI3_SECURITY3_FLAGS_TYPE_MLDSA_PRIVATE (1) 1642 + #define MPI3_SECURITY3_FLAGS_TYPE_MLDSA_PUBLIC (2) 1643 + struct mpi3_security_page10 { 1644 + struct mpi3_config_page_header header; 1645 + __le32 reserved08[2]; 1646 + union mpi3_security_mac mac; 1647 + union mpi3_security_nonce nonce; 1648 + __le64 current_token_nonce; 1649 + __le64 previous_token_nonce; 1650 + __le32 reserved_a0[8]; 1651 + u8 diagnostic_auth_id[64]; 1652 + }; 1653 + #define MPI3_SECURITY10_PAGEVERSION (0x00) 1654 + 1655 + struct mpi3_security_page11 { 1656 + struct mpi3_config_page_header header; 1657 + u8 flags; 1658 + u8 reserved09[3]; 1659 + __le32 reserved0c; 1660 + __le32 diagnostic_token_length; 1661 + __le32 reserved14[3]; 1662 + u8 diagnostic_token[]; 1663 + }; 1664 + #define MPI3_SECURITY11_PAGEVERSION (0x00) 1665 + #define MPI3_SECURITY11_FLAGS_TOKEN_ENABLED (0x01) 1666 + 1667 + struct mpi3_security12_diag_feature { 1668 + __le32 feature_identifier; 1669 + u8 feature_size; 1670 + u8 feature_type; 1671 + __le16 reserved06; 1672 + u8 status; 1673 + u8 section; 1674 + __le16 reserved0a; 1675 + 
__le32 reserved0c; 1676 + u8 feature_data[64]; 1677 + }; 1678 + #define MPI3_SECURITY12_DIAG_FEATURE_STATUS_MASK (0x03) 1679 + #define MPI3_SECURITY12_DIAG_FEATURE_STATUS_SHIFT (0) 1680 + #define MPI3_SECURITY12_DIAG_FEATURE_STATUS_UNKNOWN (0x00) 1681 + #define MPI3_SECURITY12_DIAG_FEATURE_STATUS_DISABLED (0x01) 1682 + #define MPI3_SECURITY12_DIAG_FEATURE_STATUS_ENABLED (0x02) 1683 + #define MPI3_SECURITY12_DIAG_FEATURE_SECTION_PROTECTED (0x00) 1684 + #define MPI3_SECURITY12_DIAG_FEATURE_SECTION_UNPROTECTED (0x01) 1685 + #define MPI3_SECURITY12_DIAG_FEATURE_SECTION_PAYLOAD (0x02) 1686 + #define MPI3_SECURITY12_DIAG_FEATURE_SECTION_SIGNATURE (0x03) 1687 + struct mpi3_security_page12 { 1688 + struct mpi3_config_page_header header; 1689 + __le32 reserved08[2]; 1690 + u8 num_diag_features; 1691 + u8 reserved11[3]; 1692 + __le32 reserved14[3]; 1693 + struct mpi3_security12_diag_feature diag_feature[]; 1694 + }; 1695 + 1622 1696 #define MPI3_SECURITY2_PAGEVERSION (0x00) 1623 1697 struct mpi3_sas_io_unit0_phy_data { 1624 1698 u8 io_unit_port; ··· 2398 2314 u8 attached_phy_identifier; 2399 2315 u8 max_port_connections; 2400 2316 u8 zone_group; 2317 + u8 reserved10[3]; 2318 + u8 negotiated_link_rate; 2401 2319 }; 2402 2320 2403 2321 #define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
+99 -3
drivers/scsi/mpi3mr/mpi/mpi30_image.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2018-2023 Broadcom Inc. All rights reserved. 3 + * Copyright 2018-2026 Broadcom Inc. All rights reserved. 4 4 */ 5 5 #ifndef MPI30_IMAGE_H 6 6 #define MPI30_IMAGE_H 1 ··· 135 135 __le32 package_version_string_offset; 136 136 __le32 package_build_date_string_offset; 137 137 __le32 package_build_time_string_offset; 138 - __le32 reserved4c; 138 + __le32 diag_authorization_key_offset; 139 139 __le32 diag_authorization_identifier[16]; 140 140 struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX]; 141 141 }; ··· 148 148 #define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50) 149 149 #define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60) 150 150 #define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01) 151 + #define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTH_ANCHOR_MASK (0x06) 152 + #define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTH_ANCHOR_SHIFT (1) 153 + #define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTH_ANCHOR_IDENTIFIER (0x00) 154 + #define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTH_ANCHOR_KEY_OFFSET (0x02) 151 155 #define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff) 152 156 #define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000) 153 157 #define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000) 154 158 #define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000) 159 + 160 + struct mpi3_sb_manifest_ci_digest { 161 + __le32 signature1; 162 + __le32 reserved04[2]; 163 + u8 hash_algorithm; 164 + u8 reserved09[3]; 165 + struct mpi3_comp_image_version component_image_version; 166 + __le32 component_image_version_string_offset; 167 + __le32 digest[16]; 168 + }; 169 + 170 + struct mpi3_sb_manifest_ci_ref_element { 171 + u8 num_ci_digests; 172 + u8 reserved01[3]; 173 + struct mpi3_sb_manifest_ci_digest ci_digest[]; 174 + }; 175 + 176 + struct mpi3_sb_manifest_embedded_key_element { 177 + __le32 reserved00[3]; 178 + u8 key_algorithm; 179 + u8 flags; 180 + __le16 public_key_size; 181 + __le32 start_tag; 182 + __le32 public_key[]; 183 + }; 184 + 185 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_FLAGS_KEYINDEX_MASK (0x03) 186 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_FLAGS_KEYINDEX_STRT (0x00) 187 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_FLAGS_KEYINDEX_K2GO (0x01) 188 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_STARTTAG_STRT (0x54525453) 189 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_STARTTAG_K2GO (0x4f47324b) 190 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_ENDTAG_STOP (0x504f5453) 191 + #define MPI3_SB_MANIFEST_EMBEDDED_KEY_ENDTAG_K2ST (0x5453324b) 192 + 193 + struct mpi3_sb_manifest_diag_key_element { 194 + __le32 reserved00[3]; 195 + u8 key_algorithm; 196 + u8 flags; 197 + __le16 public_key_size; 198 + __le32 public_key[]; 199 + }; 200 + 201 + #define MPI3_SB_MANIFEST_DIAG_KEY_FLAGS_KEYINDEX_MASK (0x03) 202 + #define MPI3_SB_MANIFEST_DIAG_KEY_FLAGS_KEYSELECT_FW_KEY (0x04) 203 + union mpi3_sb_manifest_element_data { 204 + struct mpi3_sb_manifest_ci_ref_element ci_ref; 205 + struct mpi3_sb_manifest_embedded_key_element embed_key; 206 + struct mpi3_sb_manifest_diag_key_element diag_key; 207 + __le32 dword; 208 + }; 209 + struct mpi3_sb_manifest_element { 210 + u8 manifest_element_form; 211 + u8 reserved01[3]; 212 + union mpi3_sb_manifest_element_data form_specific[]; 213 + }; 214 + #define MPI3_SB_MANIFEST_ELEMENT_FORM_CI_REFS (0x01) 215 + #define MPI3_SB_MANIFEST_ELEMENT_FORM_EMBED_KEY (0x02) 216 + #define MPI3_SB_MANIFEST_ELEMENT_FORM_DIAG_KEY (0x03) 217 + struct 
mpi3_sb_manifest_mpi { 218 + u8 manifest_type; 219 + u8 reserved01[3]; 220 + __le32 reserved04[3]; 221 + u8 reserved10; 222 + u8 release_level; 223 + __le16 reserved12; 224 + __le16 reserved14; 225 + __le16 flags; 226 + __le32 reserved18[2]; 227 + __le16 vendor_id; 228 + __le16 device_id; 229 + __le16 subsystem_vendor_id; 230 + __le16 subsystem_id; 231 + __le32 reserved28[2]; 232 + union mpi3_version_union package_security_version; 233 + __le32 reserved34; 234 + struct mpi3_comp_image_version package_version; 235 + __le32 package_version_string_offset; 236 + __le32 package_build_date_string_offset; 237 + __le32 package_build_time_string_offset; 238 + __le32 component_image_references_offset; 239 + __le32 embedded_key0offset; 240 + __le32 embedded_key1offset; 241 + __le32 diag_authorization_key_offset; 242 + __le32 reserved5c[9]; 243 + struct mpi3_sb_manifest_element manifest_elements[]; 244 + }; 245 + 155 246 union mpi3_ci_manifest { 156 247 struct mpi3_ci_manifest_mpi mpi; 248 + struct mpi3_sb_manifest_mpi sb_mpi; 157 249 __le32 dword[1]; 158 250 }; 159 251 160 - #define MPI3_CI_MANIFEST_TYPE_MPI (0x00) 252 + #define MPI3_SB_MANIFEST_APU_IMMEDIATE_DEFER_APU_ENABLE (0x01) 253 + 254 + #define MPI3_CI_MANIFEST_TYPE_MPI (0x00) 255 + #define MPI3_CI_MANIFEST_TYPE_SB (0x01) 256 + 161 257 struct mpi3_extended_image_header { 162 258 u8 image_type; 163 259 u8 reserved01[3];
+1 -1
drivers/scsi/mpi3mr/mpi/mpi30_init.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2016-2023 Broadcom Inc. All rights reserved. 3 + * Copyright 2016-2026 Broadcom Inc. All rights reserved. 4 4 */ 5 5 #ifndef MPI30_INIT_H 6 6 #define MPI30_INIT_H 1
+1
drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
··· 662 662 #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01) 663 663 #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02) 664 664 #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03) 665 + #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_CLEARED (0x04) 665 666 #define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200) 666 667 #define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100) 667 668 #define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+1 -1
drivers/scsi/mpi3mr/mpi/mpi30_pci.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2016-2023 Broadcom Inc. All rights reserved. 3 + * Copyright 2016-2026 Broadcom Inc. All rights reserved. 4 4 * 5 5 */ 6 6 #ifndef MPI30_PCI_H
+1 -1
drivers/scsi/mpi3mr/mpi/mpi30_sas.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2016-2023 Broadcom Inc. All rights reserved. 3 + * Copyright 2016-2026 Broadcom Inc. All rights reserved. 4 4 */ 5 5 #ifndef MPI30_SAS_H 6 6 #define MPI30_SAS_H 1
+5 -1
drivers/scsi/mpi3mr/mpi/mpi30_tool.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2016-2024 Broadcom Inc. All rights reserved. 3 + * Copyright 2016-2026 Broadcom Inc. All rights reserved. 4 4 */ 5 5 #ifndef MPI30_TOOL_H 6 6 #define MPI30_TOOL_H 1 ··· 8 8 #define MPI3_DIAG_BUFFER_TYPE_TRACE (0x01) 9 9 #define MPI3_DIAG_BUFFER_TYPE_FW (0x02) 10 10 #define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01) 11 + #define MPI3_DIAG_BUFFER_ACTION_PAUSE (0x02) 12 + #define MPI3_DIAG_BUFFER_ACTION_RESUME (0x03) 13 + #define MPI3_DIAG_BUFFER_ACTION_CLEAR (0x04) 14 + 11 15 12 16 #define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01) 13 17 struct mpi3_diag_buffer_post_request {
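Editor's note: together with the MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_CLEARED reason code added in mpi30_ioc.h above, the new actions suggest a one-to-one pairing between the action issued and the status-change reason reported back. The helper below is purely illustrative (the pairing is inferred from the macro names, not stated by the headers) and assumes the MPI3 headers from this series are in scope:

/* Illustrative mapping of diag buffer actions to the status-change
 * reason codes defined above; not part of the driver. */
static u8 diag_action_to_expected_rc(u8 action)
{
	switch (action) {
	case MPI3_DIAG_BUFFER_ACTION_RELEASE:
		return MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED;
	case MPI3_DIAG_BUFFER_ACTION_PAUSE:
		return MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED;
	case MPI3_DIAG_BUFFER_ACTION_RESUME:
		return MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED;
	case MPI3_DIAG_BUFFER_ACTION_CLEAR:
		return MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_CLEARED;
	default:
		return 0xff;	/* no mapping defined */
	}
}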
+2 -2
drivers/scsi/mpi3mr/mpi/mpi30_transport.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 - * Copyright 2016-2023 Broadcom Inc. All rights reserved. 3 + * Copyright 2016-2026 Broadcom Inc. All rights reserved. 4 4 */ 5 5 #ifndef MPI30_TRANSPORT_H 6 6 #define MPI30_TRANSPORT_H 1 ··· 18 18 19 19 #define MPI3_VERSION_MAJOR (3) 20 20 #define MPI3_VERSION_MINOR (0) 21 - #define MPI3_VERSION_UNIT (37) 21 + #define MPI3_VERSION_UNIT (39) 22 22 #define MPI3_VERSION_DEV (0) 23 23 #define MPI3_DEVHANDLE_INVALID (0xffff) 24 24 struct mpi3_sysif_oper_queue_indexes {
+13 -5
drivers/scsi/mpi3mr/mpi3mr.h
··· 56 56 extern int prot_mask; 57 57 extern atomic64_t event_counter; 58 58 59 - #define MPI3MR_DRIVER_VERSION "8.15.0.5.51" 60 - #define MPI3MR_DRIVER_RELDATE "18-November-2025" 59 + #define MPI3MR_DRIVER_VERSION "8.17.0.3.50" 60 + #define MPI3MR_DRIVER_RELDATE "09-January-2026" 61 61 62 62 #define MPI3MR_DRIVER_NAME "mpi3mr" 63 63 #define MPI3MR_DRIVER_LICENSE "GPL" ··· 643 643 * @dev_info: Device information bits 644 644 * @phy_id: Phy identifier provided in device page 0 645 645 * @attached_phy_id: Attached phy identifier provided in device page 0 646 + * @negotiated_link_rate: Negotiated link rate from device page 0 646 647 * @sas_transport_attached: Is this device exposed to transport 647 648 * @pend_sas_rphy_add: Flag to check device is in process of add 648 649 * @hba_port: HBA port entry ··· 655 654 u16 dev_info; 656 655 u8 phy_id; 657 656 u8 attached_phy_id; 657 + u8 negotiated_link_rate; 658 658 u8 sas_transport_attached; 659 659 u8 pend_sas_rphy_add; 660 660 struct mpi3mr_hba_port *hba_port; ··· 1078 1076 * @fwevt_worker_thread: Firmware event worker thread 1079 1077 * @fwevt_lock: Firmware event lock 1080 1078 * @fwevt_list: Firmware event list 1081 - * @watchdog_work_q_name: Fault watchdog worker thread name 1082 1079 * @watchdog_work_q: Fault watchdog worker thread 1083 1080 * @watchdog_work: Fault watchdog work 1084 1081 * @watchdog_lock: Fault watchdog lock ··· 1136 1135 * @default_qcount: Total Default queues 1137 1136 * @active_poll_qcount: Currently active poll queue count 1138 1137 * @requested_poll_qcount: User requested poll queue count 1138 + * @fault_during_init: Indicates a firmware fault occurred during initialization 1139 + * @saved_fault_code: Firmware fault code captured at the time of failure 1140 + * @saved_fault_info: Additional firmware-provided fault information 1141 + * @fwfault_counter: Count of firmware faults detected by the driver 1139 1142 * @bsg_dev: BSG device structure 1140 1143 * @bsg_queue: Request queue for BSG device 1141 1144 * @stop_bsgs: Stop BSG request flag ··· 1270 1265 spinlock_t fwevt_lock; 1271 1266 struct list_head fwevt_list; 1272 1267 1273 - char watchdog_work_q_name[50]; 1274 1268 struct workqueue_struct *watchdog_work_q; 1275 1269 struct delayed_work watchdog_work; 1276 1270 spinlock_t watchdog_lock; ··· 1342 1338 u16 default_qcount; 1343 1339 u16 active_poll_qcount; 1344 1340 u16 requested_poll_qcount; 1341 + u8 fault_during_init; 1342 + u32 saved_fault_code; 1343 + u32 saved_fault_info[3]; 1344 + u64 fwfault_counter; 1345 1345 1346 1346 struct device bsg_dev; 1347 1347 struct request_queue *bsg_queue; ··· 1516 1508 struct mpi3mr_drv_cmd *drv_cmd); 1517 1509 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc, 1518 1510 struct mpi3mr_drv_cmd *drv_cmd); 1519 - void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data, 1511 + void mpi3mr_app_save_logdata_th(struct mpi3mr_ioc *mrioc, char *event_data, 1520 1512 u16 event_data_size); 1521 1513 struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( 1522 1514 struct mpi3mr_ioc *mrioc, u16 handle);
+26 -2
drivers/scsi/mpi3mr/mpi3mr_app.c
··· 2920 2920 } 2921 2921 2922 2922 /** 2923 - * mpi3mr_app_save_logdata - Save Log Data events 2923 + * mpi3mr_app_save_logdata_th - Save Log Data events 2924 2924 * @mrioc: Adapter instance reference 2925 2925 * @event_data: event data associated with log data event 2926 2926 * @event_data_size: event data size to copy ··· 2932 2932 * 2933 2933 * Return:Nothing 2934 2934 */ 2935 - void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data, 2935 + void mpi3mr_app_save_logdata_th(struct mpi3mr_ioc *mrioc, char *event_data, 2936 2936 u16 event_data_size) 2937 2937 { 2938 2938 u32 index = mrioc->logdata_buf_idx, sz; ··· 3255 3255 3256 3256 static DEVICE_ATTR_RO(adp_state); 3257 3257 3258 + /** 3259 + * fwfault_count_show() - SysFS callback to show firmware fault count 3260 + * @dev: class device 3261 + * @attr: Device attribute 3262 + * @buf: Buffer to copy data into 3263 + * 3264 + * Displays the total number of firmware faults detected by the driver 3265 + * since the controller was initialized. 3266 + * 3267 + * Return: Number of bytes written to @buf 3268 + */ 3269 + 3270 + static ssize_t 3271 + fwfault_count_show(struct device *dev, struct device_attribute *attr, 3272 + char *buf) 3273 + { 3274 + struct Scsi_Host *shost = class_to_shost(dev); 3275 + struct mpi3mr_ioc *mrioc = shost_priv(shost); 3276 + 3277 + return snprintf(buf, PAGE_SIZE, "%llu\n", mrioc->fwfault_counter); 3278 + } 3279 + static DEVICE_ATTR_RO(fwfault_count); 3280 + 3258 3281 static struct attribute *mpi3mr_host_attrs[] = { 3259 3282 &dev_attr_version_fw.attr, 3260 3283 &dev_attr_fw_queue_depth.attr, ··· 3286 3263 &dev_attr_reply_qfull_count.attr, 3287 3264 &dev_attr_logging_level.attr, 3288 3265 &dev_attr_adp_state.attr, 3266 + &dev_attr_fwfault_count.attr, 3289 3267 NULL, 3290 3268 }; 3291 3269
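Editor's note: the new fwfault_count attribute is registered in mpi3mr_host_attrs, so it should surface alongside the other host attributes under /sys/class/scsi_host/host<N>/ (the host number varies per system). A minimal userspace reader, assuming that path:

#include <stdio.h>

int main(void)
{
	unsigned long long faults;
	/* host0 is a placeholder; pick the mpi3mr host on your system. */
	FILE *f = fopen("/sys/class/scsi_host/host0/fwfault_count", "r");

	if (!f) {
		perror("fwfault_count");
		return 1;
	}
	if (fscanf(f, "%llu", &faults) == 1)
		printf("firmware faults since controller init: %llu\n", faults);
	fclose(f);
	return 0;
}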
+124 -12
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 21 21 static int poll_queues; 22 22 module_param(poll_queues, int, 0444); 23 23 MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)"); 24 + static bool threaded_isr_poll = true; 25 + module_param(threaded_isr_poll, bool, 0444); 26 + MODULE_PARM_DESC(threaded_isr_poll, 27 + "Enablement of IRQ polling thread (default=true)"); 24 28 25 29 #if defined(writeq) && defined(CONFIG_64BIT) 26 30 static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, ··· 599 595 * Exit completion loop to avoid CPU lockup 600 596 * Ensure remaining completion happens from threaded ISR. 601 597 */ 602 - if (num_op_reply > mrioc->max_host_ios) { 598 + if ((num_op_reply > mrioc->max_host_ios) && 599 + (threaded_isr_poll == true)) { 603 600 op_reply_q->enable_irq_poll = true; 604 601 break; 605 602 } ··· 697 692 * If more IOs are expected, schedule IRQ polling thread. 698 693 * Otherwise exit from ISR. 699 694 */ 700 - if (!intr_info->op_reply_q) 695 + if ((threaded_isr_poll == false) || !intr_info->op_reply_q) 701 696 return ret; 702 697 703 698 if (!intr_info->op_reply_q->enable_irq_poll || ··· 776 771 intr_info->msix_index = index; 777 772 intr_info->op_reply_q = NULL; 778 773 779 - snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 780 - mrioc->driver_name, mrioc->id, index); 774 + scnprintf(intr_info->name, MPI3MR_NAME_LENGTH, 775 + "%.32s%d-msix%u", mrioc->driver_name, mrioc->id, index); 781 776 782 777 #ifndef CONFIG_PREEMPT_RT 783 778 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, ··· 1109 1104 } 1110 1105 1111 1106 /** 1107 + * mpi3mr_save_fault_info - Save fault information 1108 + * @mrioc: Adapter instance reference 1109 + * 1110 + * Save the controller fault information if there is a 1111 + * controller fault. 1112 + * 1113 + * Return: Nothing. 1114 + */ 1115 + static void mpi3mr_save_fault_info(struct mpi3mr_ioc *mrioc) 1116 + { 1117 + u32 ioc_status, i; 1118 + 1119 + ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1120 + 1121 + if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 1122 + mrioc->saved_fault_code = readl(&mrioc->sysif_regs->fault) & 1123 + MPI3_SYSIF_FAULT_CODE_MASK; 1124 + for (i = 0; i < 3; i++) { 1125 + mrioc->saved_fault_info[i] = 1126 + readl(&mrioc->sysif_regs->fault_info[i]); 1127 + } 1128 + } 1129 + } 1130 + 1131 + /** 1112 1132 * mpi3mr_get_iocstate - Get IOC State 1113 1133 * @mrioc: Adapter instance reference 1114 1134 * ··· 1272 1242 ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n" 1273 1243 "from the applications, application interface for MPT command is disabled\n"); 1274 1244 mpi3mr_free_ioctl_dma_memory(mrioc); 1245 + } 1246 + 1247 + /** 1248 + * mpi3mr_fault_uevent_emit - Emit uevent for any controller 1249 + * fault 1250 + * @mrioc: Pointer to the mpi3mr_ioc structure for the controller instance 1251 + * 1252 + * This function is invoked when the controller undergoes any 1253 + * type of fault. 
1254 + */ 1255 + 1256 + static void mpi3mr_fault_uevent_emit(struct mpi3mr_ioc *mrioc) 1257 + { 1258 + struct kobj_uevent_env *env; 1259 + int ret; 1260 + 1261 + env = kzalloc(sizeof(*env), GFP_KERNEL); 1262 + if (!env) 1263 + return; 1264 + 1265 + ret = add_uevent_var(env, "DRIVER=%s", mrioc->driver_name); 1266 + if (ret) 1267 + goto out_free; 1268 + 1269 + ret = add_uevent_var(env, "IOC_ID=%u", mrioc->id); 1270 + if (ret) 1271 + goto out_free; 1272 + 1273 + ret = add_uevent_var(env, "FAULT_CODE=0x%08x", 1274 + mrioc->saved_fault_code); 1275 + if (ret) 1276 + goto out_free; 1277 + 1278 + ret = add_uevent_var(env, "FAULT_INFO0=0x%08x", 1279 + mrioc->saved_fault_info[0]); 1280 + if (ret) 1281 + goto out_free; 1282 + 1283 + ret = add_uevent_var(env, "FAULT_INFO1=0x%08x", 1284 + mrioc->saved_fault_info[1]); 1285 + if (ret) 1286 + goto out_free; 1287 + 1288 + ret = add_uevent_var(env, "FAULT_INFO2=0x%08x", 1289 + mrioc->saved_fault_info[2]); 1290 + if (ret) 1291 + goto out_free; 1292 + 1293 + kobject_uevent_env(&mrioc->shost->shost_gendev.kobj, 1294 + KOBJ_CHANGE, env->envp); 1295 + 1296 + out_free: 1297 + kfree(env); 1298 + 1275 1299 } 1276 1300 1277 1301 /** ··· 1559 1475 if (ioc_state == MRIOC_STATE_FAULT) { 1560 1476 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 1561 1477 mpi3mr_print_fault_info(mrioc); 1478 + mpi3mr_save_fault_info(mrioc); 1479 + mrioc->fault_during_init = 1; 1480 + mrioc->fwfault_counter++; 1481 + 1562 1482 do { 1563 1483 host_diagnostic = 1564 1484 readl(&mrioc->sysif_regs->host_diagnostic); ··· 1789 1701 scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX << 1790 1702 MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num << 1791 1703 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason); 1792 - writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1704 + writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]); 1705 + if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) 1706 + mpi3mr_set_diagsave(mrioc); 1793 1707 writel(host_diagnostic | reset_type, 1794 1708 &mrioc->sysif_regs->host_diagnostic); 1795 1709 switch (reset_type) { ··· 2660 2570 mpi3mr_set_trigger_data_in_all_hdb(mrioc, 2661 2571 MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); 2662 2572 mpi3mr_print_fault_info(mrioc); 2573 + mpi3mr_save_fault_info(mrioc); 2574 + mrioc->fault_during_init = 1; 2575 + mrioc->fwfault_counter++; 2663 2576 return; 2664 2577 } 2665 2578 ··· 2680 2587 break; 2681 2588 msleep(100); 2682 2589 } while (--timeout); 2590 + 2591 + mpi3mr_save_fault_info(mrioc); 2592 + mrioc->fault_during_init = 1; 2593 + mrioc->fwfault_counter++; 2683 2594 } 2684 2595 2685 2596 /** ··· 2860 2763 union mpi3mr_trigger_data trigger_data; 2861 2764 u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; 2862 2765 2766 + if (mrioc->fault_during_init) { 2767 + mpi3mr_fault_uevent_emit(mrioc); 2768 + mrioc->fault_during_init = 0; 2769 + } 2770 + 2863 2771 if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 2864 2772 return; 2865 2773 ··· 2937 2835 goto schedule_work; 2938 2836 } 2939 2837 2838 + mpi3mr_save_fault_info(mrioc); 2839 + mpi3mr_fault_uevent_emit(mrioc); 2840 + mrioc->fwfault_counter++; 2841 + 2940 2842 switch (trigger_data.fault) { 2941 2843 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: 2942 2844 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: ··· 2984 2878 return; 2985 2879 2986 2880 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2987 - snprintf(mrioc->watchdog_work_q_name, 2988 - sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2989 - 
mrioc->id); 2990 2881 mrioc->watchdog_work_q = alloc_ordered_workqueue( 2991 - "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name); 2882 + "watchdog_%s%d", WQ_MEM_RECLAIM, mrioc->name, mrioc->id); 2992 2883 if (!mrioc->watchdog_work_q) { 2993 2884 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2994 2885 return; ··· 5504 5401 { 5505 5402 int retval = 0, i; 5506 5403 unsigned long flags; 5404 + enum mpi3mr_iocstate ioc_state; 5507 5405 u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 5508 5406 union mpi3mr_trigger_data trigger_data; 5509 5407 ··· 5563 5459 mrioc->io_admin_reset_sync = 1; 5564 5460 5565 5461 if (snapdump) { 5566 - mpi3mr_set_diagsave(mrioc); 5567 5462 retval = mpi3mr_issue_reset(mrioc, 5568 5463 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 5569 5464 if (!retval) { ··· 5576 5473 break; 5577 5474 msleep(100); 5578 5475 } while (--timeout); 5476 + 5477 + mpi3mr_save_fault_info(mrioc); 5478 + mpi3mr_fault_uevent_emit(mrioc); 5479 + mrioc->fwfault_counter++; 5579 5480 mpi3mr_set_trigger_data_in_all_hdb(mrioc, 5580 5481 MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0); 5581 5482 } ··· 5668 5561 if (mrioc->pel_enabled) 5669 5562 atomic64_inc(&event_counter); 5670 5563 } else { 5671 - mpi3mr_issue_reset(mrioc, 5672 - MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 5564 + dprint_reset(mrioc, 5565 + "soft_reset_handler failed, marking controller as unrecoverable\n"); 5566 + ioc_state = mpi3mr_get_iocstate(mrioc); 5567 + 5568 + if (ioc_state != MRIOC_STATE_FAULT) 5569 + mpi3mr_issue_reset(mrioc, 5570 + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 5673 5571 mrioc->device_refresh_on = 0; 5674 5572 mrioc->unrecoverable = 1; 5675 5573 mrioc->reset_in_progress = 0;
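Editor's note: mpi3mr_fault_uevent_emit() above sends a KOBJ_CHANGE uevent on the Scsi_Host device carrying DRIVER, IOC_ID, FAULT_CODE and FAULT_INFO0..2 keys, so fault notifications can be consumed from userspace without polling. A minimal netlink uevent listener is sketched below (error handling, filtering by the host device path, and coexistence with udev are all omitted):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("uevent socket");
		return 1;
	}
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);

		if (len <= 0)
			continue;
		buf[len] = '\0';
		/* A uevent payload is a NUL-separated key=value list. */
		for (char *p = buf; p < buf + len; p += strlen(p) + 1)
			if (!strncmp(p, "IOC_ID=", 7) ||
			    !strncmp(p, "FAULT_", 6))
				printf("%s\n", p);
	}
}

The threaded_isr_poll module parameter added at the top of the same file only gates whether completion processing can fall back to the IRQ polling thread; it does not affect the uevent path.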
+105 -6
drivers/scsi/mpi3mr/mpi3mr_os.c
··· 1139 1139 } 1140 1140 1141 1141 /** 1142 + * mpi3mr_debug_dump_devpg0 - Dump device page0 1143 + * @mrioc: Adapter instance reference 1144 + * @dev_pg0: Device page 0. 1145 + * 1146 + * Prints pertinent details of the device page 0. 1147 + * 1148 + * Return: Nothing. 1149 + */ 1150 + static void 1151 + mpi3mr_debug_dump_devpg0(struct mpi3mr_ioc *mrioc, struct mpi3_device_page0 *dev_pg0) 1152 + { 1153 + ioc_info(mrioc, 1154 + "device_pg0: handle(0x%04x), perst_id(%d), wwid(0x%016llx), encl_handle(0x%04x), slot(%d)\n", 1155 + le16_to_cpu(dev_pg0->dev_handle), 1156 + le16_to_cpu(dev_pg0->persistent_id), 1157 + le64_to_cpu(dev_pg0->wwid), le16_to_cpu(dev_pg0->enclosure_handle), 1158 + le16_to_cpu(dev_pg0->slot)); 1159 + ioc_info(mrioc, "device_pg0: access_status(0x%02x), flags(0x%04x), device_form(0x%02x), queue_depth(%d)\n", 1160 + dev_pg0->access_status, le16_to_cpu(dev_pg0->flags), 1161 + dev_pg0->device_form, le16_to_cpu(dev_pg0->queue_depth)); 1162 + ioc_info(mrioc, "device_pg0: parent_handle(0x%04x), iounit_port(%d)\n", 1163 + le16_to_cpu(dev_pg0->parent_dev_handle), dev_pg0->io_unit_port); 1164 + 1165 + switch (dev_pg0->device_form) { 1166 + case MPI3_DEVICE_DEVFORM_SAS_SATA: 1167 + { 1168 + 1169 + struct mpi3_device0_sas_sata_format *sasinf = 1170 + &dev_pg0->device_specific.sas_sata_format; 1171 + ioc_info(mrioc, 1172 + "device_pg0: sas_sata: sas_address(0x%016llx),flags(0x%04x),\n" 1173 + "device_info(0x%04x), phy_num(%d), attached_phy_id(%d),negotiated_link_rate(0x%02x)\n", 1174 + le64_to_cpu(sasinf->sas_address), 1175 + le16_to_cpu(sasinf->flags), 1176 + le16_to_cpu(sasinf->device_info), sasinf->phy_num, 1177 + sasinf->attached_phy_identifier, sasinf->negotiated_link_rate); 1178 + break; 1179 + } 1180 + case MPI3_DEVICE_DEVFORM_PCIE: 1181 + { 1182 + 1183 + struct mpi3_device0_pcie_format *pcieinf = 1184 + &dev_pg0->device_specific.pcie_format; 1185 + ioc_info(mrioc, 1186 + "device_pg0: pcie: port_num(%d), device_info(0x%04x), mdts(%d), page_sz(0x%02x)\n", 1187 + pcieinf->port_num, le16_to_cpu(pcieinf->device_info), 1188 + le32_to_cpu(pcieinf->maximum_data_transfer_size), 1189 + pcieinf->page_size); 1190 + ioc_info(mrioc, 1191 + "device_pg0: pcie: abort_timeout(%d), reset_timeout(%d) capabilities (0x%08x)\n", 1192 + pcieinf->nvme_abort_to, pcieinf->controller_reset_to, 1193 + le32_to_cpu(pcieinf->capabilities)); 1194 + break; 1195 + } 1196 + case MPI3_DEVICE_DEVFORM_VD: 1197 + { 1198 + 1199 + struct mpi3_device0_vd_format *vdinf = 1200 + &dev_pg0->device_specific.vd_format; 1201 + 1202 + ioc_info(mrioc, 1203 + "device_pg0: vd: state(0x%02x), raid_level(%d), flags(0x%04x),\n" 1204 + "device_info(0x%04x) abort_timeout(%d), reset_timeout(%d)\n", 1205 + vdinf->vd_state, vdinf->raid_level, 1206 + le16_to_cpu(vdinf->flags), 1207 + le16_to_cpu(vdinf->device_info), 1208 + vdinf->vd_abort_to, vdinf->vd_reset_to); 1209 + ioc_info(mrioc, 1210 + "device_pg0: vd: tg_id(%d), high(%dMiB), low(%dMiB), qd_reduction_factor(%d)\n", 1211 + vdinf->io_throttle_group, 1212 + le16_to_cpu(vdinf->io_throttle_group_high), 1213 + le16_to_cpu(vdinf->io_throttle_group_low), 1214 + ((le16_to_cpu(vdinf->flags) & 1215 + MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK) >> 12)); 1216 + break; 1217 + 1218 + } 1219 + default: 1220 + break; 1221 + } 1222 + } 1223 + 1224 + /** 1142 1225 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf 1143 1226 * @mrioc: Adapter instance reference 1144 1227 * @tgtdev: Target device internal structure ··· 1241 1158 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1242 
1159 struct mpi3mr_enclosure_node *enclosure_dev = NULL; 1243 1160 u8 prot_mask = 0; 1161 + 1162 + if (mrioc->logging_level & 1163 + (MPI3_DEBUG_EVENT | MPI3_DEBUG_EVENT_WORK_TASK)) 1164 + mpi3mr_debug_dump_devpg0(mrioc, dev_pg0); 1244 1165 1245 1166 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); 1246 1167 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); ··· 1324 1237 tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num; 1325 1238 tgtdev->dev_spec.sas_sata_inf.attached_phy_id = 1326 1239 sasinf->attached_phy_identifier; 1240 + tgtdev->dev_spec.sas_sata_inf.negotiated_link_rate = 1241 + sasinf->negotiated_link_rate; 1327 1242 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != 1328 1243 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) 1329 1244 tgtdev->is_hidden = 1; ··· 2051 1962 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, 2052 1963 struct mpi3mr_fwevt *fwevt) 2053 1964 { 2054 - mpi3mr_app_save_logdata(mrioc, fwevt->event_data, 1965 + mpi3mr_app_save_logdata_th(mrioc, fwevt->event_data, 2055 1966 fwevt->event_data_size); 2056 1967 } 2057 1968 ··· 3147 3058 } 3148 3059 case MPI3_EVENT_DEVICE_INFO_CHANGED: 3149 3060 case MPI3_EVENT_LOG_DATA: 3061 + 3062 + sz = event_reply->event_data_length * 4; 3063 + mpi3mr_app_save_logdata_th(mrioc, 3064 + (char *)event_reply->event_data, sz); 3065 + break; 3150 3066 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 3151 3067 case MPI3_EVENT_ENCL_DEVICE_ADDED: 3152 3068 { ··· 5123 5029 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 5124 5030 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 5125 5031 */ 5126 - static int mpi3mr_qcmd(struct Scsi_Host *shost, 5127 - struct scsi_cmnd *scmd) 5032 + static enum scsi_qc_status mpi3mr_qcmd(struct Scsi_Host *shost, 5033 + struct scsi_cmnd *scmd) 5128 5034 { 5129 5035 struct mpi3mr_ioc *mrioc = shost_priv(shost); 5130 5036 struct mpi3mr_stgt_priv_data *stgt_priv_data; ··· 5475 5381 if (retval < 0) 5476 5382 goto id_alloc_failed; 5477 5383 mrioc->id = (u8)retval; 5478 - sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); 5479 - sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); 5384 + strscpy(mrioc->driver_name, MPI3MR_DRIVER_NAME, 5385 + sizeof(mrioc->driver_name)); 5386 + scnprintf(mrioc->name, sizeof(mrioc->name), 5387 + "%s%u", mrioc->driver_name, mrioc->id); 5480 5388 INIT_LIST_HEAD(&mrioc->list); 5481 5389 spin_lock(&mrioc_list_lock); 5482 5390 list_add_tail(&mrioc->list, &mrioc_list); ··· 6079 5983 .remove = mpi3mr_remove, 6080 5984 .shutdown = mpi3mr_shutdown, 6081 5985 .err_handler = &mpi3mr_err_handler, 6082 - .driver.pm = &mpi3mr_pm_ops, 5986 + .driver = { 5987 + .probe_type = PROBE_PREFER_ASYNCHRONOUS, 5988 + .pm = &mpi3mr_pm_ops, 5989 + }, 6083 5990 }; 6084 5991 6085 5992 static ssize_t event_counter_show(struct device_driver *dd, char *buf)
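Editor's note: alongside the functional changes, this hunk swaps sprintf()/snprintf() for strscpy()/scnprintf() when building the adapter name. As a brief, generic illustration of the difference (not specific to this driver): snprintf() returns the length the string would have had without truncation, whereas scnprintf() returns the number of characters actually placed in the buffer, which is the safer value when the result feeds further length arithmetic; strscpy() always NUL-terminates and returns -E2BIG on truncation.

#include <linux/kernel.h>
#include <linux/printk.h>

static void name_format_demo(void)
{
	char name[8];	/* deliberately too small */
	int would_be, written;

	would_be = snprintf(name, sizeof(name), "%s%d-msix%d", "mpi3mr", 0, 12);
	written  = scnprintf(name, sizeof(name), "%s%d-msix%d", "mpi3mr", 0, 12);

	/* would_be reports the untruncated length; written <= sizeof(name) - 1 */
	pr_info("would_be=%d written=%d name=%s\n", would_be, written, name);
}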
+18 -12
drivers/scsi/mpi3mr/mpi3mr_transport.c
··· 2284 2284 * @mrioc: Adapter instance reference 2285 2285 * @tgtdev: Target device 2286 2286 * 2287 - * This function identifies whether the target device is 2288 - * attached directly or through expander and issues sas phy 2289 - * page0 or expander phy page1 and gets the link rate, if there 2290 - * is any failure in reading the pages then this returns link 2291 - * rate of 1.5. 2287 + * This function first tries to use the link rate from DevicePage0 2288 + * (populated by firmware during device discovery). If the cached 2289 + * value is not available or invalid, it falls back to reading from 2290 + * sas phy page0 or expander phy page1. 2291 + * 2292 2292 * 2293 2293 * Return: logical link rate. 2294 2294 */ ··· 2301 2301 u32 phynum_handle; 2302 2302 u16 ioc_status; 2303 2303 2304 + /* First, try to use link rate from DevicePage0 (populated by firmware) */ 2305 + if (tgtdev->dev_spec.sas_sata_inf.negotiated_link_rate >= 2306 + MPI3_SAS_NEG_LINK_RATE_1_5) { 2307 + link_rate = tgtdev->dev_spec.sas_sata_inf.negotiated_link_rate; 2308 + goto out; 2309 + } 2310 + 2311 + /* Fallback to reading from phy pages if DevicePage0 value not available */ 2304 2312 phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id; 2305 2313 if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) { 2306 2314 phynum_handle = ((phy_number<<MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) ··· 2326 2318 __FILE__, __LINE__, __func__); 2327 2319 goto out; 2328 2320 } 2329 - link_rate = (expander_pg1.negotiated_link_rate & 2330 - MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> 2331 - MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT; 2321 + link_rate = expander_pg1.negotiated_link_rate; 2332 2322 goto out; 2333 2323 } 2334 2324 if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0, ··· 2341 2335 __FILE__, __LINE__, __func__); 2342 2336 goto out; 2343 2337 } 2344 - link_rate = (phy_pg0.negotiated_link_rate & 2345 - MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> 2346 - MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT; 2338 + link_rate = phy_pg0.negotiated_link_rate; 2339 + 2347 2340 out: 2348 - return link_rate; 2341 + return ((link_rate & MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >> 2342 + MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT); 2349 2343 } 2350 2344 2351 2345 /**
+9 -8
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 843 843 /* initialize fault polling */ 844 844 845 845 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); 846 - snprintf(ioc->fault_reset_work_q_name, 847 - sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", 848 - ioc->driver_name, ioc->id); 849 846 ioc->fault_reset_work_q = alloc_ordered_workqueue( 850 - "%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name); 847 + "poll_%s%d_status", WQ_MEM_RECLAIM, ioc->driver_name, ioc->id); 851 848 if (!ioc->fault_reset_work_q) { 852 849 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__); 853 850 return; ··· 1561 1564 int i; 1562 1565 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; 1563 1566 u8 cb_idx = 0xFF; 1567 + u16 discovery_smid = 1568 + ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY; 1564 1569 1565 1570 if (smid < ioc->hi_priority_smid) { 1566 1571 struct scsiio_tracker *st; ··· 1571 1572 st = _get_st_from_smid(ioc, smid); 1572 1573 if (st) 1573 1574 cb_idx = st->cb_idx; 1574 - } else if (smid == ctl_smid) 1575 + } else if (smid < discovery_smid) 1575 1576 cb_idx = ioc->ctl_cb_idx; 1577 + else 1578 + cb_idx = ioc->scsih_cb_idx; 1576 1579 } else if (smid < ioc->internal_smid) { 1577 1580 i = smid - ioc->hi_priority_smid; 1578 1581 cb_idx = ioc->hpr_lookup[i].cb_idx; ··· 3175 3174 3176 3175 if (index >= ioc->iopoll_q_start_index) { 3177 3176 qid = index - ioc->iopoll_q_start_index; 3178 - snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", 3177 + scnprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", 3179 3178 ioc->driver_name, ioc->id, qid); 3180 3179 reply_q->is_iouring_poll_q = 1; 3181 3180 ioc->io_uring_poll_queues[qid].reply_q = reply_q; ··· 3184 3183 3185 3184 3186 3185 if (ioc->msix_enable) 3187 - snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", 3186 + scnprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", 3188 3187 ioc->driver_name, ioc->id, index); 3189 3188 else 3190 - snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", 3189 + scnprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", 3191 3190 ioc->driver_name, ioc->id); 3192 3191 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, 3193 3192 IRQF_SHARED, reply_q->name, reply_q);
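Editor's note: the _base_get_cb_idx() change above splits the reserved internal SCSI IO SMIDs between two callback owners: the ctl (BSG passthrough) SMID stays with ctl_cb_idx, while the remaining reserved SMIDs, including the new discovery SMID used by _scsi_send_scsi_io() in mpt3sas_scsih.c, route to scsih_cb_idx. A worked example of the layout, assuming a hypothetical scsiio_depth of 1000 and the usual mpt3sas sizing of can_queue as scsiio_depth minus the reserved internal SCSI IO count (an assumption, not shown in this hunk); the two constants come from mpt3sas_base.h below and the snippet assumes the mpt3sas headers are in scope:

#include <linux/types.h>
#include <linux/printk.h>

/* Illustrative only: reserved-SMID layout for internal SCSI IOs. */
static void reserved_smid_layout_demo(void)
{
	u16 scsiio_depth = 1000;					/* hypothetical */
	u16 can_queue = scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;	/* 997 (assumed) */
	u16 ctl_smid = scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;	/* 998 -> ctl_cb_idx */
	u16 discovery_smid = can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY;	/* 999 -> scsih_cb_idx */

	/* Regular command SMIDs (1..997) still resolve via their scsiio
	 * tracker; among the reserved SMIDs, 998 completes through the ctl
	 * callback and 999..1000 through the scsih callback. */
	pr_info("ctl_smid=%u discovery_smid=%u last_internal=%u\n",
		ctl_smid, discovery_smid, scsiio_depth);
}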
+6 -4
drivers/scsi/mpt3sas/mpt3sas_base.h
··· 147 147 #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ 148 148 /* reserved for issuing internally framed scsi io cmds */ 149 149 #define INTERNAL_SCSIIO_CMDS_COUNT 3 150 + #define INTERNAL_SCSIIO_FOR_DISCOVERY 2 150 151 151 152 #define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/ 152 153 ··· 481 480 u32 flags; 482 481 u8 configured_lun; 483 482 u8 block; 483 + u8 deleted; 484 484 u8 tlr_snoop_check; 485 485 u8 ignore_delay_remove; 486 486 /* Iopriority Command Handling */ ··· 579 577 u8 chassis_slot; 580 578 u8 is_chassis_slot_valid; 581 579 u8 connector_name[5]; 580 + u8 ssd_device; 582 581 struct kref refcount; 582 + 583 583 u8 port_type; 584 584 struct hba_port *port; 585 585 struct sas_rphy *rphy; ··· 1163 1159 * @mask_interrupts: ignore interrupt 1164 1160 * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and 1165 1161 * pci resource handling 1166 - * @fault_reset_work_q_name: fw fault work queue 1167 - * @fault_reset_work_q: "" 1168 - * @fault_reset_work: "" 1162 + * @fault_reset_work_q: fw fault workqueue 1163 + * @fault_reset_work: fw fault work 1169 1164 * @firmware_event_thread: fw event work queue 1170 1165 * @fw_event_lock: 1171 1166 * @fw_event_list: list of fw events ··· 1348 1345 u8 mask_interrupts; 1349 1346 1350 1347 /* fw fault handler */ 1351 - char fault_reset_work_q_name[20]; 1352 1348 struct workqueue_struct *fault_reset_work_q; 1353 1349 struct delayed_work fault_reset_work; 1354 1350
+1302 -34
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 61 61 62 62 #define PCIE_CHANNEL 2 63 63 64 + #define MPT3_MAX_LUNS (255) 65 + 64 66 /* forward proto's */ 65 67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, 66 68 struct _sas_node *sas_expander); ··· 72 70 struct _sas_device *sas_device); 73 71 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, 74 72 u8 retry_count, u8 is_pd); 75 - static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); 73 + static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, 74 + u8 retry_count); 76 75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, 77 76 struct _pcie_device *pcie_device); 78 77 static void 79 78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); 80 79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); 81 80 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc); 81 + static enum device_responsive_state 82 + _scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, 83 + u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method); 84 + static enum device_responsive_state 85 + _scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 *is_ssd_device, 86 + u8 tr_timeout, u8 tr_method); 87 + static enum device_responsive_state 88 + _scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, 89 + u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method); 90 + static void _firmware_event_work_delayed(struct work_struct *work); 82 91 83 92 /* global parameters */ 84 93 LIST_HEAD(mpt3sas_ioc_list); ··· 172 159 MODULE_PARM_DESC(enable_sdev_max_qd, 173 160 "Enable sdev max qd as can_queue, def=disabled(0)"); 174 161 162 + /* 163 + * permit overriding the SCSI command issuing capability of 164 + * the driver to bring the drive to READY state 165 + */ 166 + static int issue_scsi_cmd_to_bringup_drive = 1; 167 + module_param(issue_scsi_cmd_to_bringup_drive, int, 0444); 168 + MODULE_PARM_DESC(issue_scsi_cmd_to_bringup_drive, "allow host driver to\n" 169 + "issue SCSI commands to bring the drive to READY state, default=1 "); 170 + 175 171 static int multipath_on_hba = -1; 176 172 module_param(multipath_on_hba, int, 0); 177 173 MODULE_PARM_DESC(multipath_on_hba, ··· 195 173 MODULE_PARM_DESC(host_tagset_enable, 196 174 "Shared host tagset enable/disable Default: enable(1)"); 197 175 176 + static int command_retry_count = 144; 177 + module_param(command_retry_count, int, 0444); 178 + MODULE_PARM_DESC(command_retry_count, "Device discovery TUR command retry\n" 179 + "count: (default=144)"); 180 + 198 181 /* raid transport support */ 199 182 static struct raid_template *mpt3sas_raid_template; 200 183 static struct raid_template *mpt2sas_raid_template; 201 184 185 + /** 186 + * enum device_responsive_state - responsive state 187 + * @DEVICE_READY: device is ready to be added 188 + * @DEVICE_RETRY: device can be retried later 189 + * @DEVICE_RETRY_UA: retry unit attentions 190 + * @DEVICE_START_UNIT: requires start unit 191 + * @DEVICE_STOP_UNIT: requires stop unit 192 + * @DEVICE_ERROR: device reported some fatal error 193 + * 194 + */ 195 + enum device_responsive_state { 196 + DEVICE_READY, 197 + DEVICE_RETRY, 198 + DEVICE_RETRY_UA, 199 + DEVICE_START_UNIT, 200 + DEVICE_STOP_UNIT, 201 + DEVICE_ERROR, 202 + }; 202 203 203 204 /** 204 205 * struct sense_info - common structure for obtaining sense keys ··· 250 205 251 206 /** 252 207 * struct fw_event_work - firmware event struct 208 + * @retries: 
retry count for processing the event 209 + * @delayed_work_active: flag indicating if delayed work is active 210 + * @delayed_work: delayed work item for deferred event handling 253 211 * @list: link list framework 254 212 * @work: work object (ioc->fault_reset_work_q) 255 213 * @ioc: per adapter object ··· 267 219 * This object stored on ioc->fw_event_list. 268 220 */ 269 221 struct fw_event_work { 222 + u8 *retries; 223 + u8 delayed_work_active; 224 + struct delayed_work delayed_work; 270 225 struct list_head list; 271 226 struct work_struct work; 272 227 ··· 281 230 u16 event; 282 231 struct kref refcount; 283 232 char event_data[] __aligned(4); 233 + 284 234 }; 285 235 286 236 static void fw_event_work_free(struct kref *r) 287 237 { 288 - kfree(container_of(r, struct fw_event_work, refcount)); 238 + struct fw_event_work *fw_work; 239 + 240 + fw_work = container_of(r, struct fw_event_work, refcount); 241 + kfree(fw_work->retries); 242 + kfree(fw_work); 289 243 } 290 244 291 245 static void fw_event_work_get(struct fw_event_work *fw_work) ··· 1011 955 sas_device_put(sas_device); 1012 956 } 1013 957 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 958 + 1014 959 } 1015 960 1016 961 /** ··· 2585 2528 char *r_level = ""; 2586 2529 u16 handle, volume_handle = 0; 2587 2530 u64 volume_wwid = 0; 2531 + enum device_responsive_state retval; 2532 + u8 count = 0; 2588 2533 2589 2534 qdepth = 1; 2590 2535 sas_device_priv_data = sdev->hostdata; ··· 2745 2686 2746 2687 pcie_device_put(pcie_device); 2747 2688 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2689 + 2748 2690 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2749 2691 lim->virt_boundary_mask = ioc->page_size - 1; 2750 2692 return 0; ··· 2797 2737 sas_device_put(sas_device); 2798 2738 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2799 2739 2800 - if (!ssp_target) 2740 + if (!ssp_target) { 2801 2741 _scsih_display_sata_capabilities(ioc, handle, sdev); 2742 + 2743 + do { 2744 + retval = _scsih_ata_pass_thru_idd(ioc, handle, 2745 + &sas_device->ssd_device, 30, 0); 2746 + } while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA) 2747 + && count++ < 3); 2748 + } 2802 2749 2803 2750 2804 2751 mpt3sas_scsih_change_queue_depth(sdev, qdepth); ··· 3662 3595 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 3663 3596 } 3664 3597 3598 + /** 3599 + * _scsih_fw_event_requeue - requeue an event 3600 + * @ioc: per adapter object 3601 + * @fw_event: object describing the event 3602 + * @delay: time in milliseconds to wait before retrying the event 3603 + * 3604 + * Context: This function will acquire ioc->fw_event_lock. 3605 + * 3606 + * Return nothing. 
3607 + */ 3608 + static void 3609 + _scsih_fw_event_requeue(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work 3610 + *fw_event, unsigned long delay) 3611 + { 3612 + unsigned long flags; 3613 + 3614 + if (ioc->firmware_event_thread == NULL) 3615 + return; 3616 + 3617 + spin_lock_irqsave(&ioc->fw_event_lock, flags); 3618 + fw_event_work_get(fw_event); 3619 + list_add_tail(&fw_event->list, &ioc->fw_event_list); 3620 + if (!fw_event->delayed_work_active) { 3621 + fw_event->delayed_work_active = 1; 3622 + INIT_DELAYED_WORK(&fw_event->delayed_work, 3623 + _firmware_event_work_delayed); 3624 + } 3625 + queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, 3626 + msecs_to_jiffies(delay)); 3627 + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 3628 + } 3665 3629 3666 3630 /** 3667 3631 * mpt3sas_send_trigger_data_event - send event for processing trigger data ··· 3923 3825 /** 3924 3826 * _scsih_ublock_io_all_device - unblock every device 3925 3827 * @ioc: per adapter object 3828 + * @no_turs: flag to disable TEST UNIT READY checks during device unblocking 3926 3829 * 3927 3830 * change the device state from block to running 3928 3831 */ 3929 3832 static void 3930 - _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) 3833 + _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc, u8 no_turs) 3931 3834 { 3932 3835 struct MPT3SAS_DEVICE *sas_device_priv_data; 3933 3836 struct scsi_device *sdev; 3837 + struct MPT3SAS_TARGET *sas_target; 3838 + enum device_responsive_state rc; 3839 + struct _sas_device *sas_device = NULL; 3840 + struct _pcie_device *pcie_device = NULL; 3841 + int count = 0; 3842 + u8 tr_method = 0; 3843 + u8 tr_timeout = 30; 3844 + 3934 3845 3935 3846 shost_for_each_device(sdev, ioc->shost) { 3936 3847 sas_device_priv_data = sdev->hostdata; 3937 3848 if (!sas_device_priv_data) 3938 3849 continue; 3850 + 3851 + sas_target = sas_device_priv_data->sas_target; 3852 + if (!sas_target || sas_target->deleted) 3853 + continue; 3854 + 3939 3855 if (!sas_device_priv_data->block) 3940 3856 continue; 3941 3857 3942 - dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, 3943 - "device_running, handle(0x%04x)\n", 3944 - sas_device_priv_data->sas_target->handle)); 3858 + if ((no_turs) || (!issue_scsi_cmd_to_bringup_drive)) { 3859 + sdev_printk(KERN_WARNING, sdev, "device_unblocked handle(0x%04x)\n", 3860 + sas_device_priv_data->sas_target->handle); 3861 + _scsih_internal_device_unblock(sdev, sas_device_priv_data); 3862 + continue; 3863 + } 3864 + 3865 + do { 3866 + pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle); 3867 + if (pcie_device && (!ioc->tm_custom_handling) && 3868 + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { 3869 + tr_timeout = pcie_device->reset_timeout; 3870 + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3871 + } 3872 + rc = _scsih_wait_for_device_to_become_ready(ioc, 3873 + sas_target->handle, 0, (sas_target->flags & 3874 + MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method); 3875 + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 3876 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) 3877 + ssleep(1); 3878 + if (pcie_device) 3879 + pcie_device_put(pcie_device); 3880 + } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 3881 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) 3882 + && count++ < command_retry_count); 3883 + sas_device_priv_data->block = 0; 3884 + if (rc != DEVICE_READY) 3885 + sas_device_priv_data->deleted = 1; 3886 + 3945 3887 _scsih_internal_device_unblock(sdev, 
sas_device_priv_data); 3888 + 3889 + if (rc != DEVICE_READY) { 3890 + sdev_printk(KERN_WARNING, sdev, "%s: device_offlined,\n" 3891 + "handle(0x%04x)\n", 3892 + __func__, sas_device_priv_data->sas_target->handle); 3893 + scsi_device_set_state(sdev, SDEV_OFFLINE); 3894 + sas_device = mpt3sas_get_sdev_by_addr(ioc, 3895 + sas_device_priv_data->sas_target->sas_address, 3896 + sas_device_priv_data->sas_target->port); 3897 + if (sas_device) { 3898 + _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); 3899 + sas_device_put(sas_device); 3900 + } else { 3901 + pcie_device = mpt3sas_get_pdev_by_wwid(ioc, 3902 + sas_device_priv_data->sas_target->sas_address); 3903 + if (pcie_device) { 3904 + if (pcie_device->enclosure_handle != 0) 3905 + sdev_printk(KERN_INFO, sdev, "enclosure logical id\n" 3906 + "(0x%016llx), slot(%d)\n", (unsigned long long) 3907 + pcie_device->enclosure_logical_id, 3908 + pcie_device->slot); 3909 + if (pcie_device->connector_name[0] != '\0') 3910 + sdev_printk(KERN_INFO, sdev, "enclosure level(0x%04x),\n" 3911 + " connector name( %s)\n", 3912 + pcie_device->enclosure_level, 3913 + pcie_device->connector_name); 3914 + pcie_device_put(pcie_device); 3915 + } 3916 + } 3917 + } else 3918 + sdev_printk(KERN_WARNING, sdev, "device_unblocked,\n" 3919 + "handle(0x%04x)\n", 3920 + sas_device_priv_data->sas_target->handle); 3946 3921 } 3947 3922 } 3948 3923 3924 + /** 3925 + * _scsih_ublock_io_device_wait - unblock IO for target 3926 + * @ioc: per adapter object 3927 + * @sas_address: sas address 3928 + * @port: hba port entry 3929 + * 3930 + * make sure device is reponsponding before unblocking 3931 + */ 3932 + static void 3933 + _scsih_ublock_io_device_wait(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 3934 + struct hba_port *port) 3935 + { 3936 + struct MPT3SAS_DEVICE *sas_device_priv_data; 3937 + struct MPT3SAS_TARGET *sas_target; 3938 + enum device_responsive_state rc; 3939 + struct scsi_device *sdev; 3940 + int host_reset_completion_count; 3941 + struct _sas_device *sas_device; 3942 + struct _pcie_device *pcie_device; 3943 + u8 tr_timeout = 30; 3944 + u8 tr_method = 0; 3945 + int count = 0; 3946 + 3947 + /* moving devices from SDEV_OFFLINE to SDEV_BLOCK */ 3948 + shost_for_each_device(sdev, ioc->shost) { 3949 + sas_device_priv_data = sdev->hostdata; 3950 + if (!sas_device_priv_data) 3951 + continue; 3952 + sas_target = sas_device_priv_data->sas_target; 3953 + if (!sas_target) 3954 + continue; 3955 + if (sas_target->sas_address != sas_address || 3956 + sas_target->port != port) 3957 + continue; 3958 + if (sdev->sdev_state == SDEV_OFFLINE) { 3959 + sas_device_priv_data->block = 1; 3960 + sas_device_priv_data->deleted = 0; 3961 + scsi_device_set_state(sdev, SDEV_RUNNING); 3962 + scsi_internal_device_block_nowait(sdev); 3963 + } 3964 + } 3965 + 3966 + /* moving devices from SDEV_BLOCK to SDEV_RUNNING state */ 3967 + shost_for_each_device(sdev, ioc->shost) { 3968 + sas_device_priv_data = sdev->hostdata; 3969 + if (!sas_device_priv_data) 3970 + continue; 3971 + sas_target = sas_device_priv_data->sas_target; 3972 + if (!sas_target) 3973 + continue; 3974 + if (sas_target->sas_address != sas_address || 3975 + sas_target->port != port) 3976 + continue; 3977 + if (!sas_device_priv_data->block) 3978 + continue; 3979 + 3980 + do { 3981 + host_reset_completion_count = 0; 3982 + pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle); 3983 + if (pcie_device && (!ioc->tm_custom_handling) && 3984 + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { 3985 + 
tr_timeout = pcie_device->reset_timeout; 3986 + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3987 + } 3988 + rc = _scsih_wait_for_device_to_become_ready(ioc, 3989 + sas_target->handle, 0, (sas_target->flags & 3990 + MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method); 3991 + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 3992 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) { 3993 + do { 3994 + msleep(500); 3995 + host_reset_completion_count++; 3996 + } while (rc == DEVICE_RETRY && 3997 + ioc->shost_recovery); 3998 + if (host_reset_completion_count > 1) { 3999 + rc = _scsih_wait_for_device_to_become_ready(ioc, 4000 + sas_target->handle, 0, (sas_target->flags & 4001 + MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, 4002 + tr_timeout, tr_method); 4003 + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 4004 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) 4005 + msleep(500); 4006 + } 4007 + continue; 4008 + } 4009 + if (pcie_device) 4010 + pcie_device_put(pcie_device); 4011 + } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 4012 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) 4013 + && count++ <= command_retry_count); 4014 + 4015 + sas_device_priv_data->block = 0; 4016 + if (rc != DEVICE_READY) 4017 + sas_device_priv_data->deleted = 1; 4018 + scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); 4019 + 4020 + if (rc != DEVICE_READY) { 4021 + sdev_printk(KERN_WARNING, sdev, 4022 + "%s: device_offlined, handle(0x%04x)\n", 4023 + __func__, sas_device_priv_data->sas_target->handle); 4024 + 4025 + sas_device = mpt3sas_get_sdev_by_handle(ioc, 4026 + sas_device_priv_data->sas_target->handle); 4027 + if (sas_device) { 4028 + _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); 4029 + sas_device_put(sas_device); 4030 + } else { 4031 + pcie_device = mpt3sas_get_pdev_by_handle(ioc, 4032 + sas_device_priv_data->sas_target->handle); 4033 + if (pcie_device) { 4034 + if (pcie_device->enclosure_handle != 0) 4035 + sdev_printk(KERN_INFO, sdev, 4036 + "device_offlined, enclosure logical id(0x%016llx),\n" 4037 + " slot(%d)\n", (unsigned long long) 4038 + pcie_device->enclosure_logical_id, 4039 + pcie_device->slot); 4040 + if (pcie_device->connector_name[0] != '\0') 4041 + sdev_printk(KERN_WARNING, sdev, 4042 + "device_offlined, enclosure level(0x%04x),\n" 4043 + "connector name( %s)\n", 4044 + pcie_device->enclosure_level, 4045 + pcie_device->connector_name); 4046 + pcie_device_put(pcie_device); 4047 + } 4048 + } 4049 + scsi_device_set_state(sdev, SDEV_OFFLINE); 4050 + } else { 4051 + sdev_printk(KERN_WARNING, sdev, 4052 + "device_unblocked, handle(0x%04x)\n", 4053 + sas_device_priv_data->sas_target->handle); 4054 + } 4055 + } 4056 + } 3949 4057 3950 4058 /** 3951 4059 * _scsih_ublock_io_device - prepare device to be deleted ··· 5422 5118 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or 5423 5119 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 5424 5120 */ 5425 - static int 5426 - scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 5121 + static enum scsi_qc_status scsih_qcmd(struct Scsi_Host *shost, 5122 + struct scsi_cmnd *scmd) 5427 5123 { 5428 5124 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 5429 5125 struct MPT3SAS_DEVICE *sas_device_priv_data; ··· 7412 7108 return 1; 7413 7109 } 7414 7110 7111 + /** 7112 + * _scsi_send_scsi_io - send internal SCSI_IO to target 7113 + * @ioc: per adapter object 7114 + * @transfer_packet: packet describing the transfer 7115 + * @tr_timeout: Target Reset Timeout 7116 
+ * @tr_method: Target Reset Method 7117 + * Context: user 7118 + * 7119 + * Returns 0 for success, non-zero for failure. 7120 + */ 7121 + static int 7122 + _scsi_send_scsi_io(struct MPT3SAS_ADAPTER *ioc, struct _scsi_io_transfer 7123 + *transfer_packet, u8 tr_timeout, u8 tr_method) 7124 + { 7125 + Mpi2SCSIIOReply_t *mpi_reply; 7126 + Mpi2SCSIIORequest_t *mpi_request; 7127 + u16 smid; 7128 + u8 issue_reset = 0; 7129 + int rc; 7130 + void *priv_sense; 7131 + u32 mpi_control; 7132 + void *psge; 7133 + dma_addr_t data_out_dma = 0; 7134 + dma_addr_t data_in_dma = 0; 7135 + size_t data_in_sz = 0; 7136 + size_t data_out_sz = 0; 7137 + u16 handle; 7138 + u8 retry_count = 0, host_reset_count = 0; 7139 + int tm_return_code; 7415 7140 7141 + if (ioc->pci_error_recovery) { 7142 + pr_info("%s: pci error recovery in progress!\n", __func__); 7143 + return -EFAULT; 7144 + } 7416 7145 7146 + if (ioc->shost_recovery) { 7147 + pr_info("%s: host recovery in progress!\n", __func__); 7148 + return -EAGAIN; 7149 + } 7417 7150 7418 - #define MPT3_MAX_LUNS (255) 7151 + handle = transfer_packet->handle; 7152 + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { 7153 + pr_info("%s: no device!\n", __func__); 7154 + return -EFAULT; 7155 + } 7156 + 7157 + mutex_lock(&ioc->scsih_cmds.mutex); 7158 + 7159 + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { 7160 + pr_err("%s: scsih_cmd in use\n", __func__); 7161 + rc = -EAGAIN; 7162 + goto out; 7163 + } 7164 + 7165 + retry_loop: 7166 + if (test_bit(handle, ioc->device_remove_in_progress)) { 7167 + pr_info("%s: device removal in progress\n", __func__); 7168 + rc = -EFAULT; 7169 + goto out; 7170 + } 7171 + 7172 + ioc->scsih_cmds.status = MPT3_CMD_PENDING; 7173 + 7174 + rc = mpt3sas_wait_for_ioc(ioc, 10); 7175 + if (rc) 7176 + goto out; 7177 + 7178 + /* Use second reserved smid for discovery related IOs */ 7179 + smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY; 7180 + 7181 + rc = 0; 7182 + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 7183 + ioc->scsih_cmds.smid = smid; 7184 + memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 7185 + if (transfer_packet->is_raid) 7186 + mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 7187 + else 7188 + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 7189 + mpi_request->DevHandle = cpu_to_le16(handle); 7190 + 7191 + switch (transfer_packet->dir) { 7192 + case DMA_TO_DEVICE: 7193 + mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 7194 + data_out_dma = transfer_packet->data_dma; 7195 + data_out_sz = transfer_packet->data_length; 7196 + break; 7197 + case DMA_FROM_DEVICE: 7198 + mpi_control = MPI2_SCSIIO_CONTROL_READ; 7199 + data_in_dma = transfer_packet->data_dma; 7200 + data_in_sz = transfer_packet->data_length; 7201 + break; 7202 + case DMA_BIDIRECTIONAL: 7203 + mpi_control = MPI2_SCSIIO_CONTROL_BIDIRECTIONAL; 7204 + /* TODO - is BIDI support needed ?? 
*/ 7205 + WARN_ON_ONCE(true); 7206 + break; 7207 + default: 7208 + case DMA_NONE: 7209 + mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 7210 + break; 7211 + } 7212 + 7213 + psge = &mpi_request->SGL; 7214 + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 7215 + data_in_sz); 7216 + 7217 + mpi_request->Control = cpu_to_le32(mpi_control | 7218 + MPI2_SCSIIO_CONTROL_SIMPLEQ); 7219 + mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length); 7220 + mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; 7221 + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 7222 + mpi_request->SenseBufferLowAddress = 7223 + mpt3sas_base_get_sense_buffer_dma(ioc, smid); 7224 + priv_sense = mpt3sas_base_get_sense_buffer(ioc, smid); 7225 + mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; 7226 + mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length); 7227 + int_to_scsilun(transfer_packet->lun, (struct scsi_lun *) 7228 + mpi_request->LUN); 7229 + memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb, 7230 + transfer_packet->cdb_length); 7231 + init_completion(&ioc->scsih_cmds.done); 7232 + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) 7233 + ioc->put_smid_scsi_io(ioc, smid, handle); 7234 + else 7235 + ioc->put_smid_default(ioc, smid); 7236 + wait_for_completion_timeout(&ioc->scsih_cmds.done, 7237 + transfer_packet->timeout*HZ); 7238 + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { 7239 + mpt3sas_check_cmd_timeout(ioc, 7240 + ioc->scsih_cmds.status, mpi_request, 7241 + sizeof(Mpi2SCSIIORequest_t)/4, issue_reset); 7242 + goto issue_target_reset; 7243 + } 7244 + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { 7245 + transfer_packet->valid_reply = 1; 7246 + mpi_reply = ioc->scsih_cmds.reply; 7247 + transfer_packet->sense_length = 7248 + le32_to_cpu(mpi_reply->SenseCount); 7249 + if (transfer_packet->sense_length) 7250 + memcpy(transfer_packet->sense, priv_sense, 7251 + transfer_packet->sense_length); 7252 + transfer_packet->transfer_length = 7253 + le32_to_cpu(mpi_reply->TransferCount); 7254 + transfer_packet->ioc_status = 7255 + le16_to_cpu(mpi_reply->IOCStatus) & 7256 + MPI2_IOCSTATUS_MASK; 7257 + transfer_packet->scsi_state = mpi_reply->SCSIState; 7258 + transfer_packet->scsi_status = mpi_reply->SCSIStatus; 7259 + transfer_packet->log_info = 7260 + le32_to_cpu(mpi_reply->IOCLogInfo); 7261 + } 7262 + goto out; 7263 + 7264 + issue_target_reset: 7265 + if (issue_reset) { 7266 + pr_info("issue target reset: handle (0x%04x)\n", handle); 7267 + tm_return_code = 7268 + mpt3sas_scsih_issue_locked_tm(ioc, handle, 7269 + 0xFFFFFFFF, 0xFFFFFFFF, 0, 7270 + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, 0, 7271 + tr_timeout, tr_method); 7272 + 7273 + if (tm_return_code == SUCCESS) { 7274 + pr_info("target reset completed: handle (0x%04x)\n", handle); 7275 + /* If the command is successfully aborted due to 7276 + * target reset TM then do up to three retries else 7277 + * command will be terminated by the host reset TM and 7278 + * hence retry once. 
7279 + */ 7280 + if (((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) && 7281 + retry_count++ < 3) || 7282 + ((ioc->scsih_cmds.status & MPT3_CMD_RESET) && 7283 + host_reset_count++ == 0)) { 7284 + pr_info("issue retry: handle (0x%04x)\n", handle); 7285 + goto retry_loop; 7286 + } 7287 + } else 7288 + pr_info("target reset didn't complete: handle(0x%04x)\n", handle); 7289 + rc = -EFAULT; 7290 + } else 7291 + rc = -EAGAIN; 7292 + 7293 + out: 7294 + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; 7295 + mutex_unlock(&ioc->scsih_cmds.mutex); 7296 + return rc; 7297 + } 7298 + 7299 + /** 7300 + * _scsih_determine_disposition - 7301 + * @ioc: per adapter object 7302 + * @transfer_packet: packet describing the transfer 7303 + * Context: user 7304 + * 7305 + * Determines if an internal generated scsi_io is good data, or 7306 + * whether it needs to be retried or treated as an error. 7307 + * 7308 + * Returns device_responsive_state 7309 + */ 7310 + static enum device_responsive_state 7311 + _scsih_determine_disposition(struct MPT3SAS_ADAPTER *ioc, 7312 + struct _scsi_io_transfer *transfer_packet) 7313 + { 7314 + static enum device_responsive_state rc; 7315 + struct sense_info sense_info = {0, 0, 0}; 7316 + u8 check_sense = 0; 7317 + char *desc = NULL; 7318 + 7319 + if (!transfer_packet->valid_reply) 7320 + return DEVICE_READY; 7321 + 7322 + switch (transfer_packet->ioc_status) { 7323 + case MPI2_IOCSTATUS_BUSY: 7324 + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: 7325 + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: 7326 + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: 7327 + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 7328 + rc = DEVICE_RETRY; 7329 + break; 7330 + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: 7331 + if (transfer_packet->log_info == 0x31170000) { 7332 + rc = DEVICE_RETRY; 7333 + break; 7334 + } 7335 + if (transfer_packet->cdb[0] == REPORT_LUNS) 7336 + rc = DEVICE_READY; 7337 + else 7338 + rc = DEVICE_RETRY; 7339 + break; 7340 + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: 7341 + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: 7342 + case MPI2_IOCSTATUS_SUCCESS: 7343 + if (!transfer_packet->scsi_state && 7344 + !transfer_packet->scsi_status) { 7345 + rc = DEVICE_READY; 7346 + break; 7347 + } 7348 + if (transfer_packet->scsi_state & 7349 + MPI2_SCSI_STATE_AUTOSENSE_VALID) { 7350 + rc = DEVICE_ERROR; 7351 + check_sense = 1; 7352 + break; 7353 + } 7354 + if (transfer_packet->scsi_state & 7355 + (MPI2_SCSI_STATE_AUTOSENSE_FAILED | 7356 + MPI2_SCSI_STATE_NO_SCSI_STATUS | 7357 + MPI2_SCSI_STATE_TERMINATED)) { 7358 + rc = DEVICE_RETRY; 7359 + break; 7360 + } 7361 + if (transfer_packet->scsi_status >= 7362 + MPI2_SCSI_STATUS_BUSY) { 7363 + rc = DEVICE_RETRY; 7364 + break; 7365 + } 7366 + rc = DEVICE_READY; 7367 + break; 7368 + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 7369 + if (transfer_packet->scsi_state & 7370 + MPI2_SCSI_STATE_TERMINATED) 7371 + rc = DEVICE_RETRY; 7372 + else 7373 + rc = DEVICE_ERROR; 7374 + break; 7375 + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: 7376 + default: 7377 + rc = DEVICE_ERROR; 7378 + break; 7379 + } 7380 + 7381 + if (check_sense) { 7382 + _scsih_normalize_sense(transfer_packet->sense, &sense_info); 7383 + if (sense_info.skey == UNIT_ATTENTION) 7384 + rc = DEVICE_RETRY_UA; 7385 + else if (sense_info.skey == NOT_READY) { 7386 + /* medium isn't present */ 7387 + if (sense_info.asc == 0x3a) 7388 + rc = DEVICE_READY; 7389 + /* LOGICAL UNIT NOT READY */ 7390 + else if (sense_info.asc == 0x04) { 7391 + if (sense_info.ascq == 0x03 || 7392 + sense_info.ascq == 0x0b || 7393 + sense_info.ascq == 0x0c) { 7394 + 
rc = DEVICE_ERROR; 7395 + } else 7396 + rc = DEVICE_START_UNIT; 7397 + } 7398 + /* LOGICAL UNIT HAS NOT SELF-CONFIGURED YET */ 7399 + else if (sense_info.asc == 0x3e && !sense_info.ascq) 7400 + rc = DEVICE_START_UNIT; 7401 + } else if (sense_info.skey == ILLEGAL_REQUEST && 7402 + transfer_packet->cdb[0] == REPORT_LUNS) { 7403 + rc = DEVICE_READY; 7404 + } else if (sense_info.skey == MEDIUM_ERROR) { 7405 + 7406 + /* medium is corrupt, lets add the device so 7407 + * users can collect some info as needed 7408 + */ 7409 + 7410 + if (sense_info.asc == 0x31) 7411 + rc = DEVICE_READY; 7412 + } else if (sense_info.skey == HARDWARE_ERROR) { 7413 + /* Defect List Error, still add the device */ 7414 + if (sense_info.asc == 0x19) 7415 + rc = DEVICE_READY; 7416 + } 7417 + } 7418 + 7419 + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { 7420 + switch (rc) { 7421 + case DEVICE_READY: 7422 + desc = "ready"; 7423 + break; 7424 + case DEVICE_RETRY: 7425 + desc = "retry"; 7426 + break; 7427 + case DEVICE_RETRY_UA: 7428 + desc = "retry_ua"; 7429 + break; 7430 + case DEVICE_START_UNIT: 7431 + desc = "start_unit"; 7432 + break; 7433 + case DEVICE_STOP_UNIT: 7434 + desc = "stop_unit"; 7435 + break; 7436 + case DEVICE_ERROR: 7437 + desc = "error"; 7438 + break; 7439 + } 7440 + 7441 + pr_info("ioc_status(0x%04x),\n" 7442 + "loginfo(0x%08x), scsi_status(0x%02x),\n" 7443 + "scsi_state(0x%02x), rc(%s)\n", 7444 + transfer_packet->ioc_status, 7445 + transfer_packet->log_info, transfer_packet->scsi_status, 7446 + transfer_packet->scsi_state, desc); 7447 + 7448 + if (check_sense) 7449 + pr_info("\t[sense_key,asc,ascq]:\n" 7450 + "[0x%02x,0x%02x,0x%02x]\n", 7451 + sense_info.skey, sense_info.asc, sense_info.ascq); 7452 + } 7453 + return rc; 7454 + } 7455 + 7456 + /** 7457 + * _scsih_report_luns - send REPORT_LUNS to target 7458 + * @ioc: per adapter object 7459 + * @handle: expander handle 7460 + * @data: report luns data payload 7461 + * @data_length: length of data in bytes 7462 + * @retry_count: Requeue count 7463 + * @is_pd: is this hidden raid component 7464 + * @tr_timeout: Target Reset Timeout 7465 + * @tr_method: Target Reset Method 7466 + * Context: user 7467 + * 7468 + * Returns device_responsive_state 7469 + */ 7470 + static enum device_responsive_state 7471 + _scsih_report_luns(struct MPT3SAS_ADAPTER *ioc, u16 handle, void *data, 7472 + u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method) 7473 + { 7474 + struct _scsi_io_transfer *transfer_packet; 7475 + enum device_responsive_state rc; 7476 + void *lun_data; 7477 + int return_code; 7478 + int retries; 7479 + 7480 + lun_data = NULL; 7481 + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); 7482 + if (!transfer_packet) { 7483 + 7484 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7485 + rc = DEVICE_RETRY; 7486 + goto out; 7487 + } 7488 + 7489 + lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, 7490 + &transfer_packet->data_dma, GFP_ATOMIC); 7491 + if (!lun_data) { 7492 + 7493 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7494 + rc = DEVICE_RETRY; 7495 + goto out; 7496 + } 7497 + 7498 + for (retries = 0; retries < 4; retries++) { 7499 + rc = DEVICE_ERROR; 7500 + ioc_info(ioc, "REPORT_LUNS: handle(0x%04x),\n" 7501 + "retries(%d)\n", handle, retries); 7502 + memset(lun_data, 0, data_length); 7503 + transfer_packet->handle = handle; 7504 + transfer_packet->dir = DMA_FROM_DEVICE; 7505 + transfer_packet->data_length = data_length; 7506 + 
transfer_packet->cdb_length = 12; 7507 + transfer_packet->cdb[0] = REPORT_LUNS; 7508 + transfer_packet->cdb[6] = (data_length >> 24) & 0xFF; 7509 + transfer_packet->cdb[7] = (data_length >> 16) & 0xFF; 7510 + transfer_packet->cdb[8] = (data_length >> 8) & 0xFF; 7511 + transfer_packet->cdb[9] = data_length & 0xFF; 7512 + transfer_packet->timeout = 30; 7513 + transfer_packet->is_raid = is_pd; 7514 + 7515 + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); 7516 + switch (return_code) { 7517 + case 0: 7518 + rc = _scsih_determine_disposition(ioc, transfer_packet); 7519 + if (rc == DEVICE_READY) { 7520 + memcpy(data, lun_data, data_length); 7521 + goto out; 7522 + } else if (rc == DEVICE_ERROR) 7523 + goto out; 7524 + break; 7525 + case -EAGAIN: 7526 + rc = DEVICE_RETRY; 7527 + break; 7528 + case -EFAULT: 7529 + default: 7530 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7531 + goto out; 7532 + } 7533 + } 7534 + out: 7535 + 7536 + if (lun_data) 7537 + dma_free_coherent(&ioc->pdev->dev, data_length, lun_data, 7538 + transfer_packet->data_dma); 7539 + kfree(transfer_packet); 7540 + 7541 + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 7542 + rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count) 7543 + rc = DEVICE_ERROR; 7544 + 7545 + return rc; 7546 + } 7547 + 7548 + /** 7549 + * _scsih_start_unit - send START_UNIT to target 7550 + * @ioc: per adapter object 7551 + * @handle: expander handle 7552 + * @lun: lun number 7553 + * @is_pd: is this hidden raid component 7554 + * @tr_timeout: Target Reset Timeout 7555 + * @tr_method: Target Reset Method 7556 + * Context: user 7557 + * 7558 + * Returns device_responsive_state 7559 + */ 7560 + static enum device_responsive_state 7561 + _scsih_start_unit(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, u8 is_pd, 7562 + u8 tr_timeout, u8 tr_method) 7563 + { 7564 + struct _scsi_io_transfer *transfer_packet; 7565 + enum device_responsive_state rc; 7566 + int return_code; 7567 + 7568 + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); 7569 + if (!transfer_packet) { 7570 + 7571 + pr_info("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7572 + rc = DEVICE_RETRY; 7573 + goto out; 7574 + } 7575 + 7576 + rc = DEVICE_READY; 7577 + transfer_packet->handle = handle; 7578 + transfer_packet->dir = DMA_NONE; 7579 + transfer_packet->lun = lun; 7580 + transfer_packet->cdb_length = 6; 7581 + transfer_packet->cdb[0] = START_STOP; 7582 + transfer_packet->cdb[1] = 1; 7583 + transfer_packet->cdb[4] = 1; 7584 + transfer_packet->timeout = 30; 7585 + transfer_packet->is_raid = is_pd; 7586 + 7587 + pr_info("START_UNIT: handle(0x%04x), lun(%d)\n", handle, lun); 7588 + 7589 + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); 7590 + switch (return_code) { 7591 + case 0: 7592 + rc = _scsih_determine_disposition(ioc, transfer_packet); 7593 + break; 7594 + case -EAGAIN: 7595 + rc = DEVICE_RETRY; 7596 + break; 7597 + case -EFAULT: 7598 + default: 7599 + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7600 + rc = DEVICE_ERROR; 7601 + break; 7602 + } 7603 + out: 7604 + kfree(transfer_packet); 7605 + return rc; 7606 + } 7607 + 7608 + /** 7609 + * _scsih_test_unit_ready - send TUR to target 7610 + * @ioc: per adapter object 7611 + * @handle: expander handle 7612 + * @lun: lun number 7613 + * @is_pd: is this hidden raid component 7614 + * @tr_timeout: Target Reset timeout value for Pcie devie 7615 + * @tr_method: pcie device Target reset method 
7616 + * Context: user 7617 + * 7618 + * Returns device_responsive_state 7619 + */ 7620 + static enum device_responsive_state 7621 + _scsih_test_unit_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, 7622 + u8 is_pd, u8 tr_timeout, u8 tr_method) 7623 + { 7624 + struct _scsi_io_transfer *transfer_packet; 7625 + enum device_responsive_state rc; 7626 + int return_code; 7627 + int sata_init_failure = 0; 7628 + 7629 + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); 7630 + if (!transfer_packet) { 7631 + 7632 + pr_info("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7633 + rc = DEVICE_RETRY; 7634 + goto out; 7635 + } 7636 + 7637 + rc = DEVICE_READY; 7638 + transfer_packet->handle = handle; 7639 + transfer_packet->dir = DMA_NONE; 7640 + transfer_packet->lun = lun; 7641 + transfer_packet->cdb_length = 6; 7642 + transfer_packet->cdb[0] = TEST_UNIT_READY; 7643 + transfer_packet->timeout = 30; 7644 + transfer_packet->is_raid = is_pd; 7645 + 7646 + sata_init_retry: 7647 + pr_info("TEST_UNIT_READY: handle(0x%04x) lun(%d)\n", handle, lun); 7648 + 7649 + return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method); 7650 + switch (return_code) { 7651 + case 0: 7652 + rc = _scsih_determine_disposition(ioc, transfer_packet); 7653 + if (rc == DEVICE_RETRY && 7654 + transfer_packet->log_info == 0x31111000) { 7655 + if (!sata_init_failure++) { 7656 + pr_info("SATA Initialization Timeout sending a retry\n"); 7657 + rc = DEVICE_READY; 7658 + goto sata_init_retry; 7659 + } else { 7660 + pr_err("SATA Initialization Failed\n"); 7661 + rc = DEVICE_ERROR; 7662 + } 7663 + } 7664 + break; 7665 + case -EAGAIN: 7666 + rc = DEVICE_RETRY; 7667 + break; 7668 + case -EFAULT: 7669 + default: 7670 + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7671 + rc = DEVICE_ERROR; 7672 + break; 7673 + } 7674 + out: 7675 + kfree(transfer_packet); 7676 + return rc; 7677 + } 7678 + 7679 + /** 7680 + * _scsih_ata_pass_thru_idd - obtain SATA device Identify Device Data 7681 + * @ioc: per adapter object 7682 + * @handle: device handle 7683 + * @is_ssd_device : is this SATA SSD device 7684 + * @tr_timeout: Target Reset Timeout 7685 + * @tr_method: Target Reset Method 7686 + * Context: user 7687 + * 7688 + * Returns device_responsive_state 7689 + */ 7690 + static enum device_responsive_state 7691 + _scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, 7692 + u8 *is_ssd_device, u8 tr_timeout, u8 tr_method) 7693 + { 7694 + struct _scsi_io_transfer *transfer_packet; 7695 + enum device_responsive_state rc; 7696 + u16 *idd_data; 7697 + int return_code; 7698 + u32 data_length; 7699 + 7700 + idd_data = NULL; 7701 + transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL); 7702 + if (!transfer_packet) { 7703 + 7704 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7705 + rc = DEVICE_RETRY; 7706 + goto out; 7707 + } 7708 + data_length = 512; 7709 + idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, 7710 + &transfer_packet->data_dma, GFP_ATOMIC); 7711 + if (!idd_data) { 7712 + 7713 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7714 + rc = DEVICE_RETRY; 7715 + goto out; 7716 + } 7717 + rc = DEVICE_READY; 7718 + memset(idd_data, 0, data_length); 7719 + transfer_packet->handle = handle; 7720 + transfer_packet->dir = DMA_FROM_DEVICE; 7721 + transfer_packet->data_length = data_length; 7722 + transfer_packet->cdb_length = 12; 7723 + transfer_packet->cdb[0] = ATA_12; 7724 + transfer_packet->cdb[1] = 
0x8; 7725 + transfer_packet->cdb[2] = 0xd; 7726 + transfer_packet->cdb[3] = 0x1; 7727 + transfer_packet->cdb[9] = 0xec; 7728 + transfer_packet->timeout = 30; 7729 + 7730 + return_code = _scsi_send_scsi_io(ioc, transfer_packet, 30, 0); 7731 + switch (return_code) { 7732 + case 0: 7733 + rc = _scsih_determine_disposition(ioc, transfer_packet); 7734 + if (rc == DEVICE_READY) { 7735 + // Check if nominal media rotation rate is set to 1 i.e. SSD device 7736 + if (idd_data[217] == 1) 7737 + *is_ssd_device = 1; 7738 + } 7739 + break; 7740 + case -EAGAIN: 7741 + rc = DEVICE_RETRY; 7742 + break; 7743 + case -EFAULT: 7744 + default: 7745 + 7746 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7747 + rc = DEVICE_ERROR; 7748 + break; 7749 + } 7750 + 7751 + out: 7752 + if (idd_data) { 7753 + dma_free_coherent(&ioc->pdev->dev, data_length, idd_data, 7754 + transfer_packet->data_dma); 7755 + } 7756 + kfree(transfer_packet); 7757 + return rc; 7758 + } 7759 + 7760 + /** 7761 + * _scsih_wait_for_device_to_become_ready - handle busy devices 7762 + * @ioc: per adapter object 7763 + * @handle: expander handle 7764 + * @retry_count: number of times this event has been retried 7765 + * @is_pd: is this hidden raid component 7766 + * @lun: lun number 7767 + * @tr_timeout: Target Reset Timeout 7768 + * @tr_method: Target Reset Method 7769 + * 7770 + * Some devices spend too much time in busy state, queue event later 7771 + * 7772 + * Return the device_responsive_state. 7773 + */ 7774 + 7775 + static enum device_responsive_state 7776 + _scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, 7777 + u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method) 7778 + { 7779 + enum device_responsive_state rc; 7780 + 7781 + if (ioc->pci_error_recovery) 7782 + return DEVICE_ERROR; 7783 + 7784 + if (ioc->shost_recovery) 7785 + return DEVICE_RETRY; 7786 + 7787 + rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method); 7788 + if (rc == DEVICE_READY || rc == DEVICE_ERROR) 7789 + return rc; 7790 + else if (rc == DEVICE_START_UNIT) { 7791 + rc = _scsih_start_unit(ioc, handle, lun, is_pd, tr_timeout, tr_method); 7792 + if (rc == DEVICE_ERROR) 7793 + return rc; 7794 + rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method); 7795 + } 7796 + 7797 + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 7798 + rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count) 7799 + rc = DEVICE_ERROR; 7800 + return rc; 7801 + } 7802 + 7803 + static inline int mpt_scsilun_to_int(struct scsi_lun *scsilun) 7804 + { 7805 + return scsilun_to_int(scsilun); 7806 + } 7807 + 7808 + /** 7809 + * _scsih_wait_for_target_to_become_ready - handle busy devices 7810 + * @ioc: per adapter object 7811 + * @handle: expander handle 7812 + * @retry_count: number of times this event has been retried 7813 + * @is_pd: is this hidden raid component 7814 + * @tr_timeout: Target Reset timeout value 7815 + * @tr_method: Target Reset method Hot/Protocol level. 7816 + * 7817 + * Some devices spend too much time in busy state, queue event later 7818 + * 7819 + * Return the device_responsive_state. 
7820 + */ 7821 + static enum device_responsive_state 7822 + _scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, 7823 + u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method) 7824 + { 7825 + enum device_responsive_state rc; 7826 + struct scsi_lun *lun_data; 7827 + u32 length, num_luns; 7828 + u8 *data; 7829 + int lun; 7830 + struct scsi_lun *lunp; 7831 + 7832 + lun_data = kcalloc(MPT3_MAX_LUNS, sizeof(struct scsi_lun), GFP_KERNEL); 7833 + if (!lun_data) { 7834 + 7835 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 7836 + return DEVICE_RETRY; 7837 + } 7838 + 7839 + rc = _scsih_report_luns(ioc, handle, lun_data, 7840 + MPT3_MAX_LUNS * sizeof(struct scsi_lun), retry_count, is_pd, 7841 + tr_timeout, tr_method); 7842 + 7843 + if (rc != DEVICE_READY) 7844 + goto out; 7845 + 7846 + /* some debug bits*/ 7847 + data = (u8 *)lun_data; 7848 + length = ((data[0] << 24) | (data[1] << 16) | 7849 + (data[2] << 8) | (data[3] << 0)); 7850 + 7851 + num_luns = (length / sizeof(struct scsi_lun)); 7852 + 7853 + lunp = &lun_data[1]; 7854 + lun = (num_luns) ? mpt_scsilun_to_int(&lun_data[1]) : 0; 7855 + rc = _scsih_wait_for_device_to_become_ready(ioc, handle, retry_count, 7856 + is_pd, lun, tr_timeout, tr_method); 7857 + 7858 + if (rc == DEVICE_ERROR) { 7859 + struct scsi_lun *lunq; 7860 + 7861 + for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) { 7862 + 7863 + rc = _scsih_wait_for_device_to_become_ready(ioc, handle, 7864 + retry_count, is_pd, mpt_scsilun_to_int(lunq), 7865 + tr_timeout, tr_method); 7866 + if (rc != DEVICE_ERROR) 7867 + goto out; 7868 + } 7869 + } 7870 + out: 7871 + kfree(lun_data); 7872 + return rc; 7873 + } 7419 7874 7420 7875 7421 7876 /** ··· 8302 7239 sas_device->handle, handle); 8303 7240 sas_target_priv_data->handle = handle; 8304 7241 sas_device->handle = handle; 8305 - if (le16_to_cpu(sas_device_pg0.Flags) & 8306 - MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7242 + if ((le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) 7243 + && (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 8307 7244 sas_device->enclosure_level = 8308 7245 sas_device_pg0.EnclosureLevel; 8309 7246 memcpy(sas_device->connector_name, ··· 8345 7282 goto out_unlock; 8346 7283 8347 7284 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 8348 - _scsih_ublock_io_device(ioc, sas_address, port); 7285 + 7286 + if (issue_scsi_cmd_to_bringup_drive) 7287 + _scsih_ublock_io_device_wait(ioc, sas_address, port); 7288 + else 7289 + _scsih_ublock_io_device(ioc, sas_address, port); 8349 7290 8350 7291 if (sas_device) 8351 7292 sas_device_put(sas_device); ··· 8365 7298 * _scsih_add_device - creating sas device object 8366 7299 * @ioc: per adapter object 8367 7300 * @handle: sas device handle 8368 - * @phy_num: phy number end device attached to 7301 + * @retry_count: number of times this event has been retried 8369 7302 * @is_pd: is this hidden raid component 8370 7303 * 8371 7304 * Creating end device object, stored in ioc->sas_device_list. ··· 8373 7306 * Return: 0 for success, non-zero for failure. 
8374 7307 */ 8375 7308 static int 8376 - _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 7309 + _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count, 8377 7310 u8 is_pd) 8378 7311 { 8379 7312 Mpi2ConfigReply_t mpi_reply; 8380 7313 Mpi2SasDevicePage0_t sas_device_pg0; 8381 7314 struct _sas_device *sas_device; 8382 7315 struct _enclosure_node *enclosure_dev = NULL; 7316 + enum device_responsive_state rc; 8383 7317 u32 ioc_status; 8384 7318 u64 sas_address; 8385 7319 u32 device_info; 7320 + u8 connector_name[5]; 8386 7321 u8 port_id; 8387 7322 8388 7323 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, ··· 8438 7369 if (enclosure_dev == NULL) 8439 7370 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 8440 7371 sas_device_pg0.EnclosureHandle); 7372 + } 7373 + 7374 + /* 7375 + * Wait for device that is becoming ready 7376 + * queue request later if device is busy. 7377 + */ 7378 + if ((!ioc->wait_for_discovery_to_complete) && 7379 + (issue_scsi_cmd_to_bringup_drive)) { 7380 + ioc_info(ioc, "detecting: handle(0x%04x),\n" 7381 + "sas_address(0x%016llx), phy(%d)\n", handle, 7382 + (unsigned long long)sas_address, sas_device_pg0.PhyNum); 7383 + rc = _scsih_wait_for_target_to_become_ready(ioc, handle, 7384 + retry_count, is_pd, 30, 0); 7385 + if (rc != DEVICE_READY) { 7386 + if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0) 7387 + dewtprintk(ioc, ioc_info(ioc, "%s:\n" 7388 + "device not ready: slot(%d)\n", __func__, 7389 + le16_to_cpu(sas_device_pg0.Slot))); 7390 + if ((le16_to_cpu(sas_device_pg0.Flags) & 7391 + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) && 7392 + (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 7393 + memcpy(connector_name, 7394 + sas_device_pg0.ConnectorName, 4); 7395 + connector_name[4] = '\0'; 7396 + dewtprintk(ioc, ioc_info(ioc, "%s:\n" 7397 + "device not ready:\n" 7398 + "enclosure level(0x%04x),\n" 7399 + "connector name( %s)\n", __func__, 7400 + sas_device_pg0.EnclosureLevel, connector_name)); 7401 + } 7402 + 7403 + if ((enclosure_dev) && (le16_to_cpu(enclosure_dev->pg0.Flags) & 7404 + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID)) 7405 + ioc_info(ioc, "chassis slot(0x%04x)\n", 7406 + enclosure_dev->pg0.ChassisSlot); 7407 + 7408 + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 7409 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) 7410 + return 1; 7411 + else if (rc == DEVICE_ERROR) 7412 + return 0; 7413 + } 8441 7414 } 8442 7415 8443 7416 sas_device = kzalloc(sizeof(struct _sas_device), ··· 8697 7586 struct fw_event_work *fw_event) 8698 7587 { 8699 7588 int i; 7589 + int rc; 7590 + int requeue_event; 8700 7591 u16 parent_handle, handle; 8701 7592 u16 reason_code; 8702 7593 u8 phy_number, max_phys; 8703 7594 struct _sas_node *sas_expander; 7595 + struct _sas_device *sas_device; 8704 7596 u64 sas_address; 8705 7597 unsigned long flags; 8706 7598 u8 link_rate, prev_link_rate; ··· 8753 7639 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 8754 7640 8755 7641 /* handle siblings events */ 8756 - for (i = 0; i < event_data->NumEntries; i++) { 7642 + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { 8757 7643 if (fw_event->ignore) { 8758 7644 dewtprintk(ioc, 8759 7645 ioc_info(ioc, "ignoring expander event\n")); ··· 8770 7656 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != 8771 7657 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) 8772 7658 continue; 7659 + if (fw_event->delayed_work_active && (reason_code == 7660 + 
MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) { 7661 + dewtprintk(ioc, ioc_info(ioc, "ignoring\n" 7662 + "Target not responding event phy in re-queued event processing\n")); 7663 + continue; 7664 + } 7665 + 7666 + if (fw_event->delayed_work_active && (reason_code == 7667 + MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) { 7668 + dewtprintk(ioc, ioc_info(ioc, "ignoring Target not responding\n" 7669 + "event phy in re-queued event processing\n")); 7670 + continue; 7671 + } 7672 + 8773 7673 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 8774 7674 if (!handle) 8775 7675 continue; ··· 8807 7679 _scsih_check_device(ioc, sas_address, handle, 8808 7680 phy_number, link_rate); 8809 7681 7682 + /* This code after this point handles the test case 7683 + * where a device has been added, however its returning 7684 + * BUSY for sometime. Then before the Device Missing 7685 + * Delay expires and the device becomes READY, the 7686 + * device is removed and added back. 7687 + */ 7688 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 7689 + sas_device = __mpt3sas_get_sdev_by_handle(ioc, 7690 + handle); 7691 + spin_unlock_irqrestore(&ioc->sas_device_lock, 7692 + flags); 7693 + 7694 + if (sas_device) { 7695 + sas_device_put(sas_device); 7696 + break; 7697 + } 7698 + 8810 7699 if (!test_bit(handle, ioc->pend_os_device_add)) 8811 7700 break; 7701 + 7702 + dewtprintk(ioc, ioc_info(ioc, "handle(0x%04x) device not found: convert\n" 7703 + "event to a device add\n", handle)); 7704 + event_data->PHY[i].PhyStatus &= 0xF0; 7705 + event_data->PHY[i].PhyStatus |= 7706 + MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED; 8812 7707 8813 7708 fallthrough; 8814 7709 ··· 8843 7692 mpt3sas_transport_update_links(ioc, sas_address, 8844 7693 handle, phy_number, link_rate, port); 8845 7694 8846 - _scsih_add_device(ioc, handle, phy_number, 0); 7695 + if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) 7696 + break; 7697 + 7698 + rc = _scsih_add_device(ioc, handle, 7699 + fw_event->retries[i], 0); 7700 + if (rc) {/* retry due to busy device */ 7701 + fw_event->retries[i]++; 7702 + requeue_event = 1; 7703 + } else {/* mark entry vacant */ 7704 + event_data->PHY[i].PhyStatus |= 7705 + MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT; 7706 + } 8847 7707 8848 7708 break; 8849 7709 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: ··· 8869 7707 sas_expander) 8870 7708 mpt3sas_expander_remove(ioc, sas_address, port); 8871 7709 8872 - return 0; 7710 + return requeue_event; 8873 7711 } 8874 7712 8875 7713 /** ··· 9240 8078 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9241 8079 pcie_device_put(pcie_device); 9242 8080 9243 - _scsih_ublock_io_device(ioc, wwid, NULL); 8081 + if (issue_scsi_cmd_to_bringup_drive) 8082 + _scsih_ublock_io_device_wait(ioc, wwid, NULL); 8083 + else 8084 + _scsih_ublock_io_device(ioc, wwid, NULL); 9244 8085 9245 8086 return; 9246 8087 } ··· 9252 8087 * _scsih_pcie_add_device - creating pcie device object 9253 8088 * @ioc: per adapter object 9254 8089 * @handle: pcie device handle 8090 + * @retry_count: number of times this event has been retried 9255 8091 * 9256 8092 * Creating end device object, stored in ioc->pcie_device_list. 
9257 8093 * 9258 8094 * Return: 1 means queue the event later, 0 means complete the event 9259 8095 */ 9260 8096 static int 9261 - _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 8097 + _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count) 9262 8098 { 9263 8099 Mpi26PCIeDevicePage0_t pcie_device_pg0; 9264 8100 Mpi26PCIeDevicePage2_t pcie_device_pg2; 9265 8101 Mpi2ConfigReply_t mpi_reply; 9266 8102 struct _pcie_device *pcie_device; 9267 8103 struct _enclosure_node *enclosure_dev; 8104 + enum device_responsive_state rc; 8105 + u8 connector_name[5]; 8106 + u8 tr_timeout = 30; 8107 + u8 tr_method = 0; 9268 8108 u32 ioc_status; 9269 8109 u64 wwid; 9270 8110 ··· 9336 8166 "failure at %s:%d/%s()!\n", __FILE__, 9337 8167 __LINE__, __func__); 9338 8168 return 0; 8169 + } 8170 + 8171 + if (!ioc->tm_custom_handling) { 8172 + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 8173 + if (pcie_device_pg2.ControllerResetTO) 8174 + tr_timeout = pcie_device_pg2.ControllerResetTO; 8175 + 8176 + } 8177 + } 8178 + 8179 + /* 8180 + * Wait for device that is becoming ready 8181 + * queue request later if device is busy. 8182 + */ 8183 + if ((!ioc->wait_for_discovery_to_complete) && 8184 + (issue_scsi_cmd_to_bringup_drive) && 8185 + (pcie_device_pg0.AccessStatus != 8186 + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) { 8187 + ioc_info(ioc, "detecting: handle(0x%04x),\n" 8188 + "wwid(0x%016llx), port(%d)\n", handle, 8189 + (unsigned long long)wwid, pcie_device_pg0.PortNum); 8190 + 8191 + rc = _scsih_wait_for_target_to_become_ready(ioc, handle, 8192 + retry_count, 0, tr_timeout, tr_method); 8193 + if (rc != DEVICE_READY) { 8194 + if (le16_to_cpu(pcie_device_pg0.EnclosureHandle) != 0) 8195 + dewtprintk(ioc, ioc_info(ioc, "%s:\n" 8196 + "device not ready: slot(%d)\n", 8197 + __func__, 8198 + le16_to_cpu(pcie_device_pg0.Slot))); 8199 + 8200 + if (le32_to_cpu(pcie_device_pg0.Flags) & 8201 + MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 8202 + memcpy(connector_name, 8203 + pcie_device_pg0.ConnectorName, 4); 8204 + connector_name[4] = '\0'; 8205 + dewtprintk(ioc, ioc_info(ioc, "%s: device not ready: enclosure\n" 8206 + "level(0x%04x), connector name( %s)\n", __func__, 8207 + pcie_device_pg0.EnclosureLevel, 8208 + connector_name)); 8209 + } 8210 + 8211 + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || 8212 + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) 8213 + return 1; 8214 + else if (rc == DEVICE_ERROR) 8215 + return 0; 9339 8216 } 9340 8217 } 9341 8218 ··· 9547 8330 * Context: user. 
9548 8331 * 9549 8332 */ 9550 - static void 8333 + static int 9551 8334 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, 9552 8335 struct fw_event_work *fw_event) 9553 8336 { ··· 9557 8340 u8 link_rate, prev_link_rate; 9558 8341 unsigned long flags; 9559 8342 int rc; 8343 + int requeue_event; 9560 8344 Mpi26EventDataPCIeTopologyChangeList_t *event_data = 9561 8345 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data; 9562 8346 struct _pcie_device *pcie_device; ··· 9567 8349 9568 8350 if (ioc->shost_recovery || ioc->remove_host || 9569 8351 ioc->pci_error_recovery) 9570 - return; 8352 + return 0; 9571 8353 9572 8354 if (fw_event->ignore) { 9573 8355 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n")); 9574 - return; 8356 + return 0; 9575 8357 } 9576 8358 9577 8359 /* handle siblings events */ 9578 - for (i = 0; i < event_data->NumEntries; i++) { 8360 + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { 9579 8361 if (fw_event->ignore) { 9580 8362 dewtprintk(ioc, 9581 8363 ioc_info(ioc, "ignoring switch event\n")); 9582 - return; 8364 + return 0; 9583 8365 } 9584 8366 if (ioc->remove_host || ioc->pci_error_recovery) 9585 - return; 8367 + return 0; 9586 8368 reason_code = event_data->PortEntry[i].PortStatus; 9587 8369 handle = 9588 8370 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); ··· 9636 8418 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5) 9637 8419 break; 9638 8420 9639 - rc = _scsih_pcie_add_device(ioc, handle); 9640 - if (!rc) { 8421 + rc = _scsih_pcie_add_device(ioc, handle, fw_event->retries[i]); 8422 + if (rc) {/* retry due to busy device */ 8423 + fw_event->retries[i]++; 8424 + requeue_event = 1; 8425 + } else { 9641 8426 /* mark entry vacant */ 9642 8427 /* TODO This needs to be reviewed and fixed, 9643 8428 * we dont have an entry ··· 9655 8434 break; 9656 8435 } 9657 8436 } 8437 + return requeue_event; 9658 8438 } 9659 8439 9660 8440 /** 9661 8441 * _scsih_pcie_device_status_change_event_debug - debug for device event 9662 - * @ioc: ? 8442 + * @ioc: per adapter object 9663 8443 * @event_data: event data payload 9664 8444 * Context: user. 
9665 8445 */ ··· 10032 8810 10033 8811 ioc->broadcast_aen_busy = 0; 10034 8812 if (!ioc->shost_recovery) 10035 - _scsih_ublock_io_all_device(ioc); 8813 + _scsih_ublock_io_all_device(ioc, 1); 10036 8814 mutex_unlock(&ioc->tm_cmds.mutex); 10037 8815 } 10038 8816 ··· 11566 10344 ioc_info(ioc, "removing unresponding devices: complete\n"); 11567 10345 11568 10346 /* unblock devices */ 11569 - _scsih_ublock_io_all_device(ioc); 10347 + _scsih_ublock_io_all_device(ioc, 0); 11570 10348 } 11571 10349 11572 10350 static void ··· 11846 10624 } 11847 10625 retry_count = 0; 11848 10626 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); 11849 - _scsih_pcie_add_device(ioc, handle); 10627 + while (_scsih_pcie_add_device(ioc, handle, retry_count++)) 10628 + ssleep(1); 11850 10629 11851 10630 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", 11852 10631 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); ··· 11991 10768 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); 11992 10769 break; 11993 10770 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 11994 - _scsih_sas_topology_change_event(ioc, fw_event); 10771 + if (_scsih_sas_topology_change_event(ioc, fw_event)) { 10772 + _scsih_fw_event_requeue(ioc, fw_event, 1000); 10773 + ioc->current_event = NULL; 10774 + return; 10775 + } 11995 10776 break; 11996 10777 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 11997 10778 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) ··· 12035 10808 _scsih_pcie_enumeration_event(ioc, fw_event); 12036 10809 break; 12037 10810 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 12038 - _scsih_pcie_topology_change_event(ioc, fw_event); 10811 + if (_scsih_pcie_topology_change_event(ioc, fw_event)) { 10812 + _scsih_fw_event_requeue(ioc, fw_event, 1000); 10813 + ioc->current_event = NULL; 10814 + return; 10815 + } 12039 10816 break; 12040 10817 } 12041 10818 out: ··· 12060 10829 { 12061 10830 struct fw_event_work *fw_event = container_of(work, 12062 10831 struct fw_event_work, work); 10832 + 10833 + _mpt3sas_fw_work(fw_event->ioc, fw_event); 10834 + } 10835 + 10836 + static void 10837 + _firmware_event_work_delayed(struct work_struct *work) 10838 + { 10839 + struct fw_event_work *fw_event = container_of(work, 10840 + struct fw_event_work, delayed_work.work); 12063 10841 12064 10842 _mpt3sas_fw_work(fw_event->ioc, fw_event); 12065 10843 } ··· 12251 11011 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12252 11012 __FILE__, __LINE__, __func__); 12253 11013 return 1; 11014 + } 11015 + 11016 + if (event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST) { 11017 + Mpi2EventDataSasTopologyChangeList_t *topo_event_data = 11018 + (Mpi2EventDataSasTopologyChangeList_t *) 11019 + mpi_reply->EventData; 11020 + fw_event->retries = kzalloc(topo_event_data->NumEntries, 11021 + GFP_ATOMIC); 11022 + if (!fw_event->retries) { 11023 + 11024 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 11025 + kfree(fw_event->event_data); 11026 + fw_event_work_put(fw_event); 11027 + return 1; 11028 + } 11029 + } 11030 + 11031 + if (event == MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST) { 11032 + Mpi26EventDataPCIeTopologyChangeList_t *topo_event_data = 11033 + (Mpi26EventDataPCIeTopologyChangeList_t *) mpi_reply->EventData; 11034 + fw_event->retries = kzalloc(topo_event_data->NumEntries, 11035 + GFP_ATOMIC); 11036 + if (!fw_event->retries) { 11037 + 11038 + ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); 11039 + fw_event_work_put(fw_event); 11040 + return 1; 11041 + } 12254 11042 } 12255 11043 12256 11044 
memcpy(fw_event->event_data, mpi_reply->EventData, sz);
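The mpt3sas hunks above add an internal bring-up path that probes a freshly reported device with REPORT LUNS and TEST UNIT READY, issues START STOP UNIT when the unit says it is not ready, and requeues the event while the disposition stays busy (bounded by command_retry_count). The CDBs the driver fills in by hand follow the standard SPC/SBC layouts; the stand-alone sketch below rebuilds the same three CDBs outside the kernel, with hypothetical helper names, purely to make the byte layout explicit.

/*
 * Stand-alone sketch (not kernel code): rebuilds the CDBs the new
 * mpt3sas bring-up path fills in by hand.  Opcode values are the
 * standard SPC/SBC ones (TEST UNIT READY 0x00, START STOP UNIT 0x1b,
 * REPORT LUNS 0xa0); the helper names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void dump_cdb(const char *name, const uint8_t *cdb, int len)
{
	int i;

	printf("%-16s:", name);
	for (i = 0; i < len; i++)
		printf(" %02x", cdb[i]);
	printf("\n");
}

static void build_test_unit_ready(uint8_t cdb[6])
{
	memset(cdb, 0, 6);
	cdb[0] = 0x00;			/* TEST UNIT READY, 6-byte CDB */
}

static void build_start_unit(uint8_t cdb[6])
{
	memset(cdb, 0, 6);
	cdb[0] = 0x1b;			/* START STOP UNIT */
	cdb[1] = 0x01;			/* IMMED: don't wait for spin-up */
	cdb[4] = 0x01;			/* START=1, LOEJ=0: spin the unit up */
}

static void build_report_luns(uint8_t cdb[12], uint32_t alloc_len)
{
	memset(cdb, 0, 12);
	cdb[0] = 0xa0;			/* REPORT LUNS, 12-byte CDB */
	cdb[6] = (alloc_len >> 24) & 0xff;	/* allocation length, */
	cdb[7] = (alloc_len >> 16) & 0xff;	/* big endian, bytes 6-9 */
	cdb[8] = (alloc_len >> 8) & 0xff;
	cdb[9] = alloc_len & 0xff;
}

int main(void)
{
	uint8_t tur[6], start[6], rl[12];

	build_test_unit_ready(tur);
	build_start_unit(start);
	build_report_luns(rl, 255 * 8);	/* mirrors MPT3_MAX_LUNS * sizeof(struct scsi_lun) */

	dump_cdb("TEST UNIT READY", tur, 6);
	dump_cdb("START UNIT", start, 6);
	dump_cdb("REPORT LUNS", rl, 12);
	return 0;
}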
+2 -2
drivers/scsi/mvumi.c
··· 2077 2077 * @shost: Scsi host to queue command on 2078 2078 * @scmd: SCSI command to be queued 2079 2079 */ 2080 - static int mvumi_queue_command(struct Scsi_Host *shost, 2081 - struct scsi_cmnd *scmd) 2080 + static enum scsi_qc_status mvumi_queue_command(struct Scsi_Host *shost, 2081 + struct scsi_cmnd *scmd) 2082 2082 { 2083 2083 struct mvumi_cmd *cmd; 2084 2084 struct mvumi_hba *mhba;
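As in the other low-level drivers touched by this series, only the queuecommand return type changes here; the accepted/busy decision logic is untouched. The sketch below models that contract with local stand-in types so it compiles on its own (the real enum scsi_qc_status and the SCSI_MLQUEUE_* values mentioned in the mpt3sas kernel-doc above live in the midlayer headers and are not reproduced): 0 means the command was handed to the hardware, the busy values ask the midlayer to retry later.

/*
 * Illustrative stand-alone model of the conversion pattern these hunks
 * apply: same decision logic, only the return type is now an enum.
 * All types below are local stand-ins, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

enum scsi_qc_status_model {		/* stand-in for enum scsi_qc_status */
	QC_ACCEPTED = 0,		/* command queued to the HBA */
	QC_DEVICE_BUSY,			/* stand-in for SCSI_MLQUEUE_DEVICE_BUSY */
	QC_HOST_BUSY,			/* stand-in for SCSI_MLQUEUE_HOST_BUSY */
};

struct fake_host { int free_slots; };
struct fake_cmd  { bool dev_queue_full; };

static enum scsi_qc_status_model model_queuecommand(struct fake_host *h,
						    struct fake_cmd *c)
{
	if (!h->free_slots)
		return QC_HOST_BUSY;	/* the whole host queue is full */
	if (c->dev_queue_full)
		return QC_DEVICE_BUSY;	/* only this device's queue is full */
	h->free_slots--;
	return QC_ACCEPTED;		/* 0: command handed to the hardware */
}

int main(void)
{
	struct fake_host h = { .free_slots = 1 };
	struct fake_cmd c = { .dev_queue_full = false };

	printf("first submit  -> %d\n", model_queuecommand(&h, &c));
	printf("second submit -> %d\n", model_queuecommand(&h, &c));
	return 0;
}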
+6 -6
drivers/scsi/myrb.c
··· 1260 1260 return SUCCESS; 1261 1261 } 1262 1262 1263 - static int myrb_pthru_queuecommand(struct Scsi_Host *shost, 1264 - struct scsi_cmnd *scmd) 1263 + static enum scsi_qc_status myrb_pthru_queuecommand(struct Scsi_Host *shost, 1264 + struct scsi_cmnd *scmd) 1265 1265 { 1266 1266 struct request *rq = scsi_cmd_to_rq(scmd); 1267 1267 struct myrb_hba *cb = shost_priv(shost); ··· 1416 1416 scsi_sg_copy_from_buffer(scmd, data, 8); 1417 1417 } 1418 1418 1419 - static int myrb_ldev_queuecommand(struct Scsi_Host *shost, 1420 - struct scsi_cmnd *scmd) 1419 + static enum scsi_qc_status myrb_ldev_queuecommand(struct Scsi_Host *shost, 1420 + struct scsi_cmnd *scmd) 1421 1421 { 1422 1422 struct myrb_hba *cb = shost_priv(shost); 1423 1423 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); ··· 1603 1603 return 0; 1604 1604 } 1605 1605 1606 - static int myrb_queuecommand(struct Scsi_Host *shost, 1607 - struct scsi_cmnd *scmd) 1606 + static enum scsi_qc_status myrb_queuecommand(struct Scsi_Host *shost, 1607 + struct scsi_cmnd *scmd) 1608 1608 { 1609 1609 struct scsi_device *sdev = scmd->device; 1610 1610
+2 -2
drivers/scsi/myrs.c
··· 1581 1581 scsi_sg_copy_from_buffer(scmd, modes, mode_len); 1582 1582 } 1583 1583 1584 - static int myrs_queuecommand(struct Scsi_Host *shost, 1585 - struct scsi_cmnd *scmd) 1584 + static enum scsi_qc_status myrs_queuecommand(struct Scsi_Host *shost, 1585 + struct scsi_cmnd *scmd) 1586 1586 { 1587 1587 struct request *rq = scsi_cmd_to_rq(scmd); 1588 1588 struct myrs_hba *cs = shost_priv(shost);
+1 -1
drivers/scsi/ncr53c8xx.c
··· 7852 7852 return 0; 7853 7853 } 7854 7854 7855 - static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd) 7855 + static enum scsi_qc_status ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd) 7856 7856 { 7857 7857 struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); 7858 7858 void (*done)(struct scsi_cmnd *) = scsi_done;
+3 -2
drivers/scsi/nsp32.c
··· 185 185 static int nsp32_show_info (struct seq_file *, struct Scsi_Host *); 186 186 187 187 static int nsp32_detect (struct pci_dev *pdev); 188 - static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); 188 + static enum scsi_qc_status nsp32_queuecommand(struct Scsi_Host *, 189 + struct scsi_cmnd *); 189 190 static const char *nsp32_info (struct Scsi_Host *); 190 191 static int nsp32_release (struct Scsi_Host *); 191 192 ··· 906 905 return TRUE; 907 906 } 908 907 909 - static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt) 908 + static enum scsi_qc_status nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt) 910 909 { 911 910 void (*done)(struct scsi_cmnd *) = scsi_done; 912 911 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+1 -1
drivers/scsi/pcmcia/nsp_cs.c
··· 186 186 scsi_done(SCpnt); 187 187 } 188 188 189 - static int nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt) 189 + static enum scsi_qc_status nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt) 190 190 { 191 191 struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); 192 192 #ifdef NSP_DEBUG
+2 -1
drivers/scsi/pcmcia/nsp_cs.h
··· 294 294 static const char *nsp_info (struct Scsi_Host *shpnt); 295 295 static int nsp_show_info (struct seq_file *m, 296 296 struct Scsi_Host *host); 297 - static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); 297 + static enum scsi_qc_status nsp_queuecommand(struct Scsi_Host *h, 298 + struct scsi_cmnd *SCpnt); 298 299 299 300 /* Error handler */ 300 301 /*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/
+1 -1
drivers/scsi/pcmcia/sym53c500_cs.c
··· 544 544 return (info_msg); 545 545 } 546 546 547 - static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) 547 + static enum scsi_qc_status SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) 548 548 { 549 549 struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt); 550 550 int i;
+2 -2
drivers/scsi/pmcraid.c
··· 3242 3242 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 3243 3243 * SCSI_MLQUEUE_HOST_BUSY if host is busy 3244 3244 */ 3245 - static int pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd) 3245 + static enum scsi_qc_status pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd) 3246 3246 { 3247 3247 struct pmcraid_instance *pinstance; 3248 3248 struct pmcraid_resource_entry *res; 3249 3249 struct pmcraid_ioarcb *ioarcb; 3250 + enum scsi_qc_status rc = 0; 3250 3251 struct pmcraid_cmd *cmd; 3251 3252 u32 fw_version; 3252 - int rc = 0; 3253 3253 3254 3254 pinstance = 3255 3255 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+1 -1
drivers/scsi/ppa.c
··· 816 816 return 0; 817 817 } 818 818 819 - static int ppa_queuecommand_lck(struct scsi_cmnd *cmd) 819 + static enum scsi_qc_status ppa_queuecommand_lck(struct scsi_cmnd *cmd) 820 820 { 821 821 ppa_struct *dev = ppa_dev(cmd->device->host); 822 822
+1 -1
drivers/scsi/ps3rom.c
··· 201 201 return 0; 202 202 } 203 203 204 - static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd) 204 + static enum scsi_qc_status ps3rom_queuecommand_lck(struct scsi_cmnd *cmd) 205 205 { 206 206 struct ps3rom_private *priv = shost_priv(cmd->device->host); 207 207 struct ps3_storage_device *dev = priv->dev;
+2 -2
drivers/scsi/qedf/qedf.h
··· 487 487 488 488 extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf); 489 489 extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr); 490 - extern int qedf_queuecommand(struct Scsi_Host *host, 491 - struct scsi_cmnd *sc_cmd); 490 + extern enum scsi_qc_status qedf_queuecommand(struct Scsi_Host *host, 491 + struct scsi_cmnd *sc_cmd); 492 492 extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); 493 493 extern u8 *qedf_get_src_mac(struct fc_lport *lport); 494 494 extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
+2 -2
drivers/scsi/qedf/qedf_io.c
··· 930 930 return false; 931 931 } 932 932 933 - int 934 - qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) 933 + enum scsi_qc_status qedf_queuecommand(struct Scsi_Host *host, 934 + struct scsi_cmnd *sc_cmd) 935 935 { 936 936 struct fc_lport *lport = shost_priv(host); 937 937 struct qedf_ctx *qedf = lport_priv(lport);
+10 -8
drivers/scsi/qla1280.c
··· 406 406 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 407 407 static int qla1280_abort_isp(struct scsi_qla_host *); 408 408 #ifdef QLA_64BIT_PTR 409 - static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *); 409 + static enum scsi_qc_status qla1280_64bit_start_scsi(struct scsi_qla_host *, 410 + struct srb *); 410 411 #else 411 - static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *); 412 + static enum scsi_qc_status qla1280_32bit_start_scsi(struct scsi_qla_host *, 413 + struct srb *); 412 414 #endif 413 415 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t); 414 416 static void qla1280_poll(struct scsi_qla_host *); ··· 684 682 * handling). Unfortunately, it sometimes calls the scheduler in interrupt 685 683 * context which is a big NO! NO!. 686 684 **************************************************************************/ 687 - static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd) 685 + static enum scsi_qc_status qla1280_queuecommand_lck(struct scsi_cmnd *cmd) 688 686 { 689 687 struct Scsi_Host *host = cmd->device->host; 690 688 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 691 689 struct srb *sp = scsi_cmd_priv(cmd); 692 - int status; 690 + enum scsi_qc_status status; 693 691 694 692 sp->cmd = cmd; 695 693 sp->flags = 0; ··· 2732 2730 * 0 = success, was able to issue command. 2733 2731 */ 2734 2732 #ifdef QLA_64BIT_PTR 2735 - static int 2733 + static enum scsi_qc_status 2736 2734 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 2737 2735 { 2738 2736 struct device_reg __iomem *reg = ha->iobase; ··· 2740 2738 cmd_a64_entry_t *pkt; 2741 2739 __le32 *dword_ptr; 2742 2740 dma_addr_t dma_handle; 2743 - int status = 0; 2741 + enum scsi_qc_status status = 0; 2744 2742 int cnt; 2745 2743 int req_cnt; 2746 2744 int seg_cnt; ··· 2986 2984 * Returns: 2987 2985 * 0 = success, was able to issue command. 2988 2986 */ 2989 - static int 2987 + static enum scsi_qc_status 2990 2988 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 2991 2989 { 2992 2990 struct device_reg __iomem *reg = ha->iobase; 2993 2991 struct scsi_cmnd *cmd = sp->cmd; 2994 2992 struct cmd_entry *pkt; 2995 2993 __le32 *dword_ptr; 2996 - int status = 0; 2994 + enum scsi_qc_status status = 0; 2997 2995 int cnt; 2998 2996 int req_cnt; 2999 2997 int seg_cnt;
+136 -11
drivers/scsi/qla2xxx/qla_bsg.c
··· 11 11 #include <linux/delay.h> 12 12 #include <linux/bsg-lib.h> 13 13 14 + static int qla28xx_validate_flash_image(struct bsg_job *bsg_job); 15 + 14 16 static void qla2xxx_free_fcport_work(struct work_struct *work) 15 17 { 16 18 struct fc_port *fcport = container_of(work, typeof(*fcport), ··· 1548 1546 ha->optrom_buffer = NULL; 1549 1547 ha->optrom_state = QLA_SWAITING; 1550 1548 mutex_unlock(&ha->optrom_mutex); 1551 - bsg_job_done(bsg_job, bsg_reply->result, 1552 - bsg_reply->reply_payload_rcv_len); 1549 + if (!rval) 1550 + bsg_job_done(bsg_job, bsg_reply->result, 1551 + bsg_reply->reply_payload_rcv_len); 1553 1552 return rval; 1554 1553 } 1555 1554 ··· 2553 2550 } 2554 2551 2555 2552 static int 2553 + qla2x00_get_drv_attr(struct bsg_job *bsg_job) 2554 + { 2555 + struct qla_drv_attr drv_attr; 2556 + struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2557 + 2558 + memset(&drv_attr, 0, sizeof(struct qla_drv_attr)); 2559 + drv_attr.ext_attributes |= QLA_IMG_SET_VALID_SUPPORT; 2560 + 2561 + 2562 + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2563 + bsg_job->reply_payload.sg_cnt, &drv_attr, 2564 + sizeof(struct qla_drv_attr)); 2565 + 2566 + bsg_reply->reply_payload_rcv_len = sizeof(struct qla_drv_attr); 2567 + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2568 + 2569 + bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2570 + bsg_reply->result = DID_OK << 16; 2571 + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); 2572 + 2573 + return 0; 2574 + } 2575 + 2576 + static int 2556 2577 qla2x00_manage_host_stats(struct bsg_job *bsg_job) 2557 2578 { 2558 2579 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); ··· 2639 2612 sizeof(struct ql_vnd_mng_host_stats_resp)); 2640 2613 2641 2614 bsg_reply->result = DID_OK; 2642 - bsg_job_done(bsg_job, bsg_reply->result, 2643 - bsg_reply->reply_payload_rcv_len); 2615 + if (!ret) 2616 + bsg_job_done(bsg_job, bsg_reply->result, 2617 + bsg_reply->reply_payload_rcv_len); 2644 2618 2645 2619 return ret; 2646 2620 } ··· 2730 2702 bsg_job->reply_payload.sg_cnt, 2731 2703 data, response_len); 2732 2704 bsg_reply->result = DID_OK; 2733 - bsg_job_done(bsg_job, bsg_reply->result, 2734 - bsg_reply->reply_payload_rcv_len); 2705 + if (!ret) 2706 + bsg_job_done(bsg_job, bsg_reply->result, 2707 + bsg_reply->reply_payload_rcv_len); 2735 2708 2736 2709 kfree(data); 2737 2710 host_stat_out: ··· 2831 2802 bsg_job->reply_payload.sg_cnt, data, 2832 2803 response_len); 2833 2804 bsg_reply->result = DID_OK; 2834 - bsg_job_done(bsg_job, bsg_reply->result, 2835 - bsg_reply->reply_payload_rcv_len); 2805 + if (!ret) 2806 + bsg_job_done(bsg_job, bsg_reply->result, 2807 + bsg_reply->reply_payload_rcv_len); 2836 2808 2837 2809 tgt_stat_out: 2838 2810 kfree(data); ··· 2894 2864 bsg_job->reply_payload.sg_cnt, &rsp_data, 2895 2865 sizeof(struct ql_vnd_mng_host_port_resp)); 2896 2866 bsg_reply->result = DID_OK; 2897 - bsg_job_done(bsg_job, bsg_reply->result, 2898 - bsg_reply->reply_payload_rcv_len); 2867 + if (!ret) 2868 + bsg_job_done(bsg_job, bsg_reply->result, 2869 + bsg_reply->reply_payload_rcv_len); 2899 2870 2900 2871 return ret; 2901 2872 } ··· 2963 2932 2964 2933 case QL_VND_GET_FLASH_UPDATE_CAPS: 2965 2934 return qla27xx_get_flash_upd_cap(bsg_job); 2935 + 2936 + case QL_VND_GET_DRV_ATTR: 2937 + return qla2x00_get_drv_attr(bsg_job); 2938 + 2939 + case QL_VND_IMG_SET_VALID: 2940 + return qla28xx_validate_flash_image(bsg_job); 2966 2941 2967 2942 case QL_VND_SET_FLASH_UPDATE_CAPS: 2968 2943 return 
qla27xx_set_flash_upd_cap(bsg_job); ··· 3277 3240 3278 3241 bsg_job->reply_len = sizeof(*bsg_job->reply); 3279 3242 bsg_reply->result = DID_OK << 16; 3280 - bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); 3243 + if (!ret) 3244 + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); 3281 3245 3282 3246 kfree(req_data); 3283 3247 3284 3248 return ret; 3249 + } 3250 + 3251 + static int 3252 + qla28xx_do_validate_flash_image(struct bsg_job *bsg_job, uint16_t *state) 3253 + { 3254 + struct fc_bsg_request *bsg_request = bsg_job->request; 3255 + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 3256 + uint16_t mstate[16]; 3257 + uint16_t mpi_state = 0; 3258 + uint16_t img_idx; 3259 + int rval = QLA_SUCCESS; 3260 + 3261 + memset(mstate, 0, sizeof(mstate)); 3262 + 3263 + rval = qla2x00_get_firmware_state(vha, mstate); 3264 + if (rval != QLA_SUCCESS) { 3265 + ql_log(ql_log_warn, vha, 0xffff, 3266 + "MBC to get MPI state failed (%d)\n", rval); 3267 + rval = -EINVAL; 3268 + goto exit_flash_img; 3269 + } 3270 + 3271 + mpi_state = mstate[11]; 3272 + 3273 + if (!(mpi_state & BIT_9 && mpi_state & BIT_8 && mpi_state & BIT_15)) { 3274 + ql_log(ql_log_warn, vha, 0xffff, 3275 + "MPI firmware state failed (0x%02x)\n", mpi_state); 3276 + rval = -EINVAL; 3277 + goto exit_flash_img; 3278 + } 3279 + 3280 + rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK); 3281 + if (rval != QLA_SUCCESS) { 3282 + ql_log(ql_log_warn, vha, 0xffff, 3283 + "Unable to lock flash semaphore."); 3284 + goto exit_flash_img; 3285 + } 3286 + 3287 + img_idx = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 3288 + 3289 + rval = qla_mpipt_validate_fw(vha, img_idx, state); 3290 + if (rval != QLA_SUCCESS) { 3291 + ql_log(ql_log_warn, vha, 0xffff, 3292 + "Failed to validate Firmware image index [0x%x].\n", 3293 + img_idx); 3294 + } 3295 + 3296 + qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); 3297 + 3298 + exit_flash_img: 3299 + return rval; 3300 + } 3301 + 3302 + static int qla28xx_validate_flash_image(struct bsg_job *bsg_job) 3303 + { 3304 + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 3305 + struct fc_bsg_reply *bsg_reply = bsg_job->reply; 3306 + struct qla_hw_data *ha = vha->hw; 3307 + uint16_t state = 0; 3308 + int rval = 0; 3309 + 3310 + if (!IS_QLA28XX(ha) || vha->vp_idx != 0) 3311 + return -EPERM; 3312 + 3313 + mutex_lock(&ha->optrom_mutex); 3314 + rval = qla28xx_do_validate_flash_image(bsg_job, &state); 3315 + if (rval) 3316 + rval = -EINVAL; 3317 + mutex_unlock(&ha->optrom_mutex); 3318 + 3319 + bsg_job->reply_len = sizeof(struct fc_bsg_reply); 3320 + 3321 + if (rval) 3322 + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 3323 + (state == 39) ? EXT_STATUS_IMG_SET_VALID_ERR : 3324 + EXT_STATUS_IMG_SET_CONFIG_ERR; 3325 + else 3326 + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 3327 + 3328 + bsg_reply->result = DID_OK << 16; 3329 + bsg_reply->reply_payload_rcv_len = 0; 3330 + bsg_job->reply_len = sizeof(struct fc_bsg_reply); 3331 + if (!rval) 3332 + bsg_job_done(bsg_job, bsg_reply->result, 3333 + bsg_reply->reply_payload_rcv_len); 3334 + 3335 + return QLA_SUCCESS; 3285 3336 }
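The new validate-image path gates on three bits of the reported MPI firmware state (BIT_8, BIT_9 and BIT_15 of word 11) before taking the flash semaphore. The expression relies on bitwise & binding tighter than &&; the stand-alone check below (not driver code) confirms it is equivalent to testing a single combined mask, which some readers may find easier to follow.

/*
 * Quick stand-alone check of the readiness test used above.  Bitwise &
 * binds tighter than &&, so "state & BIT_9 && state & BIT_8 && state & BIT_15"
 * parses as "(state & BIT_9) && (state & BIT_8) && (state & BIT_15)",
 * i.e. all three bits must be set.  BIT(n) below mirrors the kernel's
 * (1 << n) convention.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define READY_MASK	(BIT(8) | BIT(9) | BIT(15))

static int ready_as_written(uint16_t state)
{
	return state & BIT(9) && state & BIT(8) && state & BIT(15);
}

static int ready_with_mask(uint16_t state)
{
	return (state & READY_MASK) == READY_MASK;
}

int main(void)
{
	uint32_t s;

	for (s = 0; s <= 0xffff; s++)
		assert(!ready_as_written((uint16_t)s) == !ready_with_mask((uint16_t)s));
	printf("both forms agree for every 16-bit state value\n");
	return 0;
}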
+12
drivers/scsi/qla2xxx/qla_bsg.h
··· 32 32 #define QL_VND_GET_PRIV_STATS_EX 0x1A 33 33 #define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E 34 34 #define QL_VND_EDIF_MGMT 0X1F 35 + #define QL_VND_GET_DRV_ATTR 0x22 35 36 #define QL_VND_MANAGE_HOST_STATS 0x23 36 37 #define QL_VND_GET_HOST_STATS 0x24 37 38 #define QL_VND_GET_TGT_STATS 0x25 38 39 #define QL_VND_MANAGE_HOST_PORT 0x26 39 40 #define QL_VND_MBX_PASSTHRU 0x2B 40 41 #define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C 42 + #define QL_VND_IMG_SET_VALID 0x30 41 43 42 44 /* BSG Vendor specific subcode returns */ 43 45 #define EXT_STATUS_OK 0 ··· 52 50 #define EXT_STATUS_BUFFER_TOO_SMALL 16 53 51 #define EXT_STATUS_NO_MEMORY 17 54 52 #define EXT_STATUS_DEVICE_OFFLINE 22 53 + #define EXT_STATUS_IMG_SET_VALID_ERR 47 54 + #define EXT_STATUS_IMG_SET_CONFIG_ERR 48 55 55 56 56 /* 57 57 * To support bidirectional iocb ··· 320 316 uint8_t npiv_config_2_3; 321 317 uint8_t nvme_params; 322 318 uint8_t reserved[31]; 319 + } __packed; 320 + 321 + struct qla_drv_attr { 322 + uint32_t attributes; 323 + u32 ext_attributes; 324 + #define QLA_IMG_SET_VALID_SUPPORT BIT_4 325 + u32 status_flags; 326 + uint8_t reserved[20]; 323 327 } __packed; 324 328 325 329 #include "qla_edif_bsg.h"
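QL_VND_GET_DRV_ATTR returns the new struct qla_drv_attr, and QLA_IMG_SET_VALID_SUPPORT (BIT_4 of ext_attributes) is how the driver advertises that QL_VND_IMG_SET_VALID may be issued. The sketch below is a user-space-style view of that reply payload for illustration only; the BSG plumbing needed to actually fetch it is omitted, and fields other than ext_attributes are left unspecified.

/*
 * Stand-alone sketch of how a management tool might interpret the
 * QL_VND_GET_DRV_ATTR reply.  The struct mirrors the layout excerpted
 * above (two 32-bit attribute words, a status word, 20 reserved bytes);
 * only the QLA_IMG_SET_VALID_SUPPORT bit is given a meaning here.
 */
#include <stdint.h>
#include <stdio.h>

#define QLA_IMG_SET_VALID_SUPPORT	(1u << 4)	/* BIT_4 */

struct qla_drv_attr_view {
	uint32_t attributes;
	uint32_t ext_attributes;
	uint32_t status_flags;
	uint8_t  reserved[20];
} __attribute__((packed));

int main(void)
{
	struct qla_drv_attr_view attr = { 0 };

	/* Pretend the driver filled this in via the vendor BSG reply payload. */
	attr.ext_attributes = QLA_IMG_SET_VALID_SUPPORT;

	if (attr.ext_attributes & QLA_IMG_SET_VALID_SUPPORT)
		printf("firmware image set-valid command is supported\n");
	else
		printf("image set-valid not advertised by this driver\n");
	return 0;
}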
+28 -2
drivers/scsi/qla2xxx/qla_def.h
··· 1270 1270 */ 1271 1271 #define MBC_LOAD_RAM 1 /* Load RAM. */ 1272 1272 #define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware. */ 1273 + #define MBC_LOAD_FLASH_FIRMWARE 3 /* Load flash firmware. */ 1273 1274 #define MBC_READ_RAM_WORD 5 /* Read RAM word. */ 1274 1275 #define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */ 1275 1276 #define MBC_VERIFY_CHECKSUM 7 /* Verify checksum. */ ··· 1385 1384 #define MBC_SET_GET_ETH_SERDES_REG 0x150 1386 1385 #define HCS_WRITE_SERDES 0x3 1387 1386 #define HCS_READ_SERDES 0x4 1387 + 1388 + /* 1389 + * ISP2[7|8]xx mailbox commands. 1390 + */ 1391 + #define MBC_MPI_PASSTHROUGH 0x200 1392 + 1393 + /* MBC_MPI_PASSTHROUGH */ 1394 + #define MPIPT_REQ_V1 1 1395 + enum { 1396 + MPIPT_SUBCMD_GET_SUP_CMD = 0x10, 1397 + MPIPT_SUBCMD_GET_SUP_FEATURE, 1398 + MPIPT_SUBCMD_GET_STATUS, 1399 + MPIPT_SUBCMD_VALIDATE_FW, 1400 + }; 1401 + 1402 + enum { 1403 + MPIPT_MPI_STATUS = 1, 1404 + MPIPT_FCORE_STATUS, 1405 + MPIPT_LOCKDOWN_STATUS, 1406 + }; 1388 1407 1389 1408 /* Firmware return data sizes */ 1390 1409 #define FCAL_MAP_SIZE 128 ··· 4170 4149 uint32_t eeh_flush:2; 4171 4150 #define EEH_FLUSH_RDY 1 4172 4151 #define EEH_FLUSH_DONE 2 4152 + uint32_t secure_mcu:1; 4173 4153 } flags; 4174 4154 4175 4155 uint16_t max_exchg; ··· 4435 4413 #define IS_ZIO_THRESHOLD_CAPABLE(ha) \ 4436 4414 ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\ 4437 4415 (ha->zio_mode == QLA_ZIO_MODE_6)) 4416 + 4417 + #define IS_QLA28XX_SECURED(ha) (IS_QLA28XX(ha) && ha->flags.secure_mcu) 4438 4418 4439 4419 /* HBA serial number */ 4440 4420 uint8_t serial0; ··· 5392 5368 struct list_head next; 5393 5369 }; 5394 5370 5395 - /* Refer to SNIA SFF 8247 */ 5371 + /* Refer to SNIA SFF 8472 */ 5396 5372 struct sff_8247_a0 { 5397 5373 u8 txid; /* transceiver id */ 5398 5374 u8 ext_txid; ··· 5436 5412 #define FC_SP_32 BIT_3 5437 5413 #define FC_SP_2 BIT_2 5438 5414 #define FC_SP_1 BIT_0 5415 + #define FC_SPEED_2 BIT_1 5439 5416 u8 fc_sp_cc10; 5440 5417 u8 encode; 5441 5418 u8 bitrate; ··· 5455 5430 u8 vendor_pn[SFF_PART_NAME_LEN]; /* part number */ 5456 5431 u8 vendor_rev[4]; 5457 5432 u8 wavelength[2]; 5458 - u8 resv; 5433 + #define FC_SP_64 BIT_0 5434 + u8 fiber_channel_speed2; 5459 5435 u8 cc_base; 5460 5436 u8 options[2]; /* offset 64 */ 5461 5437 u8 br_max;
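Besides correcting the SFF reference to 8472, this hunk turns the former reserved byte into fiber_channel_speed2 so the 64GFC capability bit (FC_SP_64) can be reported alongside the existing byte-10 speed bits. The stand-alone decoder below uses only the bits excerpted here (the driver's 4/8/16G defines are elsewhere and not reproduced) and mirrors the qla_init.c print logic further down, which also requires the 2G bit before reporting 64G.

/*
 * Stand-alone decode of the SFP speed-capability bits named above.
 * Bit positions follow the header excerpt (FC_SP_1/FC_SP_2/FC_SP_32 in
 * the byte-10 field, FC_SP_64 in the extra speed byte); this is a
 * sketch, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define FC_SP_1		(1u << 0)
#define FC_SPEED_2	(1u << 1)
#define FC_SP_2		(1u << 2)
#define FC_SP_32	(1u << 3)
#define FC_SP_64	(1u << 0)	/* lives in fiber_channel_speed2 */

static void print_fc_speeds(uint8_t fc_sp_cc10, uint8_t fiber_channel_speed2)
{
	printf("supported FC speeds:%s%s%s%s\n",
	       (fc_sp_cc10 & FC_SP_2) && (fiber_channel_speed2 & FC_SP_64) ?
			" 64G" : "",
	       fc_sp_cc10 & FC_SP_32 ? " 32G" : "",
	       fc_sp_cc10 & FC_SP_2 ? " 2G" : "",
	       fc_sp_cc10 & FC_SP_1 ? " 1G" : "");
}

int main(void)
{
	/* Example: a transceiver advertising 2G/32G plus the 64G extension bit. */
	print_fc_speeds(FC_SP_2 | FC_SP_32, FC_SP_64);
	return 0;
}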
+5
drivers/scsi/qla2xxx/qla_gbl.h
··· 345 345 qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); 346 346 347 347 extern int 348 + qla28xx_load_flash_firmware(scsi_qla_host_t *vha); 349 + 350 + extern int 348 351 qla2x00_get_fw_version(scsi_qla_host_t *); 349 352 350 353 extern int ··· 841 838 /* Mailbox related functions */ 842 839 extern int qla82xx_abort_isp(scsi_qla_host_t *); 843 840 extern int qla82xx_restart_isp(scsi_qla_host_t *); 841 + 842 + extern int qla_mpipt_validate_fw(scsi_qla_host_t *vha, u16 img_idx, u16 *state); 844 843 845 844 /* IOCB related functions */ 846 845 extern int qla82xx_start_scsi(srb_t *);
+20 -21
drivers/scsi/qla2xxx/qla_gs.c
··· 3266 3266 atomic_read(&fcport->state) == FCS_ONLINE) || 3267 3267 do_delete) { 3268 3268 if (fcport->loop_id != FC_NO_LOOP_ID) { 3269 - if (fcport->flags & FCF_FCP2_DEVICE) 3270 - continue; 3271 - 3272 3269 ql_log(ql_log_warn, vha, 0x20f0, 3273 3270 "%s %d %8phC post del sess\n", 3274 3271 __func__, __LINE__, ··· 3532 3535 if (vha->scan.scan_flags & SF_SCANNING) { 3533 3536 spin_unlock_irqrestore(&vha->work_lock, flags); 3534 3537 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012, 3535 - "%s: scan active\n", __func__); 3536 - return rval; 3538 + "%s: scan active for sp:%p\n", __func__, sp); 3539 + goto done_free_sp; 3537 3540 } 3538 3541 vha->scan.scan_flags |= SF_SCANNING; 3539 3542 if (!sp) ··· 3698 3701 return rval; 3699 3702 3700 3703 done_free_sp: 3701 - if (sp->u.iocb_cmd.u.ctarg.req) { 3702 - dma_free_coherent(&vha->hw->pdev->dev, 3703 - sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3704 - sp->u.iocb_cmd.u.ctarg.req, 3705 - sp->u.iocb_cmd.u.ctarg.req_dma); 3706 - sp->u.iocb_cmd.u.ctarg.req = NULL; 3707 - } 3708 - if (sp->u.iocb_cmd.u.ctarg.rsp) { 3709 - dma_free_coherent(&vha->hw->pdev->dev, 3710 - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3711 - sp->u.iocb_cmd.u.ctarg.rsp, 3712 - sp->u.iocb_cmd.u.ctarg.rsp_dma); 3713 - sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3714 - } 3704 + if (sp) { 3705 + if (sp->u.iocb_cmd.u.ctarg.req) { 3706 + dma_free_coherent(&vha->hw->pdev->dev, 3707 + sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3708 + sp->u.iocb_cmd.u.ctarg.req, 3709 + sp->u.iocb_cmd.u.ctarg.req_dma); 3710 + sp->u.iocb_cmd.u.ctarg.req = NULL; 3711 + } 3712 + if (sp->u.iocb_cmd.u.ctarg.rsp) { 3713 + dma_free_coherent(&vha->hw->pdev->dev, 3714 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3715 + sp->u.iocb_cmd.u.ctarg.rsp, 3716 + sp->u.iocb_cmd.u.ctarg.rsp_dma); 3717 + sp->u.iocb_cmd.u.ctarg.rsp = NULL; 3718 + } 3715 3719 3716 - /* ref: INIT */ 3717 - kref_put(&sp->cmd_kref, qla2x00_sp_release); 3720 + /* ref: INIT */ 3721 + kref_put(&sp->cmd_kref, qla2x00_sp_release); 3722 + } 3718 3723 3719 3724 spin_lock_irqsave(&vha->work_lock, flags); 3720 3725 vha->scan.scan_flags &= ~SF_SCANNING;
+217 -15
drivers/scsi/qla2xxx/qla_init.c
··· 1859 1859 case RSCN_PORT_ADDR: 1860 1860 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); 1861 1861 if (fcport) { 1862 - if (ql2xfc2target && 1863 - fcport->flags & FCF_FCP2_DEVICE && 1864 - atomic_read(&fcport->state) == FCS_ONLINE) { 1865 - ql_dbg(ql_dbg_disc, vha, 0x2115, 1866 - "Delaying session delete for FCP2 portid=%06x %8phC ", 1867 - fcport->d_id.b24, fcport->port_name); 1868 - return; 1869 - } 1870 - 1871 1862 if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { 1872 1863 /* 1873 1864 * On ipsec start by remote port, Target port ··· 2462 2471 ea->sp->gen1, fcport->rscn_gen, 2463 2472 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]); 2464 2473 2465 - if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 2466 - (fcport->fw_login_state == DSC_LS_PRLI_PEND)) { 2474 + if (fcport->fw_login_state == DSC_LS_PLOGI_PEND) { 2475 + ql_dbg(ql_dbg_disc, vha, 0x20ea, 2476 + "%s %d %8phC Remote is trying to login\n", 2477 + __func__, __LINE__, fcport->port_name); 2478 + /* 2479 + * If we get here, there is port thats already logged in, 2480 + * but it's state has not moved ahead. Recheck with FW on 2481 + * what state it is in and proceed ahead 2482 + */ 2483 + if (!N2N_TOPO(vha->hw)) { 2484 + fcport->fw_login_state = DSC_LS_PRLI_COMP; 2485 + qla24xx_post_gpdb_work(vha, fcport, 0); 2486 + } 2487 + return; 2488 + } 2489 + 2490 + if (fcport->fw_login_state == DSC_LS_PRLI_PEND) { 2467 2491 ql_dbg(ql_dbg_disc, vha, 0x20ea, 2468 2492 "%s %d %8phC Remote is trying to login\n", 2469 2493 __func__, __LINE__, fcport->port_name); ··· 4079 4073 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; 4080 4074 u8 str[STR_LEN], *ptr, p; 4081 4075 int leftover, len; 4076 + 4077 + ql_dbg(ql_dbg_init, vha, 0x015a, 4078 + "SFP: %.*s -> %.*s ->%s%s%s%s%s%s%s\n", 4079 + (int)sizeof(a0->vendor_name), a0->vendor_name, 4080 + (int)sizeof(a0->vendor_pn), a0->vendor_pn, 4081 + a0->fc_sp_cc10 & FC_SP_2 ? a0->fiber_channel_speed2 & FC_SP_64 ? 4082 + " 64G" : "" : "", 4083 + a0->fc_sp_cc10 & FC_SP_32 ? " 32G" : "", 4084 + a0->fc_sp_cc10 & FC_SP_16 ? " 16G" : "", 4085 + a0->fc_sp_cc10 & FC_SP_8 ? " 8G" : "", 4086 + a0->fc_sp_cc10 & FC_SP_4 ? " 4G" : "", 4087 + a0->fc_sp_cc10 & FC_SP_2 ? " 2G" : "", 4088 + a0->fc_sp_cc10 & FC_SP_1 ? 
" 1G" : ""); 4089 + 4090 + if (!(ql2xextended_error_logging & ql_dbg_verbose)) 4091 + return; 4082 4092 4083 4093 memset(str, 0, STR_LEN); 4084 4094 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); ··· 8465 8443 } 8466 8444 8467 8445 static int 8446 + qla28xx_get_srisc_addr(scsi_qla_host_t *vha, uint32_t *srisc_addr, 8447 + uint32_t faddr) 8448 + { 8449 + struct qla_hw_data *ha = vha->hw; 8450 + struct req_que *req = ha->req_q_map[0]; 8451 + uint32_t *dcode; 8452 + int rval; 8453 + 8454 + *srisc_addr = 0; 8455 + dcode = (uint32_t *)req->ring; 8456 + 8457 + rval = qla24xx_read_flash_data(vha, dcode, faddr, 10); 8458 + if (rval) { 8459 + ql_log(ql_log_fatal, vha, 0x01aa, 8460 + "-> Failed to read flash addr + size .\n"); 8461 + return QLA_FUNCTION_FAILED; 8462 + } 8463 + 8464 + *srisc_addr = be32_to_cpu((__force __be32)dcode[2]); 8465 + return QLA_SUCCESS; 8466 + } 8467 + 8468 + static int 8469 + qla28xx_load_fw_template(scsi_qla_host_t *vha, uint32_t faddr) 8470 + { 8471 + struct qla_hw_data *ha = vha->hw; 8472 + struct fwdt *fwdt = ha->fwdt; 8473 + struct req_que *req = ha->req_q_map[0]; 8474 + uint32_t risc_size, risc_attr = 0; 8475 + uint templates, segments, fragment; 8476 + uint32_t *dcode; 8477 + ulong dlen; 8478 + int rval; 8479 + uint j; 8480 + 8481 + dcode = (uint32_t *)req->ring; 8482 + segments = FA_RISC_CODE_SEGMENTS; 8483 + 8484 + for (j = 0; j < segments; j++) { 8485 + rval = qla24xx_read_flash_data(vha, dcode, faddr, 10); 8486 + if (rval) { 8487 + ql_log(ql_log_fatal, vha, 0x01a1, 8488 + "-> Failed to read flash addr + size .\n"); 8489 + return QLA_FUNCTION_FAILED; 8490 + } 8491 + 8492 + risc_size = be32_to_cpu((__force __be32)dcode[3]); 8493 + 8494 + if (risc_attr == 0) 8495 + risc_attr = be32_to_cpu((__force __be32)dcode[9]); 8496 + 8497 + dlen = ha->fw_transfer_size >> 2; 8498 + for (fragment = 0; fragment < risc_size; fragment++) { 8499 + if (dlen > risc_size) 8500 + dlen = risc_size; 8501 + 8502 + faddr += dlen; 8503 + risc_size -= dlen; 8504 + } 8505 + } 8506 + 8507 + templates = (risc_attr & BIT_9) ? 
2 : 1; 8508 + 8509 + ql_dbg(ql_dbg_init, vha, 0x01a1, "-> templates = %u\n", templates); 8510 + 8511 + for (j = 0; j < templates; j++, fwdt++) { 8512 + vfree(fwdt->template); 8513 + fwdt->template = NULL; 8514 + fwdt->length = 0; 8515 + 8516 + dcode = (uint32_t *)req->ring; 8517 + 8518 + rval = qla24xx_read_flash_data(vha, dcode, faddr, 7); 8519 + if (rval) { 8520 + ql_log(ql_log_fatal, vha, 0x01a2, 8521 + "-> Unable to read template size.\n"); 8522 + goto failed; 8523 + } 8524 + 8525 + risc_size = be32_to_cpu((__force __be32)dcode[2]); 8526 + ql_dbg(ql_dbg_init, vha, 0x01a3, 8527 + "-> fwdt%u template array at %#x (%#x dwords)\n", 8528 + j, faddr, risc_size); 8529 + if (!risc_size || !~risc_size) { 8530 + ql_dbg(ql_dbg_init, vha, 0x01a4, 8531 + "-> fwdt%u failed to read array\n", j); 8532 + goto failed; 8533 + } 8534 + 8535 + /* skip header and ignore checksum */ 8536 + faddr += 7; 8537 + risc_size -= 8; 8538 + 8539 + ql_dbg(ql_dbg_init, vha, 0x01a5, 8540 + "-> fwdt%u template allocate template %#x words...\n", 8541 + j, risc_size); 8542 + fwdt->template = vmalloc(risc_size * sizeof(*dcode)); 8543 + if (!fwdt->template) { 8544 + ql_log(ql_log_warn, vha, 0x01a6, 8545 + "-> fwdt%u failed allocate template.\n", j); 8546 + goto failed; 8547 + } 8548 + 8549 + dcode = fwdt->template; 8550 + rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size); 8551 + 8552 + if (rval || !qla27xx_fwdt_template_valid(dcode)) { 8553 + ql_log(ql_log_warn, vha, 0x01a7, 8554 + "-> fwdt%u failed template validate (rval %x)\n", 8555 + j, rval); 8556 + goto failed; 8557 + } 8558 + 8559 + dlen = qla27xx_fwdt_template_size(dcode); 8560 + ql_dbg(ql_dbg_init, vha, 0x01a7, 8561 + "-> fwdt%u template size %#lx bytes (%#lx words)\n", 8562 + j, dlen, dlen / sizeof(*dcode)); 8563 + if (dlen > risc_size * sizeof(*dcode)) { 8564 + ql_log(ql_log_warn, vha, 0x01a8, 8565 + "-> fwdt%u template exceeds array (%-lu bytes)\n", 8566 + j, dlen - risc_size * sizeof(*dcode)); 8567 + goto failed; 8568 + } 8569 + 8570 + fwdt->length = dlen; 8571 + ql_dbg(ql_dbg_init, vha, 0x01a9, 8572 + "-> fwdt%u loaded template ok\n", j); 8573 + 8574 + faddr += risc_size + 1; 8575 + } 8576 + 8577 + return QLA_SUCCESS; 8578 + 8579 + failed: 8580 + vfree(fwdt->template); 8581 + fwdt->template = NULL; 8582 + fwdt->length = 0; 8583 + 8584 + return QLA_SUCCESS; 8585 + } 8586 + 8587 + static int 8468 8588 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 8469 8589 uint32_t faddr) 8470 8590 { ··· 9045 8881 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 9046 8882 { 9047 8883 int rval; 8884 + uint32_t f_region = 0; 9048 8885 struct qla_hw_data *ha = vha->hw; 9049 8886 struct active_regions active_regions = { }; 9050 8887 9051 - if (ql2xfwloadbin == 2) 8888 + if (ql2xfwloadbin == 2 && !IS_QLA28XX(ha)) 9052 8889 goto try_blob_fw; 9053 8890 9054 8891 /* FW Load priority: 9055 - * 1) Firmware residing in flash. 9056 - * 2) Firmware via request-firmware interface (.bin file). 9057 - * 3) Golden-Firmware residing in flash -- (limited operation). 8892 + * 1) If 28xxx, ROM cmd to load flash firmware. 8893 + * 2) Firmware residing in flash. 8894 + * 3) Firmware via request-firmware interface (.bin file). 8895 + * 4) Golden-Firmware residing in flash -- (limited operation). 
9058 8896 */ 9059 8897 9060 8898 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 9061 8899 goto try_primary_fw; 9062 8900 9063 8901 qla27xx_get_active_image(vha, &active_regions); 8902 + 8903 + /* For 28XXX, always load the flash firmware using rom mbx */ 8904 + if (IS_QLA28XX_SECURED(ha)) { 8905 + rval = qla28xx_load_flash_firmware(vha); 8906 + if (rval != QLA_SUCCESS) { 8907 + ql_log(ql_log_fatal, vha, 0x019e, 8908 + "Failed to load flash firmware.\n"); 8909 + goto exit_load_risc; 8910 + } 8911 + 8912 + f_region = 8913 + (active_regions.global != QLA27XX_SECONDARY_IMAGE) ? 8914 + ha->flt_region_fw : ha->flt_region_fw_sec; 8915 + 8916 + ql_log(ql_log_info, vha, 0x019f, 8917 + "Load flash firmware successful (%s).\n", 8918 + ((active_regions.global != QLA27XX_SECONDARY_IMAGE) ? 8919 + "Primary" : "Secondary")); 8920 + 8921 + rval = qla28xx_get_srisc_addr(vha, srisc_addr, f_region); 8922 + if (rval != QLA_SUCCESS) { 8923 + ql_log(ql_log_warn, vha, 0x019f, 8924 + "failed to read srisc address\n"); 8925 + goto exit_load_risc; 8926 + } 8927 + 8928 + rval = qla28xx_load_fw_template(vha, f_region); 8929 + if (rval != QLA_SUCCESS) { 8930 + ql_log(ql_log_warn, vha, 0x01a0, 8931 + "failed to read firmware template\n"); 8932 + } 8933 + 8934 + goto exit_load_risc; 8935 + } 9064 8936 9065 8937 if (active_regions.global != QLA27XX_SECONDARY_IMAGE) 9066 8938 goto try_primary_fw; ··· 9127 8927 9128 8928 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n"); 9129 8929 ha->flags.running_gold_fw = 1; 8930 + 8931 + exit_load_risc: 9130 8932 return rval; 9131 8933 } 9132 8934
+17 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 1676 1676 1677 1677 /* Port logout */ 1678 1678 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1679 - if (!fcport) 1679 + if (!fcport) { 1680 + ql_dbg(ql_dbg_async, vha, 0x5011, 1681 + "Could not find fcport:%04x %04x %04x\n", 1682 + mb[1], mb[2], mb[3]); 1680 1683 break; 1681 - if (atomic_read(&fcport->state) != FCS_ONLINE) 1684 + } 1685 + 1686 + if (atomic_read(&fcport->state) != FCS_ONLINE) { 1687 + ql_dbg(ql_dbg_async, vha, 0x5012, 1688 + "Port state is not online State:0x%x \n", 1689 + atomic_read(&fcport->state)); 1690 + ql_dbg(ql_dbg_async, vha, 0x5012, 1691 + "Scheduling session for deletion \n"); 1692 + fcport->logout_on_delete = 0; 1693 + qlt_schedule_sess_for_deletion(fcport); 1682 1694 break; 1695 + } 1696 + 1683 1697 ql_dbg(ql_dbg_async, vha, 0x508a, 1684 1698 "Marking port lost loopid=%04x portid=%06x.\n", 1685 1699 fcport->loop_id, fcport->d_id.b24); 1700 + 1686 1701 if (qla_ini_mode_enabled(vha)) { 1687 1702 fcport->logout_on_delete = 0; 1688 1703 qlt_schedule_sess_for_deletion(fcport);
+88
drivers/scsi/qla2xxx/qla_mbx.c
··· 43 43 } rom_cmds[] = { 44 44 { MBC_LOAD_RAM }, 45 45 { MBC_EXECUTE_FIRMWARE }, 46 + { MBC_LOAD_FLASH_FIRMWARE }, 46 47 { MBC_READ_RAM_WORD }, 47 48 { MBC_MAILBOX_REGISTER_TEST }, 48 49 { MBC_VERIFY_CHECKSUM }, ··· 824 823 825 824 return rval; 826 825 } 826 + 827 + /* 828 + * qla2x00_load_flash_firmware 829 + * Load firmware from flash. 830 + * 831 + * Input: 832 + * vha = adapter block pointer. 833 + * 834 + * Returns: 835 + * qla28xx local function return status code. 836 + * 837 + * Context: 838 + * Kernel context. 839 + */ 840 + int 841 + qla28xx_load_flash_firmware(scsi_qla_host_t *vha) 842 + { 843 + struct qla_hw_data *ha = vha->hw; 844 + int rval = QLA_COMMAND_ERROR; 845 + mbx_cmd_t mc; 846 + mbx_cmd_t *mcp = &mc; 847 + 848 + if (!IS_QLA28XX(ha)) 849 + return rval; 850 + 851 + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a6, 852 + "Entered %s.\n", __func__); 853 + 854 + mcp->mb[0] = MBC_LOAD_FLASH_FIRMWARE; 855 + mcp->out_mb = MBX_2 | MBX_1 | MBX_0; 856 + mcp->in_mb = MBX_0; 857 + mcp->tov = MBX_TOV_SECONDS; 858 + mcp->flags = 0; 859 + rval = qla2x00_mailbox_command(vha, mcp); 860 + 861 + if (rval != QLA_SUCCESS) { 862 + ql_dbg(ql_log_info, vha, 0x11a7, 863 + "Failed=%x cmd error=%x img error=%x.\n", 864 + rval, mcp->mb[1], mcp->mb[2]); 865 + } else { 866 + ql_dbg(ql_log_info, vha, 0x11a8, 867 + "Done %s.\n", __func__); 868 + } 869 + 870 + return rval; 871 + } 872 + 827 873 828 874 /* 829 875 * qla_get_exlogin_status ··· 7201 7153 __func__); 7202 7154 /* passing all 32 register's contents */ 7203 7155 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t)); 7156 + } 7157 + 7158 + return rval; 7159 + } 7160 + 7161 + int qla_mpipt_validate_fw(scsi_qla_host_t *vha, u16 img_idx, uint16_t *state) 7162 + { 7163 + struct qla_hw_data *ha = vha->hw; 7164 + mbx_cmd_t mc; 7165 + mbx_cmd_t *mcp = &mc; 7166 + int rval; 7167 + 7168 + if (!IS_QLA28XX(ha)) { 7169 + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__, __LINE__); 7170 + return QLA_FUNCTION_FAILED; 7171 + } 7172 + 7173 + if (img_idx > 1) { 7174 + ql_log(ql_log_info, vha, 0xffff, 7175 + "%s %d Invalid flash image index [%d]\n", 7176 + __func__, __LINE__, img_idx); 7177 + return QLA_INVALID_COMMAND; 7178 + } 7179 + 7180 + memset(&mc, 0, sizeof(mc)); 7181 + mcp->mb[0] = MBC_MPI_PASSTHROUGH; 7182 + mcp->mb[1] = MPIPT_SUBCMD_VALIDATE_FW; 7183 + mcp->mb[2] = img_idx; 7184 + mcp->out_mb = MBX_1|MBX_0; 7185 + mcp->in_mb = MBX_2|MBX_1|MBX_0; 7186 + 7187 + /* send mb via iocb */ 7188 + rval = qla24xx_send_mb_cmd(vha, &mc); 7189 + if (rval) { 7190 + ql_log(ql_log_info, vha, 0xffff, "%s:Failed %x (mb=%x,%x)\n", 7191 + __func__, rval, mcp->mb[0], mcp->mb[1]); 7192 + *state = mcp->mb[1]; 7193 + } else { 7194 + ql_log(ql_log_info, vha, 0xffff, "%s: mb=%x,%x,%x\n", __func__, 7195 + mcp->mb[0], mcp->mb[1], mcp->mb[2]); 7204 7196 } 7205 7197 7206 7198 return rval;
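The new MPI passthrough validate helper is declared in qla_gbl.h above, but its consumer (presumably the QL_VND_IMG_SET_VALID BSG path) is not part of these hunks. The fragment below is only an illustrative sketch of a caller, using the prototype and QLA_SUCCESS convention shown in this series; the wrapper function, the chosen image index, and the 0xffff message code are assumptions.

static void example_validate_secondary_image(scsi_qla_host_t *vha)
{
	u16 state = 0;
	int rval;

	/* hypothetical: ask the MPI to validate flash image index 1 */
	rval = qla_mpipt_validate_fw(vha, 1, &state);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0xffff,
		    "image validate failed rval=%x fw state=%#x\n",
		    rval, state);
}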
+1
drivers/scsi/qla2xxx/qla_nx.h
··· 892 892 #define FA_VPD_SIZE_82XX 0x400 893 893 894 894 #define FA_FLASH_LAYOUT_ADDR_82 0xFC400 895 + #define FA_FLASH_MCU_OFF 0x13000 895 896 896 897 /****************************************************************************** 897 898 *
+9 -7
drivers/scsi/qla2xxx/qla_os.c
··· 402 402 struct req_que **, struct rsp_que **); 403 403 static void qla2x00_free_fw_dump(struct qla_hw_data *); 404 404 static void qla2x00_mem_free(struct qla_hw_data *); 405 - int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 406 - struct qla_qpair *qpair); 405 + static enum scsi_qc_status qla2xxx_mqueuecommand(struct Scsi_Host *host, 406 + struct scsi_cmnd *cmd, 407 + struct qla_qpair *qpair); 407 408 408 409 /* -------------------------------------------------------------------------- */ 409 410 static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, ··· 859 858 complete(comp); 860 859 } 861 860 862 - static int 863 - qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 861 + static enum scsi_qc_status qla2xxx_queuecommand(struct Scsi_Host *host, 862 + struct scsi_cmnd *cmd) 864 863 { 865 864 scsi_qla_host_t *vha = shost_priv(host); 866 865 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; ··· 982 981 } 983 982 984 983 /* For MQ supported I/O */ 985 - int 984 + static enum scsi_qc_status 986 985 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 987 - struct qla_qpair *qpair) 986 + struct qla_qpair *qpair) 988 987 { 989 988 scsi_qla_host_t *vha = shost_priv(host); 990 989 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; ··· 1184 1183 while ((qla2x00_reset_active(vha) || ha->dpc_active || 1185 1184 ha->flags.mbox_busy) || 1186 1185 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || 1187 - test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { 1186 + test_bit(FX00_TARGET_SCAN, &vha->dpc_flags) || 1187 + (vha->scan.scan_flags & SF_SCANNING)) { 1188 1188 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 1189 1189 break; 1190 1190 msleep(1000);
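The ->queuecommand() conversions in this file (and in the qla4xxx/qlogic drivers below) only change the declared return type to enum scsi_qc_status; the bodies are untouched. A minimal sketch of the converted shape, assuming a zero success return and the existing SCSI_MLQUEUE_* busy codes remain the valid values (as they do in the drivers converted here); the per-host state is hypothetical.

struct example_hw {				/* hypothetical per-host state */
	bool queue_full;
};

static enum scsi_qc_status example_queuecommand(struct Scsi_Host *shost,
						struct scsi_cmnd *cmd)
{
	struct example_hw *hw = shost_priv(shost);

	if (hw->queue_full)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer retries later */

	/* hand the command to hardware; completion later calls scsi_done(cmd) */
	return 0;
}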
+29
drivers/scsi/qla2xxx/qla_sup.c
··· 1084 1084 return; 1085 1085 } 1086 1086 1087 + static int qla28xx_validate_mcu_signature(scsi_qla_host_t *vha) 1088 + { 1089 + struct qla_hw_data *ha = vha->hw; 1090 + struct req_que *req = ha->req_q_map[0]; 1091 + uint32_t *dcode = (uint32_t *)req->ring; 1092 + uint32_t signature[2] = {0x000c0000, 0x00050000}; 1093 + int ret = QLA_SUCCESS; 1094 + 1095 + ret = qla24xx_read_flash_data(vha, dcode, FA_FLASH_MCU_OFF >> 2, 2); 1096 + if (ret) { 1097 + ql_log(ql_log_fatal, vha, 0x01ab, 1098 + "-> Failed to read flash mcu signature.\n"); 1099 + ret = QLA_FUNCTION_FAILED; 1100 + goto done; 1101 + } 1102 + 1103 + ql_dbg(ql_dbg_init, vha, 0x01ac, 1104 + "Flash data 0x%08x 0x%08x.\n", dcode[0], dcode[1]); 1105 + 1106 + if (!(dcode[0] == signature[0] && dcode[1] == signature[1])) 1107 + ret = QLA_FUNCTION_FAILED; 1108 + 1109 + done: 1110 + return ret; 1111 + } 1112 + 1087 1113 int 1088 1114 qla2xxx_get_flash_info(scsi_qla_host_t *vha) 1089 1115 { ··· 1121 1095 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && 1122 1096 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 1123 1097 return QLA_SUCCESS; 1098 + 1099 + if (IS_QLA28XX(ha) && !qla28xx_validate_mcu_signature(vha)) 1100 + ha->flags.secure_mcu = 1; 1124 1101 1125 1102 ret = qla2xxx_find_flt_start(vha, &flt_addr); 1126 1103 if (ret != QLA_SUCCESS)
+1 -1
drivers/scsi/qla2xxx/qla_target.c
··· 8390 8390 goto out_plogi_cachep; 8391 8391 } 8392 8392 8393 - qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 8393 + qla_tgt_wq = alloc_workqueue("qla_tgt_wq", WQ_PERCPU, 0); 8394 8394 if (!qla_tgt_wq) { 8395 8395 ql_log(ql_log_fatal, NULL, 0xe06f, 8396 8396 "alloc_workqueue for qla_tgt_wq failed\n");
+4 -4
drivers/scsi/qla2xxx/qla_version.h
··· 6 6 /* 7 7 * Driver version 8 8 */ 9 - #define QLA2XXX_VERSION "10.02.09.400-k" 9 + #define QLA2XXX_VERSION "10.02.10.100-k" 10 10 11 11 #define QLA_DRIVER_MAJOR_VER 10 12 - #define QLA_DRIVER_MINOR_VER 2 13 - #define QLA_DRIVER_PATCH_VER 9 14 - #define QLA_DRIVER_BETA_VER 400 12 + #define QLA_DRIVER_MINOR_VER 02 13 + #define QLA_DRIVER_PATCH_VER 10 14 + #define QLA_DRIVER_BETA_VER 100
+1 -1
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 1902 1902 goto out_fabric; 1903 1903 1904 1904 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", 1905 - WQ_MEM_RECLAIM, 0); 1905 + WQ_MEM_RECLAIM | WQ_PERCPU, 0); 1906 1906 if (!tcm_qla2xxx_free_wq) { 1907 1907 ret = -ENOMEM; 1908 1908 goto out_fabric_npiv;
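The WQ_PERCPU additions here, in qla_target.c above, and in ql4_os.c below mark these workqueues as explicitly per-CPU, presumably so their behaviour is preserved if the unflagged alloc_workqueue() default ever changes. A one-line sketch of the converted call shape (names hypothetical):

	wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (!wq)
		return -ENOMEM;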
+5 -3
drivers/scsi/qla4xxx/ql4_os.c
··· 155 155 /* 156 156 * SCSI host template entry points 157 157 */ 158 - static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 158 + static enum scsi_qc_status qla4xxx_queuecommand(struct Scsi_Host *h, 159 + struct scsi_cmnd *cmd); 159 160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); 160 161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 161 162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); ··· 4108 4107 * completion handling). Unfortunately, it sometimes calls the scheduler 4109 4108 * in interrupt context which is a big NO! NO!. 4110 4109 **/ 4111 - static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4110 + static enum scsi_qc_status qla4xxx_queuecommand(struct Scsi_Host *host, 4111 + struct scsi_cmnd *cmd) 4112 4112 { 4113 4113 struct scsi_qla_host *ha = to_qla_host(host); 4114 4114 struct ddb_entry *ddb_entry = cmd->device->hostdata; ··· 8821 8819 } 8822 8820 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8823 8821 8824 - ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8822 + ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM | WQ_PERCPU, 1, 8825 8823 ha->host_no); 8826 8824 if (!ha->task_wq) { 8827 8825 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
+1 -1
drivers/scsi/qlogicfas408.c
··· 464 464 * Queued command 465 465 */ 466 466 467 - static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd) 467 + static enum scsi_qc_status qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd) 468 468 { 469 469 void (*done)(struct scsi_cmnd *) = scsi_done; 470 470 struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+2 -1
drivers/scsi/qlogicfas408.h
··· 104 104 #define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) 105 105 106 106 irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); 107 - int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd); 107 + enum scsi_qc_status qlogicfas408_queuecommand(struct Scsi_Host *h, 108 + struct scsi_cmnd *cmd); 108 109 int qlogicfas408_biosparam(struct scsi_device * disk, 109 110 struct gendisk *unused, 110 111 sector_t capacity, int ip[]);
+1 -1
drivers/scsi/qlogicpti.c
··· 1015 1015 * 1016 1016 * "This code must fly." -davem 1017 1017 */ 1018 - static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd) 1018 + static enum scsi_qc_status qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd) 1019 1019 { 1020 1020 void (*done)(struct scsi_cmnd *) = scsi_done; 1021 1021 struct Scsi_Host *host = Cmnd->device->host;
+56 -71
drivers/scsi/scsi_debug.c
··· 1371 1371 1372 1372 sbuff = scp->sense_buffer; 1373 1373 if (!sbuff) { 1374 - sdev_printk(KERN_ERR, scp->device, 1375 - "%s: sense_buffer is NULL\n", __func__); 1374 + sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n"); 1376 1375 return; 1377 1376 } 1378 1377 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; ··· 1403 1404 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) 1404 1405 { 1405 1406 if (!scp->sense_buffer) { 1406 - sdev_printk(KERN_ERR, scp->device, 1407 - "%s: sense_buffer is NULL\n", __func__); 1407 + sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n"); 1408 1408 return; 1409 1409 } 1410 1410 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); ··· 1421 1423 unsigned int information, unsigned char tape_flags) 1422 1424 { 1423 1425 if (!scp->sense_buffer) { 1424 - sdev_printk(KERN_ERR, scp->device, 1425 - "%s: sense_buffer is NULL\n", __func__); 1426 + sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n"); 1426 1427 return; 1427 1428 } 1428 1429 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); ··· 1449 1452 { 1450 1453 if (sdebug_verbose) { 1451 1454 if (0x1261 == cmd) 1452 - sdev_printk(KERN_INFO, dev, 1453 - "%s: BLKFLSBUF [0x1261]\n", __func__); 1455 + sdev_printk(KERN_INFO, dev, "BLKFLSBUF [0x1261]\n"); 1454 1456 else if (0x5331 == cmd) 1455 1457 sdev_printk(KERN_INFO, dev, 1456 - "%s: CDROM_GET_CAPABILITY [0x5331]\n", 1457 - __func__); 1458 + "CDROM_GET_CAPABILITY [0x5331]\n"); 1458 1459 else 1459 - sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n", 1460 - __func__, cmd); 1460 + sdev_printk(KERN_INFO, dev, "cmd=0x%x\n", cmd); 1461 1461 } 1462 1462 return -EINVAL; 1463 1463 /* return -ENOTTY; // correct return but upsets fdisk */ ··· 1658 1664 1659 1665 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, 1660 1666 arr, arr_len, skip); 1661 - pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", 1662 - __func__, off_dst, scsi_bufflen(scp), act_len, 1667 + pr_debug("off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", 1668 + off_dst, scsi_bufflen(scp), act_len, 1663 1669 scsi_get_resid(scp)); 1664 1670 n = scsi_bufflen(scp) - (off_dst + act_len); 1665 1671 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n)); ··· 3182 3188 return DID_ERROR << 16; 3183 3189 else if (sdebug_verbose && (res < param_len)) 3184 3190 sdev_printk(KERN_INFO, scp->device, 3185 - "%s: cdb indicated=%d, IO sent=%d bytes\n", 3186 - __func__, param_len, res); 3191 + "cdb indicated=%d, IO sent=%d bytes\n", 3192 + param_len, res); 3187 3193 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); 3188 3194 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); 3189 3195 off = (mselect6 ? 
4 : 8); ··· 5127 5133 if (lbdof == 0) { 5128 5134 if (sdebug_verbose) 5129 5135 sdev_printk(KERN_INFO, scp->device, 5130 - "%s: %s: LB Data Offset field bad\n", 5131 - my_name, __func__); 5136 + "%s: LB Data Offset field bad\n", my_name); 5132 5137 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 5133 5138 return illegal_condition_result; 5134 5139 } ··· 5135 5142 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) { 5136 5143 if (sdebug_verbose) 5137 5144 sdev_printk(KERN_INFO, scp->device, 5138 - "%s: %s: LBA range descriptors don't fit\n", 5139 - my_name, __func__); 5145 + "%s: LBA range descriptors don't fit\n", my_name); 5140 5146 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 5141 5147 return illegal_condition_result; 5142 5148 } ··· 5144 5152 return SCSI_MLQUEUE_HOST_BUSY; 5145 5153 if (sdebug_verbose) 5146 5154 sdev_printk(KERN_INFO, scp->device, 5147 - "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n", 5148 - my_name, __func__, lbdof_blen); 5155 + "%s: Fetch header+scatter_list, lbdof_blen=%u\n", 5156 + my_name, lbdof_blen); 5149 5157 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen); 5150 5158 if (res == -1) { 5151 5159 ret = DID_ERROR << 16; ··· 5162 5170 num = get_unaligned_be32(up + 8); 5163 5171 if (sdebug_verbose) 5164 5172 sdev_printk(KERN_INFO, scp->device, 5165 - "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n", 5166 - my_name, __func__, k, lba, num, sg_off); 5173 + "%s: k=%d LBA=0x%llx num=%u sg_off=%u\n", 5174 + my_name, k, lba, num, sg_off); 5167 5175 if (num == 0) 5168 5176 continue; 5169 5177 ret = check_device_access_params(scp, lba, num, true); ··· 5175 5183 if ((cum_lb + num) > bt_len) { 5176 5184 if (sdebug_verbose) 5177 5185 sdev_printk(KERN_INFO, scp->device, 5178 - "%s: %s: sum of blocks > data provided\n", 5179 - my_name, __func__); 5186 + "%s: sum of blocks > data provided\n", 5187 + my_name); 5180 5188 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC, 5181 5189 0); 5182 5190 ret = illegal_condition_result; ··· 5868 5876 goto cleanup; 5869 5877 } else if (sdebug_verbose && (ret < (a_num * lb_size))) { 5870 5878 sdev_printk(KERN_INFO, scp->device, 5871 - "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", 5872 - my_name, __func__, a_num * lb_size, ret); 5879 + "%s: cdb indicated=%u, IO sent=%d bytes\n", 5880 + my_name, a_num * lb_size, ret); 5873 5881 } 5874 5882 if (is_bytchk3) { 5875 5883 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size) ··· 6396 6404 atomic_inc(&sdebug_miss_cpus); 6397 6405 } 6398 6406 6399 - if (!scp) { 6400 - pr_err("scmd=NULL\n"); 6401 - return; 6402 - } 6403 - 6404 6407 spin_lock_irqsave(&sdsc->lock, flags); 6405 6408 aborted = sd_dp->aborted; 6406 6409 if (unlikely(aborted)) ··· 6672 6685 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev), 6673 6686 sdebug_debugfs_root); 6674 6687 if (IS_ERR_OR_NULL(devip->debugfs_entry)) 6675 - pr_info("%s: failed to create debugfs directory for device %s\n", 6676 - __func__, dev_name(&sdp->sdev_gendev)); 6688 + pr_info("failed to create debugfs directory for device %s\n", 6689 + dev_name(&sdp->sdev_gendev)); 6677 6690 6678 6691 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp, 6679 6692 &sdebug_error_fops); 6680 6693 if (IS_ERR_OR_NULL(dentry)) 6681 - pr_info("%s: failed to create error file for device %s\n", 6682 - __func__, dev_name(&sdp->sdev_gendev)); 6694 + pr_info("failed to create error file for device %s\n", 6695 + dev_name(&sdp->sdev_gendev)); 6683 6696 6684 6697 return 0; 6685 6698 } ··· 6721 6734 { 6722 6735 
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd); 6723 6736 struct sdebug_defer *sd_dp = &sdsc->sd_dp; 6724 - enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t); 6737 + enum sdeb_defer_type defer_t = sd_dp->defer_t; 6725 6738 6726 6739 lockdep_assert_held(&sdsc->lock); 6727 6740 ··· 6867 6880 6868 6881 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 6869 6882 sdev_printk(KERN_INFO, SCpnt->device, 6870 - "%s: command%s found\n", __func__, 6883 + "command%s found\n", 6871 6884 aborted ? "" : " not"); 6872 6885 6873 6886 ··· 6955 6968 ++num_dev_resets; 6956 6969 6957 6970 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 6958 - sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 6971 + sdev_printk(KERN_INFO, sdp, "doing device reset"); 6959 6972 6960 6973 scsi_debug_stop_all_queued(sdp); 6961 6974 if (devip) { ··· 6995 7008 6996 7009 ++num_target_resets; 6997 7010 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 6998 - sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 7011 + sdev_printk(KERN_INFO, sdp, "doing target reset\n"); 6999 7012 7000 7013 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 7001 7014 if (devip->target == sdp->id) { ··· 7008 7021 7009 7022 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 7010 7023 sdev_printk(KERN_INFO, sdp, 7011 - "%s: %d device(s) found in target\n", __func__, k); 7024 + "%d device(s) found in target\n", k); 7012 7025 7013 7026 if (sdebug_fail_target_reset(SCpnt)) { 7014 7027 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n", ··· 7029 7042 ++num_bus_resets; 7030 7043 7031 7044 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 7032 - sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 7045 + sdev_printk(KERN_INFO, sdp, "doing bus reset\n"); 7033 7046 7034 7047 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 7035 7048 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); ··· 7040 7053 7041 7054 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 7042 7055 sdev_printk(KERN_INFO, sdp, 7043 - "%s: %d device(s) found in host\n", __func__, k); 7056 + "%d device(s) found in host\n", k); 7044 7057 return SUCCESS; 7045 7058 } 7046 7059 ··· 7052 7065 7053 7066 ++num_host_resets; 7054 7067 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 7055 - sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); 7068 + sdev_printk(KERN_INFO, SCpnt->device, "doing host reset\n"); 7056 7069 mutex_lock(&sdebug_host_list_mutex); 7057 7070 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { 7058 7071 list_for_each_entry(devip, &sdbg_host->dev_info_list, ··· 7067 7080 stop_all_queued(); 7068 7081 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 7069 7082 sdev_printk(KERN_INFO, SCpnt->device, 7070 - "%s: %d device(s) found\n", __func__, k); 7083 + "%d device(s) found\n", k); 7071 7084 return SUCCESS; 7072 7085 } 7073 7086 ··· 7218 7231 scsi_result = device_qfull_result; 7219 7232 7220 7233 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts)) 7221 - sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n", 7222 - __func__, num_in_q); 7234 + sdev_printk(KERN_INFO, sdp, "num_in_q=%d +1, <inject> status: TASK SET FULL\n", 7235 + num_in_q); 7223 7236 } 7224 7237 } 7225 7238 ··· 7245 7258 } 7246 7259 7247 7260 if (unlikely(sdebug_verbose && cmnd->result)) 7248 - sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", 7249 - __func__, cmnd->result); 7261 + sdev_printk(KERN_INFO, sdp, "non-zero result=0x%x\n", 7262 + cmnd->result); 7250 7263 7251 7264 if (delta_jiff > 0 || ndelay > 0) { 7252 7265 ktime_t kt; ··· 7283 7296 if (polled) { 7284 7297 spin_lock_irqsave(&sdsc->lock, flags); 7285 7298 
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); 7286 - WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); 7299 + sd_dp->defer_t = SDEB_DEFER_POLL; 7287 7300 spin_unlock_irqrestore(&sdsc->lock, flags); 7288 7301 } else { 7289 7302 /* schedule the invocation of scsi_done() for a later time */ 7290 7303 spin_lock_irqsave(&sdsc->lock, flags); 7291 - WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT); 7304 + sd_dp->defer_t = SDEB_DEFER_HRT; 7292 7305 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); 7293 7306 /* 7294 7307 * The completion handler will try to grab sqcp->lock, ··· 7312 7325 if (polled) { 7313 7326 spin_lock_irqsave(&sdsc->lock, flags); 7314 7327 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); 7315 - WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL); 7328 + sd_dp->defer_t = SDEB_DEFER_POLL; 7316 7329 spin_unlock_irqrestore(&sdsc->lock, flags); 7317 7330 } else { 7318 7331 spin_lock_irqsave(&sdsc->lock, flags); 7319 - WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ); 7332 + sd_dp->defer_t = SDEB_DEFER_WQ; 7320 7333 schedule_work(&sd_dp->ew.work); 7321 7334 spin_unlock_irqrestore(&sdsc->lock, flags); 7322 7335 } ··· 8684 8697 8685 8698 sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL); 8686 8699 if (IS_ERR_OR_NULL(sdebug_debugfs_root)) 8687 - pr_info("%s: failed to create initial debugfs directory\n", __func__); 8700 + pr_info("failed to create initial debugfs directory\n"); 8688 8701 8689 8702 for (k = 0; k < hosts_to_add; k++) { 8690 8703 if (want_store && k == 0) { ··· 8800 8813 if (unlikely(res < 0)) { 8801 8814 xa_unlock_irqrestore(per_store_ap, iflags); 8802 8815 kfree(sip); 8803 - pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res); 8816 + pr_warn("xa_alloc() errno=%d\n", -res); 8804 8817 return res; 8805 8818 } 8806 8819 sdeb_most_recent_idx = n_idx; ··· 8857 8870 return (int)n_idx; 8858 8871 err: 8859 8872 sdebug_erase_store((int)n_idx, sip); 8860 - pr_warn("%s: failed, errno=%d\n", __func__, -res); 8873 + pr_warn("failed, errno=%d\n", -res); 8861 8874 return res; 8862 8875 } 8863 8876 ··· 8916 8929 put_device(&sdbg_host->dev); 8917 8930 else 8918 8931 kfree(sdbg_host); 8919 - pr_warn("%s: failed, errno=%d\n", __func__, -error); 8932 + pr_warn("failed, errno=%d\n", -error); 8920 8933 return error; 8921 8934 } 8922 8935 ··· 8984 8997 8985 8998 if (qdepth > SDEBUG_CANQUEUE) { 8986 8999 qdepth = SDEBUG_CANQUEUE; 8987 - pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__, 9000 + pr_warn("requested qdepth [%d] exceeds canqueue [%d], trim\n", 8988 9001 qdepth, SDEBUG_CANQUEUE); 8989 9002 } 8990 9003 if (qdepth < 1) ··· 8996 9009 mutex_unlock(&sdebug_host_list_mutex); 8997 9010 8998 9011 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) 8999 - sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth); 9012 + sdev_printk(KERN_INFO, sdev, "qdepth=%d\n", qdepth); 9000 9013 9001 9014 return sdev->queue_depth; 9002 9015 } ··· 9120 9133 9121 9134 spin_lock_irqsave(&sdsc->lock, flags); 9122 9135 sd_dp = &sdsc->sd_dp; 9123 - if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) { 9136 + if (sd_dp->defer_t != SDEB_DEFER_POLL) { 9124 9137 spin_unlock_irqrestore(&sdsc->lock, flags); 9125 9138 return true; 9126 9139 } ··· 9269 9282 bool res = false; 9270 9283 9271 9284 if (!to_be_aborted_scmd) { 9272 - pr_err("%s: command with tag %#x not found\n", __func__, 9273 - unique_tag); 9285 + pr_err("command with tag %#x not found\n", unique_tag); 9274 9286 return; 9275 9287 } 9276 9288 ··· 9277 9291 res = scsi_debug_stop_cmnd(to_be_aborted_scmd); 9278 9292 9279 9293 if (res) 9280 - 
pr_info("%s: aborted command with tag %#x\n", 9281 - __func__, unique_tag); 9294 + pr_info("aborted command with tag %#x\n", unique_tag); 9282 9295 else 9283 - pr_err("%s: failed to abort command with tag %#x\n", 9284 - __func__, unique_tag); 9296 + pr_err("failed to abort command with tag %#x\n", unique_tag); 9285 9297 9286 9298 set_host_byte(scp, res ? DID_OK : DID_ERROR); 9287 9299 } 9288 9300 9289 - static int scsi_debug_process_reserved_command(struct Scsi_Host *shost, 9290 - struct scsi_cmnd *scp) 9301 + static enum scsi_qc_status 9302 + scsi_debug_process_reserved_command(struct Scsi_Host *shost, 9303 + struct scsi_cmnd *scp) 9291 9304 { 9292 9305 struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp); 9293 9306 ··· 9304 9319 return 0; 9305 9320 } 9306 9321 9307 - static int scsi_debug_queuecommand(struct Scsi_Host *shost, 9308 - struct scsi_cmnd *scp) 9322 + static enum scsi_qc_status scsi_debug_queuecommand(struct Scsi_Host *shost, 9323 + struct scsi_cmnd *scp) 9309 9324 { 9310 9325 u8 sdeb_i; 9311 9326 struct scsi_device *sdp = scp->device;
+6 -5
drivers/scsi/scsi_lib.c
··· 76 76 } 77 77 78 78 static void 79 - scsi_set_blocked(struct scsi_cmnd *cmd, int reason) 79 + scsi_set_blocked(struct scsi_cmnd *cmd, enum scsi_qc_status reason) 80 80 { 81 81 struct Scsi_Host *host = cmd->device->host; 82 82 struct scsi_device *device = cmd->device; ··· 139 139 * for a requeue after completion, which should only occur in this 140 140 * file. 141 141 */ 142 - static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) 142 + static void __scsi_queue_insert(struct scsi_cmnd *cmd, 143 + enum scsi_qc_status reason, bool unbusy) 143 144 { 144 145 struct scsi_device *device = cmd->device; 145 146 ··· 180 179 * Context: This could be called either from an interrupt context or a normal 181 180 * process context. 182 181 */ 183 - void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 182 + void scsi_queue_insert(struct scsi_cmnd *cmd, enum scsi_qc_status reason) 184 183 { 185 184 __scsi_queue_insert(cmd, reason, true); 186 185 } ··· 1586 1585 * Return: nonzero return request was rejected and device's queue needs to be 1587 1586 * plugged. 1588 1587 */ 1589 - static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) 1588 + static enum scsi_qc_status scsi_dispatch_cmd(struct scsi_cmnd *cmd) 1590 1589 { 1591 1590 struct Scsi_Host *host = cmd->device->host; 1592 1591 int rtn = 0; ··· 1835 1834 struct Scsi_Host *shost = sdev->host; 1836 1835 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); 1837 1836 blk_status_t ret; 1838 - int reason; 1837 + enum scsi_qc_status reason; 1839 1838 1840 1839 WARN_ON_ONCE(cmd->budget_token < 0); 1841 1840
+2 -1
drivers/scsi/scsi_priv.h
··· 102 102 103 103 /* scsi_lib.c */ 104 104 extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd); 105 - extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 105 + extern void scsi_queue_insert(struct scsi_cmnd *cmd, 106 + enum scsi_qc_status reason); 106 107 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); 107 108 extern void scsi_run_host_queues(struct Scsi_Host *shost); 108 109 extern void scsi_requeue_run_queue(struct work_struct *work);
+74 -3
drivers/scsi/scsi_sysfs.c
··· 554 554 return 0; 555 555 } 556 556 557 + static int scsi_bus_probe(struct device *dev) 558 + { 559 + struct scsi_device *sdp = to_scsi_device(dev); 560 + struct scsi_driver *drv = to_scsi_driver(dev->driver); 561 + 562 + if (drv->probe) 563 + return drv->probe(sdp); 564 + else 565 + return 0; 566 + } 567 + 568 + static void scsi_bus_remove(struct device *dev) 569 + { 570 + struct scsi_device *sdp = to_scsi_device(dev); 571 + struct scsi_driver *drv = to_scsi_driver(dev->driver); 572 + 573 + if (drv->remove) 574 + drv->remove(sdp); 575 + } 576 + 577 + static void scsi_bus_shutdown(struct device *dev) 578 + { 579 + struct scsi_device *sdp = to_scsi_device(dev); 580 + struct scsi_driver *drv; 581 + 582 + if (!dev->driver) 583 + return; 584 + 585 + drv = to_scsi_driver(dev->driver); 586 + 587 + if (drv->shutdown) 588 + drv->shutdown(sdp); 589 + } 590 + 591 + 557 592 const struct bus_type scsi_bus_type = { 558 - .name = "scsi", 559 - .match = scsi_bus_match, 593 + .name = "scsi", 594 + .match = scsi_bus_match, 560 595 .uevent = scsi_bus_uevent, 596 + .probe = scsi_bus_probe, 597 + .remove = scsi_bus_remove, 598 + .shutdown = scsi_bus_shutdown, 561 599 #ifdef CONFIG_PM 562 600 .pm = &scsi_bus_pm_ops, 563 601 #endif ··· 1592 1554 } 1593 1555 EXPORT_SYMBOL(scsi_remove_target); 1594 1556 1595 - int __scsi_register_driver(struct device_driver *drv, struct module *owner) 1557 + static int scsi_legacy_probe(struct scsi_device *sdp) 1596 1558 { 1559 + struct device *dev = &sdp->sdev_gendev; 1560 + struct device_driver *driver = dev->driver; 1561 + 1562 + return driver->probe(dev); 1563 + } 1564 + 1565 + static void scsi_legacy_remove(struct scsi_device *sdp) 1566 + { 1567 + struct device *dev = &sdp->sdev_gendev; 1568 + struct device_driver *driver = dev->driver; 1569 + 1570 + driver->remove(dev); 1571 + } 1572 + 1573 + static void scsi_legacy_shutdown(struct scsi_device *sdp) 1574 + { 1575 + struct device *dev = &sdp->sdev_gendev; 1576 + struct device_driver *driver = dev->driver; 1577 + 1578 + driver->shutdown(dev); 1579 + } 1580 + 1581 + int __scsi_register_driver(struct scsi_driver *sdrv, struct module *owner) 1582 + { 1583 + struct device_driver *drv = &sdrv->gendrv; 1584 + 1597 1585 drv->bus = &scsi_bus_type; 1598 1586 drv->owner = owner; 1587 + 1588 + if (!sdrv->probe && drv->probe) 1589 + sdrv->probe = scsi_legacy_probe; 1590 + if (!sdrv->remove && drv->remove) 1591 + sdrv->remove = scsi_legacy_remove; 1592 + if (!sdrv->shutdown && drv->shutdown) 1593 + sdrv->shutdown = scsi_legacy_shutdown; 1599 1594 1600 1595 return driver_register(drv); 1601 1596 }
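With probe/remove/shutdown now routed through struct scsi_driver (legacy struct device_driver callbacks are still honoured via the scsi_legacy_* wrappers), an upper-level driver registers roughly as sketched below. This mirrors the sd and ses conversions that follow; the names are hypothetical and remove/shutdown are optional.

static int example_uld_probe(struct scsi_device *sdev)
{
	if (sdev->type != TYPE_DISK)
		return -ENODEV;		/* not ours */
	/* allocate per-device state, dev_set_drvdata(), ... */
	return 0;
}

static void example_uld_remove(struct scsi_device *sdev)
{
	/* tear down whatever probe set up */
}

static struct scsi_driver example_uld_template = {
	.probe	= example_uld_probe,
	.remove	= example_uld_remove,
	.gendrv	= {
		.name = "example_uld",
	},
};

	/* in module init: the scsi_driver itself is registered now */
	err = scsi_register_driver(&example_uld_template);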
+42
drivers/scsi/scsi_transport_fc.c
··· 1329 1329 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR, 1330 1330 show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo); 1331 1331 1332 + #define fc_rport_encryption(name) \ 1333 + static ssize_t fc_rport_encinfo_##name(struct device *cd, \ 1334 + struct device_attribute *attr, \ 1335 + char *buf) \ 1336 + { \ 1337 + struct fc_rport *rport = transport_class_to_rport(cd); \ 1338 + struct Scsi_Host *shost = rport_to_shost(rport); \ 1339 + struct fc_internal *i = to_fc_internal(shost->transportt); \ 1340 + struct fc_encryption_info *info; \ 1341 + ssize_t ret = -ENOENT; \ 1342 + u32 data; \ 1343 + \ 1344 + if (i->f->get_fc_rport_enc_info) { \ 1345 + info = (i->f->get_fc_rport_enc_info)(rport); \ 1346 + if (info) { \ 1347 + data = info->name; \ 1348 + if (!strcmp(#name, "status")) { \ 1349 + ret = scnprintf(buf, \ 1350 + FC_RPORT_ENCRYPTION_STATUS_MAX_LEN, \ 1351 + "%s\n", \ 1352 + data ? "Encrypted" : "Unencrypted"); \ 1353 + } \ 1354 + } \ 1355 + } \ 1356 + return ret; \ 1357 + } \ 1358 + static FC_DEVICE_ATTR(rport, encryption_##name, 0444, fc_rport_encinfo_##name, NULL) \ 1359 + 1360 + fc_rport_encryption(status); 1361 + 1362 + static struct attribute *fc_rport_encryption_attrs[] = { 1363 + &device_attr_rport_encryption_status.attr, 1364 + NULL 1365 + }; 1366 + 1367 + static struct attribute_group fc_rport_encryption_group = { 1368 + .name = "encryption", 1369 + .attrs = fc_rport_encryption_attrs, 1370 + }; 1371 + 1332 1372 #define fc_rport_fpin_statistic(name) \ 1333 1373 static ssize_t fc_rport_fpinstat_##name(struct device *cd, \ 1334 1374 struct device_attribute *attr, \ ··· 2674 2634 i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; 2675 2635 i->rport_attr_cont.ac.class = &fc_rport_class.class; 2676 2636 i->rport_attr_cont.ac.match = fc_rport_match; 2637 + if (ft->get_fc_rport_enc_info) 2638 + i->rport_attr_cont.encryption = &fc_rport_encryption_group; 2677 2639 i->rport_attr_cont.statistics = &fc_rport_statistics_group; 2678 2640 transport_container_register(&i->rport_attr_cont); 2679 2641
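The new rport "encryption" attribute group is only created when the transport template provides get_fc_rport_enc_info(); no LLD implementation is included in these hunks, so the callback below is purely an illustrative sketch. The struct member name follows the macro above (info->status selects "Encrypted" vs "Unencrypted"); everything else is hypothetical.

static struct fc_encryption_info example_enc_info;

static struct fc_encryption_info *
example_get_rport_enc_info(struct fc_rport *rport)
{
	/* hypothetical: consult the LLD's session state for this rport */
	example_enc_info.status = 1;		/* nonzero => "Encrypted" */
	return &example_enc_info;
}

static struct fc_function_template example_fc_functions = {
	/* ... existing show/set handlers ... */
	.get_fc_rport_enc_info	= example_get_rport_enc_info,
};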
+142 -153
drivers/scsi/sd.c
··· 102 102 103 103 #define SD_MINORS 16 104 104 105 - static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, 106 - unsigned int mode); 107 105 static void sd_config_write_same(struct scsi_disk *sdkp, 108 106 struct queue_limits *lim); 109 107 static void sd_revalidate_disk(struct gendisk *); 110 - static void sd_unlock_native_capacity(struct gendisk *disk); 111 - static void sd_shutdown(struct device *); 112 - static void scsi_disk_release(struct device *cdev); 113 108 114 109 static DEFINE_IDA(sd_index_ida); 115 110 ··· 115 120 "write through", "none", "write back", 116 121 "write back, no read (daft)" 117 122 }; 123 + 124 + static void sd_disable_discard(struct scsi_disk *sdkp) 125 + { 126 + sdkp->provisioning_mode = SD_LBP_DISABLE; 127 + blk_queue_disable_discard(sdkp->disk->queue); 128 + } 129 + 130 + static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, 131 + unsigned int mode) 132 + { 133 + unsigned int logical_block_size = sdkp->device->sector_size; 134 + unsigned int max_blocks = 0; 135 + 136 + lim->discard_alignment = sdkp->unmap_alignment * logical_block_size; 137 + lim->discard_granularity = max(sdkp->physical_block_size, 138 + sdkp->unmap_granularity * logical_block_size); 139 + sdkp->provisioning_mode = mode; 140 + 141 + switch (mode) { 142 + 143 + case SD_LBP_FULL: 144 + case SD_LBP_DISABLE: 145 + break; 146 + 147 + case SD_LBP_UNMAP: 148 + max_blocks = min_not_zero(sdkp->max_unmap_blocks, 149 + (u32)SD_MAX_WS16_BLOCKS); 150 + break; 151 + 152 + case SD_LBP_WS16: 153 + if (sdkp->device->unmap_limit_for_ws) 154 + max_blocks = sdkp->max_unmap_blocks; 155 + else 156 + max_blocks = sdkp->max_ws_blocks; 157 + 158 + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); 159 + break; 160 + 161 + case SD_LBP_WS10: 162 + if (sdkp->device->unmap_limit_for_ws) 163 + max_blocks = sdkp->max_unmap_blocks; 164 + else 165 + max_blocks = sdkp->max_ws_blocks; 166 + 167 + max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); 168 + break; 169 + 170 + case SD_LBP_ZERO: 171 + max_blocks = min_not_zero(sdkp->max_ws_blocks, 172 + (u32)SD_MAX_WS10_BLOCKS); 173 + break; 174 + } 175 + 176 + lim->max_hw_discard_sectors = max_blocks * 177 + (logical_block_size >> SECTOR_SHIFT); 178 + } 118 179 119 180 static void sd_set_flush_flag(struct scsi_disk *sdkp, 120 181 struct queue_limits *lim) ··· 749 698 }; 750 699 ATTRIBUTE_GROUPS(sd_disk); 751 700 701 + static void scsi_disk_release(struct device *dev) 702 + { 703 + struct scsi_disk *sdkp = to_scsi_disk(dev); 704 + 705 + ida_free(&sd_index_ida, sdkp->index); 706 + put_device(&sdkp->device->sdev_gendev); 707 + free_opal_dev(sdkp->opal_dev); 708 + 709 + kfree(sdkp); 710 + } 711 + 752 712 static struct class sd_disk_class = { 753 713 .name = "scsi_disk", 754 714 .dev_release = scsi_disk_release, ··· 926 864 scmd->prot_flags &= sd_prot_flag_mask(prot_op); 927 865 928 866 return protect; 929 - } 930 - 931 - static void sd_disable_discard(struct scsi_disk *sdkp) 932 - { 933 - sdkp->provisioning_mode = SD_LBP_DISABLE; 934 - blk_queue_disable_discard(sdkp->disk->queue); 935 - } 936 - 937 - static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, 938 - unsigned int mode) 939 - { 940 - unsigned int logical_block_size = sdkp->device->sector_size; 941 - unsigned int max_blocks = 0; 942 - 943 - lim->discard_alignment = sdkp->unmap_alignment * logical_block_size; 944 - lim->discard_granularity = max(sdkp->physical_block_size, 945 - sdkp->unmap_granularity * logical_block_size); 
946 - sdkp->provisioning_mode = mode; 947 - 948 - switch (mode) { 949 - 950 - case SD_LBP_FULL: 951 - case SD_LBP_DISABLE: 952 - break; 953 - 954 - case SD_LBP_UNMAP: 955 - max_blocks = min_not_zero(sdkp->max_unmap_blocks, 956 - (u32)SD_MAX_WS16_BLOCKS); 957 - break; 958 - 959 - case SD_LBP_WS16: 960 - if (sdkp->device->unmap_limit_for_ws) 961 - max_blocks = sdkp->max_unmap_blocks; 962 - else 963 - max_blocks = sdkp->max_ws_blocks; 964 - 965 - max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); 966 - break; 967 - 968 - case SD_LBP_WS10: 969 - if (sdkp->device->unmap_limit_for_ws) 970 - max_blocks = sdkp->max_unmap_blocks; 971 - else 972 - max_blocks = sdkp->max_ws_blocks; 973 - 974 - max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); 975 - break; 976 - 977 - case SD_LBP_ZERO: 978 - max_blocks = min_not_zero(sdkp->max_ws_blocks, 979 - (u32)SD_MAX_WS10_BLOCKS); 980 - break; 981 - } 982 - 983 - lim->max_hw_discard_sectors = max_blocks * 984 - (logical_block_size >> SECTOR_SHIFT); 985 867 } 986 868 987 869 static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) ··· 1682 1676 struct scsi_device *sdp = sdkp->device; 1683 1677 void __user *p = (void __user *)arg; 1684 1678 int error; 1685 - 1686 - SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " 1687 - "cmd=0x%x\n", disk->disk_name, cmd)); 1679 + 1680 + SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, 1681 + "sd_ioctl: disk=%s, cmd=0x%x\n", 1682 + disk->disk_name, cmd)); 1688 1683 1689 1684 if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) 1690 1685 return -ENOIOCTLCMD; ··· 2184 2177 put_device(&sdkp->disk_dev); 2185 2178 } 2186 2179 2187 - static const struct block_device_operations sd_fops = { 2188 - .owner = THIS_MODULE, 2189 - .open = sd_open, 2190 - .release = sd_release, 2191 - .ioctl = sd_ioctl, 2192 - .getgeo = sd_getgeo, 2193 - .compat_ioctl = blkdev_compat_ptr_ioctl, 2194 - .check_events = sd_check_events, 2195 - .unlock_native_capacity = sd_unlock_native_capacity, 2196 - .report_zones = sd_zbc_report_zones, 2197 - .get_unique_id = sd_get_unique_id, 2198 - .free_disk = scsi_disk_free_disk, 2199 - .pr_ops = &sd_pr_ops, 2200 - }; 2201 - 2202 2180 /** 2203 2181 * sd_eh_reset - reset error handling callback 2204 2182 * @scmd: sd-issued command that has failed ··· 2594 2602 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2595 2603 2596 2604 if (type > T10_PI_TYPE3_PROTECTION) { 2597 - sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ 2598 - " protection type %u. Disabling disk!\n", 2605 + sd_printk(KERN_ERR, sdkp, 2606 + "formatted with unsupported protection type %u. Disabling disk!\n", 2599 2607 type); 2600 2608 sdkp->protection_type = 0; 2601 2609 return -ENODEV; ··· 2872 2880 if ((sizeof(sdkp->capacity) > 4) && 2873 2881 (sdkp->capacity > 0xffffffffULL)) { 2874 2882 int old_sector_size = sector_size; 2875 - sd_printk(KERN_NOTICE, sdkp, "Very big device. " 2876 - "Trying to use READ CAPACITY(16).\n"); 2883 + sd_printk(KERN_NOTICE, sdkp, 2884 + "Very big device. 
Trying to use READ CAPACITY(16).\n"); 2877 2885 sector_size = read_capacity_16(sdkp, sdp, lim, buffer); 2878 2886 if (sector_size < 0) { 2879 2887 sd_printk(KERN_NOTICE, sdkp, ··· 2899 2907 */ 2900 2908 if (sdp->fix_capacity || 2901 2909 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2902 - sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " 2903 - "from its reported value: %llu\n", 2904 - (unsigned long long) sdkp->capacity); 2910 + sd_printk(KERN_INFO, sdkp, 2911 + "Adjusting the sector count from its reported value: %llu\n", 2912 + (unsigned long long) sdkp->capacity); 2905 2913 --sdkp->capacity; 2906 2914 } 2907 2915 2908 2916 got_data: 2909 2917 if (sector_size == 0) { 2910 2918 sector_size = 512; 2911 - sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " 2912 - "assuming 512.\n"); 2919 + sd_printk(KERN_NOTICE, sdkp, 2920 + "Sector size 0 reported, assuming 512.\n"); 2913 2921 } 2914 2922 2915 2923 if (sector_size != 512 && ··· 3114 3122 if (len < 3) 3115 3123 goto bad_sense; 3116 3124 else if (len > SD_BUF_SIZE) { 3117 - sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " 3118 - "data from %d to %d bytes\n", len, SD_BUF_SIZE); 3125 + sd_first_printk(KERN_NOTICE, sdkp, 3126 + "Truncating mode parameter data from %d to %d bytes\n", 3127 + len, SD_BUF_SIZE); 3119 3128 len = SD_BUF_SIZE; 3120 3129 } 3121 3130 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) ··· 3139 3146 */ 3140 3147 if (len - offset <= 2) { 3141 3148 sd_first_printk(KERN_ERR, sdkp, 3142 - "Incomplete mode parameter " 3143 - "data\n"); 3149 + "Incomplete mode parameter data\n"); 3144 3150 goto defaults; 3145 3151 } else { 3146 3152 modepage = page_code; ··· 3154 3162 offset += 2 + buffer[offset+1]; 3155 3163 else { 3156 3164 sd_first_printk(KERN_ERR, sdkp, 3157 - "Incomplete mode " 3158 - "parameter data\n"); 3165 + "Incomplete mode parameter data\n"); 3159 3166 goto defaults; 3160 3167 } 3161 3168 } ··· 3617 3626 3618 3627 if (min_xfer_bytes & (sdkp->physical_block_size - 1)) { 3619 3628 sd_first_printk(KERN_WARNING, sdkp, 3620 - "Preferred minimum I/O size %u bytes not a " \ 3621 - "multiple of physical block size (%u bytes)\n", 3629 + "Preferred minimum I/O size %u bytes not a multiple of physical block size (%u bytes)\n", 3622 3630 min_xfer_bytes, sdkp->physical_block_size); 3623 3631 sdkp->min_xfer_blocks = 0; 3624 3632 return false; ··· 3647 3657 3648 3658 if (sdkp->opt_xfer_blocks > dev_max) { 3649 3659 sd_first_printk(KERN_WARNING, sdkp, 3650 - "Optimal transfer size %u logical blocks " \ 3651 - "> dev_max (%u logical blocks)\n", 3660 + "Optimal transfer size %u logical blocks > dev_max (%u logical blocks)\n", 3652 3661 sdkp->opt_xfer_blocks, dev_max); 3653 3662 return false; 3654 3663 } 3655 3664 3656 3665 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3657 3666 sd_first_printk(KERN_WARNING, sdkp, 3658 - "Optimal transfer size %u logical blocks " \ 3659 - "> sd driver limit (%u logical blocks)\n", 3667 + "Optimal transfer size %u logical blocks > sd driver limit (%u logical blocks)\n", 3660 3668 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3661 3669 return false; 3662 3670 } 3663 3671 3664 3672 if (opt_xfer_bytes < PAGE_SIZE) { 3665 3673 sd_first_printk(KERN_WARNING, sdkp, 3666 - "Optimal transfer size %u bytes < " \ 3667 - "PAGE_SIZE (%u bytes)\n", 3674 + "Optimal transfer size %u bytes < PAGE_SIZE (%u bytes)\n", 3668 3675 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3669 3676 return false; 3670 3677 } 3671 3678 3672 3679 if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) { 3673 
3680 sd_first_printk(KERN_WARNING, sdkp, 3674 - "Optimal transfer size %u bytes not a " \ 3675 - "multiple of preferred minimum block " \ 3676 - "size (%u bytes)\n", 3681 + "Optimal transfer size %u bytes not a multiple of preferred minimum block size (%u bytes)\n", 3677 3682 opt_xfer_bytes, min_xfer_bytes); 3678 3683 return false; 3679 3684 } 3680 3685 3681 3686 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3682 3687 sd_first_printk(KERN_WARNING, sdkp, 3683 - "Optimal transfer size %u bytes not a " \ 3684 - "multiple of physical block size (%u bytes)\n", 3688 + "Optimal transfer size %u bytes not a multiple of physical block size (%u bytes)\n", 3685 3689 opt_xfer_bytes, sdkp->physical_block_size); 3686 3690 return false; 3687 3691 } ··· 3869 3885 sdev->host->hostt->unlock_native_capacity(sdev); 3870 3886 } 3871 3887 3888 + static const struct block_device_operations sd_fops = { 3889 + .owner = THIS_MODULE, 3890 + .open = sd_open, 3891 + .release = sd_release, 3892 + .ioctl = sd_ioctl, 3893 + .getgeo = sd_getgeo, 3894 + .compat_ioctl = blkdev_compat_ptr_ioctl, 3895 + .check_events = sd_check_events, 3896 + .unlock_native_capacity = sd_unlock_native_capacity, 3897 + .report_zones = sd_zbc_report_zones, 3898 + .get_unique_id = sd_get_unique_id, 3899 + .free_disk = scsi_disk_free_disk, 3900 + .pr_ops = &sd_pr_ops, 3901 + }; 3902 + 3872 3903 /** 3873 3904 * sd_format_disk_name - format disk name 3874 3905 * @prefix: name prefix - ie. "sd" for SCSI disks ··· 3934 3935 * sd_probe - called during driver initialization and whenever a 3935 3936 * new scsi device is attached to the system. It is called once 3936 3937 * for each scsi device (not just disks) present. 3937 - * @dev: pointer to device object 3938 + * @sdp: pointer to device object 3938 3939 * 3939 3940 * Returns 0 if successful (or not interested in this scsi device 3940 3941 * (e.g. scanner)); 1 when there is an error. ··· 3948 3949 * Assume sd_probe is not re-entrant (for time being) 3949 3950 * Also think about sd_probe() and sd_remove() running coincidentally. 3950 3951 **/ 3951 - static int sd_probe(struct device *dev) 3952 + static int sd_probe(struct scsi_device *sdp) 3952 3953 { 3953 - struct scsi_device *sdp = to_scsi_device(dev); 3954 + struct device *dev = &sdp->sdev_gendev; 3954 3955 struct scsi_disk *sdkp; 3955 3956 struct gendisk *gd; 3956 3957 int index; ··· 4086 4087 return error; 4087 4088 } 4088 4089 4089 - /** 4090 - * sd_remove - called whenever a scsi disk (previously recognized by 4091 - * sd_probe) is detached from the system. It is called (potentially 4092 - * multiple times) during sd module unload. 4093 - * @dev: pointer to device object 4094 - * 4095 - * Note: this function is invoked from the scsi mid-level. 4096 - * This function potentially frees up a device name (e.g. /dev/sdc) 4097 - * that could be re-used by a subsequent sd_probe(). 4098 - * This function is not called when the built-in sd driver is "exit-ed". 
4099 - **/ 4100 - static int sd_remove(struct device *dev) 4101 - { 4102 - struct scsi_disk *sdkp = dev_get_drvdata(dev); 4103 - 4104 - scsi_autopm_get_device(sdkp->device); 4105 - 4106 - device_del(&sdkp->disk_dev); 4107 - del_gendisk(sdkp->disk); 4108 - if (!sdkp->suspended) 4109 - sd_shutdown(dev); 4110 - 4111 - put_disk(sdkp->disk); 4112 - return 0; 4113 - } 4114 - 4115 - static void scsi_disk_release(struct device *dev) 4116 - { 4117 - struct scsi_disk *sdkp = to_scsi_disk(dev); 4118 - 4119 - ida_free(&sd_index_ida, sdkp->index); 4120 - put_device(&sdkp->device->sdev_gendev); 4121 - free_opal_dev(sdkp->opal_dev); 4122 - 4123 - kfree(sdkp); 4124 - } 4125 - 4126 4090 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 4127 4091 { 4128 4092 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ ··· 4159 4197 * the normal SCSI command structure. Wait for the command to 4160 4198 * complete. 4161 4199 */ 4162 - static void sd_shutdown(struct device *dev) 4200 + static void sd_shutdown(struct scsi_device *sdp) 4163 4201 { 4202 + struct device *dev = &sdp->sdev_gendev; 4164 4203 struct scsi_disk *sdkp = dev_get_drvdata(dev); 4165 4204 4166 4205 if (!sdkp) ··· 4186 4223 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 4187 4224 sd_start_stop_device(sdkp, 0); 4188 4225 } 4226 + } 4227 + 4228 + /** 4229 + * sd_remove - called whenever a scsi disk (previously recognized by 4230 + * sd_probe) is detached from the system. It is called (potentially 4231 + * multiple times) during sd module unload. 4232 + * @sdp: pointer to device object 4233 + * 4234 + * Note: this function is invoked from the scsi mid-level. 4235 + * This function potentially frees up a device name (e.g. /dev/sdc) 4236 + * that could be re-used by a subsequent sd_probe(). 4237 + * This function is not called when the built-in sd driver is "exit-ed". 4238 + **/ 4239 + static void sd_remove(struct scsi_device *sdp) 4240 + { 4241 + struct device *dev = &sdp->sdev_gendev; 4242 + struct scsi_disk *sdkp = dev_get_drvdata(dev); 4243 + 4244 + scsi_autopm_get_device(sdkp->device); 4245 + 4246 + device_del(&sdkp->disk_dev); 4247 + del_gendisk(sdkp->disk); 4248 + if (!sdkp->suspended) 4249 + sd_shutdown(sdp); 4250 + 4251 + put_disk(sdkp->disk); 4189 4252 } 4190 4253 4191 4254 static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime) ··· 4357 4368 }; 4358 4369 4359 4370 static struct scsi_driver sd_template = { 4371 + .probe = sd_probe, 4372 + .remove = sd_remove, 4373 + .shutdown = sd_shutdown, 4360 4374 .gendrv = { 4361 4375 .name = "sd", 4362 - .probe = sd_probe, 4363 4376 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 4364 - .remove = sd_remove, 4365 - .shutdown = sd_shutdown, 4366 4377 .pm = &sd_pm_ops, 4367 4378 }, 4368 4379 .rescan = sd_rescan, ··· 4406 4417 goto err_out_class; 4407 4418 } 4408 4419 4409 - err = scsi_register_driver(&sd_template.gendrv); 4420 + err = scsi_register_driver(&sd_template); 4410 4421 if (err) 4411 4422 goto err_out_driver; 4412 4423 ··· 4433 4444 4434 4445 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 4435 4446 4436 - scsi_unregister_driver(&sd_template.gendrv); 4447 + scsi_unregister_driver(&sd_template); 4437 4448 mempool_destroy(sd_page_pool); 4438 4449 4439 4450 class_unregister(&sd_disk_class);
+4 -11
drivers/scsi/ses.c
··· 42 42 return (ses_dev->page2 != NULL); 43 43 } 44 44 45 - static int ses_probe(struct device *dev) 45 + static int ses_probe(struct scsi_device *sdev) 46 46 { 47 - struct scsi_device *sdev = to_scsi_device(dev); 48 47 int err = -ENODEV; 49 48 50 49 if (sdev->type != TYPE_ENCLOSURE) ··· 846 847 return err; 847 848 } 848 849 849 - static int ses_remove(struct device *dev) 850 - { 851 - return 0; 852 - } 853 - 854 850 static void ses_intf_remove_component(struct scsi_device *sdev) 855 851 { 856 852 struct enclosure_device *edev, *prev = NULL; ··· 900 906 }; 901 907 902 908 static struct scsi_driver ses_template = { 909 + .probe = ses_probe, 903 910 .gendrv = { 904 911 .name = "ses", 905 - .probe = ses_probe, 906 - .remove = ses_remove, 907 912 }, 908 913 }; 909 914 ··· 914 921 if (err) 915 922 return err; 916 923 917 - err = scsi_register_driver(&ses_template.gendrv); 924 + err = scsi_register_driver(&ses_template); 918 925 if (err) 919 926 goto out_unreg; 920 927 ··· 927 934 928 935 static void __exit ses_exit(void) 929 936 { 930 - scsi_unregister_driver(&ses_template.gendrv); 937 + scsi_unregister_driver(&ses_template); 931 938 scsi_unregister_interface(&ses_interface); 932 939 } 933 940
+12 -4
drivers/scsi/smartpqi/smartpqi_init.c
··· 1241 1241 dev_err(&ctrl_info->pci_dev->dev, 1242 1242 "RPL returned unsupported data format %u\n", 1243 1243 rpl_response_format); 1244 - return -EINVAL; 1244 + rc = -EINVAL; 1245 + goto out_free_rpl_list; 1245 1246 } else { 1246 1247 dev_warn(&ctrl_info->pci_dev->dev, 1247 1248 "RPL returned extended format 2 instead of 4\n"); ··· 1254 1253 1255 1254 rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, 1256 1255 num_physicals), GFP_KERNEL); 1257 - if (!rpl_16byte_wwid_list) 1258 - return -ENOMEM; 1256 + if (!rpl_16byte_wwid_list) { 1257 + rc = -ENOMEM; 1258 + goto out_free_rpl_list; 1259 + } 1259 1260 1260 1261 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), 1261 1262 &rpl_16byte_wwid_list->header.list_length); ··· 1278 1275 *buffer = rpl_16byte_wwid_list; 1279 1276 1280 1277 return 0; 1278 + 1279 + out_free_rpl_list: 1280 + kfree(rpl_list); 1281 + return rc; 1281 1282 } 1282 1283 1283 1284 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) ··· 6054 6047 return false; 6055 6048 } 6056 6049 6057 - static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 6050 + static enum scsi_qc_status pqi_scsi_queue_command(struct Scsi_Host *shost, 6051 + struct scsi_cmnd *scmd) 6058 6052 { 6059 6053 int rc; 6060 6054 struct pqi_ctrl_info *ctrl_info;
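The smartpqi hunk above plugs a leak: the early return -EINVAL and return -ENOMEM paths abandoned the rpl_list buffer allocated earlier in the function, so every failure now funnels through the new out_free_rpl_list label. A stand-alone sketch of the same unwind pattern, with invented names and plain C for brevity:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical sketch of the goto-unwind pattern adopted above: once the
 * first allocation succeeds, later failures jump to a label that frees it
 * instead of returning directly and leaking it. */
static int build_report(void **out)
{
    char *scratch, *result;
    int rc;

    scratch = malloc(64);
    if (!scratch)
        return -ENOMEM;            /* nothing else allocated yet */

    result = calloc(1, 128);
    if (!result) {
        rc = -ENOMEM;
        goto out_free_scratch;     /* a bare return here would leak 'scratch' */
    }

    free(scratch);                 /* success path releases the temporary itself */
    *out = result;
    return 0;

out_free_scratch:
    free(scratch);
    return rc;
}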
+2 -1
drivers/scsi/snic/snic.h
··· 362 362 extern struct workqueue_struct *snic_event_queue; 363 363 extern const struct attribute_group *snic_host_groups[]; 364 364 365 - int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); 365 + enum scsi_qc_status snic_queuecommand(struct Scsi_Host *shost, 366 + struct scsi_cmnd *sc); 366 367 int snic_abort_cmd(struct scsi_cmnd *); 367 368 int snic_device_reset(struct scsi_cmnd *); 368 369 int snic_host_reset(struct scsi_cmnd *);
+2 -2
drivers/scsi/snic/snic_scsi.c
··· 315 315 * Routine to send a scsi cdb to LLD 316 316 * Called with host_lock held and interrupts disabled 317 317 */ 318 - int 319 - snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) 318 + enum scsi_qc_status snic_queuecommand(struct Scsi_Host *shost, 319 + struct scsi_cmnd *sc) 320 320 { 321 321 struct snic_tgt *tgt = NULL; 322 322 struct snic *snic = shost_priv(shost);
+10 -11
drivers/scsi/sr.c
··· 82 82 CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ 83 83 CDC_MRW|CDC_MRW_W|CDC_RAM) 84 84 85 - static int sr_probe(struct device *); 86 - static int sr_remove(struct device *); 85 + static int sr_probe(struct scsi_device *); 86 + static void sr_remove(struct scsi_device *); 87 87 static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt); 88 88 static int sr_done(struct scsi_cmnd *); 89 89 static int sr_runtime_suspend(struct device *dev); ··· 93 93 }; 94 94 95 95 static struct scsi_driver sr_template = { 96 + .probe = sr_probe, 97 + .remove = sr_remove, 96 98 .gendrv = { 97 99 .name = "sr", 98 - .probe = sr_probe, 99 - .remove = sr_remove, 100 100 .pm = &sr_pm_ops, 101 101 }, 102 102 .init_command = sr_init_command, ··· 616 616 { 617 617 } 618 618 619 - static int sr_probe(struct device *dev) 619 + static int sr_probe(struct scsi_device *sdev) 620 620 { 621 - struct scsi_device *sdev = to_scsi_device(dev); 621 + struct device *dev = &sdev->sdev_gendev; 622 622 struct gendisk *disk; 623 623 struct scsi_cd *cd; 624 624 int minor, error; ··· 982 982 return ret; 983 983 } 984 984 985 - static int sr_remove(struct device *dev) 985 + static void sr_remove(struct scsi_device *sdev) 986 986 { 987 + struct device *dev = &sdev->sdev_gendev; 987 988 struct scsi_cd *cd = dev_get_drvdata(dev); 988 989 989 990 scsi_autopm_get_device(cd->device); 990 991 991 992 del_gendisk(cd->disk); 992 993 put_disk(cd->disk); 993 - 994 - return 0; 995 994 } 996 995 997 996 static int __init init_sr(void) ··· 1000 1001 rc = register_blkdev(SCSI_CDROM_MAJOR, "sr"); 1001 1002 if (rc) 1002 1003 return rc; 1003 - rc = scsi_register_driver(&sr_template.gendrv); 1004 + rc = scsi_register_driver(&sr_template); 1004 1005 if (rc) 1005 1006 unregister_blkdev(SCSI_CDROM_MAJOR, "sr"); 1006 1007 ··· 1009 1010 1010 1011 static void __exit exit_sr(void) 1011 1012 { 1012 - scsi_unregister_driver(&sr_template.gendrv); 1013 + scsi_unregister_driver(&sr_template); 1013 1014 unregister_blkdev(SCSI_CDROM_MAJOR, "sr"); 1014 1015 } 1015 1016
+11 -11
drivers/scsi/st.c
··· 202 202 unsigned long, size_t, int); 203 203 static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int); 204 204 205 - static int st_probe(struct device *); 206 - static int st_remove(struct device *); 205 + static int st_probe(struct scsi_device *); 206 + static void st_remove(struct scsi_device *); 207 207 208 208 static struct scsi_driver st_template = { 209 + .probe = st_probe, 210 + .remove = st_remove, 209 211 .gendrv = { 210 212 .name = "st", 211 - .probe = st_probe, 212 - .remove = st_remove, 213 213 .groups = st_drv_groups, 214 214 }, 215 215 }; ··· 4343 4343 } 4344 4344 } 4345 4345 4346 - static int st_probe(struct device *dev) 4346 + static int st_probe(struct scsi_device *SDp) 4347 4347 { 4348 - struct scsi_device *SDp = to_scsi_device(dev); 4348 + struct device *dev = &SDp->sdev_gendev; 4349 4349 struct scsi_tape *tpnt = NULL; 4350 4350 struct st_modedef *STm; 4351 4351 struct st_partstat *STps; ··· 4500 4500 }; 4501 4501 4502 4502 4503 - static int st_remove(struct device *dev) 4503 + static void st_remove(struct scsi_device *SDp) 4504 4504 { 4505 + struct device *dev = &SDp->sdev_gendev; 4505 4506 struct scsi_tape *tpnt = dev_get_drvdata(dev); 4506 4507 int index = tpnt->index; 4507 4508 4508 - scsi_autopm_get_device(to_scsi_device(dev)); 4509 + scsi_autopm_get_device(SDp); 4509 4510 remove_cdevs(tpnt); 4510 4511 4511 4512 mutex_lock(&st_ref_mutex); ··· 4515 4514 spin_lock(&st_index_lock); 4516 4515 idr_remove(&st_index_idr, index); 4517 4516 spin_unlock(&st_index_lock); 4518 - return 0; 4519 4517 } 4520 4518 4521 4519 /** ··· 4577 4577 goto err_class; 4578 4578 } 4579 4579 4580 - err = scsi_register_driver(&st_template.gendrv); 4580 + err = scsi_register_driver(&st_template); 4581 4581 if (err) 4582 4582 goto err_chrdev; 4583 4583 ··· 4593 4593 4594 4594 static void __exit exit_st(void) 4595 4595 { 4596 - scsi_unregister_driver(&st_template.gendrv); 4596 + scsi_unregister_driver(&st_template); 4597 4597 unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), 4598 4598 ST_MAX_TAPE_ENTRIES); 4599 4599 class_unregister(&st_sysfs_class);
+1 -1
drivers/scsi/stex.c
··· 593 593 return 0; 594 594 } 595 595 596 - static int stex_queuecommand_lck(struct scsi_cmnd *cmd) 596 + static enum scsi_qc_status stex_queuecommand_lck(struct scsi_cmnd *cmd) 597 597 { 598 598 void (*done)(struct scsi_cmnd *) = scsi_done; 599 599 struct st_hba *hba;
+2 -1
drivers/scsi/storvsc_drv.c
··· 1715 1715 return allowed; 1716 1716 } 1717 1717 1718 - static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) 1718 + static enum scsi_qc_status storvsc_queuecommand(struct Scsi_Host *host, 1719 + struct scsi_cmnd *scmnd) 1719 1720 { 1720 1721 int ret; 1721 1722 struct hv_host_device *host_dev = shost_priv(host);
+1 -1
drivers/scsi/sym53c8xx_2/sym_glue.c
··· 485 485 * queuecommand method. Entered with the host adapter lock held and 486 486 * interrupts disabled. 487 487 */ 488 - static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd) 488 + static enum scsi_qc_status sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd) 489 489 { 490 490 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 491 491 struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
+2 -2
drivers/scsi/virtio_scsi.c
··· 561 561 return &vscsi->req_vqs[hwq]; 562 562 } 563 563 564 - static int virtscsi_queuecommand(struct Scsi_Host *shost, 565 - struct scsi_cmnd *sc) 564 + static enum scsi_qc_status virtscsi_queuecommand(struct Scsi_Host *shost, 565 + struct scsi_cmnd *sc) 566 566 { 567 567 struct virtio_scsi *vscsi = shost_priv(shost); 568 568 struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
+1 -1
drivers/scsi/vmw_pvscsi.c
··· 771 771 return 0; 772 772 } 773 773 774 - static int pvscsi_queue_lck(struct scsi_cmnd *cmd) 774 + static enum scsi_qc_status pvscsi_queue_lck(struct scsi_cmnd *cmd) 775 775 { 776 776 struct Scsi_Host *host = cmd->device->host; 777 777 struct pvscsi_adapter *adapter = shost_priv(host);
+1 -1
drivers/scsi/wd33c93.c
··· 302 302 msg[1] = offset; 303 303 } 304 304 305 - static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd) 305 + static enum scsi_qc_status wd33c93_queuecommand_lck(struct scsi_cmnd *cmd) 306 306 { 307 307 struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); 308 308 struct WD33C93_hostdata *hostdata;
+2 -1
drivers/scsi/wd33c93.h
··· 332 332 void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, 333 333 dma_setup_t setup, dma_stop_t stop, int clock_freq); 334 334 int wd33c93_abort (struct scsi_cmnd *cmd); 335 - int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd); 335 + enum scsi_qc_status wd33c93_queuecommand(struct Scsi_Host *h, 336 + struct scsi_cmnd *cmd); 336 337 void wd33c93_intr (struct Scsi_Host *instance); 337 338 int wd33c93_show_info(struct seq_file *, struct Scsi_Host *); 338 339 int wd33c93_write_info(struct Scsi_Host *, char *, int);
+2 -1
drivers/scsi/wd719x.c
··· 204 204 } 205 205 206 206 /* Build a SCB and send it to the card */ 207 - static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 207 + static enum scsi_qc_status wd719x_queuecommand(struct Scsi_Host *sh, 208 + struct scsi_cmnd *cmd) 208 209 { 209 210 int i, count_sg; 210 211 unsigned long flags;
+2 -2
drivers/scsi/xen-scsifront.c
··· 603 603 wake_up(&info->wq_pause); 604 604 } 605 605 606 - static int scsifront_queuecommand(struct Scsi_Host *shost, 607 - struct scsi_cmnd *sc) 606 + static enum scsi_qc_status scsifront_queuecommand(struct Scsi_Host *shost, 607 + struct scsi_cmnd *sc) 608 608 { 609 609 struct vscsifrnt_info *info = shost_priv(shost); 610 610 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
+2 -1
drivers/target/loopback/tcm_loop.c
··· 165 165 * ->queuecommand can be and usually is called from interrupt context, so 166 166 * defer the actual submission to a workqueue. 167 167 */ 168 - static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) 168 + static enum scsi_qc_status tcm_loop_queuecommand(struct Scsi_Host *sh, 169 + struct scsi_cmnd *sc) 169 170 { 170 171 struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc); 171 172
+59 -9
drivers/target/target_core_configfs.c
··· 288 288 config_item_put(item); 289 289 } 290 290 291 - static struct configfs_group_operations target_core_fabric_group_ops = { 291 + static const struct configfs_group_operations target_core_fabric_group_ops = { 292 292 .make_group = &target_core_register_fabric, 293 293 .drop_item = &target_core_deregister_fabric, 294 294 }; ··· 1741 1741 return len; 1742 1742 } 1743 1743 1744 + static ssize_t target_wwn_pd_text_id_info_show(struct config_item *item, 1745 + char *page) 1746 + { 1747 + return sysfs_emit(page, "%s\n", &to_t10_wwn(item)->pd_text_id_info[0]); 1748 + } 1749 + 1750 + static ssize_t target_wwn_pd_text_id_info_store(struct config_item *item, 1751 + const char *page, size_t count) 1752 + { 1753 + struct t10_wwn *t10_wwn = to_t10_wwn(item); 1754 + struct se_device *dev = t10_wwn->t10_dev; 1755 + 1756 + /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ 1757 + unsigned char buf[PD_TEXT_ID_INFO_LEN + 2]; 1758 + char *stripped; 1759 + 1760 + /* 1761 + * Check to see if any active exports exist. If they do exist, fail 1762 + * here as changing this information on the fly (underneath the 1763 + * initiator side OS dependent multipath code) could cause negative 1764 + * effects. 1765 + */ 1766 + if (dev->export_count) { 1767 + pr_err("Unable to set the peripheral device text id info while active %d exports exist\n", 1768 + dev->export_count); 1769 + return -EINVAL; 1770 + } 1771 + 1772 + if (strscpy(buf, page, sizeof(buf)) < 0) 1773 + return -EOVERFLOW; 1774 + 1775 + /* Strip any newline added from userspace. */ 1776 + stripped = strstrip(buf); 1777 + if (strlen(stripped) >= PD_TEXT_ID_INFO_LEN) { 1778 + pr_err("Emulated peripheral device text id info exceeds PD_TEXT_ID_INFO_LEN: " __stringify(PD_TEXT_ID_INFO_LEN "\n")); 1779 + return -EOVERFLOW; 1780 + } 1781 + 1782 + BUILD_BUG_ON(sizeof(dev->t10_wwn.pd_text_id_info) != PD_TEXT_ID_INFO_LEN); 1783 + strscpy(dev->t10_wwn.pd_text_id_info, stripped, 1784 + sizeof(dev->t10_wwn.pd_text_id_info)); 1785 + 1786 + pr_debug("Target_Core_ConfigFS: Set emulated peripheral dev text id info:" 1787 + " %s\n", dev->t10_wwn.pd_text_id_info); 1788 + 1789 + return count; 1790 + } 1791 + 1744 1792 /* 1745 1793 * Generic wrapper for dumping VPD identifiers by association. 
1746 1794 */ ··· 1845 1797 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit); 1846 1798 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port); 1847 1799 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device); 1800 + CONFIGFS_ATTR(target_wwn_, pd_text_id_info); 1848 1801 1849 1802 static struct configfs_attribute *target_core_dev_wwn_attrs[] = { 1850 1803 &target_wwn_attr_vendor_id, ··· 1857 1808 &target_wwn_attr_vpd_assoc_logical_unit, 1858 1809 &target_wwn_attr_vpd_assoc_target_port, 1859 1810 &target_wwn_attr_vpd_assoc_scsi_target_device, 1811 + &target_wwn_attr_pd_text_id_info, 1860 1812 NULL, 1861 1813 }; 1862 1814 ··· 2860 2810 core_alua_free_lu_gp(lu_gp); 2861 2811 } 2862 2812 2863 - static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2813 + static const struct configfs_item_operations target_core_alua_lu_gp_ops = { 2864 2814 .release = target_core_alua_lu_gp_release, 2865 2815 }; 2866 2816 ··· 2917 2867 config_item_put(item); 2918 2868 } 2919 2869 2920 - static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2870 + static const struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2921 2871 .make_group = &target_core_alua_create_lu_gp, 2922 2872 .drop_item = &target_core_alua_drop_lu_gp, 2923 2873 }; ··· 3290 3240 core_alua_free_tg_pt_gp(tg_pt_gp); 3291 3241 } 3292 3242 3293 - static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 3243 + static const struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 3294 3244 .release = target_core_alua_tg_pt_gp_release, 3295 3245 }; 3296 3246 ··· 3348 3298 config_item_put(item); 3349 3299 } 3350 3300 3351 - static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { 3301 + static const struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { 3352 3302 .make_group = &target_core_alua_create_tg_pt_gp, 3353 3303 .drop_item = &target_core_alua_drop_tg_pt_gp, 3354 3304 }; ··· 3389 3339 return; 3390 3340 } 3391 3341 3392 - static struct configfs_group_operations target_core_stat_group_ops = { 3342 + static const struct configfs_group_operations target_core_stat_group_ops = { 3393 3343 .make_group = &target_core_stat_mkdir, 3394 3344 .drop_item = &target_core_stat_rmdir, 3395 3345 }; ··· 3516 3466 mutex_unlock(&hba->hba_access_mutex); 3517 3467 } 3518 3468 3519 - static struct configfs_group_operations target_core_hba_group_ops = { 3469 + static const struct configfs_group_operations target_core_hba_group_ops = { 3520 3470 .make_group = target_core_make_subdev, 3521 3471 .drop_item = target_core_drop_subdev, 3522 3472 }; ··· 3595 3545 NULL, 3596 3546 }; 3597 3547 3598 - static struct configfs_item_operations target_core_hba_item_ops = { 3548 + static const struct configfs_item_operations target_core_hba_item_ops = { 3599 3549 .release = target_core_hba_release, 3600 3550 }; 3601 3551 ··· 3676 3626 config_item_put(item); 3677 3627 } 3678 3628 3679 - static struct configfs_group_operations target_core_group_ops = { 3629 + static const struct configfs_group_operations target_core_group_ops = { 3680 3630 .make_group = target_core_call_addhbatotarget, 3681 3631 .drop_item = target_core_call_delhbafromtarget, 3682 3632 };
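The new target_wwn_pd_text_id_info_store() above takes a configfs write, copies it into a bounded stack buffer with strscpy(), strips a trailing newline with strstrip(), rejects text that would not fit PD_TEXT_ID_INFO_LEN, and refuses the update while exports are active; the attribute then shows up as pd_text_id_info in the device's wwn group. A hedged plain-C sketch of the same bounded-copy-then-validate handling (names invented; strscpy()/strstrip() are kernel helpers and are only approximated here):

#include <errno.h>
#include <string.h>

#define ID_INFO_LEN 256                 /* stands in for PD_TEXT_ID_INFO_LEN */

/* Hypothetical sketch: copy caller text into a bounded buffer, drop a
 * trailing newline, and refuse anything that would not fit the target. */
static int set_id_info(char dst[ID_INFO_LEN], const char *src)
{
    char buf[ID_INFO_LEN + 2];          /* +2: room for a stripped '\n' plus the NUL */
    size_t len = strlen(src);

    if (len >= sizeof(buf))
        return -EOVERFLOW;              /* mirrors the strscpy() < 0 check */

    memcpy(buf, src, len + 1);
    if (len && buf[len - 1] == '\n')
        buf[len - 1] = '\0';            /* poor man's strstrip() for the trailing newline */

    if (strlen(buf) >= ID_INFO_LEN)
        return -EOVERFLOW;              /* same idea as the PD_TEXT_ID_INFO_LEN test */

    strcpy(dst, buf);                   /* bounded by the length check above */
    return 0;
}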
+15 -15
drivers/target/target_core_fabric_configfs.c
··· 59 59 pr_debug("Setup generic %s\n", __stringify(_name)); \ 60 60 } 61 61 62 - static struct configfs_item_operations target_fabric_port_item_ops; 62 + static const struct configfs_item_operations target_fabric_port_item_ops; 63 63 64 64 /* Start of tfc_tpg_mappedlun_cit */ 65 65 ··· 219 219 core_dev_free_initiator_node_lun_acl(se_tpg, lacl); 220 220 } 221 221 222 - static struct configfs_item_operations target_fabric_mappedlun_item_ops = { 222 + static const struct configfs_item_operations target_fabric_mappedlun_item_ops = { 223 223 .release = target_fabric_mappedlun_release, 224 224 .allow_link = target_fabric_mappedlun_link, 225 225 .drop_link = target_fabric_mappedlun_unlink, ··· 246 246 return; 247 247 } 248 248 249 - static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { 249 + static const struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { 250 250 .make_group = target_core_mappedlun_stat_mkdir, 251 251 .drop_item = target_core_mappedlun_stat_rmdir, 252 252 }; ··· 345 345 core_tpg_del_initiator_node_acl(se_nacl); 346 346 } 347 347 348 - static struct configfs_item_operations target_fabric_nacl_base_item_ops = { 348 + static const struct configfs_item_operations target_fabric_nacl_base_item_ops = { 349 349 .release = target_fabric_nacl_base_release, 350 350 }; 351 351 352 - static struct configfs_group_operations target_fabric_nacl_base_group_ops = { 352 + static const struct configfs_group_operations target_fabric_nacl_base_group_ops = { 353 353 .make_group = target_fabric_make_mappedlun, 354 354 .drop_item = target_fabric_drop_mappedlun, 355 355 }; ··· 433 433 config_item_put(item); 434 434 } 435 435 436 - static struct configfs_group_operations target_fabric_nacl_group_ops = { 436 + static const struct configfs_group_operations target_fabric_nacl_group_ops = { 437 437 .make_group = target_fabric_make_nodeacl, 438 438 .drop_item = target_fabric_drop_nodeacl, 439 439 }; ··· 454 454 tf->tf_ops->fabric_drop_np(se_tpg_np); 455 455 } 456 456 457 - static struct configfs_item_operations target_fabric_np_base_item_ops = { 457 + static const struct configfs_item_operations target_fabric_np_base_item_ops = { 458 458 .release = target_fabric_np_base_release, 459 459 }; 460 460 ··· 499 499 config_item_put(item); 500 500 } 501 501 502 - static struct configfs_group_operations target_fabric_np_group_ops = { 502 + static const struct configfs_group_operations target_fabric_np_group_ops = { 503 503 .make_group = &target_fabric_make_np, 504 504 .drop_item = &target_fabric_drop_np, 505 505 }; ··· 700 700 call_rcu(&lun->rcu_head, target_tpg_free_lun); 701 701 } 702 702 703 - static struct configfs_item_operations target_fabric_port_item_ops = { 703 + static const struct configfs_item_operations target_fabric_port_item_ops = { 704 704 .release = target_fabric_port_release, 705 705 .allow_link = target_fabric_port_link, 706 706 .drop_link = target_fabric_port_unlink, ··· 726 726 return; 727 727 } 728 728 729 - static struct configfs_group_operations target_fabric_port_stat_group_ops = { 729 + static const struct configfs_group_operations target_fabric_port_stat_group_ops = { 730 730 .make_group = target_core_port_stat_mkdir, 731 731 .drop_item = target_core_port_stat_rmdir, 732 732 }; ··· 787 787 config_item_put(item); 788 788 } 789 789 790 - static struct configfs_group_operations target_fabric_lun_group_ops = { 790 + static const struct configfs_group_operations target_fabric_lun_group_ops = { 791 791 .make_group = &target_fabric_make_lun, 792 
792 .drop_item = &target_fabric_drop_lun, 793 793 }; ··· 812 812 tf->tf_ops->fabric_drop_tpg(se_tpg); 813 813 } 814 814 815 - static struct configfs_item_operations target_fabric_tpg_base_item_ops = { 815 + static const struct configfs_item_operations target_fabric_tpg_base_item_ops = { 816 816 .release = target_fabric_tpg_release, 817 817 }; 818 818 ··· 998 998 tf->tf_ops->fabric_drop_wwn(wwn); 999 999 } 1000 1000 1001 - static struct configfs_item_operations target_fabric_tpg_item_ops = { 1001 + static const struct configfs_item_operations target_fabric_tpg_item_ops = { 1002 1002 .release = target_fabric_release_wwn, 1003 1003 }; 1004 1004 1005 - static struct configfs_group_operations target_fabric_tpg_group_ops = { 1005 + static const struct configfs_group_operations target_fabric_tpg_group_ops = { 1006 1006 .make_group = target_fabric_make_tpg, 1007 1007 .drop_item = target_fabric_drop_tpg, 1008 1008 }; ··· 1144 1144 config_item_put(item); 1145 1145 } 1146 1146 1147 - static struct configfs_group_operations target_fabric_wwn_group_ops = { 1147 + static const struct configfs_group_operations target_fabric_wwn_group_ops = { 1148 1148 .make_group = target_fabric_make_wwn, 1149 1149 .drop_item = target_fabric_drop_wwn, 1150 1150 };
+86
drivers/target/target_core_spc.c
··· 26 26 #include "target_core_ua.h" 27 27 #include "target_core_xcopy.h" 28 28 29 + #define PD_TEXT_ID_INFO_HDR_LEN 4 30 + 29 31 static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) 30 32 { 31 33 struct t10_alua_tg_pt_gp *tg_pt_gp; ··· 2002 2000 .enabled = spc_rsoc_enabled, 2003 2001 }; 2004 2002 2003 + static struct target_opcode_descriptor tcm_opcode_report_identifying_information = { 2004 + .support = SCSI_SUPPORT_FULL, 2005 + .serv_action_valid = 1, 2006 + .opcode = MAINTENANCE_IN, 2007 + .service_action = MI_REPORT_IDENTIFYING_INFORMATION, 2008 + .cdb_size = 12, 2009 + .usage_bits = {MAINTENANCE_IN, MI_REPORT_IDENTIFYING_INFORMATION, 2010 + 0x00, 0x00, 2011 + 0x00, 0x00, 0xff, 0xff, 2012 + 0xff, 0xff, 0xff, SCSI_CONTROL_MASK}, 2013 + }; 2014 + 2005 2015 static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr, 2006 2016 struct se_cmd *cmd) 2007 2017 { ··· 2101 2087 &tcm_opcode_report_target_pgs, 2102 2088 &tcm_opcode_report_supp_opcodes, 2103 2089 &tcm_opcode_set_tpg, 2090 + &tcm_opcode_report_identifying_information, 2104 2091 }; 2105 2092 2106 2093 static int ··· 2319 2304 return ret; 2320 2305 } 2321 2306 2307 + static sense_reason_t 2308 + spc_fill_pd_text_id_info(struct se_cmd *cmd, u8 *cdb) 2309 + { 2310 + struct se_device *dev = cmd->se_dev; 2311 + unsigned char *buf; 2312 + unsigned char *rbuf; 2313 + u32 buf_len; 2314 + u16 data_len; 2315 + 2316 + buf_len = get_unaligned_be32(&cdb[6]); 2317 + if (buf_len < PD_TEXT_ID_INFO_HDR_LEN) 2318 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2319 + 2320 + data_len = strlen(dev->t10_wwn.pd_text_id_info); 2321 + if (data_len > 0) 2322 + /* trailing null */ 2323 + data_len += 1; 2324 + 2325 + data_len = data_len + PD_TEXT_ID_INFO_HDR_LEN; 2326 + 2327 + if (data_len < buf_len) 2328 + buf_len = data_len; 2329 + 2330 + buf = kzalloc(buf_len, GFP_KERNEL); 2331 + if (!buf) { 2332 + pr_err("Unable to allocate response buffer for IDENTITY INFO\n"); 2333 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2334 + } 2335 + 2336 + scnprintf(&buf[PD_TEXT_ID_INFO_HDR_LEN], buf_len - PD_TEXT_ID_INFO_HDR_LEN, "%s", 2337 + dev->t10_wwn.pd_text_id_info); 2338 + 2339 + put_unaligned_be16(data_len, &buf[2]); 2340 + 2341 + rbuf = transport_kmap_data_sg(cmd); 2342 + if (!rbuf) { 2343 + pr_err("transport_kmap_data_sg() failed in %s\n", __func__); 2344 + kfree(buf); 2345 + return TCM_OUT_OF_RESOURCES; 2346 + } 2347 + 2348 + memcpy(rbuf, buf, buf_len); 2349 + transport_kunmap_data_sg(cmd); 2350 + kfree(buf); 2351 + 2352 + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, buf_len); 2353 + return TCM_NO_SENSE; 2354 + } 2355 + 2356 + static sense_reason_t 2357 + spc_emulate_report_id_info(struct se_cmd *cmd) 2358 + { 2359 + u8 *cdb = cmd->t_task_cdb; 2360 + sense_reason_t rc; 2361 + 2362 + switch ((cdb[10] >> 1)) { 2363 + case 2: 2364 + rc = spc_fill_pd_text_id_info(cmd, cdb); 2365 + break; 2366 + default: 2367 + return TCM_UNSUPPORTED_SCSI_OPCODE; 2368 + } 2369 + 2370 + return rc; 2371 + } 2372 + 2322 2373 sense_reason_t 2323 2374 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) 2324 2375 { ··· 2524 2443 MI_REPORT_SUPPORTED_OPERATION_CODES) 2525 2444 cmd->execute_cmd = 2526 2445 spc_emulate_report_supp_op_codes; 2446 + if ((cdb[1] & 0x1f) == 2447 + MI_REPORT_IDENTIFYING_INFORMATION) { 2448 + cmd->execute_cmd = 2449 + spc_emulate_report_id_info; 2450 + } 2527 2451 *size = get_unaligned_be32(&cdb[6]); 2528 2452 } else { 2529 2453 /*
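The target_core_spc.c additions above advertise MAINTENANCE IN / MI_REPORT_IDENTIFYING_INFORMATION, route it through spc_parse_cdb() to spc_emulate_report_id_info(), and for information type 2 (cdb[10] >> 1) build a parameter block in spc_fill_pd_text_id_info(): a 4-byte header (PD_TEXT_ID_INFO_HDR_LEN) carrying a big-endian length at bytes 2-3, followed by the NUL-terminated text id. A loose stand-alone sketch of that layout (hypothetical helper, plain C; only the byte placement mirrors the put_unaligned_be16()/scnprintf() calls above):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ID_INFO_HDR_LEN 4               /* stands in for PD_TEXT_ID_INFO_HDR_LEN */

/* Hypothetical sketch: header-plus-text parameter block with the length
 * stored big-endian at bytes 2..3 and the text (trailing NUL included)
 * right after the header. Returns the number of bytes used. */
static size_t fill_id_info(uint8_t *buf, size_t buf_len, const char *text)
{
    size_t data_len = *text ? strlen(text) + 1 : 0;    /* count the trailing NUL */
    size_t total = ID_INFO_HDR_LEN + data_len;

    if (buf_len < ID_INFO_HDR_LEN)
        return 0;                       /* allocation too small for even a header */
    if (total > buf_len)
        total = buf_len;                /* clamp to what the initiator allocated */

    memset(buf, 0, ID_INFO_HDR_LEN);
    buf[2] = (total >> 8) & 0xff;       /* big-endian length, as put_unaligned_be16() */
    buf[3] = total & 0xff;
    if (total > ID_INFO_HDR_LEN) {
        memcpy(buf + ID_INFO_HDR_LEN, text, total - ID_INFO_HDR_LEN - 1);
        buf[total - 1] = '\0';          /* keep the copy NUL-terminated, like scnprintf() */
    }
    return total;
}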
+1 -4
drivers/ufs/core/ufs-mcq.c
··· 431 431 432 432 void ufshcd_mcq_enable_esi(struct ufs_hba *hba) 433 433 { 434 - ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, 435 - REG_UFS_MEM_CFG); 434 + ufshcd_rmwl(hba, ESI_ENABLE, ESI_ENABLE, REG_UFS_MEM_CFG); 436 435 } 437 436 EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); 438 437 ··· 444 445 445 446 int ufshcd_mcq_init(struct ufs_hba *hba) 446 447 { 447 - struct Scsi_Host *host = hba->host; 448 448 struct ufs_hw_queue *hwq; 449 449 int ret, i; 450 450 ··· 477 479 mutex_init(&hwq->sq_mutex); 478 480 } 479 481 480 - host->host_tagset = 1; 481 482 return 0; 482 483 } 483 484
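ufshcd_mcq_enable_esi() above now sets the ESI bit through ufshcd_rmwl() using the named ESI_ENABLE constant (added to ufshci.h at the end of this series) instead of the open-coded readl / OR 0x2 / writel sequence. The helper is a read-modify-write over a register; a tiny sketch of the same masking arithmetic (hypothetical function, not the kernel macro itself):

/* Hypothetical sketch of read-modify-write on a register value: clear the
 * bits selected by 'mask', then set the requested bits. Passing the same
 * bit as both mask and value, as done with ESI_ENABLE above, sets that bit
 * while leaving the rest of the register untouched. */
static inline unsigned int rmw_bits(unsigned int reg_val, unsigned int mask,
                                    unsigned int set)
{
    return (reg_val & ~mask) | (set & mask);
}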
+5 -1
drivers/ufs/core/ufs-sysfs.c
··· 141 141 if (kstrtoul(buf, 0, &value)) 142 142 return -EINVAL; 143 143 144 - if (value >= UFS_PM_LVL_MAX) 144 + if (value >= UFS_PM_LVL_MAX || value < hba->pm_lvl_min) 145 145 return -EINVAL; 146 146 147 147 if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE && ··· 1847 1847 1848 1848 static DEVICE_ATTR_WO(defrag_trigger); 1849 1849 1850 + #define UFS_HID_AVAILABLE_SIZE_INVALID 0xFFFFFFFFU 1850 1851 static ssize_t fragmented_size_show(struct device *dev, 1851 1852 struct device_attribute *attr, char *buf) 1852 1853 { ··· 1859 1858 QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value); 1860 1859 if (ret) 1861 1860 return ret; 1861 + 1862 + if (value == UFS_HID_AVAILABLE_SIZE_INVALID) 1863 + return -ENODATA; 1862 1864 1863 1865 return sysfs_emit(buf, "%u\n", value); 1864 1866 }
+1 -6
drivers/ufs/core/ufshcd-priv.h
··· 374 374 */ 375 375 static inline struct scsi_cmnd *ufshcd_tag_to_cmd(struct ufs_hba *hba, u32 tag) 376 376 { 377 - /* 378 - * Host-wide tags are enabled in MCQ mode only. See also the 379 - * host->host_tagset assignment in ufs-mcq.c. 380 - */ 381 - struct blk_mq_tags *tags = hba->host->tag_set.shared_tags ?: 382 - hba->host->tag_set.tags[0]; 377 + struct blk_mq_tags *tags = hba->host->tag_set.shared_tags; 383 378 struct request *rq = blk_mq_tag_to_rq(tags, tag); 384 379 385 380 if (WARN_ON_ONCE(!rq))
+22 -16
drivers/ufs/core/ufshcd.c
··· 284 284 285 285 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba) 286 286 { 287 - return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba); 287 + return (hba->scsi_host_added && scsi_host_busy(hba->host)) || 288 + ufshcd_has_pending_tasks(hba); 288 289 } 289 290 290 291 static const struct ufs_dev_quirk ufs_fixups[] = { ··· 680 679 681 680 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); 682 681 dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n", 683 - scsi_host_busy(hba->host), hba->outstanding_tasks); 682 + hba->scsi_host_added ? scsi_host_busy(hba->host) : 0, 683 + hba->outstanding_tasks); 684 684 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", 685 685 hba->saved_err, hba->saved_uic_err); 686 686 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", ··· 3034 3032 * 3035 3033 * Return: 0 for success, non-zero in case of failure. 3036 3034 */ 3037 - static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 3035 + static enum scsi_qc_status ufshcd_queuecommand(struct Scsi_Host *host, 3036 + struct scsi_cmnd *cmd) 3038 3037 { 3039 3038 struct ufs_hba *hba = shost_priv(host); 3040 3039 int tag = scsi_cmd_to_rq(cmd)->tag; ··· 3115 3112 return err; 3116 3113 } 3117 3114 3118 - static int ufshcd_queue_reserved_command(struct Scsi_Host *host, 3119 - struct scsi_cmnd *cmd) 3115 + static enum scsi_qc_status ufshcd_queue_reserved_command(struct Scsi_Host *host, 3116 + struct scsi_cmnd *cmd) 3120 3117 { 3121 3118 struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd); 3122 3119 struct request *rq = scsi_cmd_to_rq(cmd); ··· 9322 9319 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, 9323 9320 .max_sectors = SZ_1M / SECTOR_SIZE, 9324 9321 .max_host_blocked = 1, 9322 + .host_tagset = true, 9325 9323 .track_queue_depth = 1, 9326 9324 .skip_settle_delay = 1, 9327 9325 .sdev_groups = ufshcd_driver_groups, ··· 9999 9995 10000 9996 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 10001 9997 req_link_state == UIC_LINK_ACTIVE_STATE) { 9998 + ufshcd_disable_auto_bkops(hba); 9999 + flush_work(&hba->eeh_work); 10002 10000 goto vops_suspend; 10003 10001 } 10004 10002 ··· 10535 10529 EXPORT_SYMBOL(ufshcd_runtime_resume); 10536 10530 #endif /* CONFIG_PM */ 10537 10531 10538 - static void ufshcd_wl_shutdown(struct device *dev) 10532 + static void ufshcd_wl_shutdown(struct scsi_device *sdev) 10539 10533 { 10540 - struct scsi_device *sdev = to_scsi_device(dev); 10541 10534 struct ufs_hba *hba = shost_priv(sdev->host); 10542 10535 10543 10536 down(&hba->host_sem); ··· 11143 11138 } 11144 11139 #endif 11145 11140 11146 - static int ufshcd_wl_probe(struct device *dev) 11141 + static int ufshcd_wl_probe(struct scsi_device *sdev) 11147 11142 { 11148 - struct scsi_device *sdev = to_scsi_device(dev); 11143 + struct device *dev = &sdev->sdev_gendev; 11149 11144 11150 11145 if (!is_device_wlun(sdev)) 11151 11146 return -ENODEV; ··· 11157 11152 return 0; 11158 11153 } 11159 11154 11160 - static int ufshcd_wl_remove(struct device *dev) 11155 + static void ufshcd_wl_remove(struct scsi_device *sdev) 11161 11156 { 11157 + struct device *dev = &sdev->sdev_gendev; 11158 + 11162 11159 pm_runtime_forbid(dev); 11163 - return 0; 11164 11160 } 11165 11161 11166 11162 static const struct dev_pm_ops ufshcd_wl_pm_ops = { ··· 11234 11228 * Hence register a scsi driver for ufs wluns only. 
11235 11229 */ 11236 11230 static struct scsi_driver ufs_dev_wlun_template = { 11231 + .probe = ufshcd_wl_probe, 11232 + .remove = ufshcd_wl_remove, 11233 + .shutdown = ufshcd_wl_shutdown, 11237 11234 .gendrv = { 11238 11235 .name = "ufs_device_wlun", 11239 - .probe = ufshcd_wl_probe, 11240 - .remove = ufshcd_wl_remove, 11241 11236 .pm = &ufshcd_wl_pm_ops, 11242 - .shutdown = ufshcd_wl_shutdown, 11243 11237 }, 11244 11238 }; 11245 11239 ··· 11251 11245 11252 11246 ufs_debugfs_init(); 11253 11247 11254 - ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); 11248 + ret = scsi_register_driver(&ufs_dev_wlun_template); 11255 11249 if (ret) 11256 11250 ufs_debugfs_exit(); 11257 11251 return ret; ··· 11260 11254 static void __exit ufshcd_core_exit(void) 11261 11255 { 11262 11256 ufs_debugfs_exit(); 11263 - scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); 11257 + scsi_unregister_driver(&ufs_dev_wlun_template); 11264 11258 } 11265 11259 11266 11260 module_init(ufshcd_core_init);
+1
drivers/ufs/host/Kconfig
··· 72 72 config SCSI_UFS_MEDIATEK 73 73 tristate "Mediatek specific hooks to UFS controller platform driver" 74 74 depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK 75 + depends on PM 75 76 depends on RESET_CONTROLLER 76 77 select PHY_MTK_UFS 77 78 select RESET_TI_SYSCON
+10
drivers/ufs/host/ufs-exynos.c
··· 1568 1568 { 1569 1569 struct exynos_ufs *ufs = ufshcd_get_variant(hba); 1570 1570 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; 1571 + static const union phy_notify phystate = { 1572 + .ufs_state = PHY_UFS_HIBERN8_EXIT 1573 + }; 1571 1574 1572 1575 if (cmd == UIC_CMD_DME_HIBER_EXIT) { 1573 1576 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) 1574 1577 exynos_ufs_disable_auto_ctrl_hcc(ufs); 1575 1578 exynos_ufs_ungate_clks(ufs); 1579 + 1580 + phy_notify_state(ufs->phy, phystate); 1576 1581 1577 1582 if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { 1578 1583 static const unsigned int granularity_tbl[] = { ··· 1605 1600 static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) 1606 1601 { 1607 1602 struct exynos_ufs *ufs = ufshcd_get_variant(hba); 1603 + static const union phy_notify phystate = { 1604 + .ufs_state = PHY_UFS_HIBERN8_ENTER 1605 + }; 1608 1606 1609 1607 if (cmd == UIC_CMD_DME_HIBER_ENTER) { 1610 1608 ufs->entry_hibern8_t = ktime_get(); 1611 1609 exynos_ufs_gate_clks(ufs); 1612 1610 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) 1613 1611 exynos_ufs_enable_auto_ctrl_hcc(ufs); 1612 + 1613 + phy_notify_state(ufs->phy, phystate); 1614 1614 } 1615 1615 } 1616 1616
+3 -3
drivers/ufs/host/ufs-mediatek-trace.h
··· 33 33 TP_ARGS(name, scale_up, clk_rate), 34 34 35 35 TP_STRUCT__entry( 36 - __field(const char*, name) 36 + __string(name, name) 37 37 __field(bool, scale_up) 38 38 __field(unsigned long, clk_rate) 39 39 ), 40 40 41 41 TP_fast_assign( 42 - __entry->name = name; 42 + __assign_str(name); 43 43 __entry->scale_up = scale_up; 44 44 __entry->clk_rate = clk_rate; 45 45 ), 46 46 47 47 TP_printk("ufs: clk (%s) scaled %s @ %lu", 48 - __entry->name, 48 + __get_str(name), 49 49 __entry->scale_up ? "up" : "down", 50 50 __entry->clk_rate) 51 51 );
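The trace header change above stops storing a raw const char * in the event and instead uses __string()/__assign_str()/__get_str(), so the clock name is copied into the ring-buffer entry when the event fires rather than dereferenced later, after the caller's string may no longer be valid. A stand-alone sketch of the underlying bug class (plain C, invented names; the tracepoint macros do the copying for you):

#include <string.h>

/* Hypothetical sketch: a consumer that reads the record later must copy
 * caller-owned text, not keep a pointer to it. The pointer field is the
 * old __field(const char *, name) shape; the copy is what __string() /
 * __assign_str() provide. */
struct deferred_record {
    const char *name_ptr;       /* dangles if the caller frees or reuses the string */
    char name_copy[32];         /* safe: owned by the record itself */
};

static void record_clk_event(struct deferred_record *rec, const char *clk_name)
{
    rec->name_ptr = clk_name;                                 /* unsafe (old pattern) */
    strncpy(rec->name_copy, clk_name, sizeof(rec->name_copy) - 1);
    rec->name_copy[sizeof(rec->name_copy) - 1] = '\0';        /* safe (new pattern) */
}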
+3 -9
drivers/ufs/host/ufs-mediatek.c
··· 2437 2437 ufshcd_pltfrm_remove(pdev); 2438 2438 } 2439 2439 2440 - #ifdef CONFIG_PM_SLEEP 2441 2440 static int ufs_mtk_system_suspend(struct device *dev) 2442 2441 { 2443 2442 struct ufs_hba *hba = dev_get_drvdata(dev); ··· 2483 2484 2484 2485 return ret; 2485 2486 } 2486 - #endif 2487 2487 2488 - #ifdef CONFIG_PM 2489 2488 static int ufs_mtk_runtime_suspend(struct device *dev) 2490 2489 { 2491 2490 struct ufs_hba *hba = dev_get_drvdata(dev); ··· 2522 2525 2523 2526 return ufshcd_runtime_resume(dev); 2524 2527 } 2525 - #endif 2526 2528 2527 2529 static const struct dev_pm_ops ufs_mtk_pm_ops = { 2528 - SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend, 2529 - ufs_mtk_system_resume) 2530 - SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend, 2531 - ufs_mtk_runtime_resume, NULL) 2530 + SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend, ufs_mtk_system_resume) 2531 + RUNTIME_PM_OPS(ufs_mtk_runtime_suspend, ufs_mtk_runtime_resume, NULL) 2532 2532 .prepare = ufshcd_suspend_prepare, 2533 2533 .complete = ufshcd_resume_complete, 2534 2534 }; ··· 2535 2541 .remove = ufs_mtk_remove, 2536 2542 .driver = { 2537 2543 .name = "ufshcd-mtk", 2538 - .pm = &ufs_mtk_pm_ops, 2544 + .pm = pm_ptr(&ufs_mtk_pm_ops), 2539 2545 .of_match_table = ufs_mtk_of_match, 2540 2546 }, 2541 2547 };
+155 -1
drivers/ufs/host/ufs-qcom.c
··· 14 14 #include <linux/of.h> 15 15 #include <linux/phy/phy.h> 16 16 #include <linux/platform_device.h> 17 + #include <linux/pm_domain.h> 17 18 #include <linux/reset-controller.h> 18 19 #include <linux/time.h> 19 20 #include <linux/unaligned.h> ··· 620 619 return err; 621 620 } 622 621 622 + static int ufs_qcom_fw_managed_hce_enable_notify(struct ufs_hba *hba, 623 + enum ufs_notify_change_status status) 624 + { 625 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 626 + 627 + switch (status) { 628 + case PRE_CHANGE: 629 + ufs_qcom_select_unipro_mode(host); 630 + break; 631 + case POST_CHANGE: 632 + ufs_qcom_enable_hw_clk_gating(hba); 633 + ufs_qcom_ice_enable(host); 634 + break; 635 + default: 636 + dev_err(hba->dev, "Invalid status %d\n", status); 637 + return -EINVAL; 638 + } 639 + 640 + return 0; 641 + } 642 + 623 643 /** 624 644 * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers 625 645 * ··· 806 784 usleep_range(50, 100); 807 785 ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG); 808 786 ufshcd_readl(hba, UFS_MEM_ICE_CFG); 787 + } 788 + 789 + return ufs_qcom_ice_resume(host); 790 + } 791 + 792 + static int ufs_qcom_fw_managed_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 793 + enum ufs_notify_change_status status) 794 + { 795 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 796 + 797 + if (status == PRE_CHANGE) 798 + return 0; 799 + 800 + pm_runtime_put_sync(hba->dev); 801 + 802 + return ufs_qcom_ice_suspend(host); 803 + } 804 + 805 + static int ufs_qcom_fw_managed_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 806 + { 807 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 808 + int err; 809 + 810 + err = pm_runtime_resume_and_get(hba->dev); 811 + if (err) { 812 + dev_err(hba->dev, "PM runtime resume failed: %d\n", err); 813 + return err; 809 814 } 810 815 811 816 return ufs_qcom_ice_resume(host); ··· 1470 1421 phy_exit(host->generic_phy); 1471 1422 } 1472 1423 1424 + static int ufs_qcom_fw_managed_init(struct ufs_hba *hba) 1425 + { 1426 + struct device *dev = hba->dev; 1427 + struct ufs_qcom_host *host; 1428 + int err; 1429 + 1430 + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 1431 + if (!host) 1432 + return -ENOMEM; 1433 + 1434 + host->hba = hba; 1435 + ufshcd_set_variant(hba, host); 1436 + 1437 + ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, 1438 + &host->hw_ver.minor, &host->hw_ver.step); 1439 + 1440 + err = ufs_qcom_ice_init(host); 1441 + if (err) 1442 + goto out_variant_clear; 1443 + 1444 + ufs_qcom_get_default_testbus_cfg(host); 1445 + err = ufs_qcom_testbus_config(host); 1446 + if (err) 1447 + /* Failure is non-fatal */ 1448 + dev_warn(dev, "Failed to configure the testbus %d\n", err); 1449 + 1450 + hba->caps |= UFSHCD_CAP_WB_EN; 1451 + 1452 + ufs_qcom_advertise_quirks(hba); 1453 + host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; 1454 + 1455 + hba->spm_lvl = hba->rpm_lvl = hba->pm_lvl_min = UFS_PM_LVL_5; 1456 + 1457 + ufs_qcom_set_host_params(hba); 1458 + ufs_qcom_parse_gear_limits(hba); 1459 + 1460 + return 0; 1461 + 1462 + out_variant_clear: 1463 + ufshcd_set_variant(hba, NULL); 1464 + return err; 1465 + } 1466 + 1467 + static void ufs_qcom_fw_managed_exit(struct ufs_hba *hba) 1468 + { 1469 + pm_runtime_put_sync(hba->dev); 1470 + } 1471 + 1473 1472 /** 1474 1473 * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles 1475 1474 * ··· 2047 1950 return 0; 2048 1951 } 2049 1952 1953 + /** 1954 + * ufs_qcom_fw_managed_device_reset - Reset UFS device under FW-managed design 1955 + * @hba: pointer to UFS host bus 
adapter 1956 + * 1957 + * In the firmware-managed reset model, the power domain is powered on by genpd 1958 + * before the UFS controller driver probes. For subsequent resets (such as 1959 + * suspend/resume or recovery), the UFS driver must explicitly invoke PM runtime 1960 + * 1961 + * Return: 0 on success or a negative error code on failure. 1962 + */ 1963 + static int ufs_qcom_fw_managed_device_reset(struct ufs_hba *hba) 1964 + { 1965 + static bool is_boot = true; 1966 + int err; 1967 + 1968 + /* Skip reset on cold boot; perform it on subsequent calls */ 1969 + if (is_boot) { 1970 + is_boot = false; 1971 + return 0; 1972 + } 1973 + 1974 + pm_runtime_put_sync(hba->dev); 1975 + err = pm_runtime_resume_and_get(hba->dev); 1976 + if (err < 0) { 1977 + dev_err(hba->dev, "PM runtime resume failed: %d\n", err); 1978 + return err; 1979 + } 1980 + 1981 + return 0; 1982 + } 1983 + 2050 1984 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, 2051 1985 struct devfreq_dev_profile *p, 2052 1986 struct devfreq_simple_ondemand_data *d) ··· 2357 2229 .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed, 2358 2230 }; 2359 2231 2232 + static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = { 2233 + .name = "qcom-sa8255p", 2234 + .init = ufs_qcom_fw_managed_init, 2235 + .exit = ufs_qcom_fw_managed_exit, 2236 + .hce_enable_notify = ufs_qcom_fw_managed_hce_enable_notify, 2237 + .pwr_change_notify = ufs_qcom_pwr_change_notify, 2238 + .apply_dev_quirks = ufs_qcom_apply_dev_quirks, 2239 + .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, 2240 + .suspend = ufs_qcom_fw_managed_suspend, 2241 + .resume = ufs_qcom_fw_managed_resume, 2242 + .dbg_register_dump = ufs_qcom_dump_dbg_regs, 2243 + .device_reset = ufs_qcom_fw_managed_device_reset, 2244 + }; 2245 + 2360 2246 /** 2361 2247 * ufs_qcom_probe - probe routine of the driver 2362 2248 * @pdev: pointer to Platform device handle ··· 2381 2239 { 2382 2240 int err; 2383 2241 struct device *dev = &pdev->dev; 2242 + const struct ufs_hba_variant_ops *vops; 2243 + const struct ufs_qcom_drvdata *drvdata = device_get_match_data(dev); 2244 + 2245 + if (drvdata && drvdata->vops) 2246 + vops = drvdata->vops; 2247 + else 2248 + vops = &ufs_hba_qcom_vops; 2384 2249 2385 2250 /* Perform generic probe */ 2386 - err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); 2251 + err = ufshcd_pltfrm_init(pdev, vops); 2387 2252 if (err) 2388 2253 return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n"); 2389 2254 ··· 2418 2269 .no_phy_retention = true, 2419 2270 }; 2420 2271 2272 + static const struct ufs_qcom_drvdata ufs_qcom_sa8255p_drvdata = { 2273 + .vops = &ufs_hba_qcom_sa8255p_vops 2274 + }; 2275 + 2421 2276 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = { 2422 2277 { .compatible = "qcom,ufshc" }, 2423 2278 { .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata }, 2424 2279 { .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata }, 2280 + { .compatible = "qcom,sa8255p-ufshc", .data = &ufs_qcom_sa8255p_drvdata }, 2425 2281 {}, 2426 2282 }; 2427 2283 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
+1
drivers/ufs/host/ufs-qcom.h
··· 313 313 struct ufs_qcom_drvdata { 314 314 enum ufshcd_quirks quirks; 315 315 bool no_phy_retention; 316 + const struct ufs_hba_variant_ops *vops; 316 317 }; 317 318 318 319 static inline u32
+3 -3
drivers/usb/image/microtek.c
··· 355 355 return result ? FAILED : SUCCESS; 356 356 } 357 357 358 - static int 359 - mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb); 358 + static enum scsi_qc_status mts_scsi_queuecommand(struct Scsi_Host *shost, 359 + struct scsi_cmnd *srb); 360 360 361 361 static void mts_transfer_cleanup( struct urb *transfer ); 362 362 static void mts_do_sg(struct urb * transfer); ··· 559 559 desc->context.data_pipe = pipe; 560 560 } 561 561 562 - static int mts_scsi_queuecommand_lck(struct scsi_cmnd *srb) 562 + static enum scsi_qc_status mts_scsi_queuecommand_lck(struct scsi_cmnd *srb) 563 563 { 564 564 mts_scsi_cmnd_callback callback = scsi_done; 565 565 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
+1 -1
drivers/usb/storage/scsiglue.c
··· 357 357 358 358 /* queue a command */ 359 359 /* This is always called with scsi_lock(host) held */ 360 - static int queuecommand_lck(struct scsi_cmnd *srb) 360 + static enum scsi_qc_status queuecommand_lck(struct scsi_cmnd *srb) 361 361 { 362 362 void (*done)(struct scsi_cmnd *) = scsi_done; 363 363 struct us_data *us = host_to_us(srb->device->host);
+1 -1
drivers/usb/storage/uas.c
··· 636 636 return 0; 637 637 } 638 638 639 - static int uas_queuecommand_lck(struct scsi_cmnd *cmnd) 639 + static enum scsi_qc_status uas_queuecommand_lck(struct scsi_cmnd *cmnd) 640 640 { 641 641 struct scsi_device *sdev = cmnd->device; 642 642 struct uas_dev_info *devinfo = sdev->hostdata;
+2 -1
include/linux/libata.h
··· 1150 1150 #else 1151 1151 #define ATA_SCSI_COMPAT_IOCTL /* empty */ 1152 1152 #endif 1153 - extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); 1153 + extern enum scsi_qc_status ata_scsi_queuecmd(struct Scsi_Host *h, 1154 + struct scsi_cmnd *cmd); 1154 1155 #if IS_REACHABLE(CONFIG_ATA) 1155 1156 bool ata_scsi_dma_need_drain(struct request *rq); 1156 1157 #else
+1
include/linux/transport_class.h
··· 56 56 struct transport_container { 57 57 struct attribute_container ac; 58 58 const struct attribute_group *statistics; 59 + const struct attribute_group *encryption; 59 60 }; 60 61 61 62 #define attribute_container_to_transport_container(x) \
+2 -1
include/scsi/libfc.h
··· 959 959 /* 960 960 * SCSI INTERACTION LAYER 961 961 *****************************/ 962 - int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); 962 + enum scsi_qc_status fc_queuecommand(struct Scsi_Host *shost, 963 + struct scsi_cmnd *cmnd); 963 964 int fc_eh_abort(struct scsi_cmnd *); 964 965 int fc_eh_device_reset(struct scsi_cmnd *); 965 966 int fc_eh_host_reset(struct scsi_cmnd *);
+2 -1
include/scsi/libiscsi.h
··· 392 392 extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); 393 393 extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); 394 394 extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); 395 - extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc); 395 + extern enum scsi_qc_status iscsi_queuecommand(struct Scsi_Host *host, 396 + struct scsi_cmnd *sc); 396 397 extern enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc); 397 398 398 399 /*
+2 -1
include/scsi/libsas.h
··· 689 689 690 690 int sas_phy_reset(struct sas_phy *phy, int hard_reset); 691 691 int sas_phy_enable(struct sas_phy *phy, int enable); 692 - extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); 692 + extern enum scsi_qc_status sas_queuecommand(struct Scsi_Host *host, 693 + struct scsi_cmnd *cmd); 693 694 extern int sas_target_alloc(struct scsi_target *); 694 695 int sas_sdev_configure(struct scsi_device *dev, struct queue_limits *lim); 695 696 extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
+8 -5
include/scsi/scsi.h
··· 106 106 }; 107 107 108 108 /* 109 - * Midlevel queue return values. 109 + * Status values returned by the .queuecommand() callback if a command has not 110 + * been queued. 110 111 */ 111 - #define SCSI_MLQUEUE_HOST_BUSY 0x1055 112 - #define SCSI_MLQUEUE_DEVICE_BUSY 0x1056 113 - #define SCSI_MLQUEUE_EH_RETRY 0x1057 114 - #define SCSI_MLQUEUE_TARGET_BUSY 0x1058 112 + enum scsi_qc_status { 113 + SCSI_MLQUEUE_HOST_BUSY = 0x1055, 114 + SCSI_MLQUEUE_DEVICE_BUSY = 0x1056, 115 + SCSI_MLQUEUE_EH_RETRY = 0x1057, 116 + SCSI_MLQUEUE_TARGET_BUSY = 0x1058, 117 + }; 115 118 116 119 /* 117 120 * Use these to separate status msg and our bytes
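With the SCSI_MLQUEUE_* values becoming enum scsi_qc_status, a .queuecommand() implementation now has a typed return: 0 once the command has been queued, or one of the enumerators when the host cannot accept it, which is what the wholesale signature conversion across the drivers in this series reflects. A minimal hedged sketch of a conforming callback (driver structure and helpers are invented):

/* Hypothetical sketch of a .queuecommand() callback under the typed return:
 * hand the command to the hardware and return 0, or report back-pressure
 * with one of the enum scsi_qc_status values so the midlayer retries. */
static enum scsi_qc_status demo_queuecommand(struct Scsi_Host *shost,
                                             struct scsi_cmnd *cmd)
{
    struct demo_host *dh = shost_priv(shost);    /* driver-private data (invented) */

    if (!demo_hw_can_accept(dh))                 /* invented capacity check */
        return SCSI_MLQUEUE_HOST_BUSY;           /* retried by the midlayer later */

    demo_hw_submit(dh, cmd);                     /* invented submission helper */
    return 0;                                    /* command queued successfully */
}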
+5 -2
include/scsi/scsi_driver.h
··· 12 12 struct scsi_driver { 13 13 struct device_driver gendrv; 14 14 15 + int (*probe)(struct scsi_device *); 16 + void (*remove)(struct scsi_device *); 17 + void (*shutdown)(struct scsi_device *); 15 18 int (*resume)(struct device *); 16 19 void (*rescan)(struct device *); 17 20 blk_status_t (*init_command)(struct scsi_cmnd *); ··· 28 25 29 26 #define scsi_register_driver(drv) \ 30 27 __scsi_register_driver(drv, THIS_MODULE) 31 - int __scsi_register_driver(struct device_driver *, struct module *); 28 + int __scsi_register_driver(struct scsi_driver *, struct module *); 32 29 #define scsi_unregister_driver(drv) \ 33 - driver_unregister(drv); 30 + driver_unregister(&(drv)->gendrv); 34 31 35 32 extern int scsi_register_interface(struct class_interface *); 36 33 #define scsi_unregister_interface(intf) \
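struct scsi_driver now carries typed probe/remove/shutdown callbacks that take a struct scsi_device * (remove and shutdown return void), and scsi_register_driver()/scsi_unregister_driver() operate on the scsi_driver itself instead of its embedded gendrv; that is the shape the sd, sr, st, ses and UFS WLUN hunks above convert to. A minimal hedged sketch of an upper-level driver in the new form (name and bodies invented):

/* Hypothetical sketch of an upper-level SCSI driver on the reworked
 * struct scsi_driver: the callbacks receive the scsi_device directly, so
 * the old to_scsi_device(dev) boilerplate disappears, and registration
 * takes the scsi_driver rather than &template.gendrv. */
static int demo_probe(struct scsi_device *sdev)
{
    if (sdev->type != TYPE_DISK)        /* only claim devices this driver handles */
        return -ENODEV;
    return 0;
}

static void demo_remove(struct scsi_device *sdev)
{
    /* tear down per-device state; there is no return value anymore */
}

static struct scsi_driver demo_template = {
    .probe  = demo_probe,
    .remove = demo_remove,
    .gendrv = {
        .name = "demo",
    },
};

static int __init demo_init(void)
{
    return scsi_register_driver(&demo_template);
}

static void __exit demo_exit(void)
{
    scsi_unregister_driver(&demo_template);
}

module_init(demo_init);
module_exit(demo_exit);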
+8 -4
include/scsi/scsi_host.h
··· 84 84 * 85 85 * STATUS: REQUIRED 86 86 */ 87 - int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); 87 + enum scsi_qc_status (*queuecommand)(struct Scsi_Host *, 88 + struct scsi_cmnd *); 88 89 89 90 /* 90 91 * Queue a reserved command (BLK_MQ_REQ_RESERVED). The .queuecommand() 91 92 * documentation also applies to the .queue_reserved_command() callback. 92 93 */ 93 - int (*queue_reserved_command)(struct Scsi_Host *, struct scsi_cmnd *); 94 + enum scsi_qc_status (*queue_reserved_command)(struct Scsi_Host *, 95 + struct scsi_cmnd *); 94 96 95 97 /* 96 98 * The commit_rqs function is used to trigger a hardware ··· 527 525 * 528 526 */ 529 527 #define DEF_SCSI_QCMD(func_name) \ 530 - int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \ 528 + enum scsi_qc_status func_name(struct Scsi_Host *shost, \ 529 + struct scsi_cmnd *cmd) \ 531 530 { \ 532 531 unsigned long irq_flags; \ 533 - int rc; \ 532 + enum scsi_qc_status rc; \ 533 + \ 534 534 spin_lock_irqsave(shost->host_lock, irq_flags); \ 535 535 rc = func_name##_lck(cmd); \ 536 536 spin_unlock_irqrestore(shost->host_lock, irq_flags); \
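DEF_SCSI_QCMD keeps working for the drivers above that retain a <name>_lck() body (wd33c93, stex, uas, usb-storage, microtek and friends): the _lck function runs with shost->host_lock held, and the macro now emits a wrapper with the enum scsi_qc_status return expected by .queuecommand(). A hedged sketch of the pattern (names invented):

/* Hypothetical sketch of the locked-queuecommand pattern: the _lck variant
 * does the work under the host lock, and DEF_SCSI_QCMD() generates the
 * public wrapper that takes and releases the lock around the call. */
static enum scsi_qc_status demo_queuecommand_lck(struct scsi_cmnd *cmd)
{
    struct demo_host *dh = shost_priv(cmd->device->host);   /* invented private data */

    if (!demo_hw_can_accept(dh))                             /* invented capacity check */
        return SCSI_MLQUEUE_HOST_BUSY;

    demo_hw_submit(dh, cmd);                                 /* invented submission helper */
    return 0;
}

DEF_SCSI_QCMD(demo_queuecommand)

/* demo_queuecommand() now matches the .queuecommand() prototype in the
 * scsi_host_template hunk above. */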
+12
include/scsi/scsi_transport_fc.h
··· 317 317 u64 cn_device_specific; 318 318 }; 319 319 320 + #define FC_RPORT_ENCRYPTION_STATUS_MAX_LEN 14 321 + /* 322 + * Encryption Information 323 + */ 324 + struct fc_encryption_info { 325 + /* Encryption Status */ 326 + u8 status; 327 + }; 328 + 320 329 /* Macro for use in defining Remote Port attributes */ 321 330 #define FC_RPORT_ATTR(_name,_mode,_show,_store) \ 322 331 struct device_attribute dev_attr_rport_##_name = \ ··· 373 364 u64 port_name; 374 365 u32 port_id; 375 366 u32 roles; 367 + struct fc_encryption_info enc_info; 376 368 enum fc_port_state port_state; /* Will only be ONLINE or UNKNOWN */ 377 369 u32 scsi_target_id; 378 370 u32 fast_io_fail_tmo; ··· 700 690 701 691 struct fc_host_statistics * (*get_fc_host_stats)(struct Scsi_Host *); 702 692 void (*reset_fc_host_stats)(struct Scsi_Host *); 693 + 694 + struct fc_encryption_info * (*get_fc_rport_enc_info)(struct fc_rport *); 703 695 704 696 int (*issue_fc_host_lip)(struct Scsi_Host *); 705 697
+4
include/target/target_core_base.h
··· 108 108 #define SE_MODE_PAGE_BUF 512 109 109 #define SE_SENSE_BUF 96 110 110 111 + /* Peripheral Device Text Identification Information */ 112 + #define PD_TEXT_ID_INFO_LEN 256 113 + 111 114 enum target_submit_type { 112 115 /* Use the fabric driver's default submission type */ 113 116 TARGET_FABRIC_DEFAULT_SUBMIT, ··· 351 348 struct se_device *t10_dev; 352 349 struct config_group t10_wwn_group; 353 350 struct list_head t10_vpd_list; 351 + char pd_text_id_info[PD_TEXT_ID_INFO_LEN]; 354 352 }; 355 353 356 354 struct t10_pr_registration {
+8 -9
include/uapi/scsi/scsi_bsg_ufs.h
··· 94 94 }; 95 95 96 96 /** 97 - * struct utp_upiu_query - upiu request buffer structure for 98 - * query request. 99 - * @opcode: command to perform B-0 100 - * @idn: a value that indicates the particular type of data B-1 101 - * @index: Index to further identify data B-2 102 - * @selector: Index to further identify data B-3 97 + * struct utp_upiu_query - QUERY REQUEST UPIU structure. 98 + * @opcode: query function to perform B-0 99 + * @idn: descriptor or attribute identification number B-1 100 + * @index: Index that further identifies which data to access B-2 101 + * @selector: Index that further identifies which data to access B-3 103 102 * @reserved_osf: spec reserved field B-4,5 104 - * @length: number of descriptor bytes to read/write B-6,7 105 - * @value: Attribute value to be written DW-5 106 - * @reserved: spec reserved DW-6,7 103 + * @length: number of descriptor bytes to read or write B-6,7 104 + * @value: if @opcode == UPIU_QUERY_OPCODE_WRITE_ATTR, the value to be written B-6,7 105 + * @reserved: reserved for future use DW-6,7 107 106 */ 108 107 struct utp_upiu_query { 109 108 __u8 opcode;
+4 -1
include/ufs/ufs.h
··· 21 21 * in this header file of the size of struct utp_upiu_header. 22 22 */ 23 23 static_assert(sizeof(struct utp_upiu_header) == 12); 24 + static_assert(sizeof(struct utp_upiu_query) == 20); 24 25 25 26 #define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req)) 26 27 #define QUERY_DESC_MAX_SIZE 255 ··· 562 561 #define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10) 563 562 564 563 /** 565 - * struct utp_cmd_rsp - Response UPIU structure 564 + * struct utp_cmd_rsp - RESPONSE UPIU structure 566 565 * @residual_transfer_count: Residual transfer count DW-3 567 566 * @reserved: Reserved double words DW-4 to DW-7 568 567 * @sense_data_len: Sense data length DW-8 U16 ··· 574 573 __be16 sense_data_len; 575 574 u8 sense_data[UFS_SENSE_SIZE]; 576 575 }; 576 + 577 + static_assert(sizeof(struct utp_cmd_rsp) == 40); 577 578 578 579 /** 579 580 * struct utp_upiu_rsp - general upiu response structure
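The ufs.h hunk above adds compile-time checks that sizeof(struct utp_upiu_query) is 20 and sizeof(struct utp_cmd_rsp) is 40, so a stray field or padding change breaks the build instead of the wire format. A stand-alone sketch of the same idea in plain C11 (structure name invented; the kernel takes static_assert from <linux/build_bug.h>):

#include <assert.h>     /* C11 static_assert; userspace stand-in for <linux/build_bug.h> */
#include <stdint.h>

/* Hypothetical wire structure: 4 + 16 + 2 + 18 = 40 bytes with no padding,
 * the same size the RESPONSE UPIU assert above pins down. */
struct demo_cmd_rsp {
    uint32_t residual_transfer_count;
    uint32_t reserved[4];
    uint16_t sense_data_len;
    uint8_t  sense_data[18];
};

/* Fails the build, not the protocol, if anyone perturbs the layout. */
static_assert(sizeof(struct demo_cmd_rsp) == 40,
              "demo_cmd_rsp must stay 40 bytes on the wire");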
+2 -4
include/ufs/ufshcd.h
··· 834 834 * @uic_link_state: active state of the link to the UFS device. 835 835 * @rpm_lvl: desired UFS power management level during runtime PM. 836 836 * @spm_lvl: desired UFS power management level during system PM. 837 + * @pm_lvl_min: minimum supported power management level. 837 838 * @pm_op_in_progress: whether or not a PM operation is in progress. 838 839 * @ahit: value of Auto-Hibernate Idle Timer register. 839 840 * @outstanding_tasks: Bits representing outstanding task requests ··· 973 972 enum ufs_pm_level rpm_lvl; 974 973 /* Desired UFS power management level during system PM */ 975 974 enum ufs_pm_level spm_lvl; 975 + enum ufs_pm_level pm_lvl_min; 976 976 int pm_op_in_progress; 977 977 978 978 /* Auto-Hibernate Idle Timer register value */ ··· 1344 1342 return hba->priv; 1345 1343 } 1346 1344 1347 - #ifdef CONFIG_PM 1348 1345 extern int ufshcd_runtime_suspend(struct device *dev); 1349 1346 extern int ufshcd_runtime_resume(struct device *dev); 1350 - #endif 1351 - #ifdef CONFIG_PM_SLEEP 1352 1347 extern int ufshcd_system_suspend(struct device *dev); 1353 1348 extern int ufshcd_system_resume(struct device *dev); 1354 1349 extern int ufshcd_system_freeze(struct device *dev); 1355 1350 extern int ufshcd_system_thaw(struct device *dev); 1356 1351 extern int ufshcd_system_restore(struct device *dev); 1357 - #endif 1358 1352 1359 1353 extern int ufshcd_dme_reset(struct ufs_hba *hba); 1360 1354 extern int ufshcd_dme_enable(struct ufs_hba *hba);
+1
include/ufs/ufshci.h
··· 288 288 289 289 /* REG_UFS_MEM_CFG - Global Config Registers 300h */ 290 290 #define MCQ_MODE_SELECT BIT(0) 291 + #define ESI_ENABLE BIT(1) 291 292 292 293 /* CQISy - CQ y Interrupt Status Register */ 293 294 #define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1