Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
"Usual driver updates (ufs, lpfc, fnic, target, mpi3mr).

The substantive core changes are adding a 'serial' sysfs attribute and
getting sd to support > PAGE_SIZE sectors"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (98 commits)
scsi: target: Don't validate ignored fields in PROUT PREEMPT
scsi: qla2xxx: Use nr_cpu_ids instead of NR_CPUS for qp_cpu_map allocation
scsi: ufs: core: Disable timestamp for Kioxia THGJFJT0E25BAIP
scsi: mpi3mr: Fix typo
scsi: sd: fix missing put_disk() when device_add(&disk_dev) fails
scsi: libsas: Delete unused to_dom_device() and to_dev_attr()
scsi: storvsc: Handle PERSISTENT_RESERVE_IN truncation for Hyper-V vFC
scsi: iscsi_tcp: Remove unneeded selections of CRYPTO and CRYPTO_MD5
scsi: lpfc: Update lpfc version to 15.0.0.0
scsi: lpfc: Add PCI ID support for LPe42100 series adapters
scsi: lpfc: Introduce 128G link speed selection and support
scsi: lpfc: Check ASIC_ID register to aid diagnostics during failed fw updates
scsi: lpfc: Update construction of SGL when XPSGL is enabled
scsi: lpfc: Remove deprecated PBDE feature
scsi: lpfc: Add REG_VFI mailbox cmd error handling
scsi: lpfc: Log MCQE contents for mbox commands with no context
scsi: lpfc: Select mailbox rq_create cmd version based on SLI4 if_type
scsi: lpfc: Break out of IRQ affinity assignment when mask reaches nr_cpu_ids
scsi: ufs: core: Make the header files self-contained
scsi: ufs: core: Remove an include directive from ufshcd-crypto.h
...

+4076 -822
+23
Documentation/ABI/testing/sysfs-driver-ufs
··· 1768 1768 ==================== =========================== 1769 1769 1770 1770 The attribute is read only. 1771 + 1772 + What: /sys/bus/platform/drivers/ufshcd/*/dme_qos_notification 1773 + What: /sys/bus/platform/devices/*.ufs/dme_qos_notification 1774 + Date: March 2026 1775 + Contact: Can Guo <can.guo@oss.qualcomm.com> 1776 + Description: 1777 + This attribute reports and clears pending DME (Device Management 1778 + Entity) Quality of Service (QoS) notifications. This attribute 1779 + is a bitfield with the following bit assignments: 1780 + 1781 + Bit Description 1782 + === ====================================== 1783 + 0 DME QoS Monitor has been reset by host 1784 + 1 QoS from TX is detected 1785 + 2 QoS from RX is detected 1786 + 3 QoS from PA_INIT is detected 1787 + 1788 + Reading this attribute returns the pending DME QoS notification 1789 + bits. Writing '0' to this attribute clears pending DME QoS 1790 + notification bits. Writing any non-zero value is invalid and 1791 + will be rejected. 1792 + 1793 + The attribute is read/write.
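
Editor's note: as a companion to the ABI text above, here is a minimal user-space sketch of the documented protocol — read the attribute to fetch the pending bits, then write '0' to clear them. The device path is a hypothetical placeholder (real *.ufs instance names vary by platform); only the bit layout and the write-zero-to-clear rule come from the documentation.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical instance path; substitute the real *.ufs device */
        const char *path =
                "/sys/bus/platform/devices/example.ufs/dme_qos_notification";
        char buf[16];
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        ssize_t n = read(fd, buf, sizeof(buf) - 1);
        if (n < 0) {
                perror("read");
                close(fd);
                return 1;
        }
        buf[n] = '\0';
        unsigned long bits = strtoul(buf, NULL, 0);
        if (bits & 0x2)         /* bit 1: QoS from TX */
                printf("QoS event detected on TX\n");
        if (bits & 0x4)         /* bit 2: QoS from RX */
                printf("QoS event detected on RX\n");
        if (write(fd, "0", 1) < 0)      /* only '0' is accepted */
                perror("write");
        close(fd);
        return 0;
}
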
+23 -15
Documentation/devicetree/bindings/ufs/qcom,sc7180-ufshc.yaml
··· 15 15 compatible: 16 16 contains: 17 17 enum: 18 + - qcom,milos-ufshc 18 19 - qcom,msm8998-ufshc 19 20 - qcom,qcs8300-ufshc 20 21 - qcom,sa8775p-ufshc ··· 32 31 33 32 properties: 34 33 compatible: 35 - items: 36 - - enum: 37 - - qcom,msm8998-ufshc 38 - - qcom,qcs8300-ufshc 39 - - qcom,sa8775p-ufshc 40 - - qcom,sc7180-ufshc 41 - - qcom,sc7280-ufshc 42 - - qcom,sc8180x-ufshc 43 - - qcom,sc8280xp-ufshc 44 - - qcom,sm8250-ufshc 45 - - qcom,sm8350-ufshc 46 - - qcom,sm8450-ufshc 47 - - qcom,sm8550-ufshc 48 - - const: qcom,ufshc 49 - - const: jedec,ufs-2.0 34 + oneOf: 35 + - items: 36 + - enum: 37 + - qcom,x1e80100-ufshc 38 + - const: qcom,sm8550-ufshc 39 + - const: qcom,ufshc 40 + - items: 41 + - enum: 42 + - qcom,milos-ufshc 43 + - qcom,msm8998-ufshc 44 + - qcom,qcs8300-ufshc 45 + - qcom,sa8775p-ufshc 46 + - qcom,sc7180-ufshc 47 + - qcom,sc7280-ufshc 48 + - qcom,sc8180x-ufshc 49 + - qcom,sc8280xp-ufshc 50 + - qcom,sm8250-ufshc 51 + - qcom,sm8350-ufshc 52 + - qcom,sm8450-ufshc 53 + - qcom,sm8550-ufshc 54 + - const: qcom,ufshc 55 + - const: jedec,ufs-2.0 50 56 51 57 reg: 52 58 maxItems: 1
+14
Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
··· 15 15 compatible: 16 16 contains: 17 17 enum: 18 + - qcom,eliza-ufshc 18 19 - qcom,kaanapali-ufshc 19 20 - qcom,sm8650-ufshc 20 21 - qcom,sm8750-ufshc ··· 26 25 compatible: 27 26 items: 28 27 - enum: 28 + - qcom,eliza-ufshc 29 29 - qcom,kaanapali-ufshc 30 30 - qcom,sm8650-ufshc 31 31 - qcom,sm8750-ufshc ··· 68 66 69 67 allOf: 70 68 - $ref: qcom,ufs-common.yaml 69 + - if: 70 + properties: 71 + compatible: 72 + contains: 73 + enum: 74 + - qcom,eliza-ufshc 75 + then: 76 + properties: 77 + reg: 78 + minItems: 2 79 + reg-names: 80 + minItems: 2 71 81 72 82 unevaluatedProperties: false 73 83
+4 -3
Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml
··· 41 41 maxItems: 1 42 42 43 43 resets: 44 - maxItems: 4 44 + maxItems: 5 45 45 46 46 reset-names: 47 47 items: ··· 49 49 - const: sys 50 50 - const: ufs 51 51 - const: grf 52 + - const: mphy 52 53 53 54 reset-gpios: 54 55 maxItems: 1 ··· 99 98 interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>; 100 99 power-domains = <&power RK3576_PD_USB>; 101 100 resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>, <&cru SRST_A_UFS>, 102 - <&cru SRST_P_UFS_GRF>; 103 - reset-names = "biu", "sys", "ufs", "grf"; 101 + <&cru SRST_P_UFS_GRF>, <&cru SRST_MPHY_INIT>; 102 + reset-names = "biu", "sys", "ufs", "grf", "mphy"; 104 103 reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>; 105 104 }; 106 105 };
+1
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 3925 3925 .tfc_wwn_attrs = srpt_wwn_attrs, 3926 3926 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, 3927 3927 3928 + .default_compl_type = TARGET_QUEUE_COMPL, 3928 3929 .default_submit_type = TARGET_DIRECT_SUBMIT, 3929 3930 .direct_submit_supp = 1, 3930 3931 };
+2 -2
drivers/scsi/BusLogic.c
··· 1632 1632 /* 1633 1633 Initialize the Host Adapter Full Model Name from the Model Name. 1634 1634 */ 1635 - strcpy(adapter->full_model, "BusLogic "); 1636 - strcat(adapter->full_model, adapter->model); 1635 + scnprintf(adapter->full_model, sizeof(adapter->full_model), 1636 + "BusLogic %s", adapter->model); 1637 1637 /* 1638 1638 Select an appropriate value for the Tagged Queue Depth either from a 1639 1639 BusLogic Driver Options specification, or based on whether this Host
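
Editor's note: the BusLogic hunk swaps an unbounded strcpy()/strcat() pair for a single bounded format call. A standalone analogue, using snprintf() since scnprintf() is kernel-internal (it differs only in returning the bytes actually written rather than the would-be length); the struct sizes here are illustrative, not the driver's real ones.

#include <stdio.h>

struct adapter {                /* reduced stand-in for the driver struct */
        char model[10];
        char full_model[19];
};

int main(void)
{
        struct adapter a = { .model = "BT-958" };

        /* never writes past the buffer and always NUL-terminates,
         * unlike the old strcpy()+strcat() sequence */
        snprintf(a.full_model, sizeof(a.full_model), "BusLogic %s", a.model);
        puts(a.full_model);
        return 0;
}
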
+1 -2
drivers/scsi/Kconfig
··· 304 304 tristate "iSCSI Initiator over TCP/IP" 305 305 depends on SCSI && INET 306 306 select CRC32 307 - select CRYPTO 308 - select CRYPTO_MD5 309 307 select SCSI_ISCSI_ATTRS 310 308 help 311 309 The iSCSI Driver provides a host with the ability to access storage ··· 1149 1151 depends on NVME_TARGET_FC || NVME_TARGET_FC=n 1150 1152 depends on NVME_FC || NVME_FC=n 1151 1153 select CRC_T10DIF 1154 + select CRC32 1152 1155 select IRQ_POLL 1153 1156 help 1154 1157 This lpfc driver supports the Emulex LightPulse
+1 -1
drivers/scsi/aic7xxx/aicasm/aicasm.h
··· 82 82 extern int dst_mode; 83 83 struct symbol; 84 84 85 - void stop(const char *errstring, int err_code); 85 + void __attribute__((noreturn)) stop(const char *errstring, int err_code); 86 86 void include_file(char *file_name, include_type type); 87 87 void expand_macro(struct symbol *macro_symbol); 88 88 struct instruction *seq_alloc(void);
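
Editor's note: marking stop() noreturn, as the aicasm hunk above does, tells the compiler control never comes back from the call, which removes spurious "control reaches end of non-void function" warnings at call sites like the /* NOTREACHED */ ones in the grammar. A minimal standalone illustration:

#include <stdio.h>
#include <stdlib.h>

static void __attribute__((noreturn)) stop(const char *errstring, int err_code)
{
        fprintf(stderr, "%s\n", errstring);
        exit(err_code);
}

static int classify(int token)
{
        switch (token) {
        case 0:
                return 1;
        default:
                stop("unexpected token", 2);
                /* NOTREACHED: the compiler knows no return is needed */
        }
}

int main(void)
{
        return classify(0) ? 0 : 1;
}
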
+1 -1
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
··· 1104 1104 last_scope = TAILQ_LAST(&scope_context->inner_scope, 1105 1105 scope_tailq); 1106 1106 if (last_scope == NULL 1107 - || last_scope->type == T_ELSE) { 1107 + || last_scope->type == (int)T_ELSE) { 1108 1108 1109 1109 stop("'else if' without leading 'if'", EX_DATAERR); 1110 1110 /* NOTREACHED */
+1 -1
drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
··· 389 389 char c; 390 390 391 391 yptr = yytext; 392 - while (c = *yptr++) { 392 + while ((c = *yptr++)) { 393 393 /* 394 394 * Strip carriage returns. 395 395 */
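
Editor's note: the doubled parentheses in the lexer hunk are the idiomatic way to tell the compiler that an assignment inside a loop condition is intentional (otherwise -Wparentheses flags a possible '='/'==' mix-up). A minimal sketch of the same pattern:

#include <stdio.h>

int main(void)
{
        const char *src = "a\rb", *p = src;
        char c;

        while ((c = *p++)) {    /* assign, then test against '\0' */
                if (c != '\r')  /* strip carriage returns, as above */
                        putchar(c);
        }
        putchar('\n');
        return 0;
}
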
+2
drivers/scsi/elx/efct/efct_lio.c
··· 1612 1612 .sess_get_initiator_sid = NULL, 1613 1613 .tfc_tpg_base_attrs = efct_lio_tpg_attrs, 1614 1614 .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs, 1615 + .default_compl_type = TARGET_QUEUE_COMPL, 1615 1616 .default_submit_type = TARGET_DIRECT_SUBMIT, 1616 1617 .direct_submit_supp = 1, 1617 1618 }; ··· 1651 1650 .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs, 1652 1651 .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs, 1653 1652 1653 + .default_compl_type = TARGET_QUEUE_COMPL, 1654 1654 .default_submit_type = TARGET_DIRECT_SUBMIT, 1655 1655 .direct_submit_supp = 1, 1656 1656 };
+3 -11
drivers/scsi/esas2r/esas2r_log.c
··· 101 101 } 102 102 } 103 103 104 - #pragma GCC diagnostic push 105 - #ifndef __clang__ 106 - #pragma GCC diagnostic ignored "-Wsuggest-attribute=format" 107 - #endif 108 - 109 104 /* 110 105 * the master logging function. this function will format the message as 111 106 * outlined by the formatting string, the input device information and the ··· 113 118 * 114 119 * @return 0 on success, or -1 if an error occurred. 115 120 */ 116 - static int esas2r_log_master(const long level, 117 - const struct device *dev, 118 - const char *format, 119 - va_list args) 121 + static __printf(3, 0) 122 + int esas2r_log_master(const long level, const struct device *dev, 123 + const char *format, va_list args) 120 124 { 121 125 if (level <= event_log_level) { 122 126 unsigned long flags = 0; ··· 168 174 169 175 return 0; 170 176 } 171 - 172 - #pragma GCC diagnostic pop 173 177 174 178 /* 175 179 * formats and logs a message to the system log.
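
Editor's note: the esas2r hunk replaces pragma-based warning suppression with a __printf(3, 0) annotation, which lets the compiler type-check callers' format strings instead of silencing the diagnostic. A standalone sketch using the raw GCC attribute the kernel's __printf macro expands to; the trailing 0 means "the variadic arguments arrive as a va_list and cannot be checked at this site".

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 2, 0)))
static void log_master(int level, const char *fmt, va_list args)
{
        if (level <= 1)
                vprintf(fmt, args);
}

__attribute__((format(printf, 2, 3)))
static void log_msg(int level, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        log_master(level, fmt, args);
        va_end(args);
}

int main(void)
{
        log_msg(1, "queue depth %d\n", 32);     /* checked at build time */
        return 0;
}
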
+2 -2
drivers/scsi/fnic/fdls_disc.c
··· 4613 4613 if (!iport->usefip) { 4614 4614 if (iport->flags & FNIC_FIRST_LINK_UP) { 4615 4615 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 4616 - fnic_scsi_fcpio_reset(iport->fnic); 4616 + fnic_fcpio_reset(iport->fnic); 4617 4617 spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); 4618 4618 4619 4619 iport->flags &= ~FNIC_FIRST_LINK_UP; ··· 5072 5072 iport->fabric.flags = 0; 5073 5073 5074 5074 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 5075 - fnic_scsi_fcpio_reset(iport->fnic); 5075 + fnic_fcpio_reset(iport->fnic); 5076 5076 spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); 5077 5077 list_for_each_entry_safe(tport, next, &iport->tport_list, links) { 5078 5078 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+1 -1
drivers/scsi/fnic/fip.c
··· 737 737 if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) { 738 738 739 739 if (iport->flags & FNIC_FIRST_LINK_UP) { 740 - fnic_scsi_fcpio_reset(iport->fnic); 740 + fnic_fcpio_reset(iport->fnic); 741 741 iport->flags &= ~FNIC_FIRST_LINK_UP; 742 742 } 743 743
+4 -3
drivers/scsi/fnic/fnic.h
··· 30 30 31 31 #define DRV_NAME "fnic" 32 32 #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 33 - #define DRV_VERSION "1.8.0.2" 33 + #define DRV_VERSION "1.8.0.3" 34 34 #define PFX DRV_NAME ": " 35 35 #define DFX DRV_NAME "%d: " 36 36 ··· 438 438 struct list_head tx_queue; 439 439 mempool_t *frame_pool; 440 440 mempool_t *frame_elem_pool; 441 + mempool_t *frame_recv_pool; 441 442 struct work_struct tport_work; 442 443 struct list_head tport_event_list; 443 444 ··· 513 512 void fnic_reset(struct Scsi_Host *shost); 514 513 int fnic_issue_fc_host_lip(struct Scsi_Host *shost); 515 514 void fnic_get_host_port_state(struct Scsi_Host *shost); 516 - void fnic_scsi_fcpio_reset(struct fnic *fnic); 517 515 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index); 518 516 int fnic_wq_cmpl_handler(struct fnic *fnic, int); 519 517 int fnic_flogi_reg_handler(struct fnic *fnic, u32); ··· 541 541 } 542 542 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); 543 543 void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); 544 - void fnic_free_txq(struct list_head *head); 544 + void fnic_free_txq(struct fnic *fnic); 545 + void fnic_free_rxq(struct fnic *fnic); 545 546 int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, 546 547 char **subsys_desc); 547 548 void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
+91 -21
drivers/scsi/fnic/fnic_fcs.c
··· 291 291 if (fnic->stop_rx_link_events) { 292 292 list_del(&cur_frame->links); 293 293 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 294 - kfree(cur_frame->fp); 294 + mempool_free(cur_frame->fp, fnic->frame_recv_pool); 295 295 mempool_free(cur_frame, fnic->frame_elem_pool); 296 296 return; 297 297 } ··· 317 317 fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp, 318 318 cur_frame->frame_len, fchdr_offset); 319 319 320 - kfree(cur_frame->fp); 320 + mempool_free(cur_frame->fp, fnic->frame_recv_pool); 321 321 mempool_free(cur_frame, fnic->frame_elem_pool); 322 322 } 323 323 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ··· 337 337 if (fnic->stop_rx_link_events) { 338 338 list_del(&cur_frame->links); 339 339 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 340 - kfree(cur_frame->fp); 341 - kfree(cur_frame); 340 + mempool_free(cur_frame->fp, fnic->frame_recv_pool); 341 + mempool_free(cur_frame, fnic->frame_elem_pool); 342 342 return; 343 343 } ··· 355 355 list_del(&cur_frame->links); 356 356 357 357 if (fdls_fip_recv_frame(fnic, cur_frame->fp)) { 358 - kfree(cur_frame->fp); 359 - kfree(cur_frame); 358 + mempool_free(cur_frame->fp, fnic->frame_recv_pool); 359 + mempool_free(cur_frame, fnic->frame_elem_pool); 360 360 } 361 361 } 362 362 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ··· 375 375 376 376 eh = (struct ethhdr *) fp; 377 377 if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) { 378 - fip_fr_elem = (struct fnic_frame_list *) 379 - kzalloc_obj(struct fnic_frame_list, GFP_ATOMIC); 378 + fip_fr_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC); 380 379 if (!fip_fr_elem) 381 380 return 0; 381 + memset(fip_fr_elem, 0, sizeof(struct fnic_frame_list)); 382 382 fip_fr_elem->fp = fp; 383 383 spin_lock_irqsave(&fnic->fnic_lock, flags); 384 384 list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue); ··· 519 519 520 520 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 521 521 522 - frame_elem = mempool_alloc(fnic->frame_elem_pool, 523 - GFP_ATOMIC | __GFP_ZERO); 522 + frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC); 524 523 if (!frame_elem) { 525 524 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 526 525 "Failed to allocate memory for frame elem"); 527 526 goto drop; 528 527 } 528 + memset(frame_elem, 0, sizeof(struct fnic_frame_list)); 529 529 frame_elem->fp = fp; 530 530 frame_elem->rx_ethhdr_stripped = ethhdr_stripped; 531 531 frame_elem->frame_len = bytes_written; ··· 538 538 return; 539 539 540 540 drop: 541 - kfree(fp); 541 + mempool_free(fp, fnic->frame_recv_pool); 542 542 } 543 543 544 544 static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, ··· 591 591 int ret; 592 592 593 593 len = FNIC_FRAME_HT_ROOM; 594 - buf = kmalloc(len, GFP_ATOMIC); 594 + buf = mempool_alloc(fnic->frame_recv_pool, GFP_ATOMIC); 595 595 if (!buf) { 596 596 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 597 597 "Unable to allocate RQ buffer of size: %d\n", len); ··· 609 609 fnic_queue_rq_desc(rq, buf, pa, len); 610 610 return 0; 611 611 free_buf: 612 - kfree(buf); 612 + mempool_free(buf, fnic->frame_recv_pool); 613 613 return ret; 614 614 } ··· 621 621 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, 622 622 DMA_FROM_DEVICE); 623 623 624 - kfree(rq_buf); 624 + mempool_free(rq_buf, fnic->frame_recv_pool); 625 625 buf->os_buf = NULL; 626 626 } ··· 704 704 */ 705 705 if ((fnic->state != FNIC_IN_FC_MODE) 706 706 && (fnic->state != FNIC_IN_ETH_MODE)) { 707 - frame_elem = mempool_alloc(fnic->frame_elem_pool, 708 - GFP_ATOMIC | __GFP_ZERO); 707 + frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC); 709 708 if (!frame_elem) { 710 709 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 711 710 "Failed to allocate memory for frame elem"); 712 711 return -ENOMEM; 713 712 } 713 + memset(frame_elem, 0, sizeof(struct fnic_frame_list)); 714 714 715 715 FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, 716 716 "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n", ··· 836 836 return 0; 837 837 } 838 838 839 - void fnic_free_txq(struct list_head *head) 839 + void fnic_free_txq(struct fnic *fnic) 840 840 { 841 841 struct fnic_frame_list *cur_frame, *next; 842 842 843 - list_for_each_entry_safe(cur_frame, next, head, links) { 843 + list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) { 844 844 list_del(&cur_frame->links); 845 - kfree(cur_frame->fp); 846 - kfree(cur_frame); 845 + mempool_free(cur_frame->fp, fnic->frame_pool); 846 + mempool_free(cur_frame, fnic->frame_elem_pool); 847 + } 848 + } 849 + 850 + void fnic_free_rxq(struct fnic *fnic) 851 + { 852 + struct fnic_frame_list *cur_frame, *next; 853 + 854 + list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) { 855 + list_del(&cur_frame->links); 856 + mempool_free(cur_frame->fp, fnic->frame_recv_pool); 857 + mempool_free(cur_frame, fnic->frame_elem_pool); 858 + } 859 + 860 + if (fnic->config.flags & VFCF_FIP_CAPABLE) { 861 + list_for_each_entry_safe(cur_frame, next, 862 + &fnic->fip_frame_queue, links) { 863 + list_del(&cur_frame->links); 864 + mempool_free(cur_frame->fp, fnic->frame_recv_pool); 865 + mempool_free(cur_frame, fnic->frame_elem_pool); 866 + } 847 867 } 848 868 } ··· 918 898 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, 919 899 DMA_TO_DEVICE); 920 900 921 - kfree(buf->os_buf); 901 + mempool_free(buf->os_buf, fnic->frame_pool); 922 902 buf->os_buf = NULL; 923 903 } 924 904 ··· 1127 1107 } 1128 1108 spin_unlock_irqrestore(&reset_fnic_list_lock, 1129 1109 reset_fnic_list_lock_flags); 1110 + } 1111 + 1112 + void fnic_fcpio_reset(struct fnic *fnic) 1113 + { 1114 + unsigned long flags; 1115 + enum fnic_state old_state; 1116 + struct fnic_iport_s *iport = &fnic->iport; 1117 + DECLARE_COMPLETION_ONSTACK(fw_reset_done); 1118 + int time_remain; 1119 + 1120 + /* issue fw reset */ 1121 + spin_lock_irqsave(&fnic->fnic_lock, flags); 1122 + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { 1123 + /* fw reset is in progress, poll for its completion */ 1124 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1125 + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 1126 + "fnic is in unexpected state: %d for fw_reset\n", 1127 + fnic->state); 1128 + return; 1129 + } 1130 + 1131 + old_state = fnic->state; 1132 + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 1133 + 1134 + fnic_update_mac_locked(fnic, iport->hwmac); 1135 + fnic->fw_reset_done = &fw_reset_done; 1136 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1137 + 1138 + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 1139 + "Issuing fw reset\n"); 1140 + if (fnic_fw_reset_handler(fnic)) { 1141 + spin_lock_irqsave(&fnic->fnic_lock, flags); 1142 + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) 1143 + fnic->state = old_state; 1144 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1145 + } else { 1146 + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 1147 + "Waiting for fw completion\n"); 1148 + time_remain = wait_for_completion_timeout(&fw_reset_done, 1149 + msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT)); 1150 + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 1151 + "Woken up after fw completion timeout\n"); 1152 + if (time_remain == 0) { 1153 + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 1154 + "FW reset completion timed out after %d ms\n", 1155 + FNIC_FW_RESET_TIMEOUT); 1156 + } 1157 + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); 1158 + } 1159 + fnic->fw_reset_done = NULL; 1130 1160 }
+1 -1
drivers/scsi/fnic/fnic_fdls.h
··· 410 410 void fnic_fdls_remove_tport(struct fnic_iport_s *iport, 411 411 struct fnic_tport_s *tport, 412 412 unsigned long flags); 413 + void fnic_fcpio_reset(struct fnic *fnic); 413 414 414 415 /* fip.c */ 415 416 void fnic_fcoe_send_vlan_req(struct fnic *fnic); ··· 423 422 extern void fdls_fabric_timer_callback(struct timer_list *t); 424 423 425 424 /* fnic_scsi.c */ 426 - void fnic_scsi_fcpio_reset(struct fnic *fnic); 427 425 extern void fdls_fabric_timer_callback(struct timer_list *t); 428 426 void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid); 429 427 int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+25 -3
drivers/scsi/fnic/fnic_main.c
··· 40 40 static struct kmem_cache *fnic_io_req_cache; 41 41 static struct kmem_cache *fdls_frame_cache; 42 42 static struct kmem_cache *fdls_frame_elem_cache; 43 + static struct kmem_cache *fdls_frame_recv_cache; 43 44 static LIST_HEAD(fnic_list); 44 45 static DEFINE_SPINLOCK(fnic_list_lock); 45 46 static DEFINE_IDA(fnic_ida); ··· 555 554 mempool_destroy(fnic->io_req_pool); 556 555 mempool_destroy(fnic->frame_pool); 557 556 mempool_destroy(fnic->frame_elem_pool); 557 + mempool_destroy(fnic->frame_recv_pool); 558 558 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) 559 559 mempool_destroy(fnic->io_sgl_pool[i]); 560 560 ··· 930 928 } 931 929 fnic->frame_elem_pool = pool; 932 930 931 + pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, 932 + fdls_frame_recv_cache); 933 + if (!pool) { 934 + err = -ENOMEM; 935 + goto err_out_fdls_frame_recv_pool; 936 + } 937 + fnic->frame_recv_pool = pool; 938 + 933 939 /* setup vlan config, hw inserts vlan header */ 934 940 fnic->vlan_hw_insert = 1; 935 941 fnic->vlan_id = 0; ··· 1095 1085 } 1096 1086 vnic_dev_notify_unset(fnic->vdev); 1097 1087 err_out_fnic_notify_set: 1088 + mempool_destroy(fnic->frame_recv_pool); 1089 + err_out_fdls_frame_recv_pool: 1098 1090 mempool_destroy(fnic->frame_elem_pool); 1099 1091 err_out_fdls_frame_elem_pool: 1100 1092 mempool_destroy(fnic->frame_pool); ··· 1169 1157 timer_delete_sync(&fnic->enode_ka_timer); 1170 1158 timer_delete_sync(&fnic->vn_ka_timer); 1171 1159 1172 - fnic_free_txq(&fnic->fip_frame_queue); 1173 1160 fnic_fcoe_reset_vlans(fnic); 1174 1161 } 1175 1162 ··· 1188 1177 list_del(&fnic->list); 1189 1178 spin_unlock_irqrestore(&fnic_list_lock, flags); 1190 1179 1191 - fnic_free_txq(&fnic->frame_queue); 1192 - fnic_free_txq(&fnic->tx_queue); 1180 + fnic_free_rxq(fnic); 1181 + fnic_free_txq(fnic); 1193 1182 1194 1183 vnic_dev_notify_unset(fnic->vdev); 1195 1184 fnic_free_intr(fnic); ··· 1298 1287 goto err_create_fdls_frame_cache_elem; 1299 1288 } 1300 1289 1290 + fdls_frame_recv_cache = kmem_cache_create("fdls_frame_recv", 1291 + FNIC_FRAME_HT_ROOM, 1292 + 0, SLAB_HWCACHE_ALIGN, NULL); 1293 + if (!fdls_frame_recv_cache) { 1294 + pr_err("fnic fdls frame recv cach create failed\n"); 1295 + err = -ENOMEM; 1296 + goto err_create_fdls_frame_recv_cache; 1297 + } 1298 + 1301 1299 fnic_event_queue = 1302 1300 alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq"); 1303 1301 if (!fnic_event_queue) { ··· 1359 1339 if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) 1360 1340 destroy_workqueue(reset_fnic_work_queue); 1361 1341 err_create_reset_fnic_workq: 1342 + kmem_cache_destroy(fdls_frame_recv_cache); 1343 + err_create_fdls_frame_recv_cache: 1362 1344 destroy_workqueue(fnic_event_queue); 1363 1345 err_create_fnic_workq: 1364 1346 kmem_cache_destroy(fdls_frame_elem_cache);
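
Editor's note: the fnic_main.c hunk threads the new frame_recv_pool through the existing goto-unwind error ladder (see the added err_out_fdls_frame_recv_pool label): each allocation gets a label, and a failure jumps to the label that releases everything allocated so far, in reverse order. A reduced standalone sketch of that pattern, with plain malloc() standing in for the mempool/kmem_cache calls:

#include <stdlib.h>

struct pools {
        void *frame_pool;
        void *frame_elem_pool;
        void *frame_recv_pool;
};

static int pools_init(struct pools *p)
{
        p->frame_pool = malloc(64);
        if (!p->frame_pool)
                goto err_frame_pool;
        p->frame_elem_pool = malloc(64);
        if (!p->frame_elem_pool)
                goto err_frame_elem_pool;
        p->frame_recv_pool = malloc(64);
        if (!p->frame_recv_pool)
                goto err_frame_recv_pool;
        return 0;

        /* unwind in reverse allocation order */
err_frame_recv_pool:
        free(p->frame_elem_pool);
err_frame_elem_pool:
        free(p->frame_pool);
err_frame_pool:
        return -1;
}

int main(void)
{
        struct pools p;

        if (pools_init(&p))
                return 1;
        free(p.frame_recv_pool);
        free(p.frame_elem_pool);
        free(p.frame_pool);
        return 0;
}
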
+6 -64
drivers/scsi/fnic/fnic_scsi.c
··· 471 471 int sg_count = 0; 472 472 unsigned long flags = 0; 473 473 unsigned long ptr; 474 - int io_lock_acquired = 0; 475 474 uint16_t hwq = 0; 476 475 struct fnic_tport_s *tport = NULL; 477 476 struct rport_dd_data_s *rdd_data; ··· 635 636 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); 636 637 637 638 /* initialize rest of io_req */ 638 - io_lock_acquired = 1; 639 639 io_req->port_id = rport->port_id; 640 640 io_req->start_time = jiffies; 641 641 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; ··· 687 689 /* REVISIT: Use per IO lock in the final code */ 688 690 fnic_priv(sc)->flags |= FNIC_IO_ISSUED; 689 691 } 692 + 693 + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); 694 + 690 695 out: 691 696 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | 692 697 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | ··· 699 698 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, 700 699 mqtag, sc, io_req, sg_count, cmd_trace, 701 700 fnic_flags_and_state(sc)); 702 - 703 - /* if only we issued IO, will we have the io lock */ 704 - if (io_lock_acquired) 705 - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); 706 701 707 702 atomic_dec(&fnic->in_flight); 708 703 atomic_dec(&tport->in_flight); ··· 774 777 */ 775 778 if (ret) { 776 779 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 777 - fnic_free_txq(&fnic->tx_queue); 780 + fnic_free_txq(fnic); 778 781 goto reset_cmpl_handler_end; 779 782 } ··· 1969 1972 */ 1970 1973 spin_lock_irqsave(&fnic->fnic_lock, flags); 1971 1974 fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; 1972 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1973 - 1974 - if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) 1975 - fnic_scsi_fcpio_reset(fnic); 1976 - 1977 - spin_lock_irqsave(&fnic->fnic_lock, flags); 1978 1975 fnic->in_remove = 1; 1979 1976 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1977 + 1978 + fnic_fcpio_reset(fnic); 1980 1979 1981 1980 fnic_flush_tport_event_list(fnic); 1982 1981 fnic_delete_fcp_tports(fnic); ··· 3032 3039 3033 3040 ret = fnic_host_reset(shost); 3034 3041 return ret; 3035 - } 3036 - 3037 - 3038 - void fnic_scsi_fcpio_reset(struct fnic *fnic) 3039 - { 3040 - unsigned long flags; 3041 - enum fnic_state old_state; 3042 - struct fnic_iport_s *iport = &fnic->iport; 3043 - DECLARE_COMPLETION_ONSTACK(fw_reset_done); 3044 - int time_remain; 3045 - 3046 - /* issue fw reset */ 3047 - spin_lock_irqsave(&fnic->fnic_lock, flags); 3048 - if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { 3049 - /* fw reset is in progress, poll for its completion */ 3050 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 3051 - FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3052 - "fnic is in unexpected state: %d for fw_reset\n", 3053 - fnic->state); 3054 - return; 3055 - } 3056 - 3057 - old_state = fnic->state; 3058 - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 3059 - 3060 - fnic_update_mac_locked(fnic, iport->hwmac); 3061 - fnic->fw_reset_done = &fw_reset_done; 3062 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 3063 - 3064 - FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3065 - "Issuing fw reset\n"); 3066 - if (fnic_fw_reset_handler(fnic)) { 3067 - spin_lock_irqsave(&fnic->fnic_lock, flags); 3068 - if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) 3069 - fnic->state = old_state; 3070 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 3071 - } else { 3072 - FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3073 - "Waiting for fw completion\n"); 3074 - time_remain = wait_for_completion_timeout(&fw_reset_done, 3075 - msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT)); 3076 - FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3077 - "Woken up after fw completion timeout\n"); 3078 - if (time_remain == 0) { 3079 - FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3080 - "FW reset completion timed out after %d ms)\n", 3081 - FNIC_FW_RESET_TIMEOUT); 3082 - } 3083 - atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); 3084 - } 3085 - fnic->fw_reset_done = NULL; 3086 3042 }
+1 -1
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 1326 1326 1327 1327 if (sts && !wait_for_completion_timeout(&completion, 1328 1328 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1329 - dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1329 + dev_warn(dev, "phy%d wait phyup timed out for func %u\n", 1330 1330 phy_no, func); 1331 1331 if (phy->in_reset) 1332 1332 ret = -ETIMEDOUT;
+6 -6
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 432 432 #define CMPLT_HDR_IPTT_OFF 0 433 433 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) 434 434 #define CMPLT_HDR_DEV_ID_OFF 16 435 - #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) 435 + #define CMPLT_HDR_DEV_ID_MSK (0xffffU << CMPLT_HDR_DEV_ID_OFF) 436 436 /* dw3 */ 437 437 #define SATA_DISK_IN_ERROR_STATUS_OFF 8 438 438 #define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF) ··· 444 444 #define FIS_ATA_STATUS_ERR_OFF 18 445 445 #define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF) 446 446 #define FIS_TYPE_SDB_OFF 31 447 - #define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF) 447 + #define FIS_TYPE_SDB_MSK (0x1U << FIS_TYPE_SDB_OFF) 448 448 449 449 /* ITCT header */ 450 450 /* qw0 */ ··· 896 896 qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; 897 897 break; 898 898 default: 899 - dev_warn(dev, "setup itct: unsupported dev type (%d)\n", 899 + dev_warn(dev, "setup itct: unsupported dev type (%u)\n", 900 900 sas_dev->dev_type); 901 901 } 902 902 ··· 2847 2847 static ssize_t intr_conv_v3_hw_show(struct device *dev, 2848 2848 struct device_attribute *attr, char *buf) 2849 2849 { 2850 - return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv); 2850 + return scnprintf(buf, PAGE_SIZE, "%d\n", hisi_sas_intr_conv); 2851 2851 } 2852 2852 static DEVICE_ATTR_RO(intr_conv_v3_hw); 2853 2853 ··· 3293 3293 u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0]; 3294 3294 struct device *dev = hisi_hba->dev; 3295 3295 3296 - dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n", 3296 + dev_info(dev, "BIST info:phy%u link_rate=%u code_mode=%u path_mode=%u ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n", 3297 3297 phy_no, linkrate, code_mode, path_mode, 3298 3298 ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS], 3299 3299 ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS], ··· 3650 3650 int i; 3651 3651 3652 3652 for (i = 0; i < reg->count; i++) { 3653 - int off = i * HISI_SAS_REG_MEM_SIZE; 3653 + u32 off = i * HISI_SAS_REG_MEM_SIZE; 3654 3654 const char *name; 3655 3655 3656 3656 name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
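
Editor's note: the 0xffffU/0x1U changes in the hisi_sas masks are not cosmetic. 0xffff and 0x1 are signed int literals, so shifting them left by 16 or 31 can move a set bit into the sign bit, which is undefined behaviour; the U suffix keeps the whole expression in unsigned arithmetic. A minimal illustration with a hypothetical completion-header value:

#include <stdint.h>
#include <stdio.h>

#define CMPLT_HDR_DEV_ID_OFF    16
#define CMPLT_HDR_DEV_ID_MSK    (0xffffU << CMPLT_HDR_DEV_ID_OFF) /* defined */

int main(void)
{
        uint32_t cmplt_hdr = 0x002a0001;        /* hypothetical dw0 */
        uint32_t dev_id = (cmplt_hdr & CMPLT_HDR_DEV_ID_MSK) >>
                          CMPLT_HDR_DEV_ID_OFF;

        printf("device id = %u\n", (unsigned int)dev_id);       /* 42 */
        return 0;
}
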
+2 -2
drivers/scsi/hpsa.h
··· 164 164 struct ctlr_info { 165 165 unsigned int *reply_map; 166 166 int ctlr; 167 - char devname[8]; 167 + char devname[16]; 168 168 char *product_name; 169 169 struct pci_dev *pdev; 170 170 u32 board_id; ··· 255 255 int remove_in_progress; 256 256 /* Address of h->q[x] is passed to intr handler to know which queue */ 257 257 u8 q[MAX_REPLY_QUEUES]; 258 - char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */ 258 + char intrname[MAX_REPLY_QUEUES][32]; /* controller and IRQ names */ 259 259 u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ 260 260 #define HPSATMF_BITS_SUPPORTED (1 << 0) 261 261 #define HPSATMF_PHYS_LUN_RESET (1 << 1)
+1
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 3968 3968 3969 3969 .tfc_wwn_attrs = ibmvscsis_wwn_attrs, 3970 3970 3971 + .default_compl_type = TARGET_QUEUE_COMPL, 3971 3972 .default_submit_type = TARGET_DIRECT_SUBMIT, 3972 3973 .direct_submit_supp = 1, 3973 3974 };
+1 -1
drivers/scsi/iscsi_tcp.c
··· 267 267 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 268 268 struct sock *sk = tcp_sw_conn->sock->sk; 269 269 270 - /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 270 + /* restore socket callbacks, see also: iscsi_sw_tcp_conn_set_callbacks() */ 271 271 write_lock_bh(&sk->sk_callback_lock); 272 272 sk->sk_user_data = NULL; 273 273 sk->sk_data_ready = tcp_sw_conn->old_data_ready;
+11 -11
drivers/scsi/lpfc/lpfc.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 552 552 ); 553 553 554 554 __le32 cgn_info_crc; 555 - #define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41 556 - #define LPFC_CGN_CRC32_SEED 0xFFFFFFFF 557 555 }; 558 556 559 557 #define LPFC_CGN_INFO_SZ (sizeof(struct lpfc_cgn_info) - \ ··· 810 812 #define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */ 811 813 #define LPFC_USER_LINK_SPEED_32G 32 /* 32 Gigabaud */ 812 814 #define LPFC_USER_LINK_SPEED_64G 64 /* 64 Gigabaud */ 813 - #define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_64G 815 + #define LPFC_USER_LINK_SPEED_128G 128 /* 128 Gigabaud */ 816 + #define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_128G 814 817 815 - #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64" 818 + #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64, 128" 816 819 817 820 enum nemb_type { 818 821 nemb_mse = 1, ··· 1016 1017 #define LPFC_SLI3_CRP_ENABLED 0x08 1017 1018 #define LPFC_SLI3_BG_ENABLED 0x20 1018 1019 #define LPFC_SLI3_DSS_ENABLED 0x40 1019 - #define LPFC_SLI4_PERFH_ENABLED 0x80 1020 1020 #define LPFC_SLI4_PHWQ_ENABLED 0x100 1021 1021 uint32_t iocb_cmd_size; 1022 1022 uint32_t iocb_rsp_size; ··· 1188 1190 uint32_t cfg_ras_fwlog_func; 1189 1191 uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ 1190 1192 uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ 1191 - uint32_t cfg_enable_pbde; 1192 1193 uint32_t cfg_enable_mi; 1193 1194 struct nvmet_fc_target_port *targetport; 1194 1195 lpfc_vpd_t vpd; /* vital product data */ ··· 1664 1667 * @mask: Pointer to phba's cpumask member. 1665 1668 * @start: starting cpu index 1666 1669 * 1667 - * Note: If no valid cpu found, then nr_cpu_ids is returned. 1670 + * Returns: next online CPU in @mask on success 1668 1671 * 1672 + * Note: If no valid cpu found, then nr_cpu_ids is returned. 1669 1673 **/ 1670 1674 static __always_inline unsigned int 1671 1675 lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start) ··· 1678 1680 * lpfc_next_present_cpu - Finds next present CPU after n 1679 1681 * @n: the cpu prior to search 1680 1682 * 1681 - * Note: If no next present cpu, then fallback to first present cpu. 1683 + * Returns: next present CPU after CPU @n 1682 1684 * 1685 + * Note: If no next present cpu, then fallback to first present cpu. 1683 1686 **/ 1684 1687 static __always_inline unsigned int lpfc_next_present_cpu(int n) 1685 1688 { ··· 1690 1691 /** 1691 1692 * lpfc_sli4_mod_hba_eq_delay - update EQ delay 1692 1693 * @phba: Pointer to HBA context object. 1693 - * @q: The Event Queue to update. 1694 + * @eq: The Event Queue to update. 1694 1695 * @delay: The delay value (in us) to be written. 1695 1696 * 1696 1697 **/ ··· 1752 1753 * Pr Tag 1 0 N 1753 1754 * Pr Tag 1 1 Y 1754 1755 * Pr Tag 2 * Y 1755 - --------------------------------------------------- 1756 + * --------------------------------------------------- 1756 1757 * 1758 + * Returns: whether VMID is enabled 1757 1759 **/ 1758 1760 static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba) 1759 1761 {
+14 -13
drivers/scsi/lpfc/lpfc_attr.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 5 - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * 8 8 * www.broadcom.com * ··· 4415 4415 /* 4416 4416 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel 4417 4417 # connection. 4418 - # Value range is [0,16]. Default value is 0. 4418 + # Value range is [0,128]. Default value is 0. 4419 4419 */ 4420 4420 /** 4421 4421 * lpfc_link_speed_store - Set the adapters link speed ··· 4468 4468 "3055 lpfc_link_speed changed from %d to %d %s\n", 4469 4469 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); 4470 4470 4471 - if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 4472 - ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 4473 - ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 4474 - ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || 4475 - ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || 4476 - ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || 4477 - ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || 4478 - ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { 4471 + if ((val == LPFC_USER_LINK_SPEED_1G && !(phba->lmt & LMT_1Gb)) || 4472 + (val == LPFC_USER_LINK_SPEED_2G && !(phba->lmt & LMT_2Gb)) || 4473 + (val == LPFC_USER_LINK_SPEED_4G && !(phba->lmt & LMT_4Gb)) || 4474 + (val == LPFC_USER_LINK_SPEED_8G && !(phba->lmt & LMT_8Gb)) || 4475 + (val == LPFC_USER_LINK_SPEED_10G && !(phba->lmt & LMT_10Gb)) || 4476 + (val == LPFC_USER_LINK_SPEED_16G && !(phba->lmt & LMT_16Gb)) || 4477 + (val == LPFC_USER_LINK_SPEED_32G && !(phba->lmt & LMT_32Gb)) || 4478 + (val == LPFC_USER_LINK_SPEED_64G && !(phba->lmt & LMT_64Gb)) || 4479 + (val == LPFC_USER_LINK_SPEED_128G && !(phba->lmt & LMT_128Gb))) { 4479 4480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4480 4481 "2879 lpfc_link_speed attribute cannot be set " 4481 4482 "to %d. Speed is not supported by this port.\n", ··· 4501 4500 case LPFC_USER_LINK_SPEED_16G: 4502 4501 case LPFC_USER_LINK_SPEED_32G: 4503 4502 case LPFC_USER_LINK_SPEED_64G: 4503 + case LPFC_USER_LINK_SPEED_128G: 4504 4504 prev_val = phba->cfg_link_speed; 4505 4505 phba->cfg_link_speed = val; 4506 4506 if (nolip) ··· 4566 4564 case LPFC_USER_LINK_SPEED_16G: 4567 4565 case LPFC_USER_LINK_SPEED_32G: 4568 4566 case LPFC_USER_LINK_SPEED_64G: 4567 + case LPFC_USER_LINK_SPEED_128G: 4569 4568 phba->cfg_link_speed = val; 4570 4569 return 0; 4571 4570 default: ··· 7469 7466 } 7470 7467 7471 7468 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1; 7472 - 7473 - phba->cfg_enable_pbde = 0; 7474 7469 7475 7470 /* A value of 0 means use the number of CPUs found in the system */ 7476 7471 if (phba->cfg_hdw_queue == 0)
+3 -2
drivers/scsi/lpfc/lpfc_crtn.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 86 86 void lpfc_init_congestion_stat(struct lpfc_hba *phba); 87 87 void lpfc_init_congestion_buf(struct lpfc_hba *phba); 88 88 int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba); 89 - uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed); 89 + uint32_t lpfc_cgn_calc_crc32(const void *data, size_t size); 90 90 int lpfc_config_cgn_signal(struct lpfc_hba *phba); 91 91 int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total); 92 92 void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba); ··· 660 660 void lpfc_nvmet_cmd_template(void); 661 661 void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 662 662 uint32_t stat, uint32_t param); 663 + void lpfc_nvme_flush_abts_list(struct lpfc_hba *phba); 663 664 void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba); 664 665 extern int lpfc_enable_nvmet_cnt; 665 666 extern unsigned long long lpfc_enable_nvmet[];
+7 -6
drivers/scsi/lpfc/lpfc_ct.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 2427 2427 2428 2428 /* CGN is only for the physical port, no vports */ 2429 2429 if (lpfc_fdmi_cmd(vport, ndlp, cmd, 2430 - LPFC_FDMI_VENDOR_ATTR_mi) == 0) 2430 + LPFC_FDMI_VENDOR_ATTR_mi) == 0) { 2431 2431 phba->link_flag |= LS_CT_VEN_RPA; 2432 - lpfc_printf_log(phba, KERN_INFO, 2432 + lpfc_printf_log(phba, KERN_INFO, 2433 2433 LOG_DISCOVERY | LOG_ELS, 2434 2434 "6458 Send MI FDMI:%x Flag x%x\n", 2435 2435 phba->sli4_hba.pc_sli4_params.mi_ver, 2436 2436 phba->link_flag); 2437 + } 2437 2438 } else { 2438 2439 lpfc_printf_log(phba, KERN_INFO, 2439 2440 LOG_DISCOVERY | LOG_ELS, ··· 3215 3214 struct lpfc_iocbq *rspiocb); 3216 3215 3217 3216 if (!ndlp) 3218 - return 0; 3217 + goto fdmi_cmd_exit; 3219 3218 3220 3219 cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ 3221 3220 ··· 3321 3320 if (vport->port_type != LPFC_PHYSICAL_PORT) { 3322 3321 ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); 3323 3322 if (!ndlp) 3324 - return 0; 3323 + goto fdmi_cmd_free_rspvirt; 3325 3324 } 3326 3325 fallthrough; 3327 3326 case SLI_MGMT_RPA: ··· 3397 3396 if (vport->port_type != LPFC_PHYSICAL_PORT) { 3398 3397 ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); 3399 3398 if (!ndlp) 3400 - return 0; 3399 + goto fdmi_cmd_free_rspvirt; 3401 3400 } 3402 3401 fallthrough; 3403 3402 case SLI_MGMT_DPA:
+3 -2
drivers/scsi/lpfc/lpfc_disc.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 137 137 uint16_t nlp_maxframe; /* Max RCV frame size */ 138 138 uint8_t nlp_class_sup; /* Supported Classes */ 139 139 uint8_t nlp_retry; /* used for ELS retries */ 140 - uint8_t nlp_fcp_info; /* class info, bits 0-3 */ 140 + uint8_t nlp_fcp_info; /* class info, bits 0-2 */ 141 + #define NLP_FCP_CLASS_MASK 0x07 /* class info bitmask */ 141 142 #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 142 143 u8 nlp_nvme_info; /* NVME NSLER Support */ 143 144 uint8_t vmid_support; /* destination VMID support */
+37 -19
drivers/scsi/lpfc/lpfc_els.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 1107 1107 vport->vmid_flag = 0; 1108 1108 } 1109 1109 if (sp->cmn.priority_tagging) 1110 - vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1110 + vport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1111 1111 LPFC_VMID_TYPE_PRIO); 1112 1112 1113 1113 /* ··· 1303 1303 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1304 1304 ndlp->nlp_DID, ELS_CMD_FLOGI); 1305 1305 1306 - if (!elsiocb) 1306 + if (!elsiocb) { 1307 + lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1308 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_DISCOVERY, 1309 + "4296 Unable to prepare FLOGI iocb\n"); 1307 1310 return 1; 1311 + } 1308 1312 1309 1313 wqe = &elsiocb->wqe; 1310 1314 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; ··· 1398 1394 phba->sli3_options, 0, 0); 1399 1395 1400 1396 elsiocb->ndlp = lpfc_nlp_get(ndlp); 1401 - if (!elsiocb->ndlp) { 1402 - lpfc_els_free_iocb(phba, elsiocb); 1403 - return 1; 1404 - } 1397 + if (!elsiocb->ndlp) 1398 + goto err_out; 1405 1399 1406 1400 /* Avoid race with FLOGI completion and hba_flags. */ 1407 1401 set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); ··· 1409 1407 if (rc == IOCB_ERROR) { 1410 1408 clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); 1411 1409 clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1412 - lpfc_els_free_iocb(phba, elsiocb); 1413 1410 lpfc_nlp_put(ndlp); 1414 - return 1; 1411 + goto err_out; 1415 1412 } 1416 1413 1417 1414 /* Clear external loopback plug detected flag */ ··· 1475 1474 } 1476 1475 1477 1476 return 0; 1477 + 1478 + err_out: 1479 + lpfc_els_free_iocb(phba, elsiocb); 1480 + lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1481 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_DISCOVERY, 1482 + "4297 Issue FLOGI: Cannot send IOCB\n"); 1483 + return 1; 1478 1484 } 1479 1485 1480 1486 /** ··· 2649 2641 } 2650 2642 npr->estabImagePair = 1; 2651 2643 npr->readXferRdyDis = 1; 2652 - if (vport->cfg_first_burst_size) 2644 + if (phba->sli_rev == LPFC_SLI_REV4 && 2645 + !test_bit(HBA_FCOE_MODE, &phba->hba_flag) && 2646 + vport->cfg_first_burst_size) 2653 2647 npr->writeXferRdyDis = 1; 2654 2648 2655 2649 /* For FCP support */ ··· 4329 4319 static bool 4330 4320 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4331 4321 { 4332 - if (!(phba->lmt & LMT_64Gb)) 4322 + if (!(phba->lmt & (LMT_64Gb | LMT_128Gb))) 4333 4323 return false; 4334 4324 if (phba->sli_rev != LPFC_SLI_REV4) 4335 4325 return false; 4336 4326 4337 4327 if (phba->sli4_hba.conf_trunk) { 4338 - if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4328 + switch (phba->trunk_link.phy_lnk_speed) { 4329 + case LPFC_USER_LINK_SPEED_128G: 4330 + case LPFC_USER_LINK_SPEED_64G: 4339 4331 return true; 4340 - } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4341 - return true; 4332 + default: 4333 + return false; 4334 + } 4342 4335 } 4343 4336 4337 + switch (phba->fc_linkspeed) { 4338 + case LPFC_LINK_SPEED_128GHZ: 4339 + case LPFC_LINK_SPEED_64GHZ: 4340 + return true; 4341 + default: 4342 + return false; 4343 + } 4344 4344 } 4345 4345 4346 4346 /** ··· 10311 10291 cpu_to_le16(value); 10312 10292 cp->cgn_warn_freq = 10313 10293 cpu_to_le16(value); 10314 - crc = lpfc_cgn_calc_crc32 10315 - (cp, 10316 - LPFC_CGN_INFO_SZ, 10317 - LPFC_CGN_CRC32_SEED); 10294 + crc = lpfc_cgn_calc_crc32( 10295 + cp, LPFC_CGN_INFO_SZ); 10318 10296 cp->cgn_info_crc = cpu_to_le32(crc); 10319 10297 10320 10298
+23 -21
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 425 425 { 426 426 if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) { 427 427 clear_bit(NLP_DROPPED, &ndlp->nlp_flag); 428 - lpfc_nlp_get(ndlp); 429 428 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, 430 429 "8438 Devloss timeout reversed on DID x%x " 431 430 "refcnt %d ndlp %p flag x%lx " ··· 3173 3174 return; 3174 3175 } 3175 3176 3176 - lpfc_initial_flogi(vport); 3177 + if (!lpfc_initial_flogi(vport)) { 3178 + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_ELS, 3179 + "2345 Can't issue initial FLOGI\n"); 3180 + lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3181 + } 3177 3182 mempool_free(mboxq, phba->mbox_mem_pool); 3178 3183 return; 3179 3184 } ··· 3250 3247 return; 3251 3248 } 3252 3249 3253 - if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 3254 - lpfc_initial_fdisc(vport); 3250 + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 3251 + if (!lpfc_initial_fdisc(vport)) { 3252 + lpfc_printf_vlog(vport, KERN_WARNING, 3253 + LOG_MBOX | LOG_ELS, 3254 + "2346 Can't issue initial FDISC\n"); 3255 + lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3256 + } 3257 + } 3255 3258 else { 3256 3259 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 3257 3260 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, ··· 3817 3808 if (phba->cmf_active_mode != LPFC_CFG_OFF) 3818 3809 lpfc_cmf_signal_init(phba); 3819 3810 3820 - if (phba->lmt & LMT_64Gb) 3811 + if (phba->lmt & (LMT_64Gb | LMT_128Gb)) 3821 3812 lpfc_read_lds_params(phba); 3822 3813 3823 3814 } else if (attn_type == LPFC_ATT_LINK_DOWN || ··· 4410 4401 LOG_INIT | LOG_ELS | LOG_DISCOVERY, 4411 4402 "4220 Issue EDC status x%x Data x%x\n", 4412 4403 rc, phba->cgn_init_reg_signal); 4413 - } else if (phba->lmt & LMT_64Gb) { 4404 + } else if (phba->lmt & (LMT_64Gb | LMT_128Gb)) { 4414 4405 /* may send link fault capability descriptor */ 4415 4406 lpfc_issue_els_edc(vport, 0); 4416 4407 } else { ··· 5237 5228 5238 5229 /* 5239 5230 * Free rpi associated with LPFC_NODELIST entry. 5240 - * This routine is called from lpfc_freenode(), when we are removing 5241 - * a LPFC_NODELIST entry. It is also called if the driver initiates a 5242 - * LOGO that completes successfully, and we are waiting to PLOGI back 5243 - * to the remote NPort. In addition, it is called after we receive 5244 - * and unsolicated ELS cmd, send back a rsp, the rsp completes and 5245 - * we are waiting to PLOGI back to the remote NPort. 5231 + * This routine is called if the driver initiates a LOGO that completes 5232 + * successfully, and we are waiting to PLOGI back to the remote NPort. 5233 + * In addition, it is called after we receive and unsolicated ELS cmd, 5234 + * send back a rsp, the rsp completes and we are waiting to PLOGI back 5235 + * to the remote NPort. 5246 5236 */ 5247 5237 int 5248 5238 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ··· 6607 6599 unsigned long flags; 6608 6600 6609 6601 if (ndlp) { 6610 - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6611 - "node get: did:x%x flg:x%lx refcnt:x%x", 6612 - ndlp->nlp_DID, ndlp->nlp_flag, 6613 - kref_read(&ndlp->kref)); 6614 - 6615 6602 /* The check of ndlp usage to prevent incrementing the 6616 6603 * ndlp reference count that is in the process of being 6617 6604 * released. ··· 6614 6611 spin_lock_irqsave(&ndlp->lock, flags); 6615 6612 if (!kref_get_unless_zero(&ndlp->kref)) { 6616 6613 spin_unlock_irqrestore(&ndlp->lock, flags); 6617 - lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 6618 - "0276 %s: ndlp:x%px refcnt:%d\n", 6619 - __func__, (void *)ndlp, kref_read(&ndlp->kref)); 6614 + pr_info("0276 %s: NDLP x%px has zero reference count. " 6615 + "Exiting\n", __func__, ndlp); 6620 6616 return NULL; 6621 6617 } 6622 6618 spin_unlock_irqrestore(&ndlp->lock, flags);
+2 -1
drivers/scsi/lpfc/lpfc_hw.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 1771 1771 #define PCI_DEVICE_ID_LANCER_G6_FC 0xe300 1772 1772 #define PCI_DEVICE_ID_LANCER_G7_FC 0xf400 1773 1773 #define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500 1774 + #define PCI_DEVICE_ID_LANCER_G8_FC 0xd300 1774 1775 #define PCI_DEVICE_ID_SAT_SMB 0xf011 1775 1776 #define PCI_DEVICE_ID_SAT_MID 0xf015 1776 1777 #define PCI_DEVICE_ID_RFLY 0xf095
+20 -17
drivers/scsi/lpfc/lpfc_hw4.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 5 - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2009-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * 8 8 * www.broadcom.com * ··· 100 100 #define lpfc_sli_intf_sli_family_MASK 0x0000000F 101 101 #define lpfc_sli_intf_sli_family_WORD word0 102 102 #define LPFC_SLI_INTF_FAMILY_BE2 0x0 103 - #define LPFC_SLI_INTF_FAMILY_BE3 0x1 103 + #define LPFC_SLI_INTF_ASIC_ID 0x1 /* Refer to ASIC_ID register */ 104 + #define LPFC_SLI_INTF_FAMILY_BE3 0x3 104 105 #define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa 105 106 #define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb 106 107 #define LPFC_SLI_INTF_FAMILY_G6 0xc ··· 117 116 #define lpfc_sli_intf_func_type_WORD word0 118 117 #define LPFC_SLI_INTF_IF_TYPE_PHYS 0 119 118 #define LPFC_SLI_INTF_IF_TYPE_VIRT 1 119 + }; 120 + 121 + struct lpfc_asic_id { 122 + u32 word0; 123 + #define lpfc_asic_id_gen_num_SHIFT 8 124 + #define lpfc_asic_id_gen_num_MASK 0x000000FF 125 + #define lpfc_asic_id_gen_num_WORD word0 126 + #define LPFC_SLI_INTF_FAMILY_G8 0x10 127 + #define lpfc_asic_id_rev_num_SHIFT 0 128 + #define lpfc_asic_id_rev_num_MASK 0x000000FF 129 + #define lpfc_asic_id_rev_num_WORD word0 120 130 }; 121 131 122 132 #define LPFC_SLI4_MBX_EMBED true ··· 636 624 637 625 #define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000 638 626 #define LPFC_PORT_SEM_MASK 0xF000 627 + 628 + /* The following are config space register offsets */ 629 + #define LPFC_ASIC_ID_OFFSET 0x0308 630 + 639 631 /* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */ 640 632 #define LPFC_UERR_STATUS_HI 0x00A4 641 633 #define LPFC_UERR_STATUS_LO 0x00A0 ··· 648 632 649 633 /* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ 650 634 #define LPFC_SLI_INTF 0x0058 651 - #define LPFC_SLI_ASIC_VER 0x009C 652 635 653 636 #define LPFC_CTL_PORT_SEM_OFFSET 0x400 654 637 #define lpfc_port_smphr_perr_SHIFT 31 ··· 3077 3062 #define lpfc_mbx_rq_ftr_rq_iaar_SHIFT 9 3078 3063 #define lpfc_mbx_rq_ftr_rq_iaar_MASK 0x00000001 3079 3064 #define lpfc_mbx_rq_ftr_rq_iaar_WORD word2 3080 - #define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 3081 - #define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 3082 - #define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 3083 3065 #define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16 3084 3066 #define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001 3085 3067 #define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2 ··· 3108 3096 #define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 3109 3097 #define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 3110 3098 #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 3111 - #define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 3112 - #define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 3113 - #define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 3114 3099 #define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16 3115 3100 #define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001 3116 3101 #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3 ··· 3469 3460 #define cfg_pvl_SHIFT 13 3470 3461 #define cfg_pvl_MASK 0x00000001 3471 3462 #define cfg_pvl_WORD word19 3472 - 3473 - #define cfg_pbde_SHIFT 20 3474 - #define cfg_pbde_MASK 0x00000001 3475 - #define cfg_pbde_WORD word19 3476 3463 3477 3464 uint32_t word20; 3478 3465 #define cfg_max_tow_xri_SHIFT 0 ··· 4489 4484 #define wqe_irsp_SHIFT 4 4490 4485 #define wqe_irsp_MASK 0x00000001 4491 4486 #define wqe_irsp_WORD word11 4492 - #define wqe_pbde_SHIFT 5 4493 - #define wqe_pbde_MASK 0x00000001 4494 - #define wqe_pbde_WORD word11 4495 4487 #define wqe_sup_SHIFT 6 4496 4488 #define wqe_sup_MASK 0x00000001 4497 4489 #define wqe_sup_WORD word11 ··· 4980 4978 #define MAGIC_NUMBER_G6 0xFEAA0003 4981 4979 #define MAGIC_NUMBER_G7 0xFEAA0005 4982 4980 #define MAGIC_NUMBER_G7P 0xFEAA0020 4981 + #define MAGIC_NUMBER_G8 0xFEAA0070 4983 4982 4984 4983 struct lpfc_grp_hdr { 4985 4984 uint32_t size;
+3 -1
drivers/scsi/lpfc/lpfc_ids.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 117 117 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC, 118 118 PCI_ANY_ID, PCI_ANY_ID, }, 119 119 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC, 120 + PCI_ANY_ID, PCI_ANY_ID, }, 121 + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G8_FC, 120 122 PCI_ANY_ID, PCI_ANY_ID, }, 121 123 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, 122 124 PCI_ANY_ID, PCI_ANY_ID, },
+51 -66
drivers/scsi/lpfc/lpfc_init.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 5 - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * 8 8 * www.broadcom.com * ··· 22 22 *******************************************************************/ 23 23 24 24 #include <linux/blkdev.h> 25 + #include <linux/crc32.h> 25 26 #include <linux/delay.h> 26 27 #include <linux/dma-mapping.h> 27 28 #include <linux/idr.h> ··· 789 788 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 790 789 !(phba->lmt & LMT_32Gb)) || 791 790 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 792 - !(phba->lmt & LMT_64Gb))) { 791 + !(phba->lmt & LMT_64Gb)) || 792 + ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_128G) && 793 + !(phba->lmt & LMT_128Gb))) { 793 794 /* Reset link speed to auto */ 794 795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 795 796 "1302 Invalid speed for this board:%d " ··· 1090 1087 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1091 1088 struct lpfc_sli4_hdw_queue *qp; 1092 1089 LIST_HEAD(aborts); 1093 - LIST_HEAD(nvme_aborts); 1094 1090 LIST_HEAD(nvmet_aborts); 1095 1091 struct lpfc_sglq *sglq_entry = NULL; 1096 1092 int cnt, idx; ··· 1948 1946 1949 1947 lpfc_offline_prep(phba, mbx_action); 1950 1948 lpfc_sli_flush_io_rings(phba); 1949 + lpfc_nvme_flush_abts_list(phba); 1951 1950 lpfc_nvmels_flush_cmd(phba); 1952 1951 lpfc_offline(phba); 1953 1952 /* release interrupt for possible resource change */ ··· 2537 2534 return; 2538 2535 } 2539 2536 2540 - if (phba->lmt & LMT_64Gb) 2537 + if (phba->lmt & LMT_128Gb) 2538 + max_speed = 128; 2539 + else if (phba->lmt & LMT_64Gb) 2541 2540 max_speed = 64; 2542 2541 else if (phba->lmt & LMT_32Gb) 2543 2542 max_speed = 32; ··· 2756 2751 break; 2757 2752 case PCI_DEVICE_ID_LANCER_G7P_FC: 2758 2753 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2754 + break; 2755 + case PCI_DEVICE_ID_LANCER_G8_FC: 2756 + m = (typeof(m)){"LPe42100", "PCIe", "Fibre Channel Adapter"}; 2759 2757 break; 2760 2758 case PCI_DEVICE_ID_SKYHAWK: 2761 2759 case PCI_DEVICE_ID_SKYHAWK_VF: ··· 5642 5634 cp->cgn_stat_npm = value; 5643 5635 } 5644 5636 5645 - value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5646 - LPFC_CGN_CRC32_SEED); 5637 + value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 5647 5638 cp->cgn_info_crc = cpu_to_le32(value); 5648 5639 } ··· 5904 5897 cp->cgn_warn_freq = cpu_to_le16(value); 5905 5898 cp->cgn_alarm_freq = cpu_to_le16(value); 5906 5899 5907 - lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5908 - LPFC_CGN_CRC32_SEED); 5900 + lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 5909 5901 cp->cgn_info_crc = cpu_to_le32(lvalue); 5910 5902 5911 5903 hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC)); ··· 7127 7121 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7128 7122 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7129 7123 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7130 - crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7131 - LPFC_CGN_CRC32_SEED); 7124 + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 7132 7125 cp->cgn_info_crc = cpu_to_le32(crc); 7133 7127 spin_unlock_irq(&phba->hbalock); ··· 8288 8283 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8289 8284 phba->cfg_nvme_seg_cnt); 8290 8285 8291 - i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE); 8286 + i = min_t(u32, phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE); 8292 8287 8293 8288 phba->lpfc_sg_dma_buf_pool = 8294 8289 dma_pool_create("lpfc_sg_dma_buf_pool", ··· 10151 10146 phba->cfg_link_speed = 10152 10147 LPFC_USER_LINK_SPEED_64G; 10153 10148 break; 10149 + case LINK_SPEED_128G: 10150 + phba->cfg_link_speed = 10151 + LPFC_USER_LINK_SPEED_128G; 10152 + break; 10154 10153 case 0xffff: 10155 10154 phba->cfg_link_speed = 10156 10155 LPFC_USER_LINK_SPEED_AUTO; ··· 11804 11795 unsigned long bar0map_len, bar1map_len, bar2map_len; 11805 11796 int error; 11806 11797 uint32_t if_type; 11798 + u8 sli_family; 11807 11799 11808 11800 if (!pdev) 11809 11801 return -ENODEV; ··· 11833 11823 "sli_intf reg 0x%x\n", 11834 11824 phba->sli4_hba.sli_intf.word0); 11835 11825 return -ENODEV; 11826 + } 11827 + 11828 + /* Check if ASIC_ID register should be read */ 11829 + sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 11830 + if (sli_family == LPFC_SLI_INTF_ASIC_ID) { 11831 + if (pci_read_config_dword(pdev, LPFC_ASIC_ID_OFFSET, 11832 + &phba->sli4_hba.asic_id.word0)) 11833 + return -ENODEV; 11836 11834 } 11837 11835 11838 11836 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); ··· 13061 13043 /* Iterate to next offline or online cpu in aff_mask */ 13062 13044 cpu = cpumask_next(cpu, aff_mask); 13063 13045 13046 + /* Reached the end of the aff_mask */ 13047 + if (cpu >= nr_cpu_ids) 13048 + break; 13049 + 13064 13050 /* Find next online cpu in aff_mask to set affinity */ 13065 13051 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 13066 13052 } else if (vectors == 1) { ··· 13517 13495 phba->pport->work_port_events = 0; 13518 13496 } 13519 13497 13520 - static uint32_t 13521 - lpfc_cgn_crc32(uint32_t crc, u8 byte) 13522 - { 13523 - uint32_t msb = 0; 13524 - uint32_t bit; 13525 - 13526 - for (bit = 0; bit < 8; bit++) { 13527 - msb = (crc >> 31) & 1; 13528 - crc <<= 1; 13529 - 13530 - if (msb ^ (byte & 1)) { 13531 - crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13532 - crc |= 1; 13533 - } 13534 - byte >>= 1; 13535 - } 13536 - return crc; 13537 - } 13538 - 13539 - static uint32_t 13540 - lpfc_cgn_reverse_bits(uint32_t wd) 13541 - { 13542 - uint32_t result = 0; 13543 - uint32_t i; 13544 - 13545 - for (i = 0; i < 32; i++) { 13546 - result <<= 1; 13547 - result |= (1 & (wd >> i)); 13548 - } 13549 - return result; 13550 - } 13551 - 13552 13498 /* 13553 13499 * The routine corresponds with the algorithm the HBA firmware 13554 13500 * uses to validate the data integrity. 13555 13501 */ 13556 13502 uint32_t 13557 - lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13503 + lpfc_cgn_calc_crc32(const void *data, size_t size) 13558 13504 { 13559 - uint32_t i; 13560 - uint32_t result; 13561 - uint8_t *data = (uint8_t *)ptr; 13562 - 13563 - for (i = 0; i < byteLen; ++i) 13564 - crc = lpfc_cgn_crc32(crc, data[i]); 13565 - 13566 - result = ~lpfc_cgn_reverse_bits(crc); 13567 - return result; 13505 + return ~crc32c(~0, data, size); 13568 13506 } 13569 13507 13570 13508 void ··· 13573 13591 13574 13592 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13575 13593 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13576 - crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13594 + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 13577 13595 cp->cgn_info_crc = cpu_to_le32(crc); 13578 13596 13579 13597 phba->cgn_evt_timestamp = jiffies + ··· 13596 13614 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13597 13615 13598 13616 lpfc_cgn_update_tstamp(phba, &cp->stat_start); 13599 - crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13617 + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 13600 13618 cp->cgn_info_crc = cpu_to_le32(crc); 13601 13619 } ··· 13738 13756 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13739 13757 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13740 13758 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13741 - sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13759 + sli4_params->rqv = 13760 + (sli4_params->if_type < LPFC_SLI_INTF_IF_TYPE_2) ? 13761 + LPFC_Q_CREATE_VERSION_0 : LPFC_Q_CREATE_VERSION_1; 13742 13762 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13743 13763 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13744 13764 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); ··· 13802 13818 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13803 13819 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13804 13820 13805 - /* Enable embedded Payload BDE if support is indicated */ 13806 - if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13807 - phba->cfg_enable_pbde = 1; 13808 - else 13809 - phba->cfg_enable_pbde = 0; 13810 - 13811 13821 /* 13812 13822 * To support Suppress Response feature we must satisfy 3 conditions. 13813 13823 * lpfc_suppress_rsp module parameter must be set (default). ··· 13836 13858 phba->fcp_embed_io = 0; 13837 13859 13838 13860 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13839 - "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13861 + "6422 XIB %d: FCP %d NVME %d %d %d\n", 13840 13862 bf_get(cfg_xib, mbx_sli4_parameters), 13841 - phba->cfg_enable_pbde, 13842 13863 phba->fcp_embed_io, sli4_params->nvme, 13843 13864 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); ··· 14502 14525 u8 sli_family; 14503 14526 14504 14527 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14528 + 14529 + /* Refer to ASIC_ID register case */ 14530 + if (sli_family == LPFC_SLI_INTF_ASIC_ID) 14531 + sli_family = bf_get(lpfc_asic_id_gen_num, 14532 + &phba->sli4_hba.asic_id); 14533 + 14505 14534 /* Three cases: (1) FW was not supported on the detected adapter. 14506 14535 * (2) FW update has been locked out administratively. 14507 14536 * (3) Some other error during FW update.
13555 13501 */ 13556 13502 uint32_t 13557 - lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13503 + lpfc_cgn_calc_crc32(const void *data, size_t size) 13558 13504 { 13559 - uint32_t i; 13560 - uint32_t result; 13561 - uint8_t *data = (uint8_t *)ptr; 13562 - 13563 - for (i = 0; i < byteLen; ++i) 13564 - crc = lpfc_cgn_crc32(crc, data[i]); 13565 - 13566 - result = ~lpfc_cgn_reverse_bits(crc); 13567 - return result; 13505 + return ~crc32c(~0, data, size); 13568 13506 } 13569 13507 13570 13508 void ··· 13573 13591 13574 13592 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13575 13593 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13576 - crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13594 + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 13577 13595 cp->cgn_info_crc = cpu_to_le32(crc); 13578 13596 13579 13597 phba->cgn_evt_timestamp = jiffies + ··· 13596 13614 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13597 13615 13598 13616 lpfc_cgn_update_tstamp(phba, &cp->stat_start); 13599 - crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13617 + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ); 13600 13618 cp->cgn_info_crc = cpu_to_le32(crc); 13601 13619 } 13602 13620 ··· 13738 13756 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13739 13757 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13740 13758 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13741 - sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13759 + sli4_params->rqv = 13760 + (sli4_params->if_type < LPFC_SLI_INTF_IF_TYPE_2) ? 13761 + LPFC_Q_CREATE_VERSION_0 : LPFC_Q_CREATE_VERSION_1; 13742 13762 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13743 13763 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13744 13764 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); ··· 13802 13818 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13803 13819 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13804 13820 13805 - /* Enable embedded Payload BDE if support is indicated */ 13806 - if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13807 - phba->cfg_enable_pbde = 1; 13808 - else 13809 - phba->cfg_enable_pbde = 0; 13810 - 13811 13821 /* 13812 13822 * To support Suppress Response feature we must satisfy 3 conditions. 13813 13823 * lpfc_suppress_rsp module parameter must be set (default). ··· 13836 13858 phba->fcp_embed_io = 0; 13837 13859 13838 13860 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13839 - "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13861 + "6422 XIB %d: FCP %d NVME %d %d %d\n", 13840 13862 bf_get(cfg_xib, mbx_sli4_parameters), 13841 - phba->cfg_enable_pbde, 13842 13863 phba->fcp_embed_io, sli4_params->nvme, 13843 13864 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13844 13865 ··· 14502 14525 u8 sli_family; 14503 14526 14504 14527 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14528 + 14529 + /* Refer to ASIC_ID register case */ 14530 + if (sli_family == LPFC_SLI_INTF_ASIC_ID) 14531 + sli_family = bf_get(lpfc_asic_id_gen_num, 14532 + &phba->sli4_hba.asic_id); 14533 + 14505 14534 /* Three cases: (1) FW was not supported on the detected adapter. 14506 14535 * (2) FW update has been locked out administratively. 14507 14536 * (3) Some other error during FW update. 
··· 14520 14537 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14521 14538 magic_number != MAGIC_NUMBER_G7) || 14522 14539 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14523 - magic_number != MAGIC_NUMBER_G7P)) { 14540 + magic_number != MAGIC_NUMBER_G7P) || 14541 + (sli_family == LPFC_SLI_INTF_FAMILY_G8 && 14542 + magic_number != MAGIC_NUMBER_G8)) { 14524 14543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14525 14544 "3030 This firmware version is not supported on" 14526 14545 " this HBA model. Device:%x Magic:%x Type:%x "
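Note on the CRC rework above: lpfc_cgn_calc_crc32() now wraps the kernel's crc32c() helper in place of the removed open-coded lpfc_cgn_crc32()/lpfc_cgn_reverse_bits() pair, and callers drop the LPFC_CGN_CRC32_SEED argument because the seed is fixed inside the helper. A minimal sketch of the equivalent computation, assuming only <linux/crc32.h>:

#include <linux/crc32.h>
#include <linux/types.h>

/* Seed with ~0, run CRC32C over the congestion-info buffer, and invert
 * the result; this is the same value the removed bit-by-bit CRC and
 * bit-reversal code produced for the HBA firmware's integrity check.
 */
static u32 cgn_info_checksum(const void *buf, size_t len)
{
        return ~crc32c(~0, buf, len);
}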
+5 -2
drivers/scsi/lpfc/lpfc_mbox.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 624 624 case LPFC_USER_LINK_SPEED_64G: 625 625 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 626 626 mb->un.varInitLnk.link_speed = LINK_SPEED_64G; 627 + break; 628 + case LPFC_USER_LINK_SPEED_128G: 629 + mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 630 + mb->un.varInitLnk.link_speed = LINK_SPEED_128G; 627 631 break; 628 632 case LPFC_USER_LINK_SPEED_AUTO: 629 633 default: ··· 2143 2139 2144 2140 /* Set up host requested features. */ 2145 2141 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); 2146 - bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); 2147 2142 2148 2143 /* Enable DIF (block guard) only if configured to do so. */ 2149 2144 if (phba->cfg_enable_bg)
+20 -18
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 316 316 struct lpfc_iocbq *save_iocb; 317 317 struct lpfc_nodelist *ndlp; 318 318 MAILBOX_t *mb = &login_mbox->u.mb; 319 - 320 - int rc; 319 + int rc = 0; 321 320 322 321 ndlp = login_mbox->ctx_ndlp; 323 322 save_iocb = login_mbox->ctx_u.save_iocb; ··· 345 346 * completes. This ensures, in Pt2Pt, that the PLOGI LS_ACC is sent 346 347 * before the PRLI. 347 348 */ 348 - if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) { 349 + if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag) || mb->mbxStatus || rc) { 349 350 /* Now process the REG_RPI cmpl */ 350 351 lpfc_mbx_cmpl_reg_login(phba, login_mbox); 351 352 clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); ··· 524 525 /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4, 525 526 * to account for updated TOV's / parameters 526 527 */ 527 - if (phba->sli_rev == LPFC_SLI_REV4) 528 - lpfc_issue_reg_vfi(vport); 529 - else { 528 + if (phba->sli_rev == LPFC_SLI_REV4) { 529 + rc = lpfc_issue_reg_vfi(vport); 530 + } else { 530 531 link_mbox = mempool_alloc(phba->mbox_mem_pool, 531 532 GFP_KERNEL); 532 533 if (!link_mbox) 533 - goto out; 534 + goto rsp_rjt; 534 535 lpfc_config_link(phba, link_mbox); 535 536 link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 536 537 link_mbox->vport = vport; ··· 543 544 rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); 544 545 if (rc == MBX_NOT_FINISHED) { 545 546 mempool_free(link_mbox, phba->mbox_mem_pool); 546 - goto out; 547 + goto rsp_rjt; 547 548 } 548 549 } 549 550 550 551 lpfc_can_disctmo(vport); 552 + if (rc) 553 + goto rsp_rjt; 551 554 } 552 555 553 556 clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); ··· 563 562 564 563 login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 565 564 if (!login_mbox) 566 - goto out; 565 + goto rsp_rjt; 567 566 568 567 save_iocb = kzalloc_obj(*save_iocb); 569 568 if (!save_iocb) 570 - goto out; 569 + goto free_login_mbox; 571 570 572 571 /* Save info from cmd IOCB to be used in rsp after all mbox completes */ 573 572 memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, ··· 587 586 rc = lpfc_reg_rpi(phba, vport->vpi, remote_did, 588 587 (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); 589 588 if (rc) 590 - goto out; 589 + goto free_save_iocb; 591 590 592 591 login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 593 592 login_mbox->vport = vport; ··· 660 659 login_mbox->mbox_cmpl = lpfc_defer_plogi_acc; 661 660 login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 662 661 if (!login_mbox->ctx_ndlp) 663 - goto out; 662 + goto free_save_iocb; 664 663 665 664 login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */ 666 665 ··· 671 670 rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); 672 671 if (rc == MBX_NOT_FINISHED) { 673 672 lpfc_nlp_put(ndlp); 674 - goto out; 673 + goto free_save_iocb; 675 674 } 676 675 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 677 676 678 677 return 1; 679 - out: 680 - kfree(save_iocb); 681 - if (login_mbox) 682 - mempool_free(login_mbox, phba->mbox_mem_pool); 683 678 679 + free_save_iocb: 680 + kfree(save_iocb); 681 + free_login_mbox: 682 + mempool_free(login_mbox, 
phba->mbox_mem_pool); 683 + rsp_rjt: 684 684 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 685 685 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 686 686 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
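Note on the error-path rework above: the single catch-all "out:" label (which freed save_iocb unconditionally and login_mbox behind a NULL check) becomes an unwind ladder, and lpfc_issue_reg_vfi()'s return value is now checked. A minimal sketch of the ladder idiom, with illustrative names and kzalloc() standing in for the driver's pool allocations:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

static int unwind_ladder_example(bool issue_fails)
{
        void *login_mbox, *save_iocb;
        int rc = -ENOMEM;

        login_mbox = kzalloc(64, GFP_KERNEL);
        if (!login_mbox)
                goto rsp_rjt;

        save_iocb = kzalloc(64, GFP_KERNEL);
        if (!save_iocb)
                goto free_login_mbox;

        rc = issue_fails ? -EIO : 0;    /* stand-in for the mailbox-issue step */
        if (rc)
                goto free_save_iocb;

        kfree(save_iocb);       /* freed here only to keep the sketch leak-free */
        kfree(login_mbox);
        return 0;

free_save_iocb:
        kfree(save_iocb);
free_login_mbox:
        kfree(login_mbox);
rsp_rjt:
        return rc;              /* the real path sends an LS_RJT before returning */
}

Each label releases exactly what was acquired before the failing step, so the error path needs no conditional frees.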
+68 -38
drivers/scsi/lpfc/lpfc_nvme.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 1296 1296 /* Word 10 */ 1297 1297 bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG); 1298 1298 1299 - /* Words 13 14 15 are for PBDE support */ 1300 - 1301 1299 /* add the VMID tags as per switch response */ 1302 1300 if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) { 1303 1301 if (phba->pport->vmid_priority_tagging) { ··· 1333 1335 { 1334 1336 struct lpfc_hba *phba = vport->phba; 1335 1337 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1336 - union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; 1337 1338 struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; 1338 1339 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1339 1340 struct scatterlist *data_sg; 1340 - struct sli4_sge *first_data_sgl; 1341 - struct ulp_bde64 *bde; 1342 1341 dma_addr_t physaddr = 0; 1343 1342 uint32_t dma_len = 0; 1344 1343 uint32_t dma_offset = 0; 1345 - int nseg, i, j; 1344 + int nseg, i, j, k; 1346 1345 bool lsp_just_set = false; 1347 1346 1348 1347 /* Fix up the command and response DMA stuff. */ ··· 1356 1361 */ 1357 1362 sgl += 2; 1358 1363 1359 - first_data_sgl = sgl; 1360 1364 lpfc_ncmd->seg_cnt = nCmd->sg_cnt; 1361 1365 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { 1362 1366 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, ··· 1379 1385 1380 1386 /* for tracking the segment boundaries */ 1381 1387 j = 2; 1388 + k = 5; 1389 + if (unlikely(!phba->cfg_xpsgl)) 1390 + k = 1; 1382 1391 for (i = 0; i < nseg; i++) { 1383 1392 if (data_sg == NULL) { 1384 1393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, ··· 1400 1403 bf_set(lpfc_sli4_sge_last, sgl, 0); 1401 1404 1402 1405 /* expand the segment */ 1403 - if (!lsp_just_set && 1404 - !((j + 1) % phba->border_sge_num) && 1405 - ((nseg - 1) != i)) { 1406 + if (!lsp_just_set && (nseg != (i + k)) && 1407 + !((j + k) % phba->border_sge_num)) { 1406 1408 /* set LSP type */ 1407 1409 bf_set(lpfc_sli4_sge_type, sgl, 1408 1410 LPFC_SGE_TYPE_LSP); ··· 1424 1428 } 1425 1429 } 1426 1430 1427 - if (!(bf_get(lpfc_sli4_sge_type, sgl) & 1428 - LPFC_SGE_TYPE_LSP)) { 1431 + if (bf_get(lpfc_sli4_sge_type, sgl) != 1432 + LPFC_SGE_TYPE_LSP) { 1429 1433 if ((nseg - 1) == i) 1430 1434 bf_set(lpfc_sli4_sge_last, sgl, 1); 1431 1435 ··· 1446 1450 sgl++; 1447 1451 1448 1452 lsp_just_set = false; 1453 + j++; 1449 1454 } else { 1450 1455 sgl->word2 = cpu_to_le32(sgl->word2); 1451 - 1452 - sgl->sge_len = cpu_to_le32( 1453 - phba->cfg_sg_dma_buf_size); 1456 + /* will remaining SGEs fill the next SGL? 
*/ 1457 + if ((nseg - i) < phba->border_sge_num) 1458 + sgl->sge_len = 1459 + cpu_to_le32((nseg - i) * 1460 + sizeof(*sgl)); 1461 + else 1462 + sgl->sge_len = 1463 + cpu_to_le32(phba->cfg_sg_dma_buf_size); 1454 1464 1455 1465 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 1456 1466 i = i - 1; 1457 1467 1458 1468 lsp_just_set = true; 1469 + j += k; 1470 + k = 1; 1459 1471 } 1460 - 1461 - j++; 1462 1472 } 1463 - 1464 - /* PBDE support for first data SGE only */ 1465 - if (nseg == 1 && phba->cfg_enable_pbde) { 1466 - /* Words 13-15 */ 1467 - bde = (struct ulp_bde64 *) 1468 - &wqe->words[13]; 1469 - bde->addrLow = first_data_sgl->addr_lo; 1470 - bde->addrHigh = first_data_sgl->addr_hi; 1471 - bde->tus.f.bdeSize = 1472 - le32_to_cpu(first_data_sgl->sge_len); 1473 - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1474 - bde->tus.w = cpu_to_le32(bde->tus.w); 1475 - 1476 - /* Word 11 - set PBDE bit */ 1477 - bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 1478 - } else { 1479 - memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 1480 - /* Word 11 - PBDE bit disabled by default template */ 1481 - } 1482 - 1483 1473 } else { 1484 1474 lpfc_ncmd->seg_cnt = 0; 1485 1475 ··· 2825 2843 2826 2844 memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep)); 2827 2845 (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); 2846 + #endif 2847 + } 2848 + 2849 + /** 2850 + * lpfc_nvme_flush_abts_list - Clean up nvme commands from the abts list 2851 + * @phba: Pointer to HBA context object. 2852 + * 2853 + **/ 2854 + void 2855 + lpfc_nvme_flush_abts_list(struct lpfc_hba *phba) 2856 + { 2857 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2858 + struct lpfc_io_buf *psb, *psb_next; 2859 + struct lpfc_sli4_hdw_queue *qp; 2860 + LIST_HEAD(aborts); 2861 + int i; 2862 + 2863 + /* abts_xxxx_buf_list_lock required because worker thread uses this 2864 + * list. 2865 + */ 2866 + spin_lock_irq(&phba->hbalock); 2867 + for (i = 0; i < phba->cfg_hdw_queue; i++) { 2868 + qp = &phba->sli4_hba.hdwq[i]; 2869 + 2870 + spin_lock(&qp->abts_io_buf_list_lock); 2871 + list_for_each_entry_safe(psb, psb_next, 2872 + &qp->lpfc_abts_io_buf_list, list) { 2873 + if (!(psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)) 2874 + continue; 2875 + list_move(&psb->list, &aborts); 2876 + qp->abts_nvme_io_bufs--; 2877 + } 2878 + spin_unlock(&qp->abts_io_buf_list_lock); 2879 + } 2880 + spin_unlock_irq(&phba->hbalock); 2881 + 2882 + list_for_each_entry_safe(psb, psb_next, &aborts, list) { 2883 + list_del_init(&psb->list); 2884 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2885 + "6195 %s: lpfc_ncmd x%px flags x%x " 2886 + "cmd_flag x%x xri x%x\n", __func__, 2887 + psb, psb->flags, 2888 + psb->cur_iocbq.cmd_flag, 2889 + psb->cur_iocbq.sli4_xritag); 2890 + psb->flags &= ~LPFC_SBUF_XBUSY; 2891 + psb->status = IOSTAT_SUCCESS; 2892 + lpfc_sli4_nvme_pci_offline_aborted(phba, psb); 2893 + } 2828 2894 #endif 2829 2895 } 2830 2896
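Note on the new lpfc_nvme_flush_abts_list() above: it drains NVMe entries off the shared abort lists with the classic move-under-lock, complete-unlocked pattern, so lpfc_sli4_nvme_pci_offline_aborted() never runs with the hba/queue locks held. A minimal sketch with simplified stand-in types (struct buf and is_nvme stand in for lpfc_io_buf and the LPFC_IO_NVME flag test; the driver additionally nests a per-queue lock under hbalock):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct buf { struct list_head list; bool is_nvme; };

static void flush_nvme_bufs(spinlock_t *lock, struct list_head *abts)
{
        struct buf *b, *next;
        LIST_HEAD(aborts);

        spin_lock_irq(lock);
        list_for_each_entry_safe(b, next, abts, list) {
                if (!b->is_nvme)
                        continue;
                list_move(&b->list, &aborts);   /* collect under the lock */
        }
        spin_unlock_irq(lock);

        list_for_each_entry_safe(b, next, &aborts, list) {
                list_del_init(&b->list);
                /* complete/release b here, outside the lock */
        }
}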
+4 -31
drivers/scsi/lpfc/lpfc_nvmet.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 118 118 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 119 119 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 120 120 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 121 - bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0); 122 121 123 122 /* Word 12 - fcp_data_len is variable */ 124 - 125 - /* Word 13, 14, 15 - PBDE is zero */ 126 123 127 124 /* TRECEIVE template */ 128 125 wqe = &lpfc_treceive_cmd_template; ··· 155 158 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12); 156 159 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); 157 160 158 - /* Word 11 - pbde is variable */ 161 + /* Word 11 */ 159 162 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE); 160 163 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 161 164 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0); 162 165 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); 163 166 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); 164 - bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1); 165 167 166 168 /* Word 12 - fcp_data_len is variable */ 167 - 168 - /* Word 13, 14, 15 - PBDE is variable */ 169 169 170 170 /* TRSP template */ 171 171 wqe = &lpfc_trsp_cmd_template; ··· 201 207 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0); 202 208 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); 203 209 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); 204 - bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0); 205 210 206 211 /* Word 12, 13, 14, 15 - is zero */ 207 212 } ··· 2715 2722 struct ulp_bde64 *bde; 2716 2723 dma_addr_t physaddr; 2717 2724 int i, cnt, nsegs; 2718 - bool use_pbde = false; 2719 2725 int xc = 1; 2720 2726 2721 2727 if (!lpfc_is_link_up(phba)) { ··· 2899 2907 if (!xc) 2900 2908 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); 2901 2909 2902 - /* Word 11 - check for pbde */ 2903 - if (nsegs == 1 && phba->cfg_enable_pbde) { 2904 - use_pbde = true; 2905 - /* Word 11 - PBDE bit already preset by template */ 2906 - } else { 2907 - /* Overwrite default template setting */ 2908 - bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); 2909 - } 2910 - 2911 2910 /* Word 12 */ 2912 2911 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 2913 2912 ··· 3006 3023 } 3007 3024 3008 3025 bde = (struct ulp_bde64 *)&wqe->words[13]; 3009 - if (use_pbde) { 3010 - /* decrement sgl ptr backwards once to first data sge */ 3011 - sgl--; 3012 3026 3013 - /* Words 13-15 (PBDE) */ 3014 - bde->addrLow = sgl->addr_lo; 3015 - bde->addrHigh = sgl->addr_hi; 3016 - bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); 3017 - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3018 - bde->tus.w = cpu_to_le32(bde->tus.w); 3019 - } else { 3020 - memset(bde, 0, sizeof(struct ulp_bde64)); 3021 - } 3027 + memset(bde, 0, sizeof(struct ulp_bde64)); 3028 + 3022 3029 ctxp->state = LPFC_NVME_STE_DATA; 3023 3030 ctxp->entry_cnt++; 3024 3031 return nvmewqe;
+73 -74
drivers/scsi/lpfc/lpfc_scsi.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 1938 1938 uint32_t dma_len; 1939 1939 uint32_t dma_offset = 0; 1940 1940 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1941 - int j; 1941 + int j, k; 1942 1942 bool lsp_just_set = false; 1943 1943 1944 1944 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); ··· 2001 2001 /* assumption: caller has already run dma_map_sg on command data */ 2002 2002 sgde = scsi_sglist(sc); 2003 2003 j = 3; 2004 + k = 5; 2005 + if (unlikely(!phba->cfg_xpsgl)) 2006 + k = 1; 2004 2007 for (i = 0; i < datasegcnt; i++) { 2005 2008 /* clear it */ 2006 2009 sgl->word2 = 0; 2007 2010 2008 - /* do we need to expand the segment */ 2009 - if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2010 - ((datasegcnt - 1) != i)) { 2011 + /* do we need to expand the segment? */ 2012 + if (!lsp_just_set && (datasegcnt != (i + k)) && 2013 + !((j + k) % phba->border_sge_num)) { 2011 2014 /* set LSP type */ 2012 2015 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2013 2016 ··· 2029 2026 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2030 2027 } 2031 2028 2032 - if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2029 + if (bf_get(lpfc_sli4_sge_type, sgl) != LPFC_SGE_TYPE_LSP) { 2033 2030 if ((datasegcnt - 1) == i) 2034 2031 bf_set(lpfc_sli4_sge_last, sgl, 1); 2035 2032 physaddr = sg_dma_address(sgde); ··· 2046 2043 2047 2044 sgl++; 2048 2045 num_sge++; 2046 + j++; 2049 2047 lsp_just_set = false; 2050 - 2051 2048 } else { 2052 2049 sgl->word2 = cpu_to_le32(sgl->word2); 2053 - sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2054 - 2050 + /* will remaining SGEs fill the next SGL? 
*/ 2051 + if ((datasegcnt - i) < phba->border_sge_num) 2052 + sgl->sge_len = cpu_to_le32((datasegcnt - i) * 2053 + sizeof(*sgl)); 2054 + else 2055 + sgl->sge_len = 2056 + cpu_to_le32(phba->cfg_sg_dma_buf_size); 2055 2057 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2056 2058 i = i - 1; 2057 - 2059 + j += k; 2058 2060 lsp_just_set = true; 2061 + k = 1; 2059 2062 } 2060 - 2061 - j++; 2062 - 2063 2063 } 2064 2064 2065 2065 out: ··· 2115 2109 struct scatterlist *sgde = NULL; /* s/g data entry */ 2116 2110 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2117 2111 struct sli4_sge_diseed *diseed = NULL; 2112 + struct sli4_sge_le *lsp_sgl = NULL; 2118 2113 dma_addr_t dataphysaddr, protphysaddr; 2119 2114 unsigned short curr_prot = 0; 2120 2115 unsigned int split_offset; ··· 2132 2125 uint32_t rc; 2133 2126 #endif 2134 2127 uint32_t checking = 1; 2135 - uint32_t dma_offset = 0, num_sge = 0; 2136 - int j = 2; 2128 + uint32_t dma_offset = 0, num_sge = 0, lsp_len; 2129 + int j = 2, k = 4; 2137 2130 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2138 2131 2139 2132 sgpe = scsi_prot_sglist(sc); ··· 2164 2157 } 2165 2158 #endif 2166 2159 2160 + if (unlikely(!phba->cfg_xpsgl)) 2161 + k = 0; 2167 2162 split_offset = 0; 2168 2163 do { 2169 2164 /* Check to see if we ran out of space */ ··· 2173 2164 !(phba->cfg_xpsgl)) 2174 2165 return num_sge + 3; 2175 2166 2176 - /* DISEED and DIF have to be together */ 2177 - if (!((j + 1) % phba->border_sge_num) || 2178 - !((j + 2) % phba->border_sge_num) || 2179 - !((j + 3) % phba->border_sge_num)) { 2167 + /* DISEED and DIF have to be together */ 2168 + if (!((j + k + 1) % phba->border_sge_num) || 2169 + !((j + k + 2) % phba->border_sge_num) || 2170 + !((j + k + 3) % phba->border_sge_num)) { 2180 2171 sgl->word2 = 0; 2181 2172 2182 2173 /* set LSP type */ ··· 2195 2186 2196 2187 sgl->word2 = cpu_to_le32(sgl->word2); 2197 2188 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2189 + if (lsp_sgl) { 2190 + j++; 2191 + if (j % phba->border_sge_num) { 2192 + lsp_len = j * (sizeof(*sgl)); 2193 + lsp_sgl->sge_len = cpu_to_le32(lsp_len); 2194 + } 2195 + } 2196 + lsp_sgl = (struct sli4_sge_le *)sgl; 2198 2197 2199 2198 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2200 2199 j = 0; 2200 + k = 0; 2201 2201 } 2202 2202 2203 2203 /* setup DISEED with what we have */ ··· 2309 2291 return 0; 2310 2292 } 2311 2293 2312 - if (!((j + 1) % phba->border_sge_num)) { 2294 + if (!((j + k + 1) % phba->border_sge_num)) { 2313 2295 sgl->word2 = 0; 2314 2296 2315 2297 /* set LSP type */ ··· 2331 2313 sgl->word2 = cpu_to_le32(sgl->word2); 2332 2314 sgl->sge_len = cpu_to_le32( 2333 2315 phba->cfg_sg_dma_buf_size); 2316 + lsp_sgl = (struct sli4_sge_le *)sgl; 2334 2317 2335 2318 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2319 + j = 0; 2320 + k = 0; 2336 2321 } else { 2337 2322 dataphysaddr = sg_dma_address(sgde) + 2338 2323 split_offset; ··· 2383 2362 2384 2363 /* Move to the next s/g segment if possible */ 2385 2364 sgde = sg_next(sgde); 2386 - 2387 2365 sgl++; 2366 + j++; 2388 2367 } 2389 - 2390 - j++; 2391 2368 } 2392 2369 2393 2370 if (protgroup_offset) { ··· 2400 2381 sgl--; 2401 2382 bf_set(lpfc_sli4_sge_last, sgl, 1); 2402 2383 alldone = 1; 2384 + 2385 + /* Reset length in previous LSP where necessary */ 2386 + if (lsp_sgl) { 2387 + if (j % phba->border_sge_num) { 2388 + lsp_len = j * (sizeof(*sgl)); 2389 + lsp_sgl->sge_len = cpu_to_le32(lsp_len); 2390 + } 2391 + } 2403 2392 } else if (curr_prot < protcnt) { 2404 2393 /* advance to next prot buffer */ 2405 2394 sgpe = sg_next(sgpe); ··· 
2419 2392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2420 2393 "9085 BLKGRD: bug in %s\n", __func__); 2421 2394 } 2422 - 2423 2395 } while (!alldone); 2424 2396 2425 2397 out: ··· 3076 3050 struct scatterlist *sgel = NULL; 3077 3051 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3078 3052 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3079 - struct sli4_sge *first_data_sgl; 3080 3053 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3081 3054 struct lpfc_vport *vport = phba->pport; 3082 3055 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3083 3056 dma_addr_t physaddr; 3084 3057 uint32_t dma_len; 3085 3058 uint32_t dma_offset = 0; 3086 - int nseg, i, j; 3087 - struct ulp_bde64 *bde; 3059 + int nseg, i, j, k; 3088 3060 bool lsp_just_set = false; 3089 3061 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3090 3062 ··· 3109 3085 bf_set(lpfc_sli4_sge_last, sgl, 0); 3110 3086 sgl->word2 = cpu_to_le32(sgl->word2); 3111 3087 sgl += 1; 3112 - first_data_sgl = sgl; 3113 3088 lpfc_cmd->seg_cnt = nseg; 3114 3089 if (!phba->cfg_xpsgl && 3115 3090 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { ··· 3137 3114 /* for tracking segment boundaries */ 3138 3115 sgel = scsi_sglist(scsi_cmnd); 3139 3116 j = 2; 3117 + k = 5; 3118 + if (unlikely(!phba->cfg_xpsgl)) 3119 + k = 1; 3140 3120 for (i = 0; i < nseg; i++) { 3141 3121 sgl->word2 = 0; 3142 3122 if (nseg == 1) { ··· 3150 3124 bf_set(lpfc_sli4_sge_last, sgl, 0); 3151 3125 3152 3126 /* do we need to expand the segment */ 3153 - if (!lsp_just_set && 3154 - !((j + 1) % phba->border_sge_num) && 3155 - ((nseg - 1) != i)) { 3127 + if (!lsp_just_set && (nseg != (i + k)) && 3128 + !((j + k) % phba->border_sge_num)) { 3156 3129 /* set LSP type */ 3157 3130 bf_set(lpfc_sli4_sge_type, sgl, 3158 3131 LPFC_SGE_TYPE_LSP); ··· 3175 3150 } 3176 3151 } 3177 3152 3178 - if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3179 - LPFC_SGE_TYPE_LSP)) { 3153 + if (bf_get(lpfc_sli4_sge_type, sgl) != 3154 + LPFC_SGE_TYPE_LSP) { 3180 3155 if ((nseg - 1) == i) 3181 3156 bf_set(lpfc_sli4_sge_last, sgl, 1); 3182 3157 ··· 3196 3171 3197 3172 sgl++; 3198 3173 lsp_just_set = false; 3199 - 3174 + j++; 3200 3175 } else { 3201 3176 sgl->word2 = cpu_to_le32(sgl->word2); 3202 - sgl->sge_len = cpu_to_le32( 3203 - phba->cfg_sg_dma_buf_size); 3204 - 3177 + /* will remaining SGEs fill the next SGL? */ 3178 + if ((nseg - i) < phba->border_sge_num) 3179 + sgl->sge_len = 3180 + cpu_to_le32((nseg - i) * 3181 + sizeof(*sgl)); 3182 + else 3183 + sgl->sge_len = 3184 + cpu_to_le32(phba->cfg_sg_dma_buf_size); 3205 3185 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3206 3186 i = i - 1; 3207 3187 3208 3188 lsp_just_set = true; 3189 + j += k; 3190 + k = 1; 3209 3191 } 3210 - 3211 - j++; 3212 - } 3213 - 3214 - /* PBDE support for first data SGE only. 3215 - * For FCoE, we key off Performance Hints. 3216 - * For FC, we key off lpfc_enable_pbde. 
3217 - */ 3218 - if (nseg == 1 && 3219 - ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3220 - phba->cfg_enable_pbde)) { 3221 - /* Words 13-15 */ 3222 - bde = (struct ulp_bde64 *) 3223 - &wqe->words[13]; 3224 - bde->addrLow = first_data_sgl->addr_lo; 3225 - bde->addrHigh = first_data_sgl->addr_hi; 3226 - bde->tus.f.bdeSize = 3227 - le32_to_cpu(first_data_sgl->sge_len); 3228 - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3229 - bde->tus.w = cpu_to_le32(bde->tus.w); 3230 - 3231 - /* Word 11 - set PBDE bit */ 3232 - bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3233 - } else { 3234 - memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3235 - /* Word 11 - PBDE bit disabled by default template */ 3236 3192 } 3237 3193 } else { 3238 3194 sgl += 1; ··· 3221 3215 sgl->word2 = le32_to_cpu(sgl->word2); 3222 3216 bf_set(lpfc_sli4_sge_last, sgl, 1); 3223 3217 sgl->word2 = cpu_to_le32(sgl->word2); 3224 - 3225 - if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3226 - phba->cfg_enable_pbde) { 3227 - bde = (struct ulp_bde64 *) 3228 - &wqe->words[13]; 3229 - memset(bde, 0, (sizeof(uint32_t) * 3)); 3230 - } 3231 3218 } 3232 3219 3233 3220 /* ··· 4664 4665 else 4665 4666 piocbq->iocb.ulpFCP2Rcvy = 0; 4666 4667 4667 - piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4668 + piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & NLP_FCP_CLASS_MASK); 4668 4669 piocbq->io_buf = lpfc_cmd; 4669 4670 if (!piocbq->cmd_cmpl) 4670 4671 piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; ··· 4776 4777 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 4777 4778 4778 4779 bf_set(wqe_class, &wqe->generic.wqe_com, 4779 - (pnode->nlp_fcp_info & 0x0f)); 4780 + (pnode->nlp_fcp_info & NLP_FCP_CLASS_MASK)); 4780 4781 4781 4782 /* Word 8 */ 4782 4783 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; ··· 4876 4877 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 4877 4878 piocb->ulpContext = ndlp->nlp_rpi; 4878 4879 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4879 - piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4880 + piocb->ulpClass = (ndlp->nlp_fcp_info & NLP_FCP_CLASS_MASK); 4880 4881 piocb->ulpPU = 0; 4881 4882 piocb->un.fcpi.fcpi_parm = 0; 4882 4883 ··· 4944 4945 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 4945 4946 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); 4946 4947 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, 4947 - (ndlp->nlp_fcp_info & 0x0f)); 4948 + (ndlp->nlp_fcp_info & NLP_FCP_CLASS_MASK)); 4948 4949 4949 4950 /* ulpTimeout is only one byte */ 4950 4951 if (lpfc_cmd->timeout > 0xff) {
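Note on the SGL-chaining change repeated in lpfc_nvme.c and lpfc_scsi.c above: when the remaining data SGEs will not fill the next chained SGL, the link SGE's length is trimmed to the bytes actually used instead of the full DMA buffer size. A sketch of the length rule, with parameters mirroring the driver's fields:

#include <linux/types.h>

/* nseg: total data segments; i: current index; border_sge_num: SGEs
 * per SGL page. Returns the length to program into the LSP (link) SGE.
 */
static u32 lsp_sge_len(int nseg, int i, int border_sge_num,
                       u32 sg_dma_buf_size, u32 sge_size)
{
        if ((nseg - i) < border_sge_num)
                return (nseg - i) * sge_size;   /* partially filled SGL */
        return sg_dma_buf_size;                 /* fully used SGL page */
}

The same hunks also replace the bitwise LSP-type test with a direct equality compare and swap the magic 0x0f class nibble for NLP_FCP_CLASS_MASK.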
+46 -66
drivers/scsi/lpfc/lpfc_sli.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 136 136 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 137 137 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 138 138 139 - /* Word 11 - pbde is variable */ 139 + /* Word 11 */ 140 140 bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN); 141 141 bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 142 - bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 143 142 144 143 /* Word 12 - is zero */ 145 - 146 - /* Word 13, 14, 15 - PBDE is variable */ 147 144 148 145 /* IWRITE template */ 149 146 wqe = &lpfc_iwrite_cmd_template; ··· 173 176 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 174 177 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 175 178 176 - /* Word 11 - pbde is variable */ 179 + /* Word 11 */ 177 180 bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT); 178 181 bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 179 - bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 180 182 181 183 /* Word 12 - is zero */ 182 - 183 - /* Word 13, 14, 15 - PBDE is variable */ 184 184 185 185 /* ICMND template */ 186 186 wqe = &lpfc_icmnd_cmd_template; ··· 211 217 /* Word 11 */ 212 218 bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN); 213 219 bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 214 - bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0); 215 220 216 221 /* Word 12, 13, 14, 15 - is zero */ 217 222 } ··· 4565 4572 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 4566 4573 { 4567 4574 LIST_HEAD(tx_completions); 4568 - LIST_HEAD(txcmplq_completions); 4575 + spinlock_t *plock; /* for transmit queue access */ 4569 4576 struct lpfc_iocbq *iocb, *next_iocb; 4570 4577 int offline; 4571 4578 4572 - if (pring->ringno == LPFC_ELS_RING) { 4579 + if (phba->sli_rev >= LPFC_SLI_REV4) 4580 + plock = &pring->ring_lock; 4581 + else 4582 + plock = &phba->hbalock; 4583 + 4584 + if (pring->ringno == LPFC_ELS_RING) 4573 4585 lpfc_fabric_abort_hba(phba); 4574 - } 4586 + 4575 4587 offline = pci_channel_offline(phba->pcidev); 4576 4588 4577 - /* Error everything on txq and txcmplq 4578 - * First do the txq. 
4579 - */ 4580 - if (phba->sli_rev >= LPFC_SLI_REV4) { 4581 - spin_lock_irq(&pring->ring_lock); 4582 - list_splice_init(&pring->txq, &tx_completions); 4583 - pring->txq_cnt = 0; 4584 - 4585 - if (offline) { 4586 - list_splice_init(&pring->txcmplq, 4587 - &txcmplq_completions); 4588 - } else { 4589 - /* Next issue ABTS for everything on the txcmplq */ 4590 - list_for_each_entry_safe(iocb, next_iocb, 4591 - &pring->txcmplq, list) 4592 - lpfc_sli_issue_abort_iotag(phba, pring, 4593 - iocb, NULL); 4594 - } 4595 - spin_unlock_irq(&pring->ring_lock); 4596 - } else { 4597 - spin_lock_irq(&phba->hbalock); 4598 - list_splice_init(&pring->txq, &tx_completions); 4599 - pring->txq_cnt = 0; 4600 - 4601 - if (offline) { 4602 - list_splice_init(&pring->txcmplq, &txcmplq_completions); 4603 - } else { 4604 - /* Next issue ABTS for everything on the txcmplq */ 4605 - list_for_each_entry_safe(iocb, next_iocb, 4606 - &pring->txcmplq, list) 4607 - lpfc_sli_issue_abort_iotag(phba, pring, 4608 - iocb, NULL); 4609 - } 4610 - spin_unlock_irq(&phba->hbalock); 4611 - } 4589 + /* Cancel everything on txq */ 4590 + spin_lock_irq(plock); 4591 + list_splice_init(&pring->txq, &tx_completions); 4592 + pring->txq_cnt = 0; 4612 4593 4613 4594 if (offline) { 4614 - /* Cancel all the IOCBs from the completions list */ 4615 - lpfc_sli_cancel_iocbs(phba, &txcmplq_completions, 4616 - IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 4595 + /* Cancel everything on txcmplq */ 4596 + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 4597 + iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 4598 + list_splice_init(&pring->txcmplq, &tx_completions); 4599 + pring->txcmplq_cnt = 0; 4617 4600 } else { 4618 - /* Make sure HBA is alive */ 4619 - lpfc_issue_hb_tmo(phba); 4601 + /* Issue ABTS for everything on the txcmplq */ 4602 + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 4603 + lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); 4620 4604 } 4605 + spin_unlock_irq(plock); 4606 + 4607 + if (!offline) 4608 + lpfc_issue_hb_tmo(phba); 4609 + 4621 4610 /* Cancel all the IOCBs from the completions list */ 4622 4611 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT, 4623 4612 IOERR_SLI_ABORTED); ··· 8723 8748 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8724 8749 "0378 No support for fcpi mode.\n"); 8725 8750 ftr_rsp++; 8726 - } 8727 - 8728 - /* Performance Hints are ONLY for FCoE */ 8729 - if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 8730 - if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 8731 - phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 8732 - else 8733 - phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 8734 8751 } 8735 8752 8736 8753 /* ··· 14322 14355 /* Get the reference to the active mbox command */ 14323 14356 spin_lock_irqsave(&phba->hbalock, iflags); 14324 14357 pmb = phba->sli.mbox_active; 14358 + spin_unlock_irqrestore(&phba->hbalock, iflags); 14325 14359 if (unlikely(!pmb)) { 14326 14360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14327 - "1832 No pending MBOX command to handle\n"); 14328 - spin_unlock_irqrestore(&phba->hbalock, iflags); 14361 + "1832 No pending MBOX command to handle, " 14362 + "mcqe: x%08x x%08x x%08x x%08x\n", 14363 + mcqe->word0, mcqe->mcqe_tag0, 14364 + mcqe->mcqe_tag1, mcqe->trailer); 14329 14365 goto out_no_mqe_complete; 14330 14366 } 14331 - spin_unlock_irqrestore(&phba->hbalock, iflags); 14332 14367 mqe = &pmb->u.mqe; 14333 14368 pmbox = (MAILBOX_t *)&pmb->u.mqe; 14334 14369 mbox = phba->mbox; ··· 14705 14736 atomic_read(&tgtp->rcv_fcp_cmd_out), 
14706 14737 atomic_read(&tgtp->xmt_fcp_release)); 14707 14738 } 14739 + hrq->RQ_discard_frm++; 14708 14740 fallthrough; 14709 - 14710 14741 case FC_STATUS_INSUFF_BUF_NEED_BUF: 14742 + /* Unexpected event - bump the counter for support. */ 14711 14743 hrq->RQ_no_posted_buf++; 14712 - /* Post more buffers if possible */ 14744 + 14745 + lpfc_log_msg(phba, KERN_WARNING, 14746 + LOG_ELS | LOG_DISCOVERY | LOG_SLI, 14747 + "6423 RQE completion Status x%x, needed x%x " 14748 + "discarded x%x\n", status, 14749 + hrq->RQ_no_posted_buf - hrq->RQ_discard_frm, 14750 + hrq->RQ_discard_frm); 14751 + 14752 + /* For SLI3, post more buffers if possible. No action for SLI4. 14753 + * SLI4 is reposting immediately after processing the RQE. 14754 + */ 14713 14755 set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag); 14714 14756 workposted = true; 14715 14757 break;
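Note on the lpfc_sli_abort_iocb_ring() rework above: the two duplicated critical sections (ring_lock for SLI4, hbalock otherwise) collapse into one by picking the lock pointer up front. A minimal sketch of the shape, with stand-in fields:

#include <linux/spinlock.h>

struct ring_ex { spinlock_t ring_lock; int sli_rev; };

static void abort_ring_example(struct ring_ex *r, spinlock_t *hbalock)
{
        spinlock_t *plock = (r->sli_rev >= 4) ? &r->ring_lock : hbalock;

        spin_lock_irq(plock);
        /* splice txq, then either cancel txcmplq (offline) or issue
         * ABTS for each txcmplq entry (online) -- one copy of the code
         */
        spin_unlock_irq(plock);
}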
+5 -1
drivers/scsi/lpfc/lpfc_sli4.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2009-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 246 246 uint32_t q_cnt_2; 247 247 uint32_t q_cnt_3; 248 248 uint64_t q_cnt_4; 249 + uint32_t q_cnt_5; 250 + 249 251 /* defines for EQ stats */ 250 252 #define EQ_max_eqe q_cnt_1 251 253 #define EQ_no_entry q_cnt_2 ··· 270 268 #define RQ_no_buf_found q_cnt_2 271 269 #define RQ_buf_posted q_cnt_3 272 270 #define RQ_rcv_buf q_cnt_4 271 + #define RQ_discard_frm q_cnt_5 273 272 274 273 struct work_struct irqwork; 275 274 struct work_struct spwork; ··· 841 838 uint32_t ue_to_sr; 842 839 uint32_t ue_to_rp; 843 840 struct lpfc_register sli_intf; 841 + struct lpfc_register asic_id; 844 842 struct lpfc_pc_sli4_params pc_sli4_params; 845 843 struct lpfc_bbscn_params bbscn_params; 846 844 struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
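Note on the lpfc_sli4.h counters above: the queue statistics share one generic layout, and per-queue-type macros give the slots meaningful names; the new q_cnt_5 slot is aliased as RQ_discard_frm for the discarded-frame count logged in lpfc_sli.c. A compact sketch of the aliasing idiom:

#include <linux/types.h>

struct q_stats_ex {
        u32 q_cnt_2;
        u32 q_cnt_5;
};
#define RQ_no_buf_found q_cnt_2
#define RQ_discard_frm  q_cnt_5

static void on_discard(struct q_stats_ex *hrq)
{
        hrq->RQ_discard_frm++;  /* expands to hrq->q_cnt_5++ */
}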
+3 -3
drivers/scsi/lpfc/lpfc_version.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 4 + * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 7 * EMULEX and SLI are trademarks of Emulex. * ··· 20 20 * included with this package. * 21 21 *******************************************************************/ 22 22 23 - #define LPFC_DRIVER_VERSION "14.4.0.13" 23 + #define LPFC_DRIVER_VERSION "15.0.0.0" 24 24 #define LPFC_DRIVER_NAME "lpfc" 25 25 26 26 /* Used for SLI 2/3 */ ··· 32 32 33 33 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 34 34 LPFC_DRIVER_VERSION 35 - #define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \ 35 + #define LPFC_COPYRIGHT "Copyright (C) 2017-2026 Broadcom. All Rights " \ 36 36 "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ 37 37 "and/or its subsidiaries."
+9 -6
drivers/scsi/megaraid/megaraid_sas_base.c
··· 6365 6365 6366 6366 megasas_setup_jbod_map(instance); 6367 6367 6368 - if (megasas_get_device_list(instance) != SUCCESS) { 6369 - dev_err(&instance->pdev->dev, 6370 - "%s: megasas_get_device_list failed\n", 6371 - __func__); 6372 - goto fail_get_ld_pd_list; 6368 + scoped_guard(mutex, &instance->reset_mutex) { 6369 + if (megasas_get_device_list(instance) != SUCCESS) { 6370 + dev_err(&instance->pdev->dev, 6371 + "%s: megasas_get_device_list failed\n", 6372 + __func__); 6373 + goto fail_get_ld_pd_list; 6374 + } 6373 6375 } 6374 6376 6375 6377 /* stream detection initialization */ ··· 6470 6468 } 6471 6469 6472 6470 if (instance->snapdump_wait_time) { 6473 - megasas_get_snapdump_properties(instance); 6471 + scoped_guard(mutex, &instance->reset_mutex) 6472 + megasas_get_snapdump_properties(instance); 6474 6473 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6475 6474 instance->snapdump_wait_time); 6476 6475 }
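Note on the scoped_guard() conversions above: reset_mutex is now held for exactly the braced region and released automatically on every exit path, including the goto to fail_get_ld_pd_list. A minimal sketch of the helper from <linux/cleanup.h>:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

static int probe_step_example(struct mutex *reset_mutex, bool fail)
{
        scoped_guard(mutex, reset_mutex) {
                if (fail)
                        return -EIO;    /* guard drops the mutex here too */
        }
        return 0;       /* mutex already released at the closing brace */
}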
+16
drivers/scsi/mpi3mr/mpi3mr.h
··· 159 159 /* Controller Reset related definitions */ 160 160 #define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5 161 161 #define MPI3MR_MAX_RESET_RETRY_COUNT 3 162 + #define MPI3MR_MAX_SHUTDOWN_RETRY_COUNT 2 162 163 163 164 /* ResponseCode definitions */ 164 165 #define MPI3MR_RI_MASK_RESPCODE (0x000000FF) ··· 324 323 MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29, 325 324 MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30, 326 325 MPI3MR_RESET_FROM_TRIGGER = 31, 326 + MPI3MR_RESET_FROM_INVALID_COMPLETION = 32, 327 327 }; 328 328 329 329 #define MPI3MR_RESET_REASON_OSTYPE_LINUX 1 ··· 430 428 * @q_segments: Segment descriptor pointer 431 429 * @q_segment_list: Segment list base virtual address 432 430 * @q_segment_list_dma: Segment list base DMA address 431 + * @last_full_host_tag: Hosttag of last IO returned to SML 432 + * due to queue full 433 + * @qfull_io_count: Number of IOs returned back to SML 434 + * due to queue full 435 + * @qfull_instances: Total queue full occurrences.One occurrence 436 + * starts with queue full detection and ends 437 + * with queue full breaks. 438 + * 433 439 */ 434 440 struct op_req_qinfo { 435 441 u16 ci; ··· 451 441 struct segments *q_segments; 452 442 void *q_segment_list; 453 443 dma_addr_t q_segment_list_dma; 444 + u16 last_full_host_tag; 445 + u64 qfull_io_count; 446 + u32 qfull_instances; 447 + 454 448 }; 455 449 456 450 /** ··· 1197 1183 * @num_tb_segs: Number of Segments in Trace buffer 1198 1184 * @trace_buf_pool: DMA pool for Segmented trace buffer segments 1199 1185 * @trace_buf: Trace buffer segments memory descriptor 1186 + * @invalid_io_comp: Invalid IO completion 1200 1187 */ 1201 1188 struct mpi3mr_ioc { 1202 1189 struct list_head list; ··· 1409 1394 u32 num_tb_segs; 1410 1395 struct dma_pool *trace_buf_pool; 1411 1396 struct segments *trace_buf; 1397 + u8 invalid_io_comp; 1412 1398 1413 1399 }; 1414 1400
+50 -7
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 996 996 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 997 997 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, 998 998 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, 999 + { MPI3MR_RESET_FROM_INVALID_COMPLETION, "invalid cmd completion" }, 999 1000 }; 1000 1001 1001 1002 /** ··· 2372 2371 op_req_q->ci = 0; 2373 2372 op_req_q->pi = 0; 2374 2373 op_req_q->reply_qid = reply_qid; 2374 + op_req_q->last_full_host_tag = MPI3MR_HOSTTAG_INVALID; 2375 + op_req_q->qfull_io_count = 0; 2376 + op_req_q->qfull_instances = 0; 2375 2377 spin_lock_init(&op_req_q->q_lock); 2376 2378 2377 2379 if (!op_req_q->q_segments) { ··· 2561 2557 u16 req_sz = mrioc->facts.op_req_sz; 2562 2558 struct segments *segments = op_req_q->q_segments; 2563 2559 struct op_reply_qinfo *op_reply_q = NULL; 2560 + struct mpi3_scsi_io_request *scsiio_req = 2561 + (struct mpi3_scsi_io_request *)req; 2564 2562 2565 2563 reply_qidx = op_req_q->reply_qid - 1; 2566 2564 op_reply_q = mrioc->op_reply_qinfo + reply_qidx; ··· 2580 2574 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); 2581 2575 2582 2576 if (mpi3mr_check_req_qfull(op_req_q)) { 2577 + 2578 + if (op_req_q->last_full_host_tag == 2579 + MPI3MR_HOSTTAG_INVALID) 2580 + op_req_q->qfull_instances++; 2581 + 2582 + op_req_q->last_full_host_tag = scsiio_req->host_tag; 2583 + op_req_q->qfull_io_count++; 2583 2584 retval = -EAGAIN; 2584 2585 goto out; 2585 2586 } 2586 2587 } 2588 + 2589 + if (op_req_q->last_full_host_tag != MPI3MR_HOSTTAG_INVALID) 2590 + op_req_q->last_full_host_tag = MPI3MR_HOSTTAG_INVALID; 2587 2591 2588 2592 if (mrioc->reset_in_progress) { 2589 2593 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); ··· 2715 2699 * mpi3mr_sync_timestamp - Issue time stamp sync request 2716 2700 * @mrioc: Adapter reference 2717 2701 * 2718 - * Issue IO unit control MPI request to synchornize firmware 2702 + * Issue IO unit control MPI request to synchronize firmware 2719 2703 * timestamp with host time. 2720 2704 * 2721 2705 * Return: 0 on success, non-zero on failure. 
··· 2902 2886 ioc_err(mrioc, 2903 2887 "flush pending commands for unrecoverable controller\n"); 2904 2888 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 2889 + return; 2890 + } 2891 + 2892 + if (mrioc->invalid_io_comp) { 2893 + mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_INVALID_COMPLETION, 1); 2905 2894 return; 2906 2895 } 2907 2896 ··· 4855 4834 mrioc->req_qinfo[i].qid = 0; 4856 4835 mrioc->req_qinfo[i].reply_qid = 0; 4857 4836 spin_lock_init(&mrioc->req_qinfo[i].q_lock); 4837 + mrioc->req_qinfo[i].last_full_host_tag = 0; 4858 4838 mpi3mr_memset_op_req_q_buffers(mrioc, i); 4859 4839 } 4860 4840 } ··· 5072 5050 */ 5073 5051 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) 5074 5052 { 5075 - u32 ioc_config, ioc_status; 5076 - u8 retval = 1; 5077 - u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10; 5053 + u32 ioc_config, ioc_status, shutdown_action; 5054 + u8 retval = 1, retry = 0; 5055 + u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10, timeout_remaining = 0; 5078 5056 5079 5057 ioc_info(mrioc, "Issuing shutdown Notification\n"); 5080 5058 if (mrioc->unrecoverable) { ··· 5089 5067 return; 5090 5068 } 5091 5069 5070 + shutdown_action = MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL | 5071 + MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ; 5092 5072 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 5093 - ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL; 5094 - ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ; 5073 + ioc_config |= shutdown_action; 5095 5074 5096 5075 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 5097 5076 5098 5077 if (mrioc->facts.shutdown_timeout) 5099 5078 timeout = mrioc->facts.shutdown_timeout * 10; 5079 + timeout_remaining = timeout; 5100 5080 5101 5081 do { 5102 5082 ioc_status = readl(&mrioc->sysif_regs->ioc_status); ··· 5107 5083 retval = 0; 5108 5084 break; 5109 5085 } 5086 + if (mrioc->unrecoverable) 5087 + break; 5088 + if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 5089 + mpi3mr_print_fault_info(mrioc); 5090 + if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT) 5091 + break; 5092 + if (mpi3mr_issue_reset(mrioc, 5093 + MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 5094 + MPI3MR_RESET_FROM_CTLR_CLEANUP)) 5095 + break; 5096 + ioc_config = 5097 + readl(&mrioc->sysif_regs->ioc_configuration); 5098 + ioc_config |= shutdown_action; 5099 + writel(ioc_config, 5100 + &mrioc->sysif_regs->ioc_configuration); 5101 + timeout_remaining = timeout; 5102 + retry++; 5103 + } 5110 5104 msleep(100); 5111 - } while (--timeout); 5105 + } while (--timeout_remaining); 5112 5106 5113 5107 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 5114 5108 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); ··· 5700 5658 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5701 5659 5702 5660 out: 5661 + mrioc->invalid_io_comp = 0; 5703 5662 if (!retval) { 5704 5663 mrioc->diagsave_timeout = 0; 5705 5664 mrioc->reset_in_progress = 0;
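Note on the queue-full accounting added above: a "queue full instance" opens on the first rejected submission and closes once a request goes through again, while every rejected IO bumps the running count. A sketch mirroring the new op_req_qinfo fields (TAG_INVALID stands in for MPI3MR_HOSTTAG_INVALID):

#include <linux/types.h>

#define TAG_INVALID 0xFFFF

struct req_q_ex {
        u16 last_full_host_tag;
        u64 qfull_io_count;
        u32 qfull_instances;
};

static void on_queue_full(struct req_q_ex *q, u16 host_tag)
{
        if (q->last_full_host_tag == TAG_INVALID)
                q->qfull_instances++;   /* a new queue-full episode begins */
        q->last_full_host_tag = host_tag;
        q->qfull_io_count++;            /* IO returned to SML with -EAGAIN */
}

static void on_submit_ok(struct req_q_ex *q)
{
        q->last_full_host_tag = TAG_INVALID;    /* episode (if any) ends */
}

The same file also hardens shutdown: if the controller faults while a shutdown notification is pending, the driver soft-resets and re-issues the request up to MPI3MR_MAX_SHUTDOWN_RETRY_COUNT times.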
+9 -2
drivers/scsi/mpi3mr/mpi3mr_os.c
··· 3459 3459 } 3460 3460 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3461 3461 if (!scmd) { 3462 - panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3463 - mrioc->name, host_tag); 3462 + ioc_err(mrioc, "Cannot Identify scmd for host_tag 0x%x", host_tag); 3463 + ioc_err(mrioc, 3464 + "reply_desc_type(%d) host_tag(%d(0x%04x)): qid(%d): command issued to\n" 3465 + "handle(0x%04x) returned with ioc_status(0x%04x), log_info(0x%08x),\n" 3466 + "scsi_state(0x%02x), scsi_status(0x%02x), xfer_count(%d), resp_data(0x%08x)\n", 3467 + reply_desc_type, host_tag, host_tag, qidx+1, dev_handle, ioc_status, 3468 + ioc_loginfo, scsi_state, scsi_status, xfer_count, 3469 + resp_data); 3470 + mrioc->invalid_io_comp = 1; 3464 3471 goto out; 3465 3472 } 3466 3473 priv = scsi_cmd_priv(scmd);
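Note on the panic() removal above: an unidentifiable SCSI completion now just logs the full reply descriptor and sets mrioc->invalid_io_comp; the watchdog (see the mpi3mr_fw.c hunk) picks the flag up and issues a soft reset with reason MPI3MR_RESET_FROM_INVALID_COMPLETION from a safe context, and the reset handler clears the flag on exit. A sketch of the defer-to-watchdog shape:

#include <linux/types.h>

struct ioc_ex { u8 invalid_io_comp; };

static void on_bad_completion(struct ioc_ex *mrioc)
{
        /* previously: panic(); now: log the descriptor and flag it */
        mrioc->invalid_io_comp = 1;
}

static void watchdog_tick(struct ioc_ex *mrioc)
{
        if (mrioc->invalid_io_comp) {
                /* soft reset runs here; its exit path clears the flag */
                return;
        }
}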
+61 -1
drivers/scsi/qla2xxx/qla_attr.c
··· 1638 1638 { 1639 1639 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1640 1640 int rval = QLA_FUNCTION_FAILED; 1641 - uint16_t state[6]; 1641 + uint16_t state[16]; 1642 1642 uint32_t pstate; 1643 1643 1644 1644 if (IS_QLAFX00(vha->hw)) { ··· 2402 2402 vha->dport_data[0], vha->dport_data[1], 2403 2403 vha->dport_data[2], vha->dport_data[3]); 2404 2404 } 2405 + 2406 + static ssize_t 2407 + qla2x00_mpi_fw_state_show(struct device *dev, struct device_attribute *attr, 2408 + char *buf) 2409 + { 2410 + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2411 + int rval = QLA_FUNCTION_FAILED; 2412 + u16 state[16]; 2413 + u16 mpi_state; 2414 + struct qla_hw_data *ha = vha->hw; 2415 + 2416 + if (!(IS_QLA27XX(ha) || IS_QLA28XX(ha))) 2417 + return scnprintf(buf, PAGE_SIZE, 2418 + "MPI state reporting is not supported for this HBA.\n"); 2419 + 2420 + memset(state, 0, sizeof(state)); 2421 + 2422 + mutex_lock(&vha->hw->optrom_mutex); 2423 + if (qla2x00_chip_is_down(vha)) { 2424 + mutex_unlock(&vha->hw->optrom_mutex); 2425 + ql_dbg(ql_dbg_user, vha, 0x70df, 2426 + "ISP reset is in progress, failing mpi_fw_state.\n"); 2427 + return -EBUSY; 2428 + } else if (vha->hw->flags.eeh_busy) { 2429 + mutex_unlock(&vha->hw->optrom_mutex); 2430 + ql_dbg(ql_dbg_user, vha, 0x70ea, 2431 + "HBA in PCI error state, failing mpi_fw_state.\n"); 2432 + return -EBUSY; 2433 + } 2434 + 2435 + rval = qla2x00_get_firmware_state(vha, state); 2436 + mutex_unlock(&vha->hw->optrom_mutex); 2437 + if (rval != QLA_SUCCESS) { 2438 + ql_dbg(ql_dbg_user, vha, 0x70eb, 2439 + "MB Command to retrieve MPI state failed (%d), failing mpi_fw_state.\n", 2440 + rval); 2441 + return -EIO; 2442 + } 2443 + 2444 + mpi_state = state[11]; 2445 + 2446 + if (!(mpi_state & BIT_15)) 2447 + return scnprintf(buf, PAGE_SIZE, 2448 + "MPI firmware state reporting is not supported by this firmware. (0x%02x)\n", 2449 + mpi_state); 2450 + 2451 + if (!(mpi_state & BIT_8)) 2452 + return scnprintf(buf, PAGE_SIZE, 2453 + "MPI firmware is disabled. (0x%02x)\n", 2454 + mpi_state); 2455 + 2456 + return scnprintf(buf, PAGE_SIZE, 2457 + "MPI firmware is enabled, state is %s. (0x%02x)\n", 2458 + mpi_state & BIT_9 ? "active" : "inactive", 2459 + mpi_state); 2460 + } 2461 + 2405 2462 static DEVICE_ATTR(dport_diagnostics, 0444, 2406 2463 qla2x00_dport_diagnostics_show, NULL); 2407 2464 ··· 2526 2469 qla2x00_port_speed_store); 2527 2470 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); 2528 2471 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); 2472 + static DEVICE_ATTR(mpi_fw_state, 0444, qla2x00_mpi_fw_state_show, NULL); 2473 + 2529 2474 2530 2475 static struct attribute *qla2x00_host_attrs[] = { 2531 2476 &dev_attr_driver_version.attr.attr, ··· 2576 2517 &dev_attr_qlini_mode.attr, 2577 2518 &dev_attr_ql2xiniexchg.attr, 2578 2519 &dev_attr_ql2xexchoffld.attr, 2520 + &dev_attr_mpi_fw_state.attr, 2579 2521 NULL, 2580 2522 }; 2581 2523
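Note on the new mpi_fw_state attribute above: mailbox word 12 of GET_FIRMWARE_STATE (requested via MBX_12 in the qla_mbx.c hunk below) carries the MPI state on 27xx/28xx parts; bit 15 means state reporting is supported, bit 8 that MPI firmware is enabled, bit 9 that it is active. A sketch of the decode, using BIT() from <linux/bits.h> in place of the driver's BIT_* macros:

#include <linux/bits.h>
#include <linux/types.h>

static const char *mpi_state_str(u16 mpi_state)
{
        if (!(mpi_state & BIT(15)))
                return "reporting not supported";
        if (!(mpi_state & BIT(8)))
                return "disabled";
        return (mpi_state & BIT(9)) ? "enabled, active"
                                    : "enabled, inactive";
}

Reading /sys/class/scsi_host/hostN/mpi_fw_state returns one of these strings together with the raw state value.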
+1 -1
drivers/scsi/qla2xxx/qla_init.c
··· 4914 4914 unsigned long wtime, mtime, cs84xx_time; 4915 4915 uint16_t min_wait; /* Minimum wait time if loop is down */ 4916 4916 uint16_t wait_time; /* Wait time if loop is coming ready */ 4917 - uint16_t state[6]; 4917 + uint16_t state[16]; 4918 4918 struct qla_hw_data *ha = vha->hw; 4919 4919 4920 4920 if (IS_QLAFX00(vha->hw))
+1 -1
drivers/scsi/qla2xxx/qla_inline.h
··· 621 621 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 622 622 623 623 if (!ha->qp_cpu_map) { 624 - ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, NR_CPUS); 624 + ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, nr_cpu_ids); 625 625 if (!ha->qp_cpu_map) { 626 626 ql_log(ql_log_fatal, vha, 0x0180, 627 627 "Unable to allocate memory for qp_cpu_map ptrs.\n");
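Note on the qp_cpu_map sizing fix above: the table is now sized by nr_cpu_ids (the number of possible CPU ids on the running system) rather than the compile-time NR_CPUS ceiling, which can be far larger and waste memory. A sketch with plain kcalloc() in place of the tree's kzalloc_objs() helper:

#include <linux/cpumask.h>
#include <linux/slab.h>

static void **alloc_qp_cpu_map(void)
{
        /* one qpair pointer per possible CPU id, zero-initialized */
        return kcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL);
}

The lpfc_init.c hunk earlier in this merge applies the same principle, breaking out of the IRQ-affinity walk once cpumask_next() reaches nr_cpu_ids.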
+9
drivers/scsi/qla2xxx/qla_mbx.c
··· 2268 2268 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2269 2269 else 2270 2270 mcp->in_mb = MBX_1|MBX_0; 2271 + 2272 + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2273 + mcp->mb[12] = 0; 2274 + mcp->out_mb |= MBX_12; 2275 + mcp->in_mb |= MBX_12; 2276 + } 2277 + 2271 2278 mcp->tov = MBX_TOV_SECONDS; 2272 2279 mcp->flags = 0; 2273 2280 rval = qla2x00_mailbox_command(vha, mcp); ··· 2287 2280 states[3] = mcp->mb[4]; 2288 2281 states[4] = mcp->mb[5]; 2289 2282 states[5] = mcp->mb[6]; /* DPORT status */ 2283 + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2284 + states[11] = mcp->mb[12]; /* MPI state. */ 2290 2285 } 2291 2286 2292 2287 if (rval != QLA_SUCCESS) {
+2
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 1841 1841 .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, 1842 1842 .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, 1843 1843 1844 + .default_compl_type = TARGET_QUEUE_COMPL, 1844 1845 .default_submit_type = TARGET_DIRECT_SUBMIT, 1845 1846 .direct_submit_supp = 1, 1846 1847 }; ··· 1882 1881 1883 1882 .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, 1884 1883 1884 + .default_compl_type = TARGET_QUEUE_COMPL, 1885 1885 .default_submit_type = TARGET_DIRECT_SUBMIT, 1886 1886 .direct_submit_supp = 1, 1887 1887 };
+47
drivers/scsi/scsi_lib.c
··· 13 13 #include <linux/bitops.h> 14 14 #include <linux/blkdev.h> 15 15 #include <linux/completion.h> 16 + #include <linux/ctype.h> 16 17 #include <linux/kernel.h> 17 18 #include <linux/export.h> 18 19 #include <linux/init.h> ··· 3460 3459 return id_size; 3461 3460 } 3462 3461 EXPORT_SYMBOL(scsi_vpd_lun_id); 3462 + 3463 + /** 3464 + * scsi_vpd_lun_serial - return a unique device serial number 3465 + * @sdev: SCSI device 3466 + * @sn: buffer for the serial number 3467 + * @sn_size: size of the buffer 3468 + * 3469 + * Copies the device serial number into @sn based on the information in 3470 + * the VPD page 0x80 of the device. The string will be null terminated 3471 + * and have leading and trailing whitespace stripped. 3472 + * 3473 + * Returns the length of the serial number or error on failure. 3474 + */ 3475 + int scsi_vpd_lun_serial(struct scsi_device *sdev, char *sn, size_t sn_size) 3476 + { 3477 + const struct scsi_vpd *vpd_pg80; 3478 + const unsigned char *d; 3479 + int len; 3480 + 3481 + guard(rcu)(); 3482 + vpd_pg80 = rcu_dereference(sdev->vpd_pg80); 3483 + if (!vpd_pg80) 3484 + return -ENXIO; 3485 + 3486 + len = vpd_pg80->len - 4; 3487 + d = vpd_pg80->data + 4; 3488 + 3489 + /* Skip leading spaces */ 3490 + while (len > 0 && isspace(*d)) { 3491 + len--; 3492 + d++; 3493 + } 3494 + 3495 + /* Skip trailing spaces */ 3496 + while (len > 0 && isspace(d[len - 1])) 3497 + len--; 3498 + 3499 + if (sn_size < len + 1) 3500 + return -EINVAL; 3501 + 3502 + memcpy(sn, d, len); 3503 + sn[len] = '\0'; 3504 + 3505 + return len; 3506 + } 3507 + EXPORT_SYMBOL(scsi_vpd_lun_serial); 3463 3508 3464 3509 /** 3465 3510 * scsi_vpd_tpg_id - return a target port group identifier
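Note on the new scsi_vpd_lun_serial() helper above: it copies the VPD page 0x80 serial number, strips leading and trailing whitespace, and NUL-terminates, returning the length or a negative errno (-ENXIO with no page cached, -EINVAL when the buffer is too small). The serial sysfs attribute in scsi_sysfs.c below is its first user. A sketch of a hypothetical in-kernel caller, assuming the declaration is visible via scsi/scsi_device.h:

#include <scsi/scsi_device.h>

static void log_serial_example(struct scsi_device *sdev)
{
        char sn[64];
        int len = scsi_vpd_lun_serial(sdev, sn, sizeof(sn));

        if (len >= 0)
                sdev_printk(KERN_INFO, sdev, "serial: %s\n", sn);
}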
+2 -8
drivers/scsi/scsi_scan.c
··· 1940 1940 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) 1941 1941 { 1942 1942 struct async_scan_data *data = NULL; 1943 - unsigned long flags; 1944 1943 1945 1944 if (strncmp(scsi_scan_type, "sync", 4) == 0) 1946 1945 return NULL; ··· 1958 1959 goto err; 1959 1960 init_completion(&data->prev_finished); 1960 1961 1961 - spin_lock_irqsave(shost->host_lock, flags); 1962 - shost->async_scan = 1; 1963 - spin_unlock_irqrestore(shost->host_lock, flags); 1962 + shost->async_scan = true; 1964 1963 mutex_unlock(&shost->scan_mutex); 1965 1964 1966 1965 spin_lock(&async_scan_lock); ··· 1986 1989 static void scsi_finish_async_scan(struct async_scan_data *data) 1987 1990 { 1988 1991 struct Scsi_Host *shost; 1989 - unsigned long flags; 1990 1992 1991 1993 if (!data) 1992 1994 return; ··· 2005 2009 2006 2010 scsi_sysfs_add_devices(shost); 2007 2011 2008 - spin_lock_irqsave(shost->host_lock, flags); 2009 - shost->async_scan = 0; 2010 - spin_unlock_irqrestore(shost->host_lock, flags); 2012 + shost->async_scan = false; 2011 2013 2012 2014 mutex_unlock(&shost->scan_mutex); 2013 2015
+16
drivers/scsi/scsi_sysfs.c
··· 1051 1051 } 1052 1052 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); 1053 1053 1054 + static ssize_t 1055 + sdev_show_serial(struct device *dev, struct device_attribute *attr, char *buf) 1056 + { 1057 + struct scsi_device *sdev = to_scsi_device(dev); 1058 + ssize_t ret; 1059 + 1060 + ret = scsi_vpd_lun_serial(sdev, buf, PAGE_SIZE - 1); 1061 + if (ret < 0) 1062 + return ret; 1063 + 1064 + buf[ret] = '\n'; 1065 + return ret + 1; 1066 + } 1067 + static DEVICE_ATTR(serial, S_IRUGO, sdev_show_serial, NULL); 1068 + 1054 1069 #define BLIST_FLAG_NAME(name) \ 1055 1070 [const_ilog2((__force __u64)BLIST_##name)] = #name 1056 1071 static const char *const sdev_bflags_name[] = { ··· 1310 1295 &dev_attr_device_busy.attr, 1311 1296 &dev_attr_vendor.attr, 1312 1297 &dev_attr_model.attr, 1298 + &dev_attr_serial.attr, 1313 1299 &dev_attr_rev.attr, 1314 1300 &dev_attr_rescan.attr, 1315 1301 &dev_attr_delete.attr,
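The show routine caps the helper at PAGE_SIZE - 1 so one byte remains for the trailing newline. From userspace the new attribute reads like any other scsi_device attribute; a hypothetical reader (the device path is assumed):

#include <stdio.h>

int main(void)
{
        /* path assumed; the attribute sits next to "vendor", "model", "rev" */
        FILE *f = fopen("/sys/class/scsi_device/0:0:0:0/device/serial", "r");
        char sn[256];

        if (f && fgets(sn, sizeof(sn), f))
                printf("serial: %s", sn);
        if (f)
                fclose(f);
        return 0;
}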
+69 -12
drivers/scsi/sd.c
··· 107 107 static void sd_revalidate_disk(struct gendisk *); 108 108 109 109 static DEFINE_IDA(sd_index_ida); 110 + static DEFINE_MUTEX(sd_mutex_lock); 110 111 111 112 static mempool_t *sd_page_pool; 113 + static mempool_t *sd_large_page_pool; 114 + static atomic_t sd_large_page_pool_users = ATOMIC_INIT(0); 112 115 static struct lock_class_key sd_bio_compl_lkclass; 113 116 114 117 static const char *sd_cache_types[] = { 115 118 "write through", "none", "write back", 116 119 "write back, no read (daft)" 117 120 }; 121 + 122 + static int sd_large_pool_create(void) 123 + { 124 + mutex_lock(&sd_mutex_lock); 125 + if (!sd_large_page_pool) { 126 + sd_large_page_pool = mempool_create_page_pool( 127 + SD_MEMPOOL_SIZE, get_order(BLK_MAX_BLOCK_SIZE)); 128 + if (!sd_large_page_pool) { 129 + printk(KERN_ERR "sd: can't create large page mempool\n"); 130 + mutex_unlock(&sd_mutex_lock); 131 + return -ENOMEM; 132 + } 133 + } 134 + atomic_inc(&sd_large_page_pool_users); 135 + mutex_unlock(&sd_mutex_lock); 136 + return 0; 137 + } 138 + 139 + static void sd_large_pool_destroy(void) 140 + { 141 + mutex_lock(&sd_mutex_lock); 142 + if (atomic_dec_and_test(&sd_large_page_pool_users)) { 143 + mempool_destroy(sd_large_page_pool); 144 + sd_large_page_pool = NULL; 145 + } 146 + mutex_unlock(&sd_mutex_lock); 147 + } 118 148 119 149 static void sd_disable_discard(struct scsi_disk *sdkp) 120 150 { ··· 958 928 return protect; 959 929 } 960 930 961 - static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) 931 + static void *sd_set_special_bvec(struct scsi_cmnd *cmd, unsigned int data_len) 962 932 { 963 933 struct page *page; 934 + struct request *rq = scsi_cmd_to_rq(cmd); 935 + struct scsi_device *sdp = cmd->device; 936 + unsigned sector_size = sdp->sector_size; 937 + unsigned int nr_pages = DIV_ROUND_UP(sector_size, PAGE_SIZE); 938 + int n; 964 939 965 - page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 940 + if (sector_size > PAGE_SIZE) 941 + page = mempool_alloc(sd_large_page_pool, GFP_ATOMIC); 942 + else 943 + page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 966 944 if (!page) 967 945 return NULL; 968 - clear_highpage(page); 946 + 947 + for (n = 0; n < nr_pages; n++) 948 + clear_highpage(page + n); 969 949 bvec_set_page(&rq->special_vec, page, data_len, 0); 970 950 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 971 951 return bvec_virt(&rq->special_vec); ··· 991 951 unsigned int data_len = 24; 992 952 char *buf; 993 953 994 - buf = sd_set_special_bvec(rq, data_len); 954 + buf = sd_set_special_bvec(cmd, data_len); 995 955 if (!buf) 996 956 return BLK_STS_RESOURCE; 997 957 ··· 1080 1040 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1081 1041 u32 data_len = sdp->sector_size; 1082 1042 1083 - if (!sd_set_special_bvec(rq, data_len)) 1043 + if (!sd_set_special_bvec(cmd, data_len)) 1084 1044 return BLK_STS_RESOURCE; 1085 1045 1086 1046 cmd->cmd_len = 16; ··· 1107 1067 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1108 1068 u32 data_len = sdp->sector_size; 1109 1069 1110 - if (!sd_set_special_bvec(rq, data_len)) 1070 + if (!sd_set_special_bvec(cmd, data_len)) 1111 1071 return BLK_STS_RESOURCE; 1112 1072 1113 1073 cmd->cmd_len = 10; ··· 1553 1513 static void sd_uninit_command(struct scsi_cmnd *SCpnt) 1554 1514 { 1555 1515 struct request *rq = scsi_cmd_to_rq(SCpnt); 1516 + struct scsi_device *sdp = SCpnt->device; 1517 + unsigned sector_size = sdp->sector_size; 1556 1518 1557 - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1558 - mempool_free(rq->special_vec.bv_page, sd_page_pool); 1519 + if 
(rq->rq_flags & RQF_SPECIAL_PAYLOAD) { 1520 + if (sector_size > PAGE_SIZE) 1521 + mempool_free(rq->special_vec.bv_page, sd_large_page_pool); 1522 + else 1523 + mempool_free(rq->special_vec.bv_page, sd_page_pool); 1524 + } 1559 1525 } 1560 1526 1561 1527 static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) ··· 2958 2912 "Sector size 0 reported, assuming 512.\n"); 2959 2913 } 2960 2914 2961 - if (sector_size != 512 && 2962 - sector_size != 1024 && 2963 - sector_size != 2048 && 2964 - sector_size != 4096) { 2915 + if (blk_validate_block_size(sector_size)) { 2965 2916 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2966 2917 sector_size); 2967 2918 /* ··· 4061 4018 error = device_add(&sdkp->disk_dev); 4062 4019 if (error) { 4063 4020 put_device(&sdkp->disk_dev); 4021 + put_disk(gd); 4064 4022 goto out; 4065 4023 } 4066 4024 ··· 4087 4043 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; 4088 4044 4089 4045 sd_revalidate_disk(gd); 4046 + if (sdp->sector_size > PAGE_SIZE) { 4047 + if (sd_large_pool_create()) { 4048 + error = -ENOMEM; 4049 + goto out_free_index; 4050 + } 4051 + } 4090 4052 4091 4053 if (sdp->removable) { 4092 4054 gd->flags |= GENHD_FL_REMOVABLE; ··· 4110 4060 if (error) { 4111 4061 device_unregister(&sdkp->disk_dev); 4112 4062 put_disk(gd); 4063 + if (sdp->sector_size > PAGE_SIZE) 4064 + sd_large_pool_destroy(); 4113 4065 goto out; 4114 4066 } 4115 4067 ··· 4264 4212 sd_shutdown(sdp); 4265 4213 4266 4214 put_disk(sdkp->disk); 4215 + 4216 + if (sdp->sector_size > PAGE_SIZE) 4217 + sd_large_pool_destroy(); 4267 4218 } 4268 4219 4269 4220 static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime) ··· 4490 4435 4491 4436 scsi_unregister_driver(&sd_template); 4492 4437 mempool_destroy(sd_page_pool); 4438 + if (sd_large_page_pool) 4439 + mempool_destroy(sd_large_page_pool); 4493 4440 4494 4441 class_unregister(&sd_disk_class); 4495 4442
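Disks whose logical block size exceeds PAGE_SIZE now draw their zeroed special-payload buffers from a shared higher-order pool that is created for the first such disk and destroyed with the last. A minimal sketch of the refcounted create-once/destroy-on-last-put pattern, with hypothetical names (the hunk itself pairs sd_mutex_lock with an atomic user count):

static DEFINE_MUTEX(pool_lock);
static mempool_t *pool;
static int pool_users;

static int pool_get(unsigned int block_size)
{
        guard(mutex)(&pool_lock);
        if (!pool) {
                /* order > 0: each element covers one oversized block */
                pool = mempool_create_page_pool(8, get_order(block_size));
                if (!pool)
                        return -ENOMEM;
        }
        pool_users++;
        return 0;
}

static void pool_put(void)
{
        guard(mutex)(&pool_lock);
        if (--pool_users == 0) {
                mempool_destroy(pool);
                pool = NULL;
        }
}

The matching sd_set_special_bvec() change clears every constituent page with clear_highpage(), since an order-N allocation can span several highmem pages that cannot be zeroed in one call.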
+38 -50
drivers/scsi/sg.c
··· 81 81 82 82 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) 83 83 84 - static int sg_big_buff = SG_DEF_RESERVED_SIZE; 85 84 /* N.B. This variable is readable and writeable via 86 - /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer 87 - of this size (or less if there is not enough memory) will be reserved 88 - for use by this file descriptor. [Deprecated usage: this variable is also 89 - readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into 90 - the kernel (i.e. it is not a module).] */ 91 - static int def_reserved_size = -1; /* picks up init parameter */ 85 + * /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer 86 + * of this size (or less if there is not enough memory) will be reserved 87 + * for use by this file descriptor. 88 + */ 89 + 90 + /* picks up init parameter */ 91 + static int def_reserved_size = SG_DEF_RESERVED_SIZE; 92 92 static int sg_allow_dio = SG_ALLOW_DIO_DEF; 93 93 94 94 static int scatter_elem_sz = SG_SCATTER_SZ; ··· 1623 1623 } 1624 1624 1625 1625 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); 1626 - module_param_named(def_reserved_size, def_reserved_size, int, 1627 - S_IRUGO | S_IWUSR); 1628 1626 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); 1627 + 1628 + static int def_reserved_size_set(const char *val, const struct kernel_param *kp) 1629 + { 1630 + int size, ret; 1631 + 1632 + if (!val) 1633 + return -EINVAL; 1634 + 1635 + ret = kstrtoint(val, 0, &size); 1636 + if (ret) 1637 + return ret; 1638 + 1639 + /* limit to 1 MB */ 1640 + if (size < 0 || size > 1048576) 1641 + return -ERANGE; 1642 + 1643 + def_reserved_size = size; 1644 + return 0; 1645 + } 1646 + 1647 + static const struct kernel_param_ops def_reserved_size_ops = { 1648 + .set = def_reserved_size_set, 1649 + .get = param_get_int, 1650 + }; 1651 + 1652 + module_param_cb(def_reserved_size, &def_reserved_size_ops, &def_reserved_size, 1653 + S_IRUGO | S_IWUSR); 1629 1654 1630 1655 MODULE_AUTHOR("Douglas Gilbert"); 1631 1656 MODULE_DESCRIPTION("SCSI generic (sg) driver"); ··· 1663 1638 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); 1664 1639 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); 1665 1640 1666 - #ifdef CONFIG_SYSCTL 1667 - #include <linux/sysctl.h> 1668 - 1669 - static const struct ctl_table sg_sysctls[] = { 1670 - { 1671 - .procname = "sg-big-buff", 1672 - .data = &sg_big_buff, 1673 - .maxlen = sizeof(int), 1674 - .mode = 0444, 1675 - .proc_handler = proc_dointvec, 1676 - }, 1677 - }; 1678 - 1679 - static struct ctl_table_header *hdr; 1680 - static void register_sg_sysctls(void) 1681 - { 1682 - if (!hdr) 1683 - hdr = register_sysctl("kernel", sg_sysctls); 1684 - } 1685 - 1686 - static void unregister_sg_sysctls(void) 1687 - { 1688 - unregister_sysctl_table(hdr); 1689 - } 1690 - #else 1691 - #define register_sg_sysctls() do { } while (0) 1692 - #define unregister_sg_sysctls() do { } while (0) 1693 - #endif /* CONFIG_SYSCTL */ 1694 - 1695 1641 static int __init 1696 1642 init_sg(void) 1697 1643 { ··· 1672 1676 scatter_elem_sz = PAGE_SIZE; 1673 1677 scatter_elem_sz_prev = scatter_elem_sz; 1674 1678 } 1675 - if (def_reserved_size >= 0) 1676 - sg_big_buff = def_reserved_size; 1677 - else 1678 - def_reserved_size = sg_big_buff; 1679 1679 1680 1680 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 1681 1681 SG_MAX_DEVS, "sg"); ··· 1689 1697 return 0; 1690 1698 } 1691 1699 
class_unregister(&sg_sysfs_class); 1692 - register_sg_sysctls(); 1693 1700 err_out: 1694 1701 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); 1695 1702 return rc; ··· 1697 1706 static void __exit 1698 1707 exit_sg(void) 1699 1708 { 1700 - unregister_sg_sysctls(); 1701 1709 #ifdef CONFIG_SCSI_PROC_FS 1702 1710 remove_proc_subtree("scsi/sg", NULL); 1703 1711 #endif /* CONFIG_SCSI_PROC_FS */ ··· 2172 2182 write_unlock_irqrestore(&sdp->sfd_lock, iflags); 2173 2183 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, 2174 2184 "sg_add_sfp: sfp=0x%p\n", sfp)); 2175 - if (unlikely(sg_big_buff != def_reserved_size)) 2176 - sg_big_buff = def_reserved_size; 2177 2185 2178 - bufflen = min_t(int, sg_big_buff, 2186 + bufflen = min_t(int, def_reserved_size, 2179 2187 max_sectors_bytes(sdp->device->request_queue)); 2180 2188 sg_build_reserve(sfp, bufflen); 2181 2189 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, ··· 2401 2413 2402 2414 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) 2403 2415 { 2404 - return single_open(file, sg_proc_seq_show_int, &sg_big_buff); 2416 + return single_open(file, sg_proc_seq_show_int, &def_reserved_size); 2405 2417 } 2406 2418 2407 2419 static ssize_t ··· 2418 2430 if (err) 2419 2431 return err; 2420 2432 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2421 - sg_big_buff = k; 2433 + def_reserved_size = k; 2422 2434 return count; 2423 2435 } 2424 2436 return -ERANGE; ··· 2591 2603 2592 2604 if (it && (0 == it->index)) 2593 2605 seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", 2594 - (int)it->max, sg_big_buff); 2606 + (int)it->max, def_reserved_size); 2595 2607 2596 2608 read_lock_irqsave(&sg_index_lock, iflags); 2597 2609 sdp = it ? sg_lookup_dev(it->index) : NULL;
+21 -11
drivers/scsi/storvsc_drv.c
··· 1131 1131 kfree(payload); 1132 1132 } 1133 1133 1134 + /* 1135 + * The current SCSI handling on the host side does not correctly handle: 1136 + * INQUIRY with page code 0x80, MODE_SENSE / MODE_SENSE_10 with cmd[2] == 0x1c, 1137 + * and (for FC) MAINTENANCE_IN / PERSISTENT_RESERVE_IN passthrough. 1138 + */ 1139 + static bool storvsc_host_mishandles_cmd(u8 opcode, struct hv_device *device) 1140 + { 1141 + switch (opcode) { 1142 + case INQUIRY: 1143 + case MODE_SENSE: 1144 + case MODE_SENSE_10: 1145 + return true; 1146 + case MAINTENANCE_IN: 1147 + case PERSISTENT_RESERVE_IN: 1148 + return hv_dev_is_fc(device); 1149 + default: 1150 + return false; 1151 + } 1152 + } 1153 + 1134 1154 static void storvsc_on_io_completion(struct storvsc_device *stor_device, 1135 1155 struct vstor_packet *vstor_packet, 1136 1156 struct storvsc_cmd_request *request) ··· 1161 1141 stor_pkt = &request->vstor_packet; 1162 1142 1163 1143 /* 1164 - * The current SCSI handling on the host side does 1165 - * not correctly handle: 1166 - * INQUIRY command with page code parameter set to 0x80 1167 - * MODE_SENSE and MODE_SENSE_10 command with cmd[2] == 0x1c 1168 - * MAINTENANCE_IN is not supported by HyperV FC passthrough 1169 - * 1170 1144 * Set up srb and scsi status so this won't be fatal. 1171 1145 * We do this so we can distinguish truly fatal failures 1172 1146 * (srb status == 0x4) and off-line the device in that case. 1173 1147 */ 1174 1148 1175 - if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || 1176 - (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) || 1177 - (stor_pkt->vm_srb.cdb[0] == MODE_SENSE_10) || 1178 - (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN && 1179 - hv_dev_is_fc(device))) { 1149 + if (storvsc_host_mishandles_cmd(stor_pkt->vm_srb.cdb[0], device)) { 1180 1150 vstor_packet->vm_srb.scsi_status = 0; 1181 1151 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; 1182 1152 }
+6 -8
drivers/scsi/virtio_scsi.c
··· 233 233 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); 234 234 }; 235 235 236 - static void virtscsi_handle_event(struct work_struct *work); 237 236 238 237 static int virtscsi_kick_event(struct virtio_scsi *vscsi, 239 238 struct virtio_scsi_event_node *event_node) ··· 241 242 struct scatterlist sg; 242 243 unsigned long flags; 243 244 244 - INIT_WORK(&event_node->work, virtscsi_handle_event); 245 245 sg_init_one(&sg, event_node->event, sizeof(struct virtio_scsi_event)); 246 246 247 247 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); ··· 982 984 983 985 virtio_device_ready(vdev); 984 986 985 - if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) 986 - virtscsi_kick_event_all(vscsi); 987 + for (int i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) 988 + INIT_WORK(&vscsi->event_list[i].work, virtscsi_handle_event); 989 + 990 + virtscsi_kick_event_all(vscsi); 987 991 988 992 scsi_scan_host(shost); 989 993 return 0; ··· 1002 1002 struct Scsi_Host *shost = virtio_scsi_host(vdev); 1003 1003 struct virtio_scsi *vscsi = shost_priv(shost); 1004 1004 1005 - if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) 1006 - virtscsi_cancel_event_work(vscsi); 1005 + virtscsi_cancel_event_work(vscsi); 1007 1006 1008 1007 scsi_remove_host(shost); 1009 1008 virtscsi_remove_vqs(vdev); ··· 1028 1029 1029 1030 virtio_device_ready(vdev); 1030 1031 1031 - if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) 1032 - virtscsi_kick_event_all(vscsi); 1032 + virtscsi_kick_event_all(vscsi); 1033 1033 1034 1034 return err; 1035 1035 }
+1
drivers/target/iscsi/iscsi_target_configfs.c
··· 1591 1591 1592 1592 .write_pending_must_be_called = 1, 1593 1593 1594 + .default_compl_type = TARGET_QUEUE_COMPL, 1594 1595 .default_submit_type = TARGET_DIRECT_SUBMIT, 1595 1596 .direct_submit_supp = 1, 1596 1597 };
+1
drivers/target/loopback/tcm_loop.c
··· 1147 1147 .tfc_wwn_attrs = tcm_loop_wwn_attrs, 1148 1148 .tfc_tpg_base_attrs = tcm_loop_tpg_attrs, 1149 1149 .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs, 1150 + .default_compl_type = TARGET_QUEUE_COMPL, 1150 1151 .default_submit_type = TARGET_QUEUE_SUBMIT, 1151 1152 .direct_submit_supp = 0, 1152 1153 };
+1
drivers/target/sbp/sbp_target.c
··· 2278 2278 .tfc_tpg_base_attrs = sbp_tpg_base_attrs, 2279 2279 .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, 2280 2280 2281 + .default_compl_type = TARGET_QUEUE_COMPL, 2281 2282 .default_submit_type = TARGET_DIRECT_SUBMIT, 2282 2283 .direct_submit_supp = 1, 2283 2284 };
+22
drivers/target/target_core_configfs.c
··· 575 575 DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); 576 576 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); 577 577 DEF_CONFIGFS_ATTRIB_SHOW(submit_type); 578 + DEF_CONFIGFS_ATTRIB_SHOW(complete_type); 578 579 DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len); 579 580 DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment); 580 581 DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity); ··· 1267 1266 return count; 1268 1267 } 1269 1268 1269 + static ssize_t complete_type_store(struct config_item *item, const char *page, 1270 + size_t count) 1271 + { 1272 + struct se_dev_attrib *da = to_attrib(item); 1273 + int ret; 1274 + u8 val; 1275 + 1276 + ret = kstrtou8(page, 0, &val); 1277 + if (ret < 0) 1278 + return ret; 1279 + 1280 + if (val > TARGET_QUEUE_COMPL) 1281 + return -EINVAL; 1282 + 1283 + da->complete_type = val; 1284 + return count; 1285 + } 1286 + 1270 1287 CONFIGFS_ATTR(, emulate_model_alias); 1271 1288 CONFIGFS_ATTR(, emulate_dpo); 1272 1289 CONFIGFS_ATTR(, emulate_fua_write); ··· 1321 1302 CONFIGFS_ATTR(, alua_support); 1322 1303 CONFIGFS_ATTR(, pgr_support); 1323 1304 CONFIGFS_ATTR(, submit_type); 1305 + CONFIGFS_ATTR(, complete_type); 1324 1306 CONFIGFS_ATTR_RO(, atomic_max_len); 1325 1307 CONFIGFS_ATTR_RO(, atomic_alignment); 1326 1308 CONFIGFS_ATTR_RO(, atomic_granularity); ··· 1370 1350 &attr_pgr_support, 1371 1351 &attr_emulate_rsoc, 1372 1352 &attr_submit_type, 1353 + &attr_complete_type, 1373 1354 &attr_atomic_alignment, 1374 1355 &attr_atomic_max_len, 1375 1356 &attr_atomic_granularity, ··· 1394 1373 &attr_alua_support, 1395 1374 &attr_pgr_support, 1396 1375 &attr_submit_type, 1376 + &attr_complete_type, 1397 1377 NULL, 1398 1378 }; 1399 1379 EXPORT_SYMBOL(passthrough_attrib_attrs);
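complete_type_store() accepts values up to TARGET_QUEUE_COMPL and feeds the completion dispatch added in target_core_transport.c further down. A hypothetical userspace write (the configfs path is an assumption, and "2" presumes the completion enum mirrors submit_type's 0/1/2 encoding):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* Path and value are assumptions for illustration only. */
        int fd = open("/sys/kernel/config/target/core/iblock_0/disk0"
                      "/attrib/complete_type", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, "2", 1) != 1) {   /* request queued completions */
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}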
+1
drivers/target/target_core_device.c
··· 813 813 DA_UNMAP_ZEROES_DATA_DEFAULT; 814 814 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 815 815 dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT; 816 + dev->dev_attrib.complete_type = TARGET_FABRIC_DEFAULT_COMPL; 816 817 817 818 /* Skip allocating lun_stats since we can't export them. */ 818 819 xcopy_lun = &dev->xcopy_lun;
+24
drivers/target/target_core_fabric_configfs.c
··· 1066 1066 CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity); 1067 1067 1068 1068 static ssize_t 1069 + target_fabric_wwn_default_complete_type_show(struct config_item *item, 1070 + char *page) 1071 + { 1072 + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, 1073 + param_group); 1074 + return sysfs_emit(page, "%u\n", 1075 + wwn->wwn_tf->tf_ops->default_compl_type); 1076 + } 1077 + CONFIGFS_ATTR_RO(target_fabric_wwn_, default_complete_type); 1078 + 1079 + static ssize_t 1080 + target_fabric_wwn_direct_complete_supported_show(struct config_item *item, 1081 + char *page) 1082 + { 1083 + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, 1084 + param_group); 1085 + return sysfs_emit(page, "%u\n", 1086 + wwn->wwn_tf->tf_ops->direct_compl_supp); 1087 + } 1088 + CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_complete_supported); 1089 + 1090 + static ssize_t 1069 1091 target_fabric_wwn_default_submit_type_show(struct config_item *item, 1070 1092 char *page) 1071 1093 { ··· 1111 1089 1112 1090 static struct configfs_attribute *target_fabric_wwn_param_attrs[] = { 1113 1091 &target_fabric_wwn_attr_cmd_completion_affinity, 1092 + &target_fabric_wwn_attr_default_complete_type, 1093 + &target_fabric_wwn_attr_direct_complete_supported, 1114 1094 &target_fabric_wwn_attr_default_submit_type, 1115 1095 &target_fabric_wwn_attr_direct_submit_supported, 1116 1096 NULL,
+32 -27
drivers/target/target_core_pr.c
··· 2809 2809 } 2810 2810 2811 2811 static sense_reason_t 2812 - core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, 2812 + core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, 2813 2813 u64 sa_res_key, enum preempt_type preempt_type) 2814 2814 { 2815 2815 struct se_device *dev = cmd->se_dev; ··· 2838 2838 core_scsi3_put_pr_reg(pr_reg_n); 2839 2839 return TCM_RESERVATION_CONFLICT; 2840 2840 } 2841 - if (scope != PR_SCOPE_LU_SCOPE) { 2842 - pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2843 - core_scsi3_put_pr_reg(pr_reg_n); 2844 - return TCM_INVALID_PARAMETER_LIST; 2845 - } 2846 2841 2847 2842 spin_lock(&dev->dev_reservation_lock); 2848 2843 pr_res_holder = dev->dev_pr_res_holder; ··· 2851 2856 core_scsi3_put_pr_reg(pr_reg_n); 2852 2857 return TCM_INVALID_PARAMETER_LIST; 2853 2858 } 2859 + 2860 + /* Validate TYPE and SCOPE fields if they will be used */ 2861 + if (pr_res_holder && 2862 + (pr_res_holder->pr_res_key == sa_res_key || 2863 + (all_reg && !sa_res_key))) { 2864 + switch (type) { 2865 + case PR_TYPE_WRITE_EXCLUSIVE: 2866 + case PR_TYPE_EXCLUSIVE_ACCESS: 2867 + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: 2868 + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: 2869 + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: 2870 + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: 2871 + break; 2872 + default: 2873 + pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 2874 + " Type: 0x%02x\n", 2875 + (preempt_type == PREEMPT_AND_ABORT) ? 2876 + "_AND_ABORT" : "", type); 2877 + spin_unlock(&dev->dev_reservation_lock); 2878 + core_scsi3_put_pr_reg(pr_reg_n); 2879 + return TCM_INVALID_CDB_FIELD; 2880 + } 2881 + 2882 + if (scope != PR_SCOPE_LU_SCOPE) { 2883 + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2884 + spin_unlock(&dev->dev_reservation_lock); 2885 + core_scsi3_put_pr_reg(pr_reg_n); 2886 + return TCM_INVALID_PARAMETER_LIST; 2887 + } 2888 + } 2889 + 2854 2890 /* 2855 2891 * From spc4r17, section 5.7.11.4.4 Removing Registrations: 2856 2892 * ··· 3143 3117 core_scsi3_pr_generation(cmd->se_dev); 3144 3118 return 0; 3145 3119 } 3146 - 3147 - static sense_reason_t 3148 - core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope, 3149 - u64 res_key, u64 sa_res_key, enum preempt_type preempt_type) 3150 - { 3151 - switch (type) { 3152 - case PR_TYPE_WRITE_EXCLUSIVE: 3153 - case PR_TYPE_EXCLUSIVE_ACCESS: 3154 - case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: 3155 - case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: 3156 - case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: 3157 - case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: 3158 - return core_scsi3_pro_preempt(cmd, type, scope, res_key, 3159 - sa_res_key, preempt_type); 3160 - default: 3161 - pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 3162 - " Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", type); 3163 - return TCM_INVALID_CDB_FIELD; 3164 - } 3165 - } 3166 - 3167 3120 3168 3121 static sense_reason_t 3169 3122 core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
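The TYPE/SCOPE checks now run under dev_reservation_lock and only on the paths that actually consume those fields; SPC-3 treats them as ignored when the PREEMPT does not replace the reservation holder. A condensed sketch of the new decision order (illustrative, not a verbatim excerpt):

bool replaces_holder = pr_res_holder &&
        (pr_res_holder->pr_res_key == sa_res_key ||
         (all_reg && !sa_res_key));

if (replaces_holder) {
        /* only here: TYPE must be one of the six PR types and
         * SCOPE must be PR_SCOPE_LU_SCOPE */
}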
+2 -1
drivers/target/target_core_sbc.c
··· 1187 1187 goto err; 1188 1188 } 1189 1189 1190 - if (lba + range > dev->transport->get_blocks(dev) + 1) { 1190 + if (lba + range < lba || 1191 + lba + range > dev->transport->get_blocks(dev) + 1) { 1191 1192 ret = TCM_ADDRESS_OUT_OF_RANGE; 1192 1193 goto err; 1193 1194 }
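The added comparison catches 64-bit wraparound in the range check: a huge lba plus range can overflow to a small value and sail past the capacity test on its own. Illustrative arithmetic:

u64 blocks = 0x1000;                    /* device capacity, illustrative */
u64 lba    = 0xfffffffffffffff0ULL;
u64 range  = 0x20;

/* lba + range wraps to 0x10, so "lba + range > blocks + 1" is false
 * even though the range is far out of bounds; the new
 * "lba + range < lba" test detects the wrap. */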
+53 -15
drivers/target/target_core_transport.c
··· 902 902 return false; 903 903 } 904 904 905 + static void target_complete(struct se_cmd *cmd, int success) 906 + { 907 + struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; 908 + struct se_dev_attrib *da; 909 + u8 compl_type; 910 + int cpu; 911 + 912 + if (!wwn) { 913 + cpu = cmd->cpuid; 914 + goto queue_work; 915 + } 916 + 917 + da = &cmd->se_dev->dev_attrib; 918 + if (da->complete_type == TARGET_FABRIC_DEFAULT_COMPL) 919 + compl_type = wwn->wwn_tf->tf_ops->default_compl_type; 920 + else if (da->complete_type == TARGET_DIRECT_COMPL && 921 + wwn->wwn_tf->tf_ops->direct_compl_supp) 922 + compl_type = TARGET_DIRECT_COMPL; 923 + else 924 + compl_type = TARGET_QUEUE_COMPL; 925 + 926 + if (compl_type == TARGET_DIRECT_COMPL) { 927 + /* 928 + * Failure handling and processing secondary stages of 929 + * complex commands can be too heavy to handle from the 930 + * fabric driver so always defer. 931 + */ 932 + if (success && !cmd->transport_complete_callback) { 933 + target_complete_ok_work(&cmd->work); 934 + return; 935 + } 936 + 937 + compl_type = TARGET_QUEUE_COMPL; 938 + } 939 + 940 + queue_work: 941 + INIT_WORK(&cmd->work, success ? target_complete_ok_work : 942 + target_complete_failure_work); 943 + 944 + if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) 945 + cpu = cmd->cpuid; 946 + else 947 + cpu = wwn->cmd_compl_affinity; 948 + 949 + queue_work_on(cpu, target_completion_wq, &cmd->work); 950 + } 951 + 905 952 /* May be called from interrupt context so must not sleep. */ 906 953 void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status, 907 954 sense_reason_t sense_reason) 908 955 { 909 - struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; 910 - int success, cpu; 911 956 unsigned long flags; 957 + int success; 912 958 913 959 if (target_cmd_interrupted(cmd)) 914 960 return; ··· 979 933 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 980 934 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 981 935 982 - INIT_WORK(&cmd->work, success ? target_complete_ok_work : 983 - target_complete_failure_work); 984 - 985 - if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) 986 - cpu = cmd->cpuid; 987 - else 988 - cpu = wwn->cmd_compl_affinity; 989 - 990 - queue_work_on(cpu, target_completion_wq, &cmd->work); 936 + target_complete(cmd, success); 991 937 } 992 938 EXPORT_SYMBOL(target_complete_cmd_with_sense); 993 939 ··· 1150 1112 } 1151 1113 1152 1114 if (p_buf) 1153 - strncpy(p_buf, buf, p_buf_len); 1115 + strscpy(p_buf, buf, p_buf_len); 1154 1116 else 1155 1117 pr_debug("%s", buf); 1156 1118 } ··· 1200 1162 } 1201 1163 1202 1164 if (p_buf) 1203 - strncpy(p_buf, buf, p_buf_len); 1165 + strscpy(p_buf, buf, p_buf_len); 1204 1166 else 1205 1167 pr_debug("%s", buf); 1206 1168 ··· 1260 1222 if (p_buf) { 1261 1223 if (p_buf_len < strlen(buf)+1) 1262 1224 return -EINVAL; 1263 - strncpy(p_buf, buf, p_buf_len); 1225 + strscpy(p_buf, buf, p_buf_len); 1264 1226 } else { 1265 1227 pr_debug("%s", buf); 1266 1228 } ··· 1314 1276 } 1315 1277 1316 1278 if (p_buf) 1317 - strncpy(p_buf, buf, p_buf_len); 1279 + strscpy(p_buf, buf, p_buf_len); 1318 1280 else 1319 1281 pr_debug("%s", buf); 1320 1282
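target_complete() centralizes the completion dispatch: a command completes inline only when it succeeded, has no transport_complete_callback, and both the device attribute and the fabric's advertised support permit direct completion; everything else is queued to the per-CPU completion workqueue as before. A hedged sketch of how a fabric driver would opt in (the field names exist in this series; the combination shown is an assumption for illustration):

static const struct target_core_fabric_ops demo_fabric_ops = {
        .default_compl_type  = TARGET_DIRECT_COMPL,
        .direct_compl_supp   = 1,
        .default_submit_type = TARGET_DIRECT_SUBMIT,
        .direct_submit_supp  = 1,
};

The strncpy() to strscpy() conversions in the same hunk are an independent hardening cleanup: strscpy() always NUL-terminates and reports truncation with -E2BIG, where strncpy() could leave p_buf unterminated.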
+1
drivers/target/tcm_fc/tfc_conf.c
··· 434 434 .tfc_wwn_attrs = ft_wwn_attrs, 435 435 .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, 436 436 437 + .default_compl_type = TARGET_QUEUE_COMPL, 437 438 .default_submit_type = TARGET_DIRECT_SUBMIT, 438 439 .direct_submit_supp = 1, 439 440 };
+1 -1
drivers/ufs/core/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o 4 - ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o 4 + ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o ufs-txeq.o 5 5 ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o 6 6 ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o 7 7 ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
+290
drivers/ufs/core/ufs-debugfs.c
··· 209 209 { } 210 210 }; 211 211 212 + static int ufs_tx_eq_params_show(struct seq_file *s, void *data) 213 + { 214 + const char *file_name = s->file->f_path.dentry->d_name.name; 215 + u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private; 216 + struct ufs_hba *hba = hba_from_file(s->file); 217 + struct ufshcd_tx_eq_settings *settings; 218 + struct ufs_pa_layer_attr *pwr_info; 219 + struct ufshcd_tx_eq_params *params; 220 + u32 rate = hba->pwr_info.hs_rate; 221 + u32 num_lanes; 222 + int lane; 223 + 224 + if (!ufshcd_is_tx_eq_supported(hba)) 225 + return -EOPNOTSUPP; 226 + 227 + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) { 228 + seq_printf(s, "Invalid gear selected: %u\n", gear); 229 + return 0; 230 + } 231 + 232 + if (!hba->max_pwr_info.is_valid) { 233 + seq_puts(s, "Max power info is invalid\n"); 234 + return 0; 235 + } 236 + 237 + pwr_info = &hba->max_pwr_info.info; 238 + params = &hba->tx_eq_params[gear - 1]; 239 + if (!params->is_valid) { 240 + seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n", 241 + gear, ufs_hs_rate_to_str(rate)); 242 + return 0; 243 + } 244 + 245 + if (strcmp(file_name, "host_tx_eq_params") == 0) { 246 + settings = params->host; 247 + num_lanes = pwr_info->lane_tx; 248 + seq_printf(s, "Host TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n", 249 + hba->host_preshoot_cap, hba->host_deemphasis_cap); 250 + } else if (strcmp(file_name, "device_tx_eq_params") == 0) { 251 + settings = params->device; 252 + num_lanes = pwr_info->lane_rx; 253 + seq_printf(s, "Device TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n", 254 + hba->device_preshoot_cap, hba->device_deemphasis_cap); 255 + } else { 256 + return -ENOENT; 257 + } 258 + 259 + seq_printf(s, "TX EQ setting for HS-G%u, Rate-%s:\n", gear, 260 + ufs_hs_rate_to_str(rate)); 261 + for (lane = 0; lane < num_lanes; lane++) 262 + seq_printf(s, "TX Lane %d - PreShoot: %d, DeEmphasis: %d, Pre-Coding %senabled\n", 263 + lane, settings[lane].preshoot, 264 + settings[lane].deemphasis, 265 + settings[lane].precode_en ? 
"" : "not "); 266 + 267 + return 0; 268 + } 269 + 270 + static int ufs_tx_eq_params_open(struct inode *inode, struct file *file) 271 + { 272 + return single_open(file, ufs_tx_eq_params_show, inode->i_private); 273 + } 274 + 275 + static const struct file_operations ufs_tx_eq_params_fops = { 276 + .owner = THIS_MODULE, 277 + .open = ufs_tx_eq_params_open, 278 + .read = seq_read, 279 + .llseek = seq_lseek, 280 + .release = single_release, 281 + }; 282 + 283 + static const struct ufs_debugfs_attr ufs_tx_eq_attrs[] = { 284 + { "host_tx_eq_params", 0400, &ufs_tx_eq_params_fops }, 285 + { "device_tx_eq_params", 0400, &ufs_tx_eq_params_fops }, 286 + { } 287 + }; 288 + 289 + static int ufs_tx_eqtr_record_show(struct seq_file *s, void *data) 290 + { 291 + const char *file_name = s->file->f_path.dentry->d_name.name; 292 + u8 (*fom_array)[TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; 293 + u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private; 294 + unsigned long preshoot_bitmap, deemphasis_bitmap; 295 + struct ufs_hba *hba = hba_from_file(s->file); 296 + struct ufs_pa_layer_attr *pwr_info; 297 + struct ufshcd_tx_eq_params *params; 298 + struct ufshcd_tx_eqtr_record *rec; 299 + u32 rate = hba->pwr_info.hs_rate; 300 + u8 preshoot, deemphasis; 301 + u32 num_lanes; 302 + char name[32]; 303 + int lane; 304 + 305 + if (!ufshcd_is_tx_eq_supported(hba)) 306 + return -EOPNOTSUPP; 307 + 308 + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) { 309 + seq_printf(s, "Invalid gear selected: %u\n", gear); 310 + return 0; 311 + } 312 + 313 + if (!hba->max_pwr_info.is_valid) { 314 + seq_puts(s, "Max power info is invalid\n"); 315 + return 0; 316 + } 317 + 318 + pwr_info = &hba->max_pwr_info.info; 319 + params = &hba->tx_eq_params[gear - 1]; 320 + if (!params->is_valid) { 321 + seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n", 322 + gear, ufs_hs_rate_to_str(rate)); 323 + return 0; 324 + } 325 + 326 + rec = params->eqtr_record; 327 + if (!rec || !rec->last_record_index) { 328 + seq_printf(s, "No TX EQTR records found for HS-G%u, Rate-%s.\n", 329 + gear, ufs_hs_rate_to_str(rate)); 330 + return 0; 331 + } 332 + 333 + if (strcmp(file_name, "host_tx_eqtr_record") == 0) { 334 + preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1; 335 + deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1; 336 + num_lanes = pwr_info->lane_tx; 337 + fom_array = rec->host_fom; 338 + snprintf(name, sizeof(name), "%s", "Host"); 339 + } else if (strcmp(file_name, "device_tx_eqtr_record") == 0) { 340 + preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1; 341 + deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1; 342 + num_lanes = pwr_info->lane_rx; 343 + fom_array = rec->device_fom; 344 + snprintf(name, sizeof(name), "%s", "Device"); 345 + } else { 346 + return -ENOENT; 347 + } 348 + 349 + seq_printf(s, "%s TX EQTR record summary -\n", name); 350 + seq_printf(s, "Target Power Mode: HS-G%u, Rate-%s\n", gear, 351 + ufs_hs_rate_to_str(rate)); 352 + seq_printf(s, "Most recent record index: %d\n", 353 + rec->last_record_index); 354 + seq_printf(s, "Most recent record timestamp: %llu us\n", 355 + ktime_to_us(rec->last_record_ts)); 356 + 357 + for (lane = 0; lane < num_lanes; lane++) { 358 + seq_printf(s, "\nTX Lane %d FOM - %s\n", lane, "PreShoot\\DeEmphasis"); 359 + seq_puts(s, "\\"); 360 + /* Print DeEmphasis header as X-axis. 
*/ 361 + for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) 362 + seq_printf(s, "%8d%s", deemphasis, " "); 363 + seq_puts(s, "\n"); 364 + /* Print matrix rows with PreShoot as Y-axis. */ 365 + for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) { 366 + seq_printf(s, "%d", preshoot); 367 + for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) { 368 + if (test_bit(preshoot, &preshoot_bitmap) && 369 + test_bit(deemphasis, &deemphasis_bitmap)) { 370 + u8 fom = fom_array[lane][preshoot][deemphasis]; 371 + u8 fom_val = fom & RX_FOM_VALUE_MASK; 372 + bool precode_en = fom & RX_FOM_PRECODING_EN_BIT; 373 + 374 + if (ufshcd_is_txeq_presets_used(hba) && 375 + !ufshcd_is_txeq_preset_selected(preshoot, deemphasis)) 376 + seq_printf(s, "%8s%s", "-", " "); 377 + else 378 + seq_printf(s, "%8u%s", fom_val, 379 + precode_en ? "*" : " "); 380 + } else { 381 + seq_printf(s, "%8s%s", "x", " "); 382 + } 383 + } 384 + seq_puts(s, "\n"); 385 + } 386 + } 387 + 388 + return 0; 389 + } 390 + 391 + static int ufs_tx_eqtr_record_open(struct inode *inode, struct file *file) 392 + { 393 + return single_open(file, ufs_tx_eqtr_record_show, inode->i_private); 394 + } 395 + 396 + static const struct file_operations ufs_tx_eqtr_record_fops = { 397 + .owner = THIS_MODULE, 398 + .open = ufs_tx_eqtr_record_open, 399 + .read = seq_read, 400 + .llseek = seq_lseek, 401 + .release = single_release, 402 + }; 403 + 404 + static ssize_t ufs_tx_eq_ctrl_write(struct file *file, const char __user *buf, 405 + size_t count, loff_t *ppos) 406 + { 407 + u32 gear = (u32)(uintptr_t)file->f_inode->i_private; 408 + struct ufs_hba *hba = hba_from_file(file); 409 + char kbuf[32]; 410 + int ret; 411 + 412 + if (count >= sizeof(kbuf)) 413 + return -EINVAL; 414 + 415 + if (copy_from_user(kbuf, buf, count)) 416 + return -EFAULT; 417 + 418 + if (!ufshcd_is_tx_eq_supported(hba)) 419 + return -EOPNOTSUPP; 420 + 421 + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || 422 + !hba->max_pwr_info.is_valid) 423 + return -EBUSY; 424 + 425 + if (!hba->ufs_device_wlun) 426 + return -ENODEV; 427 + 428 + kbuf[count] = '\0'; 429 + 430 + if (sysfs_streq(kbuf, "retrain")) { 431 + ret = ufs_debugfs_get_user_access(hba); 432 + if (ret) 433 + return ret; 434 + ret = ufshcd_retrain_tx_eq(hba, gear); 435 + ufs_debugfs_put_user_access(hba); 436 + } else { 437 + /* Unknown operation */ 438 + return -EINVAL; 439 + } 440 + 441 + return ret ? 
ret : count; 442 + } 443 + 444 + static int ufs_tx_eq_ctrl_show(struct seq_file *s, void *data) 445 + { 446 + seq_puts(s, "write 'retrain' to retrain TX Equalization settings\n"); 447 + return 0; 448 + } 449 + 450 + static int ufs_tx_eq_ctrl_open(struct inode *inode, struct file *file) 451 + { 452 + return single_open(file, ufs_tx_eq_ctrl_show, inode->i_private); 453 + } 454 + 455 + static const struct file_operations ufs_tx_eq_ctrl_fops = { 456 + .owner = THIS_MODULE, 457 + .open = ufs_tx_eq_ctrl_open, 458 + .read = seq_read, 459 + .llseek = seq_lseek, 460 + .write = ufs_tx_eq_ctrl_write, 461 + .release = single_release, 462 + }; 463 + 464 + static const struct ufs_debugfs_attr ufs_tx_eqtr_attrs[] = { 465 + { "host_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops }, 466 + { "device_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops }, 467 + { "tx_eq_ctrl", 0600, &ufs_tx_eq_ctrl_fops }, 468 + { } 469 + }; 470 + 212 471 void ufs_debugfs_hba_init(struct ufs_hba *hba) 213 472 { 214 473 const struct ufs_debugfs_attr *attr; ··· 489 230 hba, &ee_usr_mask_fops); 490 231 debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root, 491 232 &hba->debugfs_ee_rate_limit_ms); 233 + 234 + if (!(hba->caps & UFSHCD_CAP_TX_EQUALIZATION)) 235 + return; 236 + 237 + for (u32 gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) { 238 + struct dentry *txeq_dir; 239 + char name[32]; 240 + 241 + snprintf(name, sizeof(name), "tx_eq_hs_gear%d", gear); 242 + txeq_dir = debugfs_create_dir(name, hba->debugfs_root); 243 + if (IS_ERR_OR_NULL(txeq_dir)) 244 + return; 245 + 246 + d_inode(txeq_dir)->i_private = hba; 247 + 248 + /* Create files for TX Equalization parameters */ 249 + for (attr = ufs_tx_eq_attrs; attr->name; attr++) 250 + debugfs_create_file(attr->name, attr->mode, txeq_dir, 251 + (void *)(uintptr_t)gear, 252 + attr->fops); 253 + 254 + /* TX EQTR is supported for HS-G4 and higher Gears */ 255 + if (gear < UFS_HS_G4) 256 + continue; 257 + 258 + /* Create files for TX EQTR related attributes */ 259 + for (attr = ufs_tx_eqtr_attrs; attr->name; attr++) 260 + debugfs_create_file(attr->name, attr->mode, txeq_dir, 261 + (void *)(uintptr_t)gear, 262 + attr->fops); 263 + } 492 264 } 493 265 494 266 void ufs_debugfs_hba_exit(struct ufs_hba *hba)
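Gated on UFSHCD_CAP_TX_EQUALIZATION, this creates one directory per HS gear under the hba's debugfs root. The resulting layout (root path assumed) looks roughly like:

/sys/kernel/debug/ufshcd/<host>/tx_eq_hs_gear6/
        host_tx_eq_params       (0400)
        device_tx_eq_params     (0400)
        host_tx_eqtr_record     (0400)  /* HS-G4 and above only */
        device_tx_eqtr_record   (0400)  /* HS-G4 and above only */
        tx_eq_ctrl              (0600)  /* write "retrain" to retrain */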
+3
drivers/ufs/core/ufs-debugfs.h
··· 5 5 #ifndef __UFS_DEBUGFS_H__ 6 6 #define __UFS_DEBUGFS_H__ 7 7 8 + #include <linux/init.h> 9 + #include <linux/types.h> 10 + 8 11 struct ufs_hba; 9 12 10 13 #ifdef CONFIG_DEBUG_FS
+2
drivers/ufs/core/ufs-fault-injection.h
··· 6 6 #include <linux/kconfig.h> 7 7 #include <linux/types.h> 8 8 9 + struct ufs_hba; 10 + 9 11 #ifdef CONFIG_SCSI_UFS_FAULT_INJECTION 10 12 void ufs_fault_inject_hba_init(struct ufs_hba *hba); 11 13 bool ufs_trigger_eh(struct ufs_hba *hba);
+28 -2
drivers/ufs/core/ufs-mcq.c
··· 31 31 32 32 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\ 33 33 UFSHCD_ERROR_MASK |\ 34 - MCQ_CQ_EVENT_STATUS) 34 + MCQ_CQ_EVENT_STATUS |\ 35 + MCQ_IAG_EVENT_STATUS) 35 36 36 37 /* Max mcq register polling time in microseconds */ 37 38 #define MCQ_POLL_US 500000 ··· 273 272 } 274 273 EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis); 275 274 275 + u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i) 276 + { 277 + return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_MCQIACR); 278 + } 279 + 280 + void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i) 281 + { 282 + writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_MCQIACR); 283 + } 284 + 276 285 /* 277 - * Current MCQ specification doesn't provide a Task Tag or its equivalent in 286 + * UFSHCI 4.0 MCQ specification doesn't provide a Task Tag or its equivalent in 278 287 * the Completion Queue Entry. Find the Task Tag using an indirect method. 288 + * UFSHCI 4.1 and above can directly return the Task Tag in the Completion Queue 289 + * Entry. 279 290 */ 280 291 static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe) 281 292 { 282 293 u64 addr; 294 + 295 + if (hba->ufs_version >= ufshci_version(4, 1)) 296 + return cqe->task_tag; 283 297 284 298 /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */ 285 299 BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0)); ··· 317 301 ufshcd_compl_one_cqe(hba, tag, cqe); 318 302 /* After processed the cqe, mark it empty (invalid) entry */ 319 303 cqe->command_desc_base_addr = 0; 304 + } else { 305 + dev_err(hba->dev, "Abnormal CQ entry!\n"); 320 306 } 321 307 } 322 308 309 + /* 310 + * This function is called from the UFS error handler with the UFS host 311 + * controller disabled (HCE = 0). Reading host controller registers, e.g. the 312 + * CQ tail pointer (CQTPy), may not be safe with the host controller disabled. 313 + * Hence, iterate over all completion queue entries. This won't result in 314 + * double completions because ufshcd_mcq_process_cqe() clears a CQE after it 315 + * has been processed. 316 + */ 323 317 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba, 324 318 struct ufs_hw_queue *hwq) 325 319 {
+30
drivers/ufs/core/ufs-sysfs.c
··· 605 605 return sysfs_emit(buf, "%llu\n", exception_id); 606 606 } 607 607 608 + static ssize_t dme_qos_notification_show(struct device *dev, 609 + struct device_attribute *attr, 610 + char *buf) 611 + { 612 + struct ufs_hba *hba = dev_get_drvdata(dev); 613 + 614 + return sysfs_emit(buf, "0x%x\n", atomic_read(&hba->dme_qos_notification)); 615 + } 616 + 617 + static ssize_t dme_qos_notification_store(struct device *dev, 618 + struct device_attribute *attr, 619 + const char *buf, size_t count) 620 + { 621 + struct ufs_hba *hba = dev_get_drvdata(dev); 622 + unsigned int value; 623 + 624 + if (kstrtouint(buf, 0, &value)) 625 + return -EINVAL; 626 + 627 + /* the only supported use case is to reset the dme_qos_notification */ 628 + if (value) 629 + return -EINVAL; 630 + 631 + atomic_set(&hba->dme_qos_notification, 0); 632 + 633 + return count; 634 + } 635 + 608 636 static DEVICE_ATTR_RW(rpm_lvl); 609 637 static DEVICE_ATTR_RO(rpm_target_dev_state); 610 638 static DEVICE_ATTR_RO(rpm_target_link_state); ··· 649 621 static DEVICE_ATTR_RO(critical_health); 650 622 static DEVICE_ATTR_RW(device_lvl_exception_count); 651 623 static DEVICE_ATTR_RO(device_lvl_exception_id); 624 + static DEVICE_ATTR_RW(dme_qos_notification); 652 625 653 626 static struct attribute *ufs_sysfs_ufshcd_attrs[] = { 654 627 &dev_attr_rpm_lvl.attr, ··· 668 639 &dev_attr_critical_health.attr, 669 640 &dev_attr_device_lvl_exception_count.attr, 670 641 &dev_attr_device_lvl_exception_id.attr, 642 + &dev_attr_dme_qos_notification.attr, 671 643 NULL 672 644 }; 673 645
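The attribute is a plain read/clear-on-zero-write view of an atomic bitmask; the bits themselves are presumably latched from the DME QoS event path elsewhere in this series. A hypothetical producer-side sketch (the event source and the notification call are assumptions):

/* Hypothetical: latch a QoS event bit and wake sysfs pollers. */
atomic_or(BIT(1), &hba->dme_qos_notification);
sysfs_notify(&hba->dev->kobj, NULL, "dme_qos_notification");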
+1293
drivers/ufs/core/ufs-txeq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2026 Qualcomm Technologies, Inc. 4 + * 5 + * Author: 6 + * Can Guo <can.guo@oss.qualcomm.com> 7 + */ 8 + 9 + #include <linux/bitops.h> 10 + #include <linux/delay.h> 11 + #include <linux/errno.h> 12 + #include <linux/kernel.h> 13 + #include <ufs/ufshcd.h> 14 + #include <ufs/unipro.h> 15 + #include "ufshcd-priv.h" 16 + 17 + static bool use_adaptive_txeq; 18 + module_param(use_adaptive_txeq, bool, 0644); 19 + MODULE_PARM_DESC(use_adaptive_txeq, "Find and apply optimal TX Equalization settings before changing Power Mode (default: false)"); 20 + 21 + static int txeq_gear_set(const char *val, const struct kernel_param *kp) 22 + { 23 + return param_set_uint_minmax(val, kp, UFS_HS_G1, UFS_HS_GEAR_MAX); 24 + } 25 + 26 + static const struct kernel_param_ops txeq_gear_ops = { 27 + .set = txeq_gear_set, 28 + .get = param_get_uint, 29 + }; 30 + 31 + static unsigned int adaptive_txeq_gear = UFS_HS_G6; 32 + module_param_cb(adaptive_txeq_gear, &txeq_gear_ops, &adaptive_txeq_gear, 0644); 33 + MODULE_PARM_DESC(adaptive_txeq_gear, "For HS-Gear[n] and above, adaptive txeq shall be used"); 34 + 35 + static bool use_txeq_presets; 36 + module_param(use_txeq_presets, bool, 0644); 37 + MODULE_PARM_DESC(use_txeq_presets, "Use only the 8 TX Equalization Presets (pre-defined Pre-Shoot & De-Emphasis combinations) for TX EQTR (default: false)"); 38 + 39 + static bool txeq_presets_selected[UFS_TX_EQ_PRESET_MAX] = {[0 ... (UFS_TX_EQ_PRESET_MAX - 1)] = 1}; 40 + module_param_array(txeq_presets_selected, bool, NULL, 0644); 41 + MODULE_PARM_DESC(txeq_presets_selected, "Use only the selected Presets out of the 8 TX Equalization Presets for TX EQTR"); 42 + 43 + /* 44 + * ufs_tx_eq_preset - Table of minimum required list of presets. 45 + * 46 + * A HS-G6 capable M-TX shall support the presets defined in M-PHY v6.0 spec. 47 + * Preset Pre-Shoot(dB) De-Emphasis(dB) 48 + * P0 0.0 0.0 49 + * P1 0.0 0.8 50 + * P2 0.0 1.6 51 + * P3 0.8 0.0 52 + * P4 1.6 0.0 53 + * P5 0.8 0.8 54 + * P6 0.8 1.6 55 + * P7 1.6 0.8 56 + */ 57 + static const struct __ufs_tx_eq_preset { 58 + u8 preshoot; 59 + u8 deemphasis; 60 + } ufs_tx_eq_preset[UFS_TX_EQ_PRESET_MAX] = { 61 + [UFS_TX_EQ_PRESET_P0] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P0}, 62 + [UFS_TX_EQ_PRESET_P1] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P8}, 63 + [UFS_TX_EQ_PRESET_P2] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_1P6}, 64 + [UFS_TX_EQ_PRESET_P3] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P0}, 65 + [UFS_TX_EQ_PRESET_P4] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P0}, 66 + [UFS_TX_EQ_PRESET_P5] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P8}, 67 + [UFS_TX_EQ_PRESET_P6] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_1P6}, 68 + [UFS_TX_EQ_PRESET_P7] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P8}, 69 + }; 70 + 71 + /* 72 + * pa_peer_rx_adapt_initial - Table of UniPro PA_PeerRxHSGnAdaptInitial 73 + * attribute IDs for High Speed (HS) Gears. 74 + * 75 + * This table maps HS Gears to their respective UniPro PA_PeerRxHSGnAdaptInitial 76 + * attribute IDs. Entries for Gears 1-3 are 0 (unsupported). 
77 + */ 78 + static const u32 pa_peer_rx_adapt_initial[UFS_HS_GEAR_MAX] = { 79 + 0, 80 + 0, 81 + 0, 82 + PA_PEERRXHSG4ADAPTINITIAL, 83 + PA_PEERRXHSG5ADAPTINITIAL, 84 + PA_PEERRXHSG6ADAPTINITIALL0L3 85 + }; 86 + 87 + /* 88 + * rx_adapt_initial_cap - Table of M-PHY RX_HS_Gn_ADAPT_INITIAL_Capability 89 + * attribute IDs for High Speed (HS) Gears. 90 + * 91 + * This table maps HS Gears to their respective M-PHY 92 + * RX_HS_Gn_ADAPT_INITIAL_Capability attribute IDs. Entries for Gears 1-3 are 0 93 + * (unsupported). 94 + */ 95 + static const u32 rx_adapt_initial_cap[UFS_HS_GEAR_MAX] = { 96 + 0, 97 + 0, 98 + 0, 99 + RX_HS_G4_ADAPT_INITIAL_CAP, 100 + RX_HS_G5_ADAPT_INITIAL_CAP, 101 + RX_HS_G6_ADAPT_INITIAL_CAP 102 + }; 103 + 104 + /* 105 + * pa_tx_eq_setting - Table of UniPro PA_TxEQGnSetting attribute IDs for High 106 + * Speed (HS) Gears. 107 + * 108 + * This table maps HS Gears to their respective UniPro PA_TxEQGnSetting 109 + * attribute IDs. 110 + */ 111 + static const u32 pa_tx_eq_setting[UFS_HS_GEAR_MAX] = { 112 + PA_TXEQG1SETTING, 113 + PA_TXEQG2SETTING, 114 + PA_TXEQG3SETTING, 115 + PA_TXEQG4SETTING, 116 + PA_TXEQG5SETTING, 117 + PA_TXEQG6SETTING 118 + }; 119 + 120 + /** 121 + * ufshcd_configure_precoding - Configure Pre-Coding for all active lanes 122 + * @hba: per adapter instance 123 + * @params: TX EQ parameters data structure 124 + * 125 + * Bit[7] in RX_FOM indicates that the receiver needs to enable Pre-Coding when 126 + * set. Pre-Coding must be enabled on both the transmitter and receiver to 127 + * ensure proper operation. 128 + * 129 + * Returns 0 on success, non-zero error code otherwise 130 + */ 131 + static int ufshcd_configure_precoding(struct ufs_hba *hba, 132 + struct ufshcd_tx_eq_params *params) 133 + { 134 + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 135 + u32 local_precode_en = 0; 136 + u32 peer_precode_en = 0; 137 + int lane, ret; 138 + 139 + /* Enable Pre-Coding for Host's TX & Device's RX pair */ 140 + for (lane = 0; lane < pwr_info->lane_tx; lane++) { 141 + if (params->host[lane].precode_en) { 142 + local_precode_en |= PRECODEEN_TX_BIT(lane); 143 + peer_precode_en |= PRECODEEN_RX_BIT(lane); 144 + } 145 + } 146 + 147 + /* Enable Pre-Coding for Device's TX & Host's RX pair */ 148 + for (lane = 0; lane < pwr_info->lane_rx; lane++) { 149 + if (params->device[lane].precode_en) { 150 + peer_precode_en |= PRECODEEN_TX_BIT(lane); 151 + local_precode_en |= PRECODEEN_RX_BIT(lane); 152 + } 153 + } 154 + 155 + if (!local_precode_en && !peer_precode_en) { 156 + dev_dbg(hba->dev, "Pre-Coding is not required for Host and Device\n"); 157 + return 0; 158 + } 159 + 160 + /* Set local PA_PreCodeEn */ 161 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PRECODEEN), local_precode_en); 162 + if (ret) { 163 + dev_err(hba->dev, "Failed to set local PA_PreCodeEn: %d\n", ret); 164 + return ret; 165 + } 166 + 167 + /* Set peer PA_PreCodeEn */ 168 + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_PRECODEEN), peer_precode_en); 169 + if (ret) { 170 + dev_err(hba->dev, "Failed to set peer PA_PreCodeEn: %d\n", ret); 171 + return ret; 172 + } 173 + 174 + dev_dbg(hba->dev, "Local PA_PreCodeEn: 0x%02x, Peer PA_PreCodeEn: 0x%02x\n", 175 + local_precode_en, peer_precode_en); 176 + 177 + return 0; 178 + } 179 + 180 + void ufshcd_print_tx_eq_params(struct ufs_hba *hba) 181 + { 182 + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 183 + struct ufshcd_tx_eq_params *params; 184 + u32 gear = hba->pwr_info.gear_tx; 185 + int lane; 186 + 187 + if (!ufshcd_is_tx_eq_supported(hba)) 
188 + return; 189 + 190 + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) 191 + return; 192 + 193 + params = &hba->tx_eq_params[gear - 1]; 194 + if (!params->is_valid || !params->is_applied) 195 + return; 196 + 197 + for (lane = 0; lane < pwr_info->lane_tx; lane++) 198 + dev_dbg(hba->dev, "Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n", 199 + lane, params->host[lane].preshoot, 200 + params->host[lane].deemphasis, 201 + params->host[lane].fom_val, 202 + params->host[lane].precode_en); 203 + 204 + for (lane = 0; lane < pwr_info->lane_rx; lane++) 205 + dev_dbg(hba->dev, "Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n", 206 + lane, params->device[lane].preshoot, 207 + params->device[lane].deemphasis, 208 + params->device[lane].fom_val, 209 + params->device[lane].precode_en); 210 + } 211 + 212 + static inline u32 213 + ufshcd_compose_tx_eq_setting(struct ufshcd_tx_eq_settings *settings, 214 + int num_lanes) 215 + { 216 + u32 setting = 0; 217 + int lane; 218 + 219 + for (lane = 0; lane < num_lanes; lane++, settings++) { 220 + setting |= TX_HS_PRESHOOT_BITS(lane, settings->preshoot); 221 + setting |= TX_HS_DEEMPHASIS_BITS(lane, settings->deemphasis); 222 + } 223 + 224 + return setting; 225 + } 226 + 227 + /** 228 + * ufshcd_apply_tx_eq_settings - Apply TX Equalization settings for target gear 229 + * @hba: per adapter instance 230 + * @params: TX EQ parameters data structure 231 + * @gear: target gear 232 + * 233 + * Returns 0 on success, negative error code otherwise 234 + */ 235 + int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, 236 + struct ufshcd_tx_eq_params *params, u32 gear) 237 + { 238 + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 239 + u32 setting; 240 + int ret; 241 + 242 + /* Compose settings for Host's TX Lanes */ 243 + setting = ufshcd_compose_tx_eq_setting(params->host, pwr_info->lane_tx); 244 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting); 245 + if (ret) 246 + return ret; 247 + 248 + /* Compose settings for Device's TX Lanes */ 249 + setting = ufshcd_compose_tx_eq_setting(params->device, pwr_info->lane_rx); 250 + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting); 251 + if (ret) 252 + return ret; 253 + 254 + /* Configure Pre-Coding */ 255 + if (gear >= UFS_HS_G6) { 256 + ret = ufshcd_configure_precoding(hba, params); 257 + if (ret) { 258 + dev_err(hba->dev, "Failed to configure pre-coding: %d\n", ret); 259 + return ret; 260 + } 261 + } 262 + 263 + return 0; 264 + } 265 + EXPORT_SYMBOL_GPL(ufshcd_apply_tx_eq_settings); 266 + 267 + /** 268 + * ufshcd_evaluate_tx_eqtr_fom - Evaluate TX EQTR FOM results 269 + * @hba: per adapter instance 270 + * @pwr_mode: target power mode containing gear and rate information 271 + * @eqtr_data: TX EQTR data structure 272 + * @h_iter: host TX EQTR iterator data structure 273 + * @d_iter: device TX EQTR iterator data structure 274 + * 275 + * Evaluate TX EQTR FOM results, update host and device TX EQTR data accordingly 276 + * if the FOM has improved compared to the previous iteration, and record TX EQTR 277 + * FOM results.
278 + */ 279 + static void ufshcd_evaluate_tx_eqtr_fom(struct ufs_hba *hba, 280 + struct ufs_pa_layer_attr *pwr_mode, 281 + struct ufshcd_tx_eqtr_data *eqtr_data, 282 + struct tx_eqtr_iter *h_iter, 283 + struct tx_eqtr_iter *d_iter) 284 + { 285 + u8 preshoot, deemphasis, fom_value; 286 + bool precode_en; 287 + int lane; 288 + 289 + for (lane = 0; h_iter->is_updated && lane < pwr_mode->lane_tx; lane++) { 290 + preshoot = h_iter->preshoot; 291 + deemphasis = h_iter->deemphasis; 292 + fom_value = h_iter->fom[lane] & RX_FOM_VALUE_MASK; 293 + precode_en = h_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT; 294 + 295 + /* Record host TX EQTR FOM */ 296 + eqtr_data->host_fom[lane][preshoot][deemphasis] = h_iter->fom[lane]; 297 + 298 + /* Check if FOM has been improved for host's TX Lanes */ 299 + if (fom_value > eqtr_data->host[lane].fom_val) { 300 + eqtr_data->host[lane].preshoot = preshoot; 301 + eqtr_data->host[lane].deemphasis = deemphasis; 302 + eqtr_data->host[lane].fom_val = fom_value; 303 + eqtr_data->host[lane].precode_en = precode_en; 304 + } 305 + 306 + dev_dbg(hba->dev, "TX EQTR: Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n", 307 + lane, preshoot, deemphasis, fom_value, precode_en); 308 + } 309 + 310 + for (lane = 0; d_iter->is_updated && lane < pwr_mode->lane_rx; lane++) { 311 + preshoot = d_iter->preshoot; 312 + deemphasis = d_iter->deemphasis; 313 + fom_value = d_iter->fom[lane] & RX_FOM_VALUE_MASK; 314 + precode_en = d_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT; 315 + 316 + /* Record device TX EQTR FOM */ 317 + eqtr_data->device_fom[lane][preshoot][deemphasis] = d_iter->fom[lane]; 318 + 319 + /* Check if FOM has been improved for Device's TX Lanes */ 320 + if (fom_value > eqtr_data->device[lane].fom_val) { 321 + eqtr_data->device[lane].preshoot = preshoot; 322 + eqtr_data->device[lane].deemphasis = deemphasis; 323 + eqtr_data->device[lane].fom_val = fom_value; 324 + eqtr_data->device[lane].precode_en = precode_en; 325 + } 326 + 327 + dev_dbg(hba->dev, "TX EQTR: Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n", 328 + lane, preshoot, deemphasis, fom_value, precode_en); 329 + } 330 + } 331 + 332 + /** 333 + * ufshcd_get_rx_fom - Get Figure of Merit (FOM) for both sides 334 + * @hba: per adapter instance 335 + * @pwr_mode: target power mode containing gear and rate information 336 + * @h_iter: host TX EQTR iterator data structure 337 + * @d_iter: device TX EQTR iterator data structure 338 + * 339 + * Returns 0 on success, negative error code otherwise 340 + */ 341 + static int ufshcd_get_rx_fom(struct ufs_hba *hba, 342 + struct ufs_pa_layer_attr *pwr_mode, 343 + struct tx_eqtr_iter *h_iter, 344 + struct tx_eqtr_iter *d_iter) 345 + { 346 + int lane, ret; 347 + u32 fom; 348 + 349 + /* Get FOM of host's TX lanes from device's RX_FOM. */ 350 + for (lane = 0; lane < pwr_mode->lane_tx; lane++) { 351 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB_SEL(RX_FOM, 352 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 353 + &fom); 354 + if (ret) 355 + return ret; 356 + 357 + h_iter->fom[lane] = (u8)fom; 358 + } 359 + 360 + /* Get FOM of device's TX lanes from host's RX_FOM. 
*/ 361 + for (lane = 0; lane < pwr_mode->lane_rx; lane++) { 362 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_FOM, 363 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 364 + &fom); 365 + if (ret) 366 + return ret; 367 + 368 + d_iter->fom[lane] = (u8)fom; 369 + } 370 + 371 + ret = ufshcd_vops_get_rx_fom(hba, pwr_mode, h_iter, d_iter); 372 + if (ret) 373 + dev_err(hba->dev, "Failed to get FOM via vops: %d\n", ret); 374 + 375 + return ret; 376 + } 377 + 378 + bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba) 379 + { 380 + return use_txeq_presets; 381 + } 382 + 383 + bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis) 384 + { 385 + int i; 386 + 387 + for (i = 0; i < UFS_TX_EQ_PRESET_MAX; i++) { 388 + if (!txeq_presets_selected[i]) 389 + continue; 390 + 391 + if (preshoot == ufs_tx_eq_preset[i].preshoot && 392 + deemphasis == ufs_tx_eq_preset[i].deemphasis) 393 + return true; 394 + } 395 + 396 + return false; 397 + } 398 + 399 + /** 400 + * tx_eqtr_iter_try_update - Try to update a TX EQTR iterator 401 + * @iter: TX EQTR iterator data structure 402 + * @preshoot: PreShoot value 403 + * @deemphasis: DeEmphasis value 404 + * 405 + * This function validates whether the provided PreShoot and DeEmphasis 406 + * combination can be used or not. If yes, it updates the TX EQTR iterator with 407 + * the provided PreShoot and DeEmphasis, and it sets the is_updated flag 408 + * to indicate the iterator has been updated. 409 + */ 410 + static void tx_eqtr_iter_try_update(struct tx_eqtr_iter *iter, 411 + u8 preshoot, u8 deemphasis) 412 + { 413 + if (!test_bit(preshoot, &iter->preshoot_bitmap) || 414 + !test_bit(deemphasis, &iter->deemphasis_bitmap) || 415 + (use_txeq_presets && !ufshcd_is_txeq_preset_selected(preshoot, deemphasis))) { 416 + iter->is_updated = false; 417 + return; 418 + } 419 + 420 + iter->preshoot = preshoot; 421 + iter->deemphasis = deemphasis; 422 + iter->is_updated = true; 423 + } 424 + 425 + /** 426 + * tx_eqtr_iter_update() - Update host and device TX EQTR iterators 427 + * @preshoot: PreShoot value 428 + * @deemphasis: DeEmphasis value 429 + * @h_iter: Host TX EQTR iterator data structure 430 + * @d_iter: Device TX EQTR iterator data structure 431 + * 432 + * Updates host and device TX Equalization training iterators with the 433 + * provided PreShoot and DeEmphasis. 434 + * 435 + * Return: true if host and/or device TX Equalization training iterator has 436 + * been updated to the provided PreShoot and DeEmphasis, false otherwise. 437 + */ 438 + static bool tx_eqtr_iter_update(u8 preshoot, u8 deemphasis, 439 + struct tx_eqtr_iter *h_iter, 440 + struct tx_eqtr_iter *d_iter) 441 + { 442 + tx_eqtr_iter_try_update(h_iter, preshoot, deemphasis); 443 + tx_eqtr_iter_try_update(d_iter, preshoot, deemphasis); 444 + 445 + return h_iter->is_updated || d_iter->is_updated; 446 + } 447 + 448 + /** 449 + * ufshcd_tx_eqtr_iter_init - Initialize host and device TX EQTR iterators 450 + * @hba: per adapter instance 451 + * @h_iter: host TX EQTR iterator data structure 452 + * @d_iter: device TX EQTR iterator data structure 453 + * 454 + * This function initializes the TX EQTR iterator structures for both host and 455 + * device by reading their TX equalization capabilities. The capabilities are 456 + * cached in the hba structure to avoid redundant DME operations in subsequent 457 + * calls. In the TX EQTR procedure, the iterator structures are updated by 458 + * tx_eqtr_iter_update() to systematically iterate through supported TX 459 + * Equalization setting combinations.
460 + * 461 + * Returns 0 on success, negative error code otherwise 462 + */ 463 + static int ufshcd_tx_eqtr_iter_init(struct ufs_hba *hba, 464 + struct tx_eqtr_iter *h_iter, 465 + struct tx_eqtr_iter *d_iter) 466 + { 467 + u32 cap; 468 + int ret; 469 + 470 + if (!hba->host_preshoot_cap) { 471 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap); 472 + if (ret) 473 + return ret; 474 + 475 + hba->host_preshoot_cap = cap & TX_EQTR_CAP_MASK; 476 + } 477 + 478 + if (!hba->host_deemphasis_cap) { 479 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap); 480 + if (ret) 481 + return ret; 482 + 483 + hba->host_deemphasis_cap = cap & TX_EQTR_CAP_MASK; 484 + } 485 + 486 + if (!hba->device_preshoot_cap) { 487 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap); 488 + if (ret) 489 + return ret; 490 + 491 + hba->device_preshoot_cap = cap & TX_EQTR_CAP_MASK; 492 + } 493 + 494 + if (!hba->device_deemphasis_cap) { 495 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap); 496 + if (ret) 497 + return ret; 498 + 499 + hba->device_deemphasis_cap = cap & TX_EQTR_CAP_MASK; 500 + } 501 + 502 + /* 503 + * Support for PreShoot & DeEmphasis value 0 is mandatory, hence value 0 is 504 + * not reflected in the PreShoot/DeEmphasis capabilities. Left shift the 505 + * capability bitmap by 1 and set bit[0] to reflect value 0 is 506 + * supported, such that test_bit() can be used later for convenience. 507 + */ 508 + h_iter->preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1; 509 + h_iter->deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1; 510 + d_iter->preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1; 511 + d_iter->deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1; 512 + 513 + return 0; 514 + } 515 + 516 + /** 517 + * adapt_cap_to_t_adapt - Calculate TAdapt from adapt capability 518 + * @adapt_cap: Adapt capability 519 + * 520 + * For NRZ: 521 + * IF (ADAPT_range = FINE) 522 + * TADAPT = 650 x (ADAPT_length + 1) 523 + * ELSE IF (ADAPT_range = COARSE) 524 + * TADAPT = 650 x 2^ADAPT_length 525 + * 526 + * Returns calculated TAdapt value in terms of Unit Intervals (UI) 527 + */ 528 + static inline u64 adapt_cap_to_t_adapt(u32 adapt_cap) 529 + { 530 + u64 tadapt; 531 + u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK; 532 + 533 + if (!IS_ADAPT_RANGE_COARSE(adapt_cap)) 534 + tadapt = TADAPT_FACTOR * (adapt_length + 1); 535 + else 536 + tadapt = TADAPT_FACTOR * (1 << adapt_length); 537 + 538 + return tadapt; 539 + } 540 + 541 + /** 542 + * adapt_cap_to_t_adapt_l0l3 - Calculate TAdapt_L0_L3 from adapt capability 543 + * @adapt_cap: Adapt capability 544 + * 545 + * For PAM-4: 546 + * IF (ADAPT_range = FINE) 547 + * TADAPT_L0_L3 = 2^9 x ADAPT_length 548 + * ELSE IF (ADAPT_range = COARSE) 549 + * TADAPT_L0_L3 = 2^9 x (2^ADAPT_length) 550 + * 551 + * Returns calculated TAdapt value in terms of Unit Intervals (UI) 552 + */ 553 + static inline u64 adapt_cap_to_t_adapt_l0l3(u32 adapt_cap) 554 + { 555 + u64 tadapt; 556 + u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK; 557 + 558 + if (!IS_ADAPT_RANGE_COARSE(adapt_cap)) 559 + tadapt = TADAPT_L0L3_FACTOR * adapt_length; 560 + else 561 + tadapt = TADAPT_L0L3_FACTOR * (1 << adapt_length); 562 + 563 + return tadapt; 564 + } 565 + 566 + /** 567 + * adapt_cap_to_t_adapt_l0l1l2l3 - Calculate TAdapt_L0_L1_L2_L3 from adapt capability 568 + * @adapt_cap: Adapt capability 569 + * 570 + * For PAM-4: 571 + * IF (ADAPT_range_L0_L1_L2_L3 = FINE) 572 + * TADAPT_L0_L1_L2_L3 = 
2^15 x (ADAPT_length_L0_L1_L2_L3 + 1) 573 + * ELSE IF (ADAPT_range_L0_L1_L2_L3 = COARSE) 574 + * TADAPT_L0_L1_L2_L3 = 2^15 x 2^ADAPT_length_L0_L1_L2_L3 575 + * 576 + * Returns calculated TAdapt value in terms of Unit Intervals (UI) 577 + */ 578 + static inline u64 adapt_cap_to_t_adapt_l0l1l2l3(u32 adapt_cap) 579 + { 580 + u64 tadapt; 581 + u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK; 582 + 583 + if (!IS_ADAPT_RANGE_COARSE(adapt_cap)) 584 + tadapt = TADAPT_L0L1L2L3_FACTOR * (adapt_length + 1); 585 + else 586 + tadapt = TADAPT_L0L1L2L3_FACTOR * (1 << adapt_length); 587 + 588 + return tadapt; 589 + } 590 + 591 + /** 592 + * ufshcd_setup_tx_eqtr_adapt_length - Setup TX adapt length for EQTR 593 + * @hba: per adapter instance 594 + * @params: TX EQ parameters data structure 595 + * @gear: target gear for EQTR 596 + * 597 + * This function determines and configures the proper TX adapt length (TAdapt) 598 + * for the TX EQTR procedure based on the target gear and RX adapt capabilities 599 + * of both host and device. 600 + * 601 + * Guidelines from MIPI UniPro v3.0 spec - select the minimum Adapt Length for 602 + * the Equalization Training procedure based on the following conditions: 603 + * 604 + * If the target High-Speed Gear n is HS-G4 or HS-G5: 605 + * PA_TxAdaptLength_EQTR[7:0] >= Max (10us, RX_HS_Gn_ADAPT_INITIAL_Capability, 606 + * PA_PeerRxHsGnAdaptInitial) 607 + * PA_TxAdaptLength_EQTR[7:0] shall be shorter than PACP_REQUEST_TIMER (10ms) 608 + * PA_TxAdaptLength_EQTR[15:8] is not relevant for HS-G4 and HS-G5. This field 609 + * is set to 255 (reserved value). 610 + * 611 + * If the target High-Speed Gear n is HS-G6: 612 + * PA_TxAdaptLength_EQTR >= 10us 613 + * PA_TxAdaptLength_EQTR[7:0] >= Max (RX_HS_G6_ADAPT_INITIAL_Capability, 614 + * PA_PeerRxHsG6AdaptInitialL0L3) 615 + * PA_TxAdaptLength_EQTR[15:8] >= Max (RX_HS_G6_ADAPT_INITIAL_L0_L1_L2_L3_Capability, 616 + * PA_PeerRxHsG6AdaptInitialL0L1L2L3) 617 + * PA_TxAdaptLength_EQTR shall be shorter than PACP_REQUEST_TIMER value of 10ms. 618 + * 619 + * Since adapt capabilities encode both range (fine/coarse) and length values, 620 + * direct comparison is not possible. This function converts adapt capabilities 621 + * to actual time durations in Unit Intervals (UI) using the Adapt time 622 + * calculation formula in M-PHY v6.0 spec (Table 8), then selects the maximum 623 + * to ensure both host and device use an adequate TX adapt length. 
624 + * 625 + * Returns 0 on success, negative error code otherwise 626 + */ 627 + static int ufshcd_setup_tx_eqtr_adapt_length(struct ufs_hba *hba, 628 + struct ufshcd_tx_eq_params *params, 629 + u32 gear) 630 + { 631 + struct ufshcd_tx_eqtr_record *rec = params->eqtr_record; 632 + u32 adapt_eqtr; 633 + int ret; 634 + 635 + if (rec && rec->saved_adapt_eqtr) { 636 + adapt_eqtr = rec->saved_adapt_eqtr; 637 + goto set_adapt_eqtr; 638 + } 639 + 640 + if (gear == UFS_HS_G4 || gear == UFS_HS_G5) { 641 + u64 t_adapt, t_adapt_local, t_adapt_peer; 642 + u32 adapt_cap_local, adapt_cap_peer, adapt_length; 643 + 644 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1], 645 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 646 + &adapt_cap_local); 647 + if (ret) 648 + return ret; 649 + 650 + if (adapt_cap_local > ADAPT_LENGTH_MAX) { 651 + dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n", 652 + gear, adapt_cap_local); 653 + return -EINVAL; 654 + } 655 + 656 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]), 657 + &adapt_cap_peer); 658 + if (ret) 659 + return ret; 660 + 661 + if (adapt_cap_peer > ADAPT_LENGTH_MAX) { 662 + dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n", 663 + gear, adapt_cap_peer); 664 + return -EINVAL; 665 + } 666 + 667 + t_adapt_local = adapt_cap_to_t_adapt(adapt_cap_local); 668 + t_adapt_peer = adapt_cap_to_t_adapt(adapt_cap_peer); 669 + t_adapt = max(t_adapt_local, t_adapt_peer); 670 + 671 + dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n", 672 + gear, adapt_cap_local); 673 + dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n", 674 + gear, adapt_cap_peer); 675 + dev_dbg(hba->dev, "t_adapt_local = %llu UI, t_adapt_peer = %llu UI\n", 676 + t_adapt_local, t_adapt_peer); 677 + dev_dbg(hba->dev, "TAdapt %llu UI selected for TX EQTR\n", 678 + t_adapt); 679 + 680 + adapt_length = (t_adapt_local >= t_adapt_peer) ? 
681 + adapt_cap_local : adapt_cap_peer; 682 + 683 + if (gear == UFS_HS_G4 && t_adapt < TX_EQTR_HS_G4_MIN_T_ADAPT) { 684 + dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n", 685 + t_adapt, gear, TX_EQTR_HS_G4_ADAPT_DEFAULT); 686 + adapt_length = TX_EQTR_HS_G4_ADAPT_DEFAULT; 687 + } else if (gear == UFS_HS_G5 && t_adapt < TX_EQTR_HS_G5_MIN_T_ADAPT) { 688 + dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n", 689 + t_adapt, gear, TX_EQTR_HS_G5_ADAPT_DEFAULT); 690 + adapt_length = TX_EQTR_HS_G5_ADAPT_DEFAULT; 691 + } 692 + 693 + adapt_eqtr = adapt_length | 694 + (TX_EQTR_ADAPT_RESERVED << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT); 695 + } else if (gear == UFS_HS_G6) { 696 + u64 t_adapt, t_adapt_l0l3, t_adapt_l0l3_local, t_adapt_l0l3_peer; 697 + u64 t_adapt_l0l1l2l3, t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer; 698 + u32 adapt_l0l3_cap_local, adapt_l0l3_cap_peer, adapt_length_l0l3; 699 + u32 adapt_l0l1l2l3_cap_local, adapt_l0l1l2l3_cap_peer, adapt_length_l0l1l2l3; 700 + 701 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1], 702 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 703 + &adapt_l0l3_cap_local); 704 + if (ret) 705 + return ret; 706 + 707 + if (adapt_l0l3_cap_local > ADAPT_L0L3_LENGTH_MAX) { 708 + dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n", 709 + gear, adapt_l0l3_cap_local); 710 + return -EINVAL; 711 + } 712 + 713 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]), 714 + &adapt_l0l3_cap_peer); 715 + if (ret) 716 + return ret; 717 + 718 + if (adapt_l0l3_cap_peer > ADAPT_L0L3_LENGTH_MAX) { 719 + dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n", 720 + gear, adapt_l0l3_cap_peer); 721 + return -EINVAL; 722 + } 723 + 724 + t_adapt_l0l3_local = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_local); 725 + t_adapt_l0l3_peer = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_peer); 726 + 727 + dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n", 728 + gear, adapt_l0l3_cap_local); 729 + dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n", 730 + gear, adapt_l0l3_cap_peer); 731 + dev_dbg(hba->dev, "t_adapt_l0l3_local = %llu UI, t_adapt_l0l3_peer = %llu UI\n", 732 + t_adapt_l0l3_local, t_adapt_l0l3_peer); 733 + 734 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP, 735 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 736 + &adapt_l0l1l2l3_cap_local); 737 + if (ret) 738 + return ret; 739 + 740 + if (adapt_l0l1l2l3_cap_local > ADAPT_L0L1L2L3_LENGTH_MAX) { 741 + dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n", 742 + gear, adapt_l0l1l2l3_cap_local); 743 + return -EINVAL; 744 + } 745 + 746 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), 747 + &adapt_l0l1l2l3_cap_peer); 748 + if (ret) 749 + return ret; 750 + 751 + if (adapt_l0l1l2l3_cap_peer > ADAPT_L0L1L2L3_LENGTH_MAX) { 752 + dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n", 753 + gear, adapt_l0l1l2l3_cap_peer); 754 + return -EINVAL; 755 + } 756 + 757 + t_adapt_l0l1l2l3_local = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_local); 758 + t_adapt_l0l1l2l3_peer = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_peer); 759 + 760 + dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n", 761 + gear, adapt_l0l1l2l3_cap_local); 762 + dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n", 763 + gear, 
adapt_l0l1l2l3_cap_peer); 764 + dev_dbg(hba->dev, "t_adapt_l0l1l2l3_local = %llu UI, t_adapt_l0l1l2l3_peer = %llu UI\n", 765 + t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer); 766 + 767 + t_adapt_l0l1l2l3 = max(t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer); 768 + t_adapt_l0l3 = max(t_adapt_l0l3_local, t_adapt_l0l3_peer); 769 + t_adapt = t_adapt_l0l3 + t_adapt_l0l1l2l3; 770 + 771 + dev_dbg(hba->dev, "TAdapt %llu PAM-4 UI selected for TX EQTR\n", 772 + t_adapt); 773 + 774 + adapt_length_l0l3 = (t_adapt_l0l3_local >= t_adapt_l0l3_peer) ? 775 + adapt_l0l3_cap_local : adapt_l0l3_cap_peer; 776 + adapt_length_l0l1l2l3 = (t_adapt_l0l1l2l3_local >= t_adapt_l0l1l2l3_peer) ? 777 + adapt_l0l1l2l3_cap_local : adapt_l0l1l2l3_cap_peer; 778 + 779 + if (t_adapt < TX_EQTR_HS_G6_MIN_T_ADAPT) { 780 + dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n", 781 + t_adapt, gear, TX_EQTR_HS_G6_ADAPT_DEFAULT); 782 + adapt_length_l0l3 = TX_EQTR_HS_G6_ADAPT_DEFAULT; 783 + } 784 + 785 + adapt_eqtr = adapt_length_l0l3 | 786 + (adapt_length_l0l1l2l3 << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT); 787 + } else { 788 + return -EINVAL; 789 + } 790 + 791 + if (rec) 792 + rec->saved_adapt_eqtr = (u16)adapt_eqtr; 793 + 794 + set_adapt_eqtr: 795 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXADAPTLENGTH_EQTR), adapt_eqtr); 796 + if (ret) 797 + dev_err(hba->dev, "Failed to set adapt length for TX EQTR: %d\n", ret); 798 + else 799 + dev_dbg(hba->dev, "PA_TXADAPTLENGTH_EQTR configured to 0x%08x\n", adapt_eqtr); 800 + 801 + return ret; 802 + } 803 + 804 + /** 805 + * ufshcd_compose_tx_eqtr_setting - Compose TX EQTR setting 806 + * @iter: TX EQTR iterator data structure 807 + * @num_lanes: number of active lanes 808 + * 809 + * Returns composed TX EQTR setting, same setting is used for all active lanes 810 + */ 811 + static inline u32 ufshcd_compose_tx_eqtr_setting(struct tx_eqtr_iter *iter, 812 + int num_lanes) 813 + { 814 + u32 setting = 0; 815 + int lane; 816 + 817 + for (lane = 0; lane < num_lanes; lane++) { 818 + setting |= TX_HS_PRESHOOT_BITS(lane, iter->preshoot); 819 + setting |= TX_HS_DEEMPHASIS_BITS(lane, iter->deemphasis); 820 + } 821 + 822 + return setting; 823 + } 824 + 825 + /** 826 + * ufshcd_apply_tx_eqtr_settings - Apply TX EQTR setting 827 + * @hba: per adapter instance 828 + * @pwr_mode: target power mode containing gear and rate information 829 + * @h_iter: host TX EQTR iterator data structure 830 + * @d_iter: device TX EQTR iterator data structure 831 + * 832 + * Returns 0 on success, negative error code otherwise 833 + */ 834 + static int ufshcd_apply_tx_eqtr_settings(struct ufs_hba *hba, 835 + struct ufs_pa_layer_attr *pwr_mode, 836 + struct tx_eqtr_iter *h_iter, 837 + struct tx_eqtr_iter *d_iter) 838 + { 839 + u32 setting; 840 + int ret; 841 + 842 + setting = ufshcd_compose_tx_eqtr_setting(h_iter, pwr_mode->lane_tx); 843 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQTRSETTING), setting); 844 + if (ret) 845 + return ret; 846 + 847 + setting = ufshcd_compose_tx_eqtr_setting(d_iter, pwr_mode->lane_rx); 848 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERTXEQTRSETTING), setting); 849 + if (ret) 850 + return ret; 851 + 852 + ret = ufshcd_vops_apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter); 853 + 854 + return ret; 855 + } 856 + 857 + /** 858 + * ufshcd_update_tx_eq_params - Update TX Equalization params 859 + * @params: TX EQ parameters data structure 860 + * @pwr_mode: target power mode containing gear and rate 861 + * @eqtr_data: TX EQTR data structure 862 + * 863 + * 
Update TX Equalization params using results from TX EQTR data. Check also 864 + * the TX EQTR FOM value for each TX lane in the TX EQTR data. If a TX lane got 865 + * a FOM value of 0, restore the TX Equalization settings from the last known 866 + * valid TX Equalization params for that specific TX lane. 867 + */ 868 + static inline void 869 + ufshcd_update_tx_eq_params(struct ufshcd_tx_eq_params *params, 870 + struct ufs_pa_layer_attr *pwr_mode, 871 + struct ufshcd_tx_eqtr_data *eqtr_data) 872 + { 873 + struct ufshcd_tx_eqtr_record *rec = params->eqtr_record; 874 + 875 + if (params->is_valid) { 876 + int lane; 877 + 878 + for (lane = 0; lane < pwr_mode->lane_tx; lane++) 879 + if (eqtr_data->host[lane].fom_val == 0) 880 + eqtr_data->host[lane] = params->host[lane]; 881 + 882 + for (lane = 0; lane < pwr_mode->lane_rx; lane++) 883 + if (eqtr_data->device[lane].fom_val == 0) 884 + eqtr_data->device[lane] = params->device[lane]; 885 + } 886 + 887 + memcpy(params->host, eqtr_data->host, sizeof(params->host)); 888 + memcpy(params->device, eqtr_data->device, sizeof(params->device)); 889 + 890 + if (!rec) 891 + return; 892 + 893 + memcpy(rec->host_fom, eqtr_data->host_fom, sizeof(rec->host_fom)); 894 + memcpy(rec->device_fom, eqtr_data->device_fom, sizeof(rec->device_fom)); 895 + rec->last_record_ts = ktime_get(); 896 + rec->last_record_index++; 897 + } 898 + 899 + /** 900 + * __ufshcd_tx_eqtr - TX Equalization Training (EQTR) procedure 901 + * @hba: per adapter instance 902 + * @params: TX EQ parameters data structure 903 + * @pwr_mode: target power mode containing gear and rate information 904 + * 905 + * This function implements the complete TX EQTR procedure as defined in UFSHCI 906 + * v5.0 specification. It iterates through all possible combinations of PreShoot 907 + * and DeEmphasis settings to find the optimal TX Equalization settings for all 908 + * active lanes. 
909 + * 910 + * Returns 0 on success, negative error code otherwise 911 + */ 912 + static int __ufshcd_tx_eqtr(struct ufs_hba *hba, 913 + struct ufshcd_tx_eq_params *params, 914 + struct ufs_pa_layer_attr *pwr_mode) 915 + { 916 + struct ufshcd_tx_eqtr_data *eqtr_data __free(kfree) = 917 + kzalloc(sizeof(*eqtr_data), GFP_KERNEL); 918 + struct tx_eqtr_iter h_iter = {}; 919 + struct tx_eqtr_iter d_iter = {}; 920 + u32 gear = pwr_mode->gear_tx; 921 + u8 preshoot, deemphasis; 922 + ktime_t start; 923 + int ret; 924 + 925 + if (!eqtr_data) 926 + return -ENOMEM; 927 + 928 + dev_info(hba->dev, "Start TX EQTR procedure for HS-G%u, Rate-%s, RX Lanes: %u, TX Lanes: %u\n", 929 + gear, ufs_hs_rate_to_str(pwr_mode->hs_rate), 930 + pwr_mode->lane_rx, pwr_mode->lane_tx); 931 + 932 + start = ktime_get(); 933 + 934 + /* Step 1 - Determine the TX Adapt Length for EQTR */ 935 + ret = ufshcd_setup_tx_eqtr_adapt_length(hba, params, gear); 936 + if (ret) { 937 + dev_err(hba->dev, "Failed to setup TX EQTR Adaptation length: %d\n", ret); 938 + return ret; 939 + } 940 + 941 + /* Step 2 - Determine TX Equalization setting capabilities */ 942 + ret = ufshcd_tx_eqtr_iter_init(hba, &h_iter, &d_iter); 943 + if (ret) { 944 + dev_err(hba->dev, "Failed to init TX EQTR data: %d\n", ret); 945 + return ret; 946 + } 947 + 948 + /* TX EQTR main loop */ 949 + for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) { 950 + for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) { 951 + if (!tx_eqtr_iter_update(preshoot, deemphasis, &h_iter, &d_iter)) 952 + continue; 953 + 954 + /* Step 3 - Apply TX EQTR settings */ 955 + ret = ufshcd_apply_tx_eqtr_settings(hba, pwr_mode, &h_iter, &d_iter); 956 + if (ret) { 957 + dev_err(hba->dev, "Failed to apply TX EQTR settings (PreShoot %u, DeEmphasis %u): %d\n", 958 + preshoot, deemphasis, ret); 959 + return ret; 960 + } 961 + 962 + /* Step 4 - Trigger UIC TX EQTR */ 963 + ret = ufshcd_uic_tx_eqtr(hba, gear); 964 + if (ret) { 965 + dev_err(hba->dev, "Failed to trigger UIC TX EQTR for target gear %u: %d\n", 966 + gear, ret); 967 + return ret; 968 + } 969 + 970 + /* Step 5 - Get FOM */ 971 + ret = ufshcd_get_rx_fom(hba, pwr_mode, &h_iter, &d_iter); 972 + if (ret) { 973 + dev_err(hba->dev, "Failed to get RX_FOM: %d\n", 974 + ret); 975 + return ret; 976 + } 977 + 978 + ufshcd_evaluate_tx_eqtr_fom(hba, pwr_mode, eqtr_data, &h_iter, &d_iter); 979 + } 980 + } 981 + 982 + dev_info(hba->dev, "TX EQTR procedure completed! Time elapsed: %llu ms\n", 983 + ktime_to_ms(ktime_sub(ktime_get(), start))); 984 + 985 + ufshcd_update_tx_eq_params(params, pwr_mode, eqtr_data); 986 + 987 + return ret; 988 + } 989 + 990 + /** 991 + * ufshcd_tx_eqtr_prepare - Prepare UFS link for TX EQTR procedure 992 + * @hba: per adapter instance 993 + * @pwr_mode: target power mode containing gear and rate 994 + * 995 + * This function prepares the UFS link for TX Equalization Training (EQTR) by 996 + * establishing the proper initial conditions required by the EQTR procedure. 997 + * It ensures that EQTR starts from the most reliable Power Mode (HS-G1) with 998 + * all connected lanes activated and sets host TX HS Adapt Type to INITIAL. 
999 + * 1000 + * Returns 0 on successful preparation, negative error code on failure 1001 + */ 1002 + static int ufshcd_tx_eqtr_prepare(struct ufs_hba *hba, 1003 + struct ufs_pa_layer_attr *pwr_mode) 1004 + { 1005 + struct ufs_pa_layer_attr pwr_mode_hs_g1 = { 1006 + /* TX EQTR shall be initiated from the most reliable HS-G1 */ 1007 + .gear_rx = UFS_HS_G1, 1008 + .gear_tx = UFS_HS_G1, 1009 + .lane_rx = pwr_mode->lane_rx, 1010 + .lane_tx = pwr_mode->lane_tx, 1011 + .pwr_rx = FAST_MODE, 1012 + .pwr_tx = FAST_MODE, 1013 + /* Use the target power mode's HS rate */ 1014 + .hs_rate = pwr_mode->hs_rate, 1015 + }; 1016 + u32 rate = pwr_mode->hs_rate; 1017 + int ret; 1018 + 1019 + /* Change power mode to HS-G1, activate all connected lanes. */ 1020 + ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1, 1021 + UFSHCD_PMC_POLICY_DONT_FORCE); 1022 + if (ret) { 1023 + dev_err(hba->dev, "TX EQTR: Failed to change power mode to HS-G1, Rate-%s: %d\n", 1024 + ufs_hs_rate_to_str(rate), ret); 1025 + return ret; 1026 + } 1027 + 1028 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), 1029 + PA_INITIAL_ADAPT); 1030 + if (ret) 1031 + dev_err(hba->dev, "TX EQTR: Failed to set Host Adapt type to INITIAL: %d\n", 1032 + ret); 1033 + 1034 + return ret; 1035 + } 1036 + 1037 + static void ufshcd_tx_eqtr_unprepare(struct ufs_hba *hba, 1038 + struct ufs_pa_layer_attr *pwr_mode) 1039 + { 1040 + int err; 1041 + 1042 + if (pwr_mode->pwr_rx == SLOWAUTO_MODE || pwr_mode->hs_rate == 0) 1043 + return; 1044 + 1045 + err = ufshcd_change_power_mode(hba, pwr_mode, 1046 + UFSHCD_PMC_POLICY_DONT_FORCE); 1047 + if (err) 1048 + dev_err(hba->dev, "%s: Failed to restore Power Mode: %d\n", 1049 + __func__, err); 1050 + } 1051 + 1052 + /** 1053 + * ufshcd_tx_eqtr - Perform TX EQTR procedures with vops callbacks 1054 + * @hba: per adapter instance 1055 + * @params: TX EQ parameters data structure to populate 1056 + * @pwr_mode: target power mode containing gear and rate information 1057 + * 1058 + * This is the main entry point for performing the TX Equalization Training 1059 + * (EQTR) procedure as defined in the UFSHCI v5.0 specification. It serves as a 1060 + * wrapper around __ufshcd_tx_eqtr() to provide vops support through the variant 1061 + * operations framework. 
1062 + * 1063 + * Returns 0 on success, negative error code on failure 1064 + */ 1065 + static int ufshcd_tx_eqtr(struct ufs_hba *hba, 1066 + struct ufshcd_tx_eq_params *params, 1067 + struct ufs_pa_layer_attr *pwr_mode) 1068 + { 1069 + struct ufs_pa_layer_attr old_pwr_info; 1070 + int ret; 1071 + 1072 + if (!params->eqtr_record) { 1073 + params->eqtr_record = devm_kzalloc(hba->dev, 1074 + sizeof(*params->eqtr_record), 1075 + GFP_KERNEL); 1076 + if (!params->eqtr_record) 1077 + return -ENOMEM; 1078 + } 1079 + 1080 + memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr)); 1081 + 1082 + ret = ufshcd_tx_eqtr_prepare(hba, pwr_mode); 1083 + if (ret) { 1084 + dev_err(hba->dev, "Failed to prepare TX EQTR: %d\n", ret); 1085 + goto out; 1086 + } 1087 + 1088 + ret = ufshcd_vops_tx_eqtr_notify(hba, PRE_CHANGE, pwr_mode); 1089 + if (ret) 1090 + goto out; 1091 + 1092 + ret = __ufshcd_tx_eqtr(hba, params, pwr_mode); 1093 + if (ret) 1094 + goto out; 1095 + 1096 + ret = ufshcd_vops_tx_eqtr_notify(hba, POST_CHANGE, pwr_mode); 1097 + 1098 + out: 1099 + if (ret) 1100 + ufshcd_tx_eqtr_unprepare(hba, &old_pwr_info); 1101 + 1102 + return ret; 1103 + } 1104 + 1105 + /** 1106 + * ufshcd_config_tx_eq_settings - Configure TX Equalization settings 1107 + * @hba: per adapter instance 1108 + * @pwr_mode: target power mode containing gear and rate information 1109 + * @force_tx_eqtr: execute the TX EQTR procedure 1110 + * 1111 + * This function finds and sets the TX Equalization settings for the given 1112 + * target power mode. 1113 + * 1114 + * Returns 0 on success, error code otherwise 1115 + */ 1116 + int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, 1117 + struct ufs_pa_layer_attr *pwr_mode, 1118 + bool force_tx_eqtr) 1119 + { 1120 + struct ufshcd_tx_eq_params *params; 1121 + u32 gear, rate; 1122 + 1123 + if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq) 1124 + return 0; 1125 + 1126 + if (!hba->max_pwr_info.is_valid) { 1127 + dev_err(hba->dev, "Max power info is invalid\n"); 1128 + return -EINVAL; 1129 + } 1130 + 1131 + if (!pwr_mode) { 1132 + dev_err(hba->dev, "Target power mode is NULL\n"); 1133 + return -EINVAL; 1134 + } 1135 + 1136 + gear = pwr_mode->gear_tx; 1137 + rate = pwr_mode->hs_rate; 1138 + 1139 + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) { 1140 + dev_err(hba->dev, "Invalid HS-Gear (%u) for TX Equalization\n", 1141 + gear); 1142 + return -EINVAL; 1143 + } else if (gear < max_t(u32, adaptive_txeq_gear, UFS_HS_G4)) { 1144 + /* TX EQTR is supported for HS-G4 and higher Gears */ 1145 + return 0; 1146 + } 1147 + 1148 + if (rate != PA_HS_MODE_A && rate != PA_HS_MODE_B) { 1149 + dev_err(hba->dev, "Invalid HS-Rate (%u) for TX Equalization\n", 1150 + rate); 1151 + return -EINVAL; 1152 + } 1153 + 1154 + params = &hba->tx_eq_params[gear - 1]; 1155 + if (!params->is_valid || force_tx_eqtr) { 1156 + int ret; 1157 + 1158 + ret = ufshcd_tx_eqtr(hba, params, pwr_mode); 1159 + if (ret) { 1160 + dev_err(hba->dev, "Failed to train TX Equalization for HS-G%u, Rate-%s: %d\n", 1161 + gear, ufs_hs_rate_to_str(rate), ret); 1162 + return ret; 1163 + } 1164 + 1165 + /* Mark TX Equalization settings as valid */ 1166 + params->is_valid = true; 1167 + params->is_applied = false; 1168 + } 1169 + 1170 + if (params->is_valid && !params->is_applied) { 1171 + int ret; 1172 + 1173 + ret = ufshcd_apply_tx_eq_settings(hba, params, gear); 1174 + if (ret) { 1175 + dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u, Rate-%s: %d\n", 1176 + gear, ufs_hs_rate_to_str(rate), ret); 1177 + 
return ret; 1178 + } 1179 + 1180 + params->is_applied = true; 1181 + } 1182 + 1183 + return 0; 1184 + } 1185 + 1186 + /** 1187 + * ufshcd_apply_valid_tx_eq_settings - Apply valid TX Equalization settings 1188 + * @hba: per-adapter instance 1189 + * 1190 + * This function iterates through all supported High-Speed (HS) gears and 1191 + * applies valid TX Equalization settings to both Host and Device. 1192 + */ 1193 + void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba) 1194 + { 1195 + struct ufshcd_tx_eq_params *params; 1196 + int gear, err; 1197 + 1198 + if (!ufshcd_is_tx_eq_supported(hba)) 1199 + return; 1200 + 1201 + if (!hba->max_pwr_info.is_valid) { 1202 + dev_err(hba->dev, "Max power info is invalid, cannot apply TX Equalization settings\n"); 1203 + return; 1204 + } 1205 + 1206 + for (gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) { 1207 + params = &hba->tx_eq_params[gear - 1]; 1208 + 1209 + if (params->is_valid) { 1210 + err = ufshcd_apply_tx_eq_settings(hba, params, gear); 1211 + if (err) { 1212 + params->is_applied = false; 1213 + dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u: %d\n", 1214 + gear, err); 1215 + } else { 1216 + params->is_applied = true; 1217 + } 1218 + } 1219 + } 1220 + } 1221 + 1222 + /** 1223 + * ufshcd_retrain_tx_eq - Retrain TX Equalization and apply new settings 1224 + * @hba: per-adapter instance 1225 + * @gear: target High-Speed (HS) gear for retraining 1226 + * 1227 + * This function initiates a refresh of the TX Equalization settings for a 1228 + * specific HS gear. It scales the clocks to maximum frequency, negotiates the 1229 + * power mode with the device, retrains TX EQ and applies new TX EQ settings 1230 + * by conducting a Power Mode change. 1231 + * 1232 + * Returns 0 on success, non-zero error code otherwise 1233 + */ 1234 + int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear) 1235 + { 1236 + struct ufs_pa_layer_attr new_pwr_info, final_params = {}; 1237 + int ret; 1238 + 1239 + if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq) 1240 + return -EOPNOTSUPP; 1241 + 1242 + if (gear < adaptive_txeq_gear) 1243 + return -ERANGE; 1244 + 1245 + ufshcd_hold(hba); 1246 + 1247 + ret = ufshcd_pause_command_processing(hba, 1 * USEC_PER_SEC); 1248 + if (ret) { 1249 + ufshcd_release(hba); 1250 + return ret; 1251 + } 1252 + 1253 + /* scale up clocks to max frequency before TX EQTR */ 1254 + if (ufshcd_is_clkscaling_supported(hba)) 1255 + ufshcd_scale_clks(hba, ULONG_MAX, true); 1256 + 1257 + new_pwr_info = hba->pwr_info; 1258 + new_pwr_info.gear_tx = gear; 1259 + new_pwr_info.gear_rx = gear; 1260 + 1261 + ret = ufshcd_vops_negotiate_pwr_mode(hba, &new_pwr_info, &final_params); 1262 + if (ret) 1263 + memcpy(&final_params, &new_pwr_info, sizeof(final_params)); 1264 + 1265 + if (final_params.gear_tx != gear) { 1266 + dev_err(hba->dev, "Negotiated Gear (%u) does not match target Gear (%u)\n", 1267 + final_params.gear_tx, gear); 1268 + ret = -EINVAL; 1269 + goto out; 1270 + } 1271 + 1272 + ret = ufshcd_config_tx_eq_settings(hba, &final_params, true); 1273 + if (ret) { 1274 + dev_err(hba->dev, "Failed to config TX Equalization for HS-G%u, Rate-%s: %d\n", 1275 + final_params.gear_tx, 1276 + ufs_hs_rate_to_str(final_params.hs_rate), ret); 1277 + goto out; 1278 + } 1279 + 1280 + /* Change Power Mode to apply the new TX EQ settings */ 1281 + ret = ufshcd_change_power_mode(hba, &final_params, 1282 + UFSHCD_PMC_POLICY_FORCE); 1283 + if (ret) 1284 + dev_err(hba->dev, "%s: Failed to change Power Mode to HS-G%u, Rate-%s: %d\n", 1285 + 
__func__, final_params.gear_tx, 1286 + ufs_hs_rate_to_str(final_params.hs_rate), ret); 1287 + 1288 + out: 1289 + ufshcd_resume_command_processing(hba); 1290 + ufshcd_release(hba); 1291 + 1292 + return ret; 1293 + }
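The adapt-length selection above hinges on converting each capability's (range, length) encoding into a time in Unit Intervals before host and device values can be compared. A minimal standalone sketch of the NRZ conversion follows; the bit layout of adapt_cap (length field in the low bits, one range flag above it) and the SKETCH_* constants are assumptions for illustration only, while the 650 UI factor comes from the formula quoted in the kernel-doc above.

/*
 * Standalone sketch of the NRZ TAdapt conversion. The driver's real
 * ADAPT_LENGTH_MASK and IS_ADAPT_RANGE_COARSE() definitions are
 * authoritative; the layout assumed here is for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ADAPT_LENGTH_MASK	0x1fu		/* assumed length field */
#define SKETCH_ADAPT_RANGE_COARSE	(1u << 5)	/* assumed range flag */
#define SKETCH_TADAPT_FACTOR		650u		/* 650 UI, per the formula */

static uint64_t sketch_t_adapt_nrz(uint32_t adapt_cap)
{
	uint8_t len = adapt_cap & SKETCH_ADAPT_LENGTH_MASK;

	if (adapt_cap & SKETCH_ADAPT_RANGE_COARSE)
		return (uint64_t)SKETCH_TADAPT_FACTOR << len;	/* 650 x 2^len */

	return (uint64_t)SKETCH_TADAPT_FACTOR * (len + 1);	/* 650 x (len + 1) */
}

int main(void)
{
	/* FINE range, length 3: 650 x (3 + 1) = 2600 UI */
	printf("fine:   %llu UI\n", (unsigned long long)sketch_t_adapt_nrz(3));
	/* COARSE range, length 3: 650 x 2^3 = 5200 UI */
	printf("coarse: %llu UI\n",
	       (unsigned long long)sketch_t_adapt_nrz(SKETCH_ADAPT_RANGE_COARSE | 3));
	return 0;
}

Picking the larger of the two converted durations, and only then falling back to the per-gear default when the result is still below the minimum, mirrors what ufshcd_setup_tx_eqtr_adapt_length() does in both the HS-G4/G5 and HS-G6 branches.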
-1
drivers/ufs/core/ufshcd-crypto.h
··· 8 8 9 9 #include <scsi/scsi_cmnd.h> 10 10 #include <ufs/ufshcd.h> 11 - #include "ufshcd-priv.h" 12 11 #include <ufs/ufshci.h> 13 12 14 13 #ifdef CONFIG_SCSI_UFS_CRYPTO
+59 -2
drivers/ufs/core/ufshcd-priv.h
··· 76 76 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd); 77 77 int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag); 78 78 int ufshcd_mcq_abort(struct scsi_cmnd *cmd); 79 + u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i); 80 + void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i); 79 81 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); 80 82 void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd); 83 + int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us); 84 + void ufshcd_resume_command_processing(struct ufs_hba *hba); 85 + int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up); 81 86 82 87 /** 83 88 * enum ufs_descr_fmt - UFS string descriptor format ··· 107 102 108 103 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); 109 104 int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id); 105 + 106 + int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear); 107 + void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba); 108 + int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, 109 + struct ufs_pa_layer_attr *pwr_mode, 110 + bool force_tx_eqtr); 111 + void ufshcd_print_tx_eq_params(struct ufs_hba *hba); 112 + bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba); 113 + bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis); 114 + int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear); 110 115 111 116 /* Wrapper functions for safely calling variant operations */ 112 117 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) ··· 182 167 return 0; 183 168 } 184 169 170 + static inline int ufshcd_vops_negotiate_pwr_mode(struct ufs_hba *hba, 171 + const struct ufs_pa_layer_attr *dev_max_params, 172 + struct ufs_pa_layer_attr *dev_req_params) 173 + { 174 + if (hba->vops && hba->vops->negotiate_pwr_mode) 175 + return hba->vops->negotiate_pwr_mode(hba, dev_max_params, 176 + dev_req_params); 177 + 178 + return -ENOTSUPP; 179 + } 180 + 185 181 static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, 186 182 enum ufs_notify_change_status status, 187 - const struct ufs_pa_layer_attr *dev_max_params, 188 183 struct ufs_pa_layer_attr *dev_req_params) 189 184 { 190 185 if (hba->vops && hba->vops->pwr_change_notify) 191 186 return hba->vops->pwr_change_notify(hba, status, 192 - dev_max_params, dev_req_params); 187 + dev_req_params); 193 188 194 189 return -ENOTSUPP; 195 190 } ··· 308 283 { 309 284 if (hba->vops && hba->vops->freq_to_gear_speed) 310 285 return hba->vops->freq_to_gear_speed(hba, freq); 286 + 287 + return 0; 288 + } 289 + 290 + static inline int ufshcd_vops_get_rx_fom(struct ufs_hba *hba, 291 + struct ufs_pa_layer_attr *pwr_mode, 292 + struct tx_eqtr_iter *h_iter, 293 + struct tx_eqtr_iter *d_iter) 294 + { 295 + if (hba->vops && hba->vops->get_rx_fom) 296 + return hba->vops->get_rx_fom(hba, pwr_mode, h_iter, d_iter); 297 + 298 + return 0; 299 + } 300 + 301 + static inline int ufshcd_vops_apply_tx_eqtr_settings(struct ufs_hba *hba, 302 + struct ufs_pa_layer_attr *pwr_mode, 303 + struct tx_eqtr_iter *h_iter, 304 + struct tx_eqtr_iter *d_iter) 305 + { 306 + if (hba->vops && hba->vops->apply_tx_eqtr_settings) 307 + return hba->vops->apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter); 308 + 309 + return 0; 310 + } 311 + 312 + static inline int ufshcd_vops_tx_eqtr_notify(struct ufs_hba *hba, 313 + enum ufs_notify_change_status status, 314 + struct ufs_pa_layer_attr *pwr_mode) 315 + { 316 + if (hba->vops && hba->vops->tx_eqtr_notify) 317 
+ return hba->vops->tx_eqtr_notify(hba, status, pwr_mode); 311 318 312 319 return 0; 313 320 }
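Taken together, the new wrappers define the contract a variant driver opts into: parameter negotiation moves out of the PRE_CHANGE notification into negotiate_pwr_mode, pwr_change_notify shrinks to three arguments, and the three TX EQTR hooks stay optional (each wrapper returns 0 when the callback is absent). A hypothetical wiring could look like the sketch below; the my_* callbacks are placeholders, and only the field names follow from the wrappers above.

static const struct ufs_hba_variant_ops my_ufs_vops = {
	.name			= "my-ufs",
	/* negotiation, formerly done inside the PRE_CHANGE notification */
	.negotiate_pwr_mode	= my_negotiate_pwr_mode,
	/* now takes only (hba, status, dev_req_params) */
	.pwr_change_notify	= my_pwr_change_notify,
	/* optional TX EQTR hooks; unset callbacks are simply skipped */
	.get_rx_fom		= my_get_rx_fom,
	.apply_tx_eqtr_settings	= my_apply_tx_eqtr_settings,
	.tx_eqtr_notify		= my_tx_eqtr_notify,
};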
+219 -41
drivers/ufs/core/ufshcd.c
··· 316 316 .model = "THGLF2G9D8KBADG", 317 317 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, 318 318 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 319 + .model = "THGJFJT0E25BAIP", 320 + .quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT }, 321 + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 319 322 .model = "THGJFJT1E45BATP", 320 323 .quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT }, 321 324 {} ··· 337 334 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 338 335 static void ufshcd_resume_clkscaling(struct ufs_hba *hba); 339 336 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); 340 - static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, 341 - bool scale_up); 342 337 static irqreturn_t ufshcd_intr(int irq, void *__hba); 343 - static int ufshcd_change_power_mode(struct ufs_hba *hba, 344 - struct ufs_pa_layer_attr *pwr_mode); 345 338 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); 346 339 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); 347 340 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, ··· 1211 1212 * 1212 1213 * Return: 0 if successful; < 0 upon failure. 1213 1214 */ 1214 - static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, 1215 - bool scale_up) 1215 + int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up) 1216 1216 { 1217 1217 int ret = 0; 1218 1218 ktime_t start = ktime_get(); ··· 1365 1367 } 1366 1368 1367 1369 /** 1370 + * ufshcd_pause_command_processing - Pause command processing 1371 + * @hba: per-adapter instance 1372 + * @timeout_us: timeout in microseconds to wait for pending commands to finish 1373 + * 1374 + * This function stops new command submissions and waits for existing commands 1375 + * to complete. 1376 + * 1377 + * Return: 0 on success, %-EBUSY if commands did not finish within @timeout_us. 1378 + * On failure, all acquired locks are released and the tagset is unquiesced. 1379 + */ 1380 + int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us) 1381 + { 1382 + int ret = 0; 1383 + 1384 + mutex_lock(&hba->host->scan_mutex); 1385 + blk_mq_quiesce_tagset(&hba->host->tag_set); 1386 + down_write(&hba->clk_scaling_lock); 1387 + 1388 + if (ufshcd_wait_for_pending_cmds(hba, timeout_us)) { 1389 + ret = -EBUSY; 1390 + up_write(&hba->clk_scaling_lock); 1391 + blk_mq_unquiesce_tagset(&hba->host->tag_set); 1392 + mutex_unlock(&hba->host->scan_mutex); 1393 + } 1394 + 1395 + return ret; 1396 + } 1397 + 1398 + /** 1399 + * ufshcd_resume_command_processing - Resume command processing 1400 + * @hba: per-adapter instance 1401 + * 1402 + * This function resumes command submissions. 1403 + */ 1404 + void ufshcd_resume_command_processing(struct ufs_hba *hba) 1405 + { 1406 + up_write(&hba->clk_scaling_lock); 1407 + blk_mq_unquiesce_tagset(&hba->host->tag_set); 1408 + mutex_unlock(&hba->host->scan_mutex); 1409 + } 1410 + 1411 + /** 1368 1412 * ufshcd_scale_gear - scale up/down UFS gear 1369 1413 * @hba: per adapter instance 1370 1414 * @target_gear: target gear to scale to ··· 1451 1411 1452 1412 config_pwr_mode: 1453 1413 /* check if the power mode needs to be changed or not? 
*/ 1454 - ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); 1414 + ret = ufshcd_config_pwr_mode(hba, &new_pwr_info, 1415 + UFSHCD_PMC_POLICY_DONT_FORCE); 1455 1416 if (ret) 1456 1417 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", 1457 1418 __func__, ret, ··· 4293 4252 pwr_mode_change = true; 4294 4253 } 4295 4254 if (pwr_mode_change) { 4296 - ret = ufshcd_change_power_mode(hba, &temp_pwr_info); 4255 + ret = ufshcd_change_power_mode(hba, &temp_pwr_info, 4256 + UFSHCD_PMC_POLICY_DONT_FORCE); 4297 4257 if (ret) 4298 4258 goto out; 4299 4259 } ··· 4318 4276 4319 4277 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) 4320 4278 && pwr_mode_change) 4321 - ufshcd_change_power_mode(hba, &orig_pwr_info); 4279 + ufshcd_change_power_mode(hba, &orig_pwr_info, 4280 + UFSHCD_PMC_POLICY_DONT_FORCE); 4322 4281 out: 4323 4282 return ret; 4324 4283 } ··· 4386 4343 ret = __ufshcd_send_uic_cmd(hba, cmd); 4387 4344 if (ret) { 4388 4345 dev_err(hba->dev, 4389 - "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", 4390 - cmd->command, cmd->argument3, ret); 4346 + "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) uic error %d\n", 4347 + cmd->command, UIC_GET_ATTR_ID(cmd->argument1), 4348 + cmd->argument3, ret); 4391 4349 goto out; 4392 4350 } 4393 4351 4394 4352 if (!wait_for_completion_timeout(hba->uic_async_done, 4395 4353 msecs_to_jiffies(uic_cmd_timeout))) { 4396 4354 dev_err(hba->dev, 4397 - "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", 4398 - cmd->command, cmd->argument3); 4355 + "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) completion timeout\n", 4356 + cmd->command, UIC_GET_ATTR_ID(cmd->argument1), 4357 + cmd->argument3); 4399 4358 4400 4359 if (!cmd->cmd_active) { 4401 4360 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", ··· 4413 4368 status = ufshcd_get_upmcrs(hba); 4414 4369 if (status != PWR_LOCAL) { 4415 4370 dev_err(hba->dev, 4416 - "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", 4417 - cmd->command, status); 4371 + "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) failed, host upmcrs:0x%x\n", 4372 + cmd->command, UIC_GET_ATTR_ID(cmd->argument1), 4373 + cmd->argument3, status); 4418 4374 ret = (status != PWR_OK) ? 
status : -1; 4419 4375 } 4420 4376 out: 4421 4377 if (ret) { 4422 4378 ufshcd_print_host_state(hba); 4423 4379 ufshcd_print_pwr_info(hba); 4380 + ufshcd_print_tx_eq_params(hba); 4424 4381 ufshcd_print_evt_hist(hba); 4425 4382 } 4426 4383 ··· 4436 4389 out_unlock: 4437 4390 spin_unlock_irqrestore(hba->host->host_lock, flags); 4438 4391 mutex_unlock(&hba->uic_cmd_mutex); 4392 + 4393 + return ret; 4394 + } 4395 + 4396 + /** 4397 + * ufshcd_uic_tx_eqtr - Perform UIC TX Equalization Training 4398 + * @hba: per adapter instance 4399 + * @gear: target gear for EQTR 4400 + * 4401 + * Returns 0 on success, negative error code otherwise 4402 + */ 4403 + int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear) 4404 + { 4405 + struct uic_command uic_cmd = { 4406 + .command = UIC_CMD_DME_SET, 4407 + .argument1 = UIC_ARG_MIB(PA_EQTR_GEAR), 4408 + .argument3 = gear, 4409 + }; 4410 + int ret; 4411 + 4412 + ufshcd_hold(hba); 4413 + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4414 + ufshcd_release(hba); 4439 4415 4440 4416 return ret; 4441 4417 } ··· 4726 4656 return 0; 4727 4657 } 4728 4658 4729 - static int ufshcd_change_power_mode(struct ufs_hba *hba, 4730 - struct ufs_pa_layer_attr *pwr_mode) 4659 + /** 4660 + * ufshcd_dme_change_power_mode() - UniPro DME Power Mode change sequence 4661 + * @hba: per-adapter instance 4662 + * @pwr_mode: pointer to the target power mode (gear/lane) attributes 4663 + * @pmc_policy: Power Mode change policy 4664 + * 4665 + * This function handles the low-level DME (Device Management Entity) 4666 + * configuration required to transition the UFS link to a new power mode. It 4667 + * performs the following steps: 4668 + * 1. Checks if the requested mode matches the current state. 4669 + * 2. Sets M-PHY and UniPro attributes including Gear (PA_RXGEAR/TXGEAR), 4670 + * Lanes, Termination, and HS Series (PA_HSSERIES). 4671 + * 3. Configures default UniPro timeout values (DL_FC0, etc.) unless 4672 + * explicitly skipped via quirks. 4673 + * 4. Triggers the actual hardware mode change via ufshcd_uic_change_pwr_mode(). 4674 + * 5. Updates the HBA's cached power information on success. 4675 + * 4676 + * Return: 0 on success, non-zero error code on failure. 4677 + */ 4678 + static int ufshcd_dme_change_power_mode(struct ufs_hba *hba, 4679 + struct ufs_pa_layer_attr *pwr_mode, 4680 + enum ufshcd_pmc_policy pmc_policy) 4731 4681 { 4732 4682 int ret; 4733 4683 4734 4684 /* if already configured to the requested pwr_mode */ 4735 - if (!hba->force_pmc && 4685 + if (pmc_policy == UFSHCD_PMC_POLICY_DONT_FORCE && 4736 4686 pwr_mode->gear_rx == hba->pwr_info.gear_rx && 4737 4687 pwr_mode->gear_tx == hba->pwr_info.gear_tx && 4738 4688 pwr_mode->lane_rx == hba->pwr_info.lane_rx && ··· 4832 4742 } 4833 4743 4834 4744 /** 4745 + * ufshcd_change_power_mode() - Change UFS Link Power Mode 4746 + * @hba: per-adapter instance 4747 + * @pwr_mode: pointer to the target power mode (gear/lane) attributes 4748 + * @pmc_policy: Power Mode change policy 4749 + * 4750 + * This function handles the high-level sequence for changing the UFS link 4751 + * power mode. It triggers vendor-specific pre-change notification, 4752 + * executes the DME (Device Management Entity) power mode change sequence, 4753 + * and, upon success, triggers vendor-specific post-change notification. 4754 + * 4755 + * Return: 0 on success, non-zero error code on failure. 
4756 + */ 4757 + int ufshcd_change_power_mode(struct ufs_hba *hba, 4758 + struct ufs_pa_layer_attr *pwr_mode, 4759 + enum ufshcd_pmc_policy pmc_policy) 4760 + { 4761 + int ret; 4762 + 4763 + ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, pwr_mode); 4764 + 4765 + ret = ufshcd_dme_change_power_mode(hba, pwr_mode, pmc_policy); 4766 + 4767 + if (!ret) 4768 + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, pwr_mode); 4769 + 4770 + return ret; 4771 + } 4772 + EXPORT_SYMBOL_GPL(ufshcd_change_power_mode); 4773 + 4774 + /** 4835 4775 * ufshcd_config_pwr_mode - configure a new power mode 4836 4776 * @hba: per-adapter instance 4837 4777 * @desired_pwr_mode: desired power configuration 4778 + * @pmc_policy: Power Mode change policy 4838 4779 * 4839 4780 * Return: 0 upon success; < 0 upon failure. 4840 4781 */ 4841 4782 int ufshcd_config_pwr_mode(struct ufs_hba *hba, 4842 - struct ufs_pa_layer_attr *desired_pwr_mode) 4783 + struct ufs_pa_layer_attr *desired_pwr_mode, 4784 + enum ufshcd_pmc_policy pmc_policy) 4843 4785 { 4844 4786 struct ufs_pa_layer_attr final_params = { 0 }; 4845 4787 int ret; 4846 4788 4847 - ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, 4848 - desired_pwr_mode, &final_params); 4789 + ret = ufshcd_vops_negotiate_pwr_mode(hba, desired_pwr_mode, 4790 + &final_params); 4791 + if (ret) { 4792 + if (ret != -ENOTSUPP) 4793 + dev_err(hba->dev, "Failed to negotiate power mode: %d, use desired as is\n", 4794 + ret); 4849 4795 4850 - if (ret) 4851 4796 memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); 4797 + } 4852 4798 4853 - ret = ufshcd_change_power_mode(hba, &final_params); 4799 + ret = ufshcd_config_tx_eq_settings(hba, &final_params, false); 4800 + if (ret) 4801 + dev_warn(hba->dev, "Failed to configure TX Equalization for HS-G%u, Rate-%s: %d\n", 4802 + final_params.gear_tx, 4803 + ufs_hs_rate_to_str(final_params.hs_rate), ret); 4854 4804 4855 - if (!ret) 4856 - ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, 4857 - &final_params); 4858 - 4859 - return ret; 4805 + return ufshcd_change_power_mode(hba, &final_params, pmc_policy); 4860 4806 } 4861 4807 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); 4862 4808 ··· 5706 5580 5707 5581 guard(spinlock_irqsave)(hba->host->host_lock); 5708 5582 cmd = hba->active_uic_cmd; 5709 - if (!cmd) 5583 + if (!cmd) { 5584 + dev_err(hba->dev, 5585 + "No active UIC command. Maybe a timeout occurred?\n"); 5710 5586 return retval; 5587 + } 5711 5588 5712 5589 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) 5713 5590 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); ··· 6914 6785 spin_unlock_irqrestore(hba->host->host_lock, flags); 6915 6786 ufshcd_print_host_state(hba); 6916 6787 ufshcd_print_pwr_info(hba); 6788 + ufshcd_print_tx_eq_params(hba); 6917 6789 ufshcd_print_evt_hist(hba); 6918 6790 ufshcd_print_tmrs(hba, hba->outstanding_tasks); 6919 6791 ufshcd_print_trs_all(hba, pr_prdt); ··· 6973 6843 * are sent via bsg and/or sysfs. 
6974 6844 */ 6975 6845 down_write(&hba->clk_scaling_lock); 6976 - hba->force_pmc = true; 6977 - pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); 6846 + pmc_err = ufshcd_config_pwr_mode(hba, &hba->pwr_info, 6847 + UFSHCD_PMC_POLICY_FORCE); 6978 6848 if (pmc_err) { 6979 6849 needs_reset = true; 6980 6850 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", 6981 6851 __func__, pmc_err); 6982 6852 } 6983 - hba->force_pmc = false; 6984 6853 ufshcd_print_pwr_info(hba); 6985 6854 up_write(&hba->clk_scaling_lock); 6986 6855 spin_lock_irqsave(hba->host->host_lock, flags); ··· 7105 6976 } 7106 6977 7107 6978 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); 7108 - if ((reg & UIC_DME_ERROR) && 7109 - (reg & UIC_DME_ERROR_CODE_MASK)) { 6979 + if (reg & UIC_DME_ERROR) { 7110 6980 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); 7111 - hba->uic_error |= UFSHCD_UIC_DME_ERROR; 6981 + 6982 + if (reg & UIC_DME_ERROR_CODE_MASK) 6983 + hba->uic_error |= UFSHCD_UIC_DME_ERROR; 6984 + 6985 + if (reg & UIC_DME_QOS_MASK) { 6986 + atomic_set(&hba->dme_qos_notification, 6987 + reg & UIC_DME_QOS_MASK); 6988 + if (hba->dme_qos_sysfs_handle) 6989 + sysfs_notify_dirent(hba->dme_qos_sysfs_handle); 6990 + } 6991 + 7112 6992 retval |= IRQ_HANDLED; 7113 6993 } 7114 6994 ··· 7187 7049 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, 7188 7050 "host_regs: "); 7189 7051 ufshcd_print_pwr_info(hba); 7052 + ufshcd_print_tx_eq_params(hba); 7190 7053 } 7191 7054 ufshcd_schedule_eh_work(hba); 7192 7055 retval |= IRQ_HANDLED; ··· 7236 7097 /** 7237 7098 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events 7238 7099 * @hba: per adapter instance 7100 + * @reset_iag: true, to reset MCQ IAG counter and timer of the CQ 7239 7101 * 7240 7102 * Return: IRQ_HANDLED if interrupt is handled. 7241 7103 */ 7242 - static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) 7104 + static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba, bool reset_iag) 7243 7105 { 7244 7106 struct ufs_hw_queue *hwq; 7245 7107 unsigned long outstanding_cqs; 7246 7108 unsigned int nr_queues; 7247 7109 int i, ret; 7248 - u32 events; 7110 + u32 events, reg; 7249 7111 7250 7112 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); 7251 7113 if (ret) ··· 7260 7120 events = ufshcd_mcq_read_cqis(hba, i); 7261 7121 if (events) 7262 7122 ufshcd_mcq_write_cqis(hba, events, i); 7123 + 7124 + if (reset_iag) { 7125 + reg = ufshcd_mcq_read_mcqiacr(hba, i); 7126 + reg |= INT_AGGR_COUNTER_AND_TIMER_RESET; 7127 + ufshcd_mcq_write_mcqiacr(hba, reg, i); 7128 + } 7263 7129 7264 7130 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) 7265 7131 ufshcd_mcq_poll_cqe_lock(hba, hwq); ··· 7300 7154 retval |= ufshcd_transfer_req_compl(hba); 7301 7155 7302 7156 if (intr_status & MCQ_CQ_EVENT_STATUS) 7303 - retval |= ufshcd_handle_mcq_cq_events(hba); 7157 + retval |= ufshcd_handle_mcq_cq_events(hba, false); 7158 + 7159 + if (intr_status & MCQ_IAG_EVENT_STATUS) 7160 + retval |= ufshcd_handle_mcq_cq_events(hba, true); 7304 7161 7305 7162 return retval; 7306 7163 } ··· 7371 7222 struct ufs_hba *hba = __hba; 7372 7223 u32 intr_status, enabled_intr_status; 7373 7224 7374 - /* Move interrupt handling to thread when MCQ & ESI are not enabled */ 7375 - if (!hba->mcq_enabled || !hba->mcq_esi_enabled) 7225 + /* 7226 + * Handle interrupt in thread if MCQ or ESI is disabled, 7227 + * and no active UIC command. 
7228 + */ 7229 + if ((!hba->mcq_enabled || !hba->mcq_esi_enabled) && 7230 + !hba->active_uic_cmd) 7376 7231 return IRQ_WAKE_THREAD; 7377 7232 7378 7233 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); ··· 7983 7830 ufshcd_print_evt_hist(hba); 7984 7831 ufshcd_print_host_state(hba); 7985 7832 ufshcd_print_pwr_info(hba); 7833 + ufshcd_print_tx_eq_params(hba); 7986 7834 ufshcd_print_tr(hba, cmd, true); 7987 7835 } else { 7988 7836 ufshcd_print_tr(hba, cmd, false); ··· 8961 8807 8962 8808 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME) 8963 8809 ufshcd_quirk_override_pa_h8time(hba); 8810 + 8811 + ufshcd_apply_valid_tx_eq_settings(hba); 8964 8812 } 8965 8813 8966 8814 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) ··· 9267 9111 9268 9112 /* UFS device is also active now */ 9269 9113 ufshcd_set_ufs_dev_active(hba); 9114 + 9115 + /* Indicate that DME QoS Monitor has been reset */ 9116 + atomic_set(&hba->dme_qos_notification, 0x1); 9117 + if (hba->dme_qos_sysfs_handle) 9118 + sysfs_notify_dirent(hba->dme_qos_sysfs_handle); 9119 + 9270 9120 ufshcd_force_reset_auto_bkops(hba); 9271 9121 9272 9122 ufshcd_set_timestamp_attr(hba); ··· 9287 9125 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) 9288 9126 ufshcd_set_dev_ref_clk(hba); 9289 9127 /* Gear up to HS gear. */ 9290 - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); 9128 + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info, 9129 + UFSHCD_PMC_POLICY_DONT_FORCE); 9291 9130 if (ret) { 9292 9131 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 9293 9132 __func__, ret); ··· 9906 9743 hba->is_powered = false; 9907 9744 ufs_put_device_desc(hba); 9908 9745 } 9746 + sysfs_put(hba->dme_qos_sysfs_handle); 9909 9747 } 9910 9748 9911 9749 static int ufshcd_execute_start_stop(struct scsi_device *sdev, ··· 10106 9942 #ifdef CONFIG_PM 10107 9943 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) 10108 9944 { 9945 + bool vcc_on = false; 10109 9946 int ret = 0; 10110 9947 10111 9948 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && 10112 9949 !hba->dev_info.is_lu_power_on_wp) { 10113 9950 ret = ufshcd_setup_vreg(hba, true); 9951 + vcc_on = true; 10114 9952 } else if (!ufshcd_is_ufs_dev_active(hba)) { 10115 9953 if (!ufshcd_is_link_active(hba)) { 10116 9954 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); ··· 10123 9957 goto vccq_lpm; 10124 9958 } 10125 9959 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); 9960 + vcc_on = true; 10126 9961 } 10127 9962 goto out; 10128 9963 ··· 10132 9965 vcc_disable: 10133 9966 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 10134 9967 out: 9968 + /* 9969 + * On platforms with a slow VCC ramp-up, a delay is needed after 9970 + * turning on VCC to ensure the voltage is stable before the 9971 + * reference clock is enabled. 9972 + */ 9973 + if (hba->quirks & UFSHCD_QUIRK_VCC_ON_DELAY && !ret && vcc_on && 9974 + hba->vreg_info.vcc && !hba->vreg_info.vcc->always_on) 9975 + usleep_range(1000, 1100); 9976 + 10135 9977 return ret; 10136 9978 } 10137 9979 #endif /* CONFIG_PM */ ··· 11246 11070 goto out_disable; 11247 11071 11248 11072 ufs_sysfs_add_nodes(hba->dev); 11073 + hba->dme_qos_sysfs_handle = sysfs_get_dirent(hba->dev->kobj.sd, 11074 + "dme_qos_notification"); 11249 11075 async_schedule(ufshcd_async_scan, hba); 11250 11076 11251 11077 device_enable_async_suspend(dev);
-3
drivers/ufs/host/ufs-amd-versal2.c
··· 443 443 } 444 444 445 445 static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, 446 - const struct ufs_pa_layer_attr *dev_max_params, 447 446 struct ufs_pa_layer_attr *dev_req_params) 448 447 { 449 448 struct ufs_versal2_host *host = ufshcd_get_variant(hba); ··· 450 451 int ret = 0; 451 452 452 453 if (status == PRE_CHANGE) { 453 - memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr)); 454 - 455 454 /* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */ 456 455 if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 && 457 456 !host->ctlecompval1) {
+17 -17
drivers/ufs/host/ufs-exynos.c
··· 818 818 } 819 819 820 820 static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, 821 - const struct ufs_pa_layer_attr *dev_max_params, 822 821 struct ufs_pa_layer_attr *dev_req_params) 823 822 { 824 823 struct exynos_ufs *ufs = ufshcd_get_variant(hba); 825 824 struct phy *generic_phy = ufs->phy; 826 - struct ufs_host_params host_params; 827 825 int ret; 828 826 829 827 if (!dev_req_params) { 830 828 pr_err("%s: incoming dev_req_params is NULL\n", __func__); 831 829 ret = -EINVAL; 832 - goto out; 833 - } 834 - 835 - ufshcd_init_host_params(&host_params); 836 - 837 - /* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */ 838 - host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); 839 - host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); 840 - 841 - ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); 842 - if (ret) { 843 - pr_err("%s: failed to determine capabilities\n", __func__); 844 830 goto out; 845 831 } 846 832 ··· 1663 1677 return ret; 1664 1678 } 1680 + static int exynos_ufs_negotiate_pwr_mode(struct ufs_hba *hba, 1681 + const struct ufs_pa_layer_attr *dev_max_params, 1682 + struct ufs_pa_layer_attr *dev_req_params) 1683 + { 1684 + struct ufs_host_params host_params; 1685 + 1686 + ufshcd_init_host_params(&host_params); 1687 + 1688 + /* This driver only supports symmetric gear settings, e.g. hs_tx_gear == hs_rx_gear */ 1689 + host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); 1690 + host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); 1691 + 1692 + return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); 1693 + } 1694 + 1666 1695 static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, 1667 1696 enum ufs_notify_change_status status, 1668 - const struct ufs_pa_layer_attr *dev_max_params, 1669 1697 struct ufs_pa_layer_attr *dev_req_params) 1670 1698 { 1671 1699 int ret = 0; 1672 1700 1673 1701 switch (status) { 1674 1702 case PRE_CHANGE: 1675 - ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params, 1676 - dev_req_params); 1703 + ret = exynos_ufs_pre_pwr_mode(hba, dev_req_params); 1677 1704 break; 1678 1705 case POST_CHANGE: 1679 1706 ret = exynos_ufs_post_pwr_mode(hba, dev_req_params); ··· 2014 2015 .exit = exynos_ufs_exit, 2015 2016 .hce_enable_notify = exynos_ufs_hce_enable_notify, 2016 2017 .link_startup_notify = exynos_ufs_link_startup_notify, 2018 + .negotiate_pwr_mode = exynos_ufs_negotiate_pwr_mode, 2017 2019 .pwr_change_notify = exynos_ufs_pwr_change_notify, 2018 2020 .setup_clocks = exynos_ufs_setup_clocks, 2019 2021 .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
+13 -10
drivers/ufs/host/ufs-hisi.c
··· 298 298 ufshcd_init_host_params(host_params); 299 299 } 300 300 301 + static int ufs_hisi_negotiate_pwr_mode(struct ufs_hba *hba, 302 + const struct ufs_pa_layer_attr *dev_max_params, 303 + struct ufs_pa_layer_attr *dev_req_params) 304 + { 305 + struct ufs_host_params host_params; 306 + 307 + ufs_hisi_set_dev_cap(&host_params); 308 + 309 + return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); 310 + } 311 + 301 312 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) 302 313 { 303 314 struct ufs_hisi_host *host = ufshcd_get_variant(hba); ··· 373 362 374 363 static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, 375 364 enum ufs_notify_change_status status, 376 - const struct ufs_pa_layer_attr *dev_max_params, 377 365 struct ufs_pa_layer_attr *dev_req_params) 378 366 { 379 - struct ufs_host_params host_params; 380 367 int ret = 0; 381 368 382 369 if (!dev_req_params) { ··· 386 377 387 378 switch (status) { 388 379 case PRE_CHANGE: 389 - ufs_hisi_set_dev_cap(&host_params); 390 - ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); 391 - if (ret) { 392 - dev_err(hba->dev, 393 - "%s: failed to determine capabilities\n", __func__); 394 - goto out; 395 - } 396 - 397 380 ufs_hisi_pwr_change_pre_change(hba); 398 381 break; 399 382 case POST_CHANGE: ··· 544 543 .name = "hi3660", 545 544 .init = ufs_hi3660_init, 546 545 .link_startup_notify = ufs_hisi_link_startup_notify, 546 + .negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode, 547 547 .pwr_change_notify = ufs_hisi_pwr_change_notify, 548 548 .suspend = ufs_hisi_suspend, 549 549 .resume = ufs_hisi_resume, ··· 554 552 .name = "hi3670", 555 553 .init = ufs_hi3670_init, 556 554 .link_startup_notify = ufs_hisi_link_startup_notify, 555 + .negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode, 557 556 .pwr_change_notify = ufs_hisi_pwr_change_notify, 558 557 .suspend = ufs_hisi_suspend, 559 558 .resume = ufs_hisi_resume,
+31 -20
drivers/ufs/host/ufs-mediatek.c
··· 1317 1317 return err; 1318 1318 } 1319 1319 1320 + static int ufs_mtk_negotiate_pwr_mode(struct ufs_hba *hba, 1321 + const struct ufs_pa_layer_attr *dev_max_params, 1322 + struct ufs_pa_layer_attr *dev_req_params) 1323 + { 1324 + struct ufs_host_params host_params; 1325 + 1326 + ufshcd_init_host_params(&host_params); 1327 + host_params.hs_rx_gear = UFS_HS_G5; 1328 + host_params.hs_tx_gear = UFS_HS_G5; 1329 + 1330 + if (dev_max_params->pwr_rx == SLOW_MODE || 1331 + dev_max_params->pwr_tx == SLOW_MODE) 1332 + host_params.desired_working_mode = UFS_PWM_MODE; 1333 + 1334 + return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); 1335 + } 1336 + 1320 1337 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, 1321 1338 struct ufs_pa_layer_attr *dev_req_params) 1322 1339 { ··· 1389 1372 } 1390 1373 1391 1374 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, 1392 - const struct ufs_pa_layer_attr *dev_max_params, 1393 1375 struct ufs_pa_layer_attr *dev_req_params) 1394 1376 { 1395 1377 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 1396 - struct ufs_host_params host_params; 1397 - int ret; 1398 - 1399 - ufshcd_init_host_params(&host_params); 1400 - host_params.hs_rx_gear = UFS_HS_G5; 1401 - host_params.hs_tx_gear = UFS_HS_G5; 1402 - 1403 - if (dev_max_params->pwr_rx == SLOW_MODE || 1404 - dev_max_params->pwr_tx == SLOW_MODE) 1405 - host_params.desired_working_mode = UFS_PWM_MODE; 1406 - 1407 - ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); 1408 - if (ret) { 1409 - pr_info("%s: failed to determine capabilities\n", 1410 - __func__); 1411 - } 1378 + int ret = 0; 1412 1379 1413 1380 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { 1414 1381 ufs_mtk_adjust_sync_length(hba); ··· 1504 1503 1505 1504 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, 1506 1505 enum ufs_notify_change_status stage, 1507 - const struct ufs_pa_layer_attr *dev_max_params, 1508 1506 struct ufs_pa_layer_attr *dev_req_params) 1509 1507 { 1510 1508 int ret = 0; ··· 1515 1515 reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); 1516 1516 ufs_mtk_auto_hibern8_disable(hba); 1517 1517 } 1518 - ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, 1519 - dev_req_params); 1518 + ret = ufs_mtk_pre_pwr_change(hba, dev_req_params); 1520 1519 break; 1521 1520 case POST_CHANGE: 1522 1521 if (ufshcd_is_auto_hibern8_supported(hba)) ··· 1959 1960 1960 1961 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) 1961 1962 { 1963 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 1964 + 1962 1965 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); 1963 1966 1964 1967 if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) { ··· 1971 1970 */ 1972 1971 hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM; 1973 1972 } 1973 + 1974 + /* 1975 + * Add a delay after enabling UFS5 VCC to ensure the voltage 1976 + * is stable before the refclk is enabled. 
1977 + */ 1978 + if (hba->dev_info.wspecversion >= 0x0500 && 1979 + (host->ip_ver == IP_VER_MT6995_A0 || 1980 + host->ip_ver == IP_VER_MT6995_B0)) 1981 + hba->quirks |= UFSHCD_QUIRK_VCC_ON_DELAY; 1974 1982 1975 1983 ufs_mtk_vreg_fix_vcc(hba); 1976 1984 ufs_mtk_vreg_fix_vccqx(hba); ··· 2328 2318 .setup_clocks = ufs_mtk_setup_clocks, 2329 2319 .hce_enable_notify = ufs_mtk_hce_enable_notify, 2330 2320 .link_startup_notify = ufs_mtk_link_startup_notify, 2321 + .negotiate_pwr_mode = ufs_mtk_negotiate_pwr_mode, 2331 2322 .pwr_change_notify = ufs_mtk_pwr_change_notify, 2332 2323 .apply_dev_quirks = ufs_mtk_apply_dev_quirks, 2333 2324 .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
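The wspecversion comparison above (and in ufshcd_is_tx_eq_supported() further down) works because the UFS device specification version is BCD-encoded, so plain integer comparisons against values such as 0x0500 select "UFS 5.0 and later". A small illustrative decoder, not part of the patch:

        /* Illustrative only: wspecversion is BCD, e.g. 0x0310 = UFS 3.1. */
        static void example_decode_wspecversion(u16 wspecversion)
        {
                pr_info("UFS spec version %x.%x%x\n", wspecversion >> 8,
                        (wspecversion >> 4) & 0xf, wspecversion & 0xf);
        }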
+4
drivers/ufs/host/ufs-mediatek.h
··· 220 220 IP_VER_MT6991_B0 = 0x10470000, 221 221 IP_VER_MT6993 = 0x10480000, 222 222 223 + /* UFSHCI 5.0 */ 224 + IP_VER_MT6995_A0 = 0x10490000, 225 + IP_VER_MT6995_B0 = 0x10500000, 226 + 223 227 IP_VER_NONE = 0xFFFFFFFF 224 228 }; 225 229
+580 -11
drivers/ufs/host/ufs-qcom.c
··· 966 966 } 967 967 } 968 968 969 - static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, 970 - enum ufs_notify_change_status status, 971 - const struct ufs_pa_layer_attr *dev_max_params, 972 - struct ufs_pa_layer_attr *dev_req_params) 969 + static int ufs_qcom_negotiate_pwr_mode(struct ufs_hba *hba, 970 + const struct ufs_pa_layer_attr *dev_max_params, 971 + struct ufs_pa_layer_attr *dev_req_params) 973 972 { 974 973 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 975 974 struct ufs_host_params *host_params = &host->host_params; 975 + 976 + return ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); 977 + } 978 + 979 + static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, 980 + enum ufs_notify_change_status status, 981 + struct ufs_pa_layer_attr *dev_req_params) 982 + { 983 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 976 984 int ret = 0; 977 985 978 986 if (!dev_req_params) { ··· 990 982 991 983 switch (status) { 992 984 case PRE_CHANGE: 993 - ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); 994 - if (ret) { 995 - dev_err(hba->dev, "%s: failed to determine capabilities\n", 996 - __func__); 997 - return ret; 998 - } 999 - 1000 985 /* 1001 986 * During UFS driver probe, always update the PHY gear to match the negotiated 1002 987 * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled, ··· 1069 1068 dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err); 1070 1069 } 1071 1070 1071 + /** 1072 + * ufs_qcom_double_t_adapt_l0l1l2l3 - Create a new adapt value, derived from 1073 + * the old adapt, that doubles the adaptation duration TADAPT_L0_L1_L2_L3. 1074 + * 1075 + * @old_adapt: Original ADAPT_L0_L1_L2_L3 capability 1076 + * 1077 + * ADAPT_length_L0_L1_L2_L3 formula from M-PHY spec: 1078 + * if (ADAPT_range_L0_L1_L2_L3 == COARSE) { 1079 + * ADAPT_length_L0_L1_L2_L3 = [0, 12] 1080 + * TADAPT_L0_L1_L2_L3 = 215 x 2^ADAPT_length_L0_L1_L2_L3 1081 + * } else if (ADAPT_range_L0_L1_L2_L3 == FINE) { 1082 + * ADAPT_length_L0_L1_L2_L3 = [0, 127] 1083 + * TADAPT_L0_L1_L2_L3 = 215 x (ADAPT_length_L0_L1_L2_L3 + 1) 1084 + * } 1085 + * 1086 + * To double the adaptation duration TADAPT_L0_L1_L2_L3: 1087 + * 1. If adapt range is COARSE (1'b1), new adapt = old adapt + 1. 1088 + * 2. If adapt range is FINE (1'b0): 1089 + * a) If old adapt length is < 64, (new adapt + 1) = 2 * (old adapt + 1). 1090 + * b) If old adapt length is >= 64, set new adapt to 0x88 using COARSE 1091 + * range, because the new adapt obtained from the equation in a) would exceed 127. 1092 + * 1093 + * Examples: 1094 + * ADAPT_range_L0_L1_L2_L3 | ADAPT_length_L0_L1_L2_L3 | TADAPT_L0_L1_L2_L3 (PAM-4 UI) 1095 + * 0 3 131072 1096 + * 0 7 262144 1097 + * 0 63 2097152 1098 + * 0 64 2129920 1099 + * 0 127 4194304 1100 + * 1 8 8388608 1101 + * 1 9 16777216 1102 + * 1 10 33554432 1103 + * 1 11 67108864 1104 + * 1 12 134217728 1105 + * 1106 + * Return: new adapt.
1107 + */ 1108 + static u32 ufs_qcom_double_t_adapt_l0l1l2l3(u32 old_adapt) 1109 + { 1110 + u32 adapt_length = old_adapt & ADAPT_LENGTH_MASK; 1111 + u32 new_adapt; 1112 + 1113 + if (IS_ADAPT_RANGE_COARSE(old_adapt)) { 1114 + new_adapt = (adapt_length + 1) | ADAPT_RANGE_BIT; 1115 + } else { 1116 + if (adapt_length < 64) 1117 + new_adapt = (adapt_length << 1) + 1; 1118 + else 1119 + /* 1120 + * 0x88 is the coarse Adapt value whose adaptation duration 1121 + * is twice that of the largest fine Adapt value (0x7F) 1122 + */ 1123 + new_adapt = 0x88; 1124 + } 1125 + 1126 + return new_adapt; 1127 + } 1128 + 1129 + static void ufs_qcom_limit_max_gear(struct ufs_hba *hba, 1130 + enum ufs_hs_gear_tag gear) 1131 + { 1132 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1133 + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 1134 + struct ufs_host_params *host_params = &host->host_params; 1135 + 1136 + host_params->hs_tx_gear = gear; 1137 + host_params->hs_rx_gear = gear; 1138 + pwr_info->gear_tx = gear; 1139 + pwr_info->gear_rx = gear; 1140 + 1141 + dev_warn(hba->dev, "Limited max gear of host and device to HS-G%d\n", gear); 1142 + } 1143 + 1144 + static void ufs_qcom_fixup_tx_adapt_l0l1l2l3(struct ufs_hba *hba) 1145 + { 1146 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1147 + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 1148 + struct ufs_host_params *host_params = &host->host_params; 1149 + u32 old_adapt, new_adapt, actual_adapt; 1150 + bool limit_speed = false; 1151 + int err; 1152 + 1153 + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 || 1154 + host_params->hs_tx_gear <= UFS_HS_G5 || 1155 + pwr_info->gear_tx <= UFS_HS_G5) 1156 + return; 1157 + 1158 + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), &old_adapt); 1159 + if (err) 1160 + goto out; 1161 + 1162 + if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) { 1163 + dev_err(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 value (0x%x) exceeds MAX\n", 1164 + old_adapt); 1165 + err = -ERANGE; 1166 + goto out; 1167 + } 1168 + 1169 + new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt); 1170 + dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptInitialL0L1L2L3 = 0x%x, new value = 0x%x\n", 1171 + old_adapt, new_adapt); 1172 + 1173 + /* 1174 + * 0x8C is the max possible value allowed by UniPro v3.0 spec; some HWs 1175 + * can accept 0x8D but some cannot.
1176 + */ 1177 + if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX || 1178 + (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) { 1179 + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), 1180 + new_adapt); 1181 + if (err) 1182 + goto out; 1183 + 1184 + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), 1185 + &actual_adapt); 1186 + if (err) 1187 + goto out; 1188 + 1189 + if (actual_adapt != new_adapt) { 1190 + limit_speed = true; 1191 + dev_warn(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 0x%x, expect 0x%x\n", 1192 + actual_adapt, new_adapt); 1193 + } 1194 + } else { 1195 + limit_speed = true; 1196 + dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptInitialL0L1L2L3 (0x%x) is too large!\n", 1197 + new_adapt); 1198 + } 1199 + 1200 + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), &old_adapt); 1201 + if (err) 1202 + goto out; 1203 + 1204 + if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) { 1205 + dev_err(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 value (0x%x) exceeds MAX\n", 1206 + old_adapt); 1207 + err = -ERANGE; 1208 + goto out; 1209 + } 1210 + 1211 + new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt); 1212 + dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptRefreshL0L1L2L3 = 0x%x, new value = 0x%x\n", 1213 + old_adapt, new_adapt); 1214 + 1215 + /* 1216 + * 0x8C is the max possible value allowed by UniPro v3.0 spec; some HWs 1217 + * can accept 0x8D but some cannot. 1218 + */ 1219 + if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX || 1220 + (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) { 1221 + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), 1222 + new_adapt); 1223 + if (err) 1224 + goto out; 1225 + 1226 + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), 1227 + &actual_adapt); 1228 + if (err) 1229 + goto out; 1230 + 1231 + if (actual_adapt != new_adapt) { 1232 + limit_speed = true; 1233 + dev_warn(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 0x%x, expect 0x%x\n", 1234 + actual_adapt, new_adapt); 1235 + } 1236 + } else { 1237 + limit_speed = true; 1238 + dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptRefreshL0L1L2L3 (0x%x) is too large!\n", 1239 + new_adapt); 1240 + } 1241 + 1242 + out: 1243 + if (limit_speed || err) 1244 + ufs_qcom_limit_max_gear(hba, UFS_HS_G5); 1245 + } 1246 + 1072 1247 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) 1073 1248 { 1074 1249 int err = 0; 1250 + 1251 + ufs_qcom_fixup_tx_adapt_l0l1l2l3(hba); 1075 1252 1076 1253 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) 1077 1254 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); ··· 1384 1205 1385 1206 static void ufs_qcom_set_caps(struct ufs_hba *hba) 1386 1207 { 1208 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1209 + 1387 1210 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; 1388 1211 hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; 1389 1212 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; 1390 1213 hba->caps |= UFSHCD_CAP_WB_EN; 1391 1214 hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; 1392 1215 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; 1216 + 1217 + if (host->hw_ver.major >= 0x7) 1218 + hba->caps |= UFSHCD_CAP_TX_EQUALIZATION; 1393 1219 1394 1220 ufs_qcom_set_host_caps(hba); 1395 1221 } ··· 2510 2326 return min_t(u32, gear, hba->max_pwr_info.info.gear_rx); 2511 2327 } 2512 2328 2329 + static int ufs_qcom_host_eom_config(struct ufs_hba *hba, int lane, 2330 + const struct ufs_eom_coord *eom_coord,
2331 + u32 target_test_count) 2332 + { 2333 + enum ufs_eom_eye_mask eye_mask = eom_coord->eye_mask; 2334 + int v_step = eom_coord->v_step; 2335 + int t_step = eom_coord->t_step; 2336 + u32 volt_step, timing_step; 2337 + int ret; 2338 + 2339 + if (abs(v_step) > UFS_QCOM_EOM_VOLTAGE_STEPS_MAX) { 2340 + dev_err(hba->dev, "Invalid EOM Voltage Step: %d\n", v_step); 2341 + return -ERANGE; 2342 + } 2343 + 2344 + if (abs(t_step) > UFS_QCOM_EOM_TIMING_STEPS_MAX) { 2345 + dev_err(hba->dev, "Invalid EOM Timing Step: %d\n", t_step); 2346 + return -ERANGE; 2347 + } 2348 + 2349 + if (v_step < 0) 2350 + volt_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-v_step); 2351 + else 2352 + volt_step = (u32)v_step; 2353 + 2354 + if (t_step < 0) 2355 + timing_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-t_step); 2356 + else 2357 + timing_step = (u32)t_step; 2358 + 2359 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE, 2360 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2361 + BIT(eye_mask) | RX_EYEMON_EXTENDED_VRANGE_BIT); 2362 + if (ret) { 2363 + dev_err(hba->dev, "Failed to enable Host EOM on Lane %d: %d\n", 2364 + lane, ret); 2365 + return ret; 2366 + } 2367 + 2368 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TIMING_STEPS, 2369 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2370 + timing_step); 2371 + if (ret) { 2372 + dev_err(hba->dev, "Failed to set Host EOM timing step on Lane %d: %d\n", 2373 + lane, ret); 2374 + return ret; 2375 + } 2376 + 2377 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_VOLTAGE_STEPS, 2378 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2379 + volt_step); 2380 + if (ret) { 2381 + dev_err(hba->dev, "Failed to set Host EOM voltage step on Lane %d: %d\n", 2382 + lane, ret); 2383 + return ret; 2384 + } 2385 + 2386 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TARGET_TEST_COUNT, 2387 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2388 + target_test_count); 2389 + if (ret) 2390 + dev_err(hba->dev, "Failed to set Host EOM target test count on Lane %d: %d\n", 2391 + lane, ret); 2392 + 2393 + return ret; 2394 + } 2395 + 2396 + static int ufs_qcom_host_eom_may_stop(struct ufs_hba *hba, int lane, 2397 + u32 target_test_count, u32 *err_count) 2398 + { 2399 + u32 start, tested_count, error_count; 2400 + int ret; 2401 + 2402 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_START, 2403 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2404 + &start); 2405 + if (ret) { 2406 + dev_err(hba->dev, "Failed to get Host EOM start status on Lane %d: %d\n", 2407 + lane, ret); 2408 + return ret; 2409 + } 2410 + 2411 + if (start & 0x1) 2412 + return -EAGAIN; 2413 + 2414 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TESTED_COUNT, 2415 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2416 + &tested_count); 2417 + if (ret) { 2418 + dev_err(hba->dev, "Failed to get Host EOM tested count on Lane %d: %d\n", 2419 + lane, ret); 2420 + return ret; 2421 + } 2422 + 2423 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ERROR_COUNT, 2424 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2425 + &error_count); 2426 + if (ret) { 2427 + dev_err(hba->dev, "Failed to get Host EOM error count on Lane %d: %d\n", 2428 + lane, ret); 2429 + return ret; 2430 + } 2431 + 2432 + /* EOM can stop */ 2433 + if ((tested_count >= target_test_count - 3) || error_count > 0) { 2434 + *err_count = error_count; 2435 + 2436 + /* Disable EOM */ 2437 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE, 2438 + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), 2439 + 0x0); 2440 + if (ret) { 2441 + dev_err(hba->dev, "Failed to disable Host EOM on Lane %d: %d\n", 2442 
+ lane, ret); 2443 + return ret; 2444 + } 2445 + } else { 2446 + return -EAGAIN; 2447 + } 2448 + 2449 + return 0; 2450 + } 2451 + 2452 + static int ufs_qcom_host_eom_scan(struct ufs_hba *hba, int num_lanes, 2453 + const struct ufs_eom_coord *eom_coord, 2454 + u32 target_test_count, u32 *err_count) 2455 + { 2456 + bool eom_stopped[PA_MAXDATALANES] = { 0 }; 2457 + int lane, ret; 2458 + u32 setting; 2459 + 2460 + if (!err_count || !eom_coord) 2461 + return -EINVAL; 2462 + 2463 + if (target_test_count < UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN) { 2464 + dev_err(hba->dev, "Target test count (%u) too small for Host EOM\n", 2465 + target_test_count); 2466 + return -ERANGE; 2467 + } 2468 + 2469 + for (lane = 0; lane < num_lanes; lane++) { 2470 + ret = ufs_qcom_host_eom_config(hba, lane, eom_coord, 2471 + target_test_count); 2472 + if (ret) { 2473 + dev_err(hba->dev, "Failed to config Host RX EOM: %d\n", ret); 2474 + return ret; 2475 + } 2476 + } 2477 + 2478 + /* 2479 + * Trigger a PACP_PWR_req to kick start EOM, but not to really change 2480 + * the Power Mode. 2481 + */ 2482 + ret = ufshcd_uic_change_pwr_mode(hba, FAST_MODE << 4 | FAST_MODE); 2483 + if (ret) { 2484 + dev_err(hba->dev, "Failed to change power mode to kick start Host EOM: %d\n", 2485 + ret); 2486 + return ret; 2487 + } 2488 + 2489 + more_burst: 2490 + /* Create burst on Host RX Lane. */ 2491 + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &setting); 2492 + 2493 + for (lane = 0; lane < num_lanes; lane++) { 2494 + if (eom_stopped[lane]) 2495 + continue; 2496 + 2497 + ret = ufs_qcom_host_eom_may_stop(hba, lane, target_test_count, 2498 + &err_count[lane]); 2499 + if (!ret) { 2500 + eom_stopped[lane] = true; 2501 + } else if (ret == -EAGAIN) { 2502 + /* Need more burst to exercise EOM */ 2503 + goto more_burst; 2504 + } else { 2505 + dev_err(hba->dev, "Failed to stop Host EOM: %d\n", ret); 2506 + return ret; 2507 + } 2508 + 2509 + dev_dbg(hba->dev, "Host RX Lane %d EOM, v_step %d, t_step %d, error count %u\n", 2510 + lane, eom_coord->v_step, eom_coord->t_step, 2511 + err_count[lane]); 2512 + } 2513 + 2514 + return 0; 2515 + } 2516 + 2517 + static int ufs_qcom_host_sw_rx_fom(struct ufs_hba *hba, int num_lanes, u32 *fom) 2518 + { 2519 + const struct ufs_eom_coord *eom_coord = sw_rx_fom_eom_coords_g6; 2520 + u32 eom_err_count[PA_MAXDATALANES] = { 0 }; 2521 + u32 curr_ahit; 2522 + int lane, i, ret; 2523 + 2524 + if (!fom) 2525 + return -EINVAL; 2526 + 2527 + /* Stop the auto hibernate idle timer */ 2528 + curr_ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); 2529 + if (curr_ahit) 2530 + ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); 2531 + 2532 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT); 2533 + if (ret) { 2534 + dev_err(hba->dev, "Failed to select NO_ADAPT before starting Host EOM: %d\n", ret); 2535 + goto out; 2536 + } 2537 + 2538 + for (i = 0; i < SW_RX_FOM_EOM_COORDS; i++, eom_coord++) { 2539 + ret = ufs_qcom_host_eom_scan(hba, num_lanes, eom_coord, 2540 + UFS_QCOM_EOM_TARGET_TEST_COUNT_G6, 2541 + eom_err_count); 2542 + if (ret) { 2543 + dev_err(hba->dev, "Failed to run Host EOM scan: %d\n", ret); 2544 + break; 2545 + } 2546 + 2547 + for (lane = 0; lane < num_lanes; lane++) { 2548 + /* Bad coordinates have no weights */ 2549 + if (eom_err_count[lane]) 2550 + continue; 2551 + fom[lane] += SW_RX_FOM_EOM_COORDS_WEIGHT; 2552 + } 2553 + } 2554 + 2555 + out: 2556 + /* Restore the auto hibernate idle timer */ 2557 + if (curr_ahit) 2558 + ufshcd_writel(hba, curr_ahit,
REG_AUTO_HIBERNATE_IDLE_TIMER); 2559 + 2560 + return ret; 2561 + } 2562 + 2563 + static int ufs_qcom_get_rx_fom(struct ufs_hba *hba, 2564 + struct ufs_pa_layer_attr *pwr_mode, 2565 + struct tx_eqtr_iter *h_iter, 2566 + struct tx_eqtr_iter *d_iter) 2567 + { 2568 + struct ufshcd_tx_eq_params *params __free(kfree) = 2569 + kzalloc(sizeof(*params), GFP_KERNEL); 2570 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 2571 + struct ufs_pa_layer_attr old_pwr_info; 2572 + u32 fom[PA_MAXDATALANES] = { 0 }; 2573 + u32 gear = pwr_mode->gear_tx; 2574 + u32 rate = pwr_mode->hs_rate; 2575 + int lane, ret; 2576 + 2577 + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 || 2578 + gear <= UFS_HS_G5 || !d_iter || !d_iter->is_updated) 2579 + return 0; 2580 + 2581 + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) 2582 + return -ERANGE; 2583 + 2584 + if (!params) 2585 + return -ENOMEM; 2586 + 2587 + memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr)); 2588 + 2589 + memcpy(params, &hba->tx_eq_params[gear - 1], sizeof(struct ufshcd_tx_eq_params)); 2590 + for (lane = 0; lane < pwr_mode->lane_rx; lane++) { 2591 + params->device[lane].preshoot = d_iter->preshoot; 2592 + params->device[lane].deemphasis = d_iter->deemphasis; 2593 + } 2594 + 2595 + /* Use TX EQTR settings as Device's TX Equalization settings. */ 2596 + ret = ufshcd_apply_tx_eq_settings(hba, params, gear); 2597 + if (ret) { 2598 + dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n", 2599 + __func__, gear, ret); 2600 + return ret; 2601 + } 2602 + 2603 + /* Force PMC to target HS Gear to use new TX Equalization settings. */ 2604 + ret = ufshcd_change_power_mode(hba, pwr_mode, UFSHCD_PMC_POLICY_FORCE); 2605 + if (ret) { 2606 + dev_err(hba->dev, "%s: Failed to change power mode to HS-G%u, Rate-%s: %d\n", 2607 + __func__, gear, ufs_hs_rate_to_str(rate), ret); 2608 + return ret; 2609 + } 2610 + 2611 + ret = ufs_qcom_host_sw_rx_fom(hba, pwr_mode->lane_rx, fom); 2612 + if (ret) { 2613 + dev_err(hba->dev, "Failed to get SW FOM of TX (PreShoot: %u, DeEmphasis: %u): %d\n", 2614 + d_iter->preshoot, d_iter->deemphasis, ret); 2615 + return ret; 2616 + } 2617 + 2618 + /* Restore Device's TX Equalization settings. */ 2619 + ret = ufshcd_apply_tx_eq_settings(hba, &hba->tx_eq_params[gear - 1], gear); 2620 + if (ret) { 2621 + dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n", 2622 + __func__, gear, ret); 2623 + return ret; 2624 + } 2625 + 2626 + /* Restore Power Mode. 
*/ 2627 + ret = ufshcd_change_power_mode(hba, &old_pwr_info, UFSHCD_PMC_POLICY_FORCE); 2628 + if (ret) { 2629 + dev_err(hba->dev, "%s: Failed to restore power mode to HS-G%u: %d\n", 2630 + __func__, old_pwr_info.gear_tx, ret); 2631 + return ret; 2632 + } 2633 + 2634 + for (lane = 0; lane < pwr_mode->lane_rx; lane++) 2635 + d_iter->fom[lane] = fom[lane]; 2636 + 2637 + return 0; 2638 + } 2639 + 2640 + static int ufs_qcom_apply_tx_eqtr_settings(struct ufs_hba *hba, 2641 + struct ufs_pa_layer_attr *pwr_mode, 2642 + struct tx_eqtr_iter *h_iter, 2643 + struct tx_eqtr_iter *d_iter) 2644 + { 2645 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 2646 + u32 setting = 0; 2647 + int lane; 2648 + 2649 + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1) 2650 + return 0; 2651 + 2652 + for (lane = 0; lane < pwr_mode->lane_tx; lane++) { 2653 + setting |= TX_HS_PRESHOOT_BITS(lane, h_iter->preshoot); 2654 + setting |= TX_HS_DEEMPHASIS_BITS(lane, h_iter->deemphasis); 2655 + } 2656 + 2657 + return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), setting); 2658 + } 2659 + 2660 + static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba, 2661 + enum ufs_notify_change_status status, 2662 + struct ufs_pa_layer_attr *pwr_mode) 2663 + { 2664 + struct ufs_qcom_host *host = ufshcd_get_variant(hba); 2665 + struct ufs_pa_layer_attr pwr_mode_hs_g1 = { 2666 + .gear_rx = UFS_HS_G1, 2667 + .gear_tx = UFS_HS_G1, 2668 + .lane_rx = pwr_mode->lane_rx, 2669 + .lane_tx = pwr_mode->lane_tx, 2670 + .pwr_rx = FAST_MODE, 2671 + .pwr_tx = FAST_MODE, 2672 + .hs_rate = pwr_mode->hs_rate, 2673 + }; 2674 + u32 gear = pwr_mode->gear_tx; 2675 + u32 rate = pwr_mode->hs_rate; 2676 + int ret; 2677 + 2678 + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1) 2679 + return 0; 2680 + 2681 + if (status == PRE_CHANGE) { 2682 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), 2683 + &host->saved_tx_eq_g1_setting); 2684 + if (ret) 2685 + return ret; 2686 + 2687 + /* PMC to target HS Gear. */ 2688 + ret = ufshcd_change_power_mode(hba, pwr_mode, 2689 + UFSHCD_PMC_POLICY_DONT_FORCE); 2690 + if (ret) 2691 + dev_err(hba->dev, "%s: Failed to PMC to target HS-G%u, Rate-%s: %d\n", 2692 + __func__, gear, ufs_hs_rate_to_str(rate), ret); 2693 + } else { 2694 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), 2695 + host->saved_tx_eq_g1_setting); 2696 + if (ret) 2697 + return ret; 2698 + 2699 + /* PMC back to HS-G1. 
*/ 2700 + ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1, 2701 + UFSHCD_PMC_POLICY_DONT_FORCE); 2702 + if (ret) 2703 + dev_err(hba->dev, "%s: Failed to PMC to HS-G1, Rate-%s: %d\n", 2704 + __func__, ufs_hs_rate_to_str(rate), ret); 2705 + } 2706 + 2707 + return ret; 2708 + } 2709 + 2513 2710 /* 2514 2711 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations 2515 2712 * ··· 2906 2341 .setup_clocks = ufs_qcom_setup_clocks, 2907 2342 .hce_enable_notify = ufs_qcom_hce_enable_notify, 2908 2343 .link_startup_notify = ufs_qcom_link_startup_notify, 2344 + .negotiate_pwr_mode = ufs_qcom_negotiate_pwr_mode, 2909 2345 .pwr_change_notify = ufs_qcom_pwr_change_notify, 2910 2346 .apply_dev_quirks = ufs_qcom_apply_dev_quirks, 2911 2347 .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, ··· 2921 2355 .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs, 2922 2356 .config_esi = ufs_qcom_config_esi, 2923 2357 .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed, 2358 + .get_rx_fom = ufs_qcom_get_rx_fom, 2359 + .apply_tx_eqtr_settings = ufs_qcom_apply_tx_eqtr_settings, 2360 + .tx_eqtr_notify = ufs_qcom_tx_eqtr_notify, 2924 2361 }; 2925 2362 2926 2363 static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = {
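The doubling rule documented in ufs_qcom_double_t_adapt_l0l1l2l3() above can be checked against the table in its kernel-doc. A standalone userspace sketch (illustrative, not part of the driver) that mirrors the rule and asserts the documented transitions:

        /* Mirrors the driver's doubling rule for the ADAPT attribute:
         * bit 7 selects COARSE range, bits 6:0 carry the length. */
        #include <assert.h>
        #include <stdint.h>

        #define ADAPT_LENGTH_MASK       0x7F
        #define ADAPT_RANGE_BIT         (1U << 7)

        static uint32_t double_t_adapt(uint32_t old_adapt)
        {
                uint32_t len = old_adapt & ADAPT_LENGTH_MASK;

                if (old_adapt & ADAPT_RANGE_BIT)        /* COARSE: +1 doubles T */
                        return (len + 1) | ADAPT_RANGE_BIT;
                if (len < 64)                           /* FINE: new+1 = 2*(old+1) */
                        return (len << 1) + 1;
                return 0x88;    /* fall back to COARSE, 2x the largest FINE value */
        }

        int main(void)
        {
                assert(double_t_adapt(0x03) == 0x07);   /* FINE 3  -> FINE 7   */
                assert(double_t_adapt(0x3F) == 0x7F);   /* FINE 63 -> FINE 127 */
                assert(double_t_adapt(0x40) == 0x88);   /* FINE 64 -> COARSE 8 */
                assert(double_t_adapt(0x88) == 0x89);   /* COARSE 8 -> COARSE 9 */
                return 0;
        }

Each assertion corresponds to a pair of rows in the kernel-doc table, e.g. FINE length 3 (131072 UI) doubled gives FINE length 7 (262144 UI), and COARSE length 8 (8388608 UI) doubled gives COARSE length 9 (16777216 UI).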
+42
drivers/ufs/host/ufs-qcom.h
··· 33 33 #define DL_VS_CLK_CFG_MASK GENMASK(9, 0) 34 34 #define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9) 35 35 36 + #define UFS_QCOM_EOM_VOLTAGE_STEPS_MAX 127 37 + #define UFS_QCOM_EOM_TIMING_STEPS_MAX 63 38 + #define UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN 8 39 + #define UFS_QCOM_EOM_TARGET_TEST_COUNT_G6 0x3F 40 + 41 + #define SW_RX_FOM_EOM_COORDS 23 42 + #define SW_RX_FOM_EOM_COORDS_WEIGHT (127 / SW_RX_FOM_EOM_COORDS) 43 + 44 + struct ufs_eom_coord { 45 + int t_step; 46 + int v_step; 47 + u8 eye_mask; 48 + }; 49 + 50 + static const struct ufs_eom_coord sw_rx_fom_eom_coords_g6[SW_RX_FOM_EOM_COORDS] = { 51 + [0] = { -2, -15, UFS_EOM_EYE_MASK_M }, 52 + [1] = { 0, -15, UFS_EOM_EYE_MASK_M }, 53 + [2] = { 2, -15, UFS_EOM_EYE_MASK_M }, 54 + [3] = { -4, -10, UFS_EOM_EYE_MASK_M }, 55 + [4] = { -2, -10, UFS_EOM_EYE_MASK_M }, 56 + [5] = { 0, -10, UFS_EOM_EYE_MASK_M }, 57 + [6] = { 2, -10, UFS_EOM_EYE_MASK_M }, 58 + [7] = { 4, -10, UFS_EOM_EYE_MASK_M }, 59 + [8] = { -6, 0, UFS_EOM_EYE_MASK_M }, 60 + [9] = { -4, 0, UFS_EOM_EYE_MASK_M }, 61 + [10] = { -2, 0, UFS_EOM_EYE_MASK_M }, 62 + [11] = { 0, 0, UFS_EOM_EYE_MASK_M }, 63 + [12] = { 2, 0, UFS_EOM_EYE_MASK_M }, 64 + [13] = { 4, 0, UFS_EOM_EYE_MASK_M }, 65 + [14] = { 6, 0, UFS_EOM_EYE_MASK_M }, 66 + [15] = { -4, 10, UFS_EOM_EYE_MASK_M }, 67 + [16] = { -2, 10, UFS_EOM_EYE_MASK_M }, 68 + [17] = { 0, 10, UFS_EOM_EYE_MASK_M }, 69 + [18] = { 2, 10, UFS_EOM_EYE_MASK_M }, 70 + [19] = { 4, 10, UFS_EOM_EYE_MASK_M }, 71 + [20] = { -2, 15, UFS_EOM_EYE_MASK_M }, 72 + [21] = { 0, 15, UFS_EOM_EYE_MASK_M }, 73 + [22] = { 2, 15, UFS_EOM_EYE_MASK_M }, 74 + }; 75 + 36 76 /* Qualcomm MCQ Configuration */ 37 77 #define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 in hex */ 38 78 #define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */ ··· 348 308 u32 phy_gear; 349 309 350 310 bool esi_enabled; 311 + 312 + u32 saved_tx_eq_g1_setting; 351 313 }; 352 314 353 315 struct ufs_qcom_drvdata {
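The scan table above supplies 23 eye-monitor coordinates, and ufs_qcom_host_sw_rx_fom() adds SW_RX_FOM_EOM_COORDS_WEIGHT for every coordinate that passes. The arithmetic keeps the software figure of merit inside the 7-bit RX_FOM range: 127 / 23 = 5 by integer division, so an all-pass scan accumulates 23 x 5 = 115 <= 0x7F. An illustrative compile-time check (not in the patch) could express that invariant:

        /* Illustrative: the maximum software FOM must fit RX_FOM's 7-bit field. */
        static_assert(SW_RX_FOM_EOM_COORDS * SW_RX_FOM_EOM_COORDS_WEIGHT <= 0x7F,
                      "software FOM must fit the 7-bit RX_FOM value field");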
-1
drivers/ufs/host/ufs-rockchip.c
··· 6 6 */ 7 7 8 8 #include <linux/clk.h> 9 - #include <linux/gpio.h> 10 9 #include <linux/gpio/consumer.h> 11 10 #include <linux/mfd/syscon.h> 12 11 #include <linux/of.h>
-3
drivers/ufs/host/ufs-sprd.c
··· 161 161 162 162 static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba, 163 163 enum ufs_notify_change_status status, 164 - const struct ufs_pa_layer_attr *dev_max_params, 165 164 struct ufs_pa_layer_attr *dev_req_params) 166 165 { 167 166 struct ufs_sprd_host *host = ufshcd_get_variant(hba); 168 167 169 168 if (status == PRE_CHANGE) { 170 - memcpy(dev_req_params, dev_max_params, 171 - sizeof(struct ufs_pa_layer_attr)); 172 169 if (host->unipro_ver >= UFS_UNIPRO_VER_1_8) 173 170 ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx, 174 171 PA_INITIAL_ADAPT);
+4 -4
drivers/ufs/host/ufshcd-pci.c
··· 145 145 146 146 pwr_info.lane_rx = lanes; 147 147 pwr_info.lane_tx = lanes; 148 - ret = ufshcd_config_pwr_mode(hba, &pwr_info); 148 + ret = ufshcd_change_power_mode(hba, &pwr_info, 149 + UFSHCD_PMC_POLICY_DONT_FORCE); 149 150 if (ret) 150 151 dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n", 151 152 __func__, lanes, ret); ··· 155 154 156 155 static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba, 157 156 enum ufs_notify_change_status status, 158 - const struct ufs_pa_layer_attr *dev_max_params, 159 157 struct ufs_pa_layer_attr *dev_req_params) 160 158 { 161 159 int err = 0; 162 160 163 161 switch (status) { 164 162 case PRE_CHANGE: 165 - if (ufshcd_is_hs_mode(dev_max_params) && 163 + if (ufshcd_is_hs_mode(dev_req_params) && 166 164 (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2)) 167 165 ufs_intel_set_lanes(hba, 2); 168 - memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params)); 169 166 break; 170 167 case POST_CHANGE: 171 168 if (ufshcd_is_hs_mode(dev_req_params)) { ··· 694 695 { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 695 696 { PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 696 697 { PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 698 + { PCI_VDEVICE(INTEL, 0xD335), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, 697 699 { } /* terminate list */ 698 700 }; 699 701
+1
drivers/usb/gadget/function/f_tcm.c
··· 2030 2030 .tfc_wwn_attrs = usbg_wwn_attrs, 2031 2031 .tfc_tpg_base_attrs = usbg_base_attrs, 2032 2032 2033 + .default_compl_type = TARGET_QUEUE_COMPL, 2033 2034 .default_submit_type = TARGET_DIRECT_SUBMIT, 2034 2035 .direct_submit_supp = 1, 2035 2036 };
+2
drivers/vhost/scsi.c
··· 2950 2950 .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, 2951 2951 .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, 2952 2952 2953 + .default_compl_type = TARGET_QUEUE_COMPL, 2954 + .direct_compl_supp = 1, 2953 2955 .default_submit_type = TARGET_QUEUE_SUBMIT, 2954 2956 .direct_submit_supp = 1, 2955 2957 };
+1
drivers/xen/xen-scsiback.c
··· 1832 1832 .tfc_tpg_base_attrs = scsiback_tpg_attrs, 1833 1833 .tfc_tpg_param_attrs = scsiback_param_attrs, 1834 1834 1835 + .default_compl_type = TARGET_QUEUE_COMPL, 1835 1836 .default_submit_type = TARGET_DIRECT_SUBMIT, 1836 1837 .direct_submit_supp = 1, 1837 1838 };
-4
include/scsi/libsas.h
··· 62 62 63 63 /* ---------- Expander Devices ---------- */ 64 64 65 - #define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj) 66 - #define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\ 67 - attr) 68 - 69 65 enum routing_attribute { 70 66 DIRECT_ROUTING, 71 67 SUBTRACTIVE_ROUTING,
+1
include/scsi/scsi_device.h
··· 571 571 extern void sdev_disable_disk_events(struct scsi_device *sdev); 572 572 extern void sdev_enable_disk_events(struct scsi_device *sdev); 573 573 extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t); 574 + extern int scsi_vpd_lun_serial(struct scsi_device *, char *, size_t); 574 575 extern int scsi_vpd_tpg_id(struct scsi_device *, int *); 575 576 576 577 #ifdef CONFIG_PM
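scsi_vpd_lun_serial() is new here; by analogy with scsi_vpd_lun_id() directly above it, it copies the device's unit serial number into a caller-supplied buffer. A hedged usage sketch (hypothetical caller; the "length or negative errno" return convention is assumed from the sibling helper, not verified):

        static int example_read_serial(struct scsi_device *sdev, char *buf,
                                       size_t len)
        {
                int ret = scsi_vpd_lun_serial(sdev, buf, len);

                if (ret < 0)
                        dev_dbg(&sdev->sdev_gendev,
                                "no unit serial number: %d\n", ret);
                return ret;
        }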
+4 -3
include/scsi/scsi_host.h
··· 660 660 */ 661 661 unsigned nr_hw_queues; 662 662 unsigned nr_maps; 663 + 664 + /* Asynchronous scan in progress */ 665 + bool async_scan __guarded_by(&scan_mutex); 666 + 663 667 unsigned active_mode:2; 664 668 665 669 /* ··· 681 677 682 678 /* Task mgmt function in progress */ 683 679 unsigned tmf_in_progress:1; 684 - 685 - /* Asynchronous scan in progress */ 686 - unsigned async_scan:1; 687 680 688 681 /* Don't resume host in EH */ 689 682 unsigned eh_noresume:1;
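The hunk above converts async_scan from a bitfield to a bool so it can carry the __guarded_by(&scan_mutex) annotation, letting static analysis flag accesses made without the lock. A hedged sketch of a conforming accessor (not from the patch), using the scoped guard helper from <linux/cleanup.h>:

        static bool example_scan_in_progress(struct Scsi_Host *shost)
        {
                /* Reads of async_scan must now provably hold scan_mutex. */
                guard(mutex)(&shost->scan_mutex);
                return shost->async_scan;
        }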
+10
include/target/target_core_base.h
··· 111 111 /* Peripheral Device Text Identification Information */ 112 112 #define PD_TEXT_ID_INFO_LEN 256 113 113 114 + enum target_compl_type { 115 + /* Use the fabric driver's default completion type */ 116 + TARGET_FABRIC_DEFAULT_COMPL, 117 + /* Complete from the backend calling context */ 118 + TARGET_DIRECT_COMPL, 119 + /* Defer completion to the LIO workqueue */ 120 + TARGET_QUEUE_COMPL, 121 + }; 122 + 114 123 enum target_submit_type { 115 124 /* Use the fabric driver's default submission type */ 116 125 TARGET_FABRIC_DEFAULT_SUBMIT, ··· 750 741 u32 atomic_granularity; 751 742 u32 atomic_max_with_boundary; 752 743 u32 atomic_max_boundary; 744 + u8 complete_type; 753 745 u8 submit_type; 754 746 struct se_device *da_dev; 755 747 struct config_group da_group;
+9 -3
include/target/target_core_fabric.h
··· 119 119 */ 120 120 unsigned int write_pending_must_be_called:1; 121 121 /* 122 + * Set this if the driver does not require calling queue_data_in, 123 + * queue_status and check_stop_free from a worker thread when 124 + * completing successful commands. 125 + */ 126 + unsigned int direct_compl_supp:1; 127 + /* 122 128 * Set this if the driver supports submitting commands to the backend 123 129 * from target_submit/target_submit_cmd. 124 130 */ 125 131 unsigned int direct_submit_supp:1; 126 - /* 127 - * Set this to a target_submit_type value. 128 - */ 132 + /* Set this to a target_submit_type value. */ 129 133 u8 default_submit_type; 134 + /* Set this to a target_compl_type value. */ 135 + u8 default_compl_type; 130 136 }; 131 137 132 138 int target_register_template(const struct target_core_fabric_ops *fo);
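Taken together with the three fabric drivers above (f_tcm, vhost-scsi, xen-scsiback), a fabric template now declares its completion behaviour alongside its submission behaviour. An illustrative fragment for a hypothetical fabric that supports, but does not default to, direct completion (mandatory callbacks omitted; the vhost-scsi hunk follows the same shape):

        static const struct target_core_fabric_ops example_fabric_ops = {
                .module                 = THIS_MODULE,
                .fabric_name            = "example",
                /* ... mandatory callbacks omitted for brevity ... */
                .default_compl_type     = TARGET_QUEUE_COMPL,
                .direct_compl_supp      = 1,
                .default_submit_type    = TARGET_QUEUE_SUBMIT,
                .direct_submit_supp     = 1,
        };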
+1 -1
include/uapi/scsi/fc/fc_els.h
··· 1030 1030 */ 1031 1031 __be32 event_count; /* minimum number of event 1032 1032 * occurrences during the event 1033 - * threshold to caause the LI event 1033 + * threshold to cause the LI event 1034 1034 */ 1035 1035 __be32 pname_count; /* number of portname_list elements */ 1036 1036 __be64 pname_list[]; /* list of N_Port_Names accessible
+179 -10
include/ufs/ufshcd.h
··· 287 287 struct ufs_pa_layer_attr info; 288 288 }; 289 289 290 + #define UFS_MAX_LANES 2 291 + 292 + /** 293 + * struct tx_eqtr_iter - TX Equalization Training iterator 294 + * @preshoot_bitmap: PreShoot bitmap 295 + * @deemphasis_bitmap: DeEmphasis bitmap 296 + * @preshoot: PreShoot value 297 + * @deemphasis: DeEmphasis value 298 + * @fom: Figure-of-Merit read out from RX_FOM 299 + * @is_updated: Flag to indicate if updated since previous iteration 300 + */ 301 + struct tx_eqtr_iter { 302 + unsigned long preshoot_bitmap; 303 + unsigned long deemphasis_bitmap; 304 + u8 preshoot; 305 + u8 deemphasis; 306 + u8 fom[UFS_MAX_LANES]; 307 + bool is_updated; 308 + }; 309 + 310 + /** 311 + * struct ufshcd_tx_eq_settings - TX Equalization settings 312 + * @preshoot: PreShoot value 313 + * @deemphasis: DeEmphasis value 314 + * @fom_val: Figure-of-Merit value read out from RX_FOM (Bit[6:0]) 315 + * @precode_en: Flag to indicate whether pre-coding needs to be enabled 316 + */ 317 + struct ufshcd_tx_eq_settings { 318 + u8 preshoot; 319 + u8 deemphasis; 320 + u8 fom_val; 321 + bool precode_en; 322 + }; 323 + 324 + /** 325 + * struct ufshcd_tx_eqtr_data - Data used during TX Equalization Training procedure 326 + * @host: Optimal TX EQ settings identified for host TX Lanes during TX EQTR 327 + * @device: Optimal TX EQ settings identified for device TX Lanes during TX EQTR 328 + * @host_fom: Host TX EQTR FOM record 329 + * @device_fom: Device TX EQTR FOM record 330 + */ 331 + struct ufshcd_tx_eqtr_data { 332 + struct ufshcd_tx_eq_settings host[UFS_MAX_LANES]; 333 + struct ufshcd_tx_eq_settings device[UFS_MAX_LANES]; 334 + u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; 335 + u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; 336 + }; 337 + 338 + /** 339 + * struct ufshcd_tx_eqtr_record - TX Equalization Training record 340 + * @host_fom: Host TX EQTR FOM record 341 + * @device_fom: Device TX EQTR FOM record 342 + * @last_record_ts: Timestamp of the most recent TX EQTR record 343 + * @last_record_index: Index of the most recent TX EQTR record 344 + * @saved_adapt_eqtr: Saved Adaptation length setting for TX EQTR 345 + */ 346 + struct ufshcd_tx_eqtr_record { 347 + u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; 348 + u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; 349 + ktime_t last_record_ts; 350 + u16 last_record_index; 351 + u16 saved_adapt_eqtr; 352 + }; 353 + 354 + /** 355 + * struct ufshcd_tx_eq_params - TX Equalization parameters structure 356 + * @host: TX EQ settings for host TX Lanes 357 + * @device: TX EQ settings for device TX Lanes 358 + * @eqtr_record: Pointer to TX EQTR record 359 + * @is_valid: True if the parameters contain valid TX Equalization settings 360 + * @is_applied: True if settings have been applied to UniPro of both sides 361 + */ 362 + struct ufshcd_tx_eq_params { 363 + struct ufshcd_tx_eq_settings host[UFS_MAX_LANES]; 364 + struct ufshcd_tx_eq_settings device[UFS_MAX_LANES]; 365 + struct ufshcd_tx_eqtr_record *eqtr_record; 366 + bool is_valid; 367 + bool is_applied; 368 + }; 369 + 290 370 /** 291 371 * struct ufs_hba_variant_ops - variant specific callbacks 292 372 * @name: variant name ··· 382 302 * variant specific Uni-Pro initialization. 383 303 * @link_startup_notify: called before and after Link startup is carried out 384 304 * to allow variant specific Uni-Pro initialization. 305 + * @negotiate_pwr_mode: called to negotiate power mode.
385 306 * @pwr_change_notify: called before and after a power mode change 386 307 * is carried out to allow vendor specific capabilities 387 - * to be set. PRE_CHANGE can modify final_params based 388 - * on desired_pwr_mode, but POST_CHANGE must not alter 389 - * the final_params parameter 308 + * to be set. 390 309 * @setup_xfer_req: called before any transfer request is issued 391 310 * to set some things 392 311 * @setup_task_mgmt: called before any task management request is issued ··· 410 331 * @config_esi: called to config Event Specific Interrupt 411 332 * @config_scsi_dev: called to configure SCSI device parameters 412 333 * @freq_to_gear_speed: called to map clock frequency to the max supported gear speed 334 + * @apply_tx_eqtr_settings: called to apply TX Equalization Training 335 + * settings. 336 + * @get_rx_fom: called to get Figure of Merit (FOM) value. 337 + * @tx_eqtr_notify: called before and after the TX Equalization Training 338 + * procedure to allow platform vendor specific configs to take place. 413 339 */ 414 340 struct ufs_hba_variant_ops { 415 341 const char *name; ··· 431 347 enum ufs_notify_change_status); 432 348 int (*link_startup_notify)(struct ufs_hba *, 433 349 enum ufs_notify_change_status); 434 - int (*pwr_change_notify)(struct ufs_hba *, 435 - enum ufs_notify_change_status status, 436 - const struct ufs_pa_layer_attr *desired_pwr_mode, 437 - struct ufs_pa_layer_attr *final_params); 350 + int (*negotiate_pwr_mode)(struct ufs_hba *hba, 351 + const struct ufs_pa_layer_attr *desired_pwr_mode, 352 + struct ufs_pa_layer_attr *final_params); 353 + int (*pwr_change_notify)(struct ufs_hba *hba, 354 + enum ufs_notify_change_status status, 355 + struct ufs_pa_layer_attr *final_params); 438 356 void (*setup_xfer_req)(struct ufs_hba *hba, int tag, 439 357 bool is_scsi_cmd); 440 358 void (*setup_task_mgmt)(struct ufs_hba *, int, u8); ··· 466 380 int (*config_esi)(struct ufs_hba *hba); 467 381 void (*config_scsi_dev)(struct scsi_device *sdev); 468 382 u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq); 383 + int (*get_rx_fom)(struct ufs_hba *hba, 384 + struct ufs_pa_layer_attr *pwr_mode, 385 + struct tx_eqtr_iter *h_iter, 386 + struct tx_eqtr_iter *d_iter); 387 + int (*apply_tx_eqtr_settings)(struct ufs_hba *hba, 388 + struct ufs_pa_layer_attr *pwr_mode, 389 + struct tx_eqtr_iter *h_iter, 390 + struct tx_eqtr_iter *d_iter); 391 + int (*tx_eqtr_notify)(struct ufs_hba *hba, 392 + enum ufs_notify_change_status status, 393 + struct ufs_pa_layer_attr *pwr_mode); 469 394 }; 470 395 471 396 /* clock gating state */ ··· 623 526 UFSHCD_STATE_EH_SCHEDULED_NON_FATAL, 624 527 UFSHCD_STATE_EH_SCHEDULED_FATAL, 625 528 UFSHCD_STATE_ERROR, 529 + }; 530 + 531 + /** 532 + * enum ufshcd_pmc_policy - Power Mode change policy 533 + * @UFSHCD_PMC_POLICY_DONT_FORCE: Do not force a Power Mode change. 534 + * @UFSHCD_PMC_POLICY_FORCE: Force a Power Mode change even if the current Power 535 + * Mode is the same as the target Power Mode. 536 + */ 537 + enum ufshcd_pmc_policy { 538 + UFSHCD_PMC_POLICY_DONT_FORCE, 539 + UFSHCD_PMC_POLICY_FORCE, 626 540 }; 627 541 628 542 enum ufshcd_quirks { ··· 798 690 * because it causes link startup to become unreliable. 799 691 */ 800 692 UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE = 1 << 26, 693 + 694 + /* 695 + * On some platforms, the VCC regulator has a slow ramp-up time. Add a 696 + * delay after enabling VCC to ensure it's stable.
697 + */ 698 + UFSHCD_QUIRK_VCC_ON_DELAY = 1 << 27, 801 699 }; 802 700 803 701 enum ufshcd_caps { ··· 881 767 * WriteBooster when scaling the clock down. 882 768 */ 883 769 UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12, 770 + 771 + /* 772 + * This capability allows the host controller driver to apply TX 773 + * Equalization settings discovered from UFS attributes, variant 774 + * specific operations and the TX Equalization Training procedure. 775 + */ 776 + UFSHCD_CAP_TX_EQUALIZATION = 1 << 13, 884 777 }; 885 778 886 779 struct ufs_hba_variant_params { ··· 1002 881 * @saved_uic_err: sticky UIC error mask 1003 882 * @ufs_stats: various error counters 1004 883 * @force_reset: flag to force eh_work perform a full reset 1005 - * @force_pmc: flag to force a power mode change 1006 884 * @silence_err_logs: flag to silence error logs 1007 885 * @dev_cmd: ufs device management command information 1008 886 * @last_dme_cmd_tstamp: time stamp of the last completed DME command ··· 1063 943 * @critical_health_count: count of critical health exceptions 1064 944 * @dev_lvl_exception_count: count of device level exceptions since last reset 1065 945 * @dev_lvl_exception_id: vendor specific information about the device level exception event. 946 + * @dme_qos_notification: Bitfield of pending DME Quality of Service (QoS) 947 + * events. Bits[3:1] reflect the corresponding bits of UIC DME Error Code 948 + * field within the Host Controller's UECDME register. Bit[0] is a flag 949 + * indicating that the DME QoS Monitor has been reset by the host. 950 + * @dme_qos_sysfs_handle: handle for 'dme_qos_notification' sysfs entry 1066 951 * @rpmbs: list of OP-TEE RPMB devices (one per RPMB region) 952 + * @host_preshoot_cap: a bitfield to indicate supported PreShoot dBs of host's TX lanes, cache of 953 + * host M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15) 954 + * @host_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of host's TX lanes, cache 955 + * of host M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12) 956 + * @device_preshoot_cap: a bitfield to indicate supported PreShoot dBs of device's TX lanes, cache 957 + * of device M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15) 958 + * @device_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of device's TX lanes, 959 + * cache of device M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12) 960 + * @tx_eq_params: TX Equalization settings 1067 961 */ 1068 962 struct ufs_hba { 1069 963 void __iomem *mmio_base; ··· 1169 1035 u32 saved_uic_err; 1170 1036 struct ufs_stats ufs_stats; 1171 1037 bool force_reset; 1172 - bool force_pmc; 1173 1038 bool silence_err_logs; 1174 1039 1175 1040 /* Device management request data */ ··· 1249 1116 int critical_health_count; 1250 1117 atomic_t dev_lvl_exception_count; 1251 1118 u64 dev_lvl_exception_id; 1119 + 1120 + atomic_t dme_qos_notification; 1121 + struct kernfs_node *dme_qos_sysfs_handle; 1122 + 1252 1123 u32 vcc_off_delay_us; 1253 1124 struct list_head rpmbs; 1125 + 1126 + u8 host_preshoot_cap; 1127 + u8 host_deemphasis_cap; 1128 + u8 device_preshoot_cap; 1129 + u8 device_deemphasis_cap; 1130 + struct ufshcd_tx_eq_params tx_eq_params[UFS_HS_GEAR_MAX]; 1254 1131 }; 1255 1132 1256 1133 /** ··· 1405 1262 return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING; 1406 1263 } 1407 1264 1265 + static inline bool ufshcd_is_tx_eq_supported(struct ufs_hba *hba) 1266 + { 1267 + return hba->caps & UFSHCD_CAP_TX_EQUALIZATION && 1268 + hba->ufs_version >= ufshci_version(5, 0) && 1269 +
hba->dev_info.wspecversion >= 0x500; 1270 + } 1271 + 1408 1272 #define ufsmcq_writel(hba, val, reg) \ 1409 1273 writel((val), (hba)->mcq_base + (reg)) 1410 1274 #define ufsmcq_readl(hba, reg) \ ··· 1426 1276 writel((val), (hba)->mmio_base + (reg)) 1427 1277 #define ufshcd_readl(hba, reg) \ 1428 1278 readl((hba)->mmio_base + (reg)) 1279 + 1280 + static inline const char *ufs_hs_rate_to_str(enum ufs_hs_gear_rate rate) 1281 + { 1282 + switch (rate) { 1283 + case PA_HS_MODE_A: 1284 + return "A"; 1285 + case PA_HS_MODE_B: 1286 + return "B"; 1287 + default: 1288 + return "Unknown"; 1289 + } 1290 + } 1429 1291 1430 1292 /** 1431 1293 * ufshcd_rmwl - perform read/modify/write for a controller register ··· 1523 1361 u8 attr_set, u32 mib_val, u8 peer); 1524 1362 extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 1525 1363 u32 *mib_val, u8 peer); 1364 + extern int ufshcd_change_power_mode(struct ufs_hba *hba, 1365 + struct ufs_pa_layer_attr *pwr_mode, 1366 + enum ufshcd_pmc_policy pmc_policy); 1526 1367 extern int ufshcd_config_pwr_mode(struct ufs_hba *hba, 1527 - struct ufs_pa_layer_attr *desired_pwr_mode); 1368 + struct ufs_pa_layer_attr *desired_pwr_mode, 1369 + enum ufshcd_pmc_policy pmc_policy); 1528 1370 extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode); 1371 + extern int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, 1372 + struct ufshcd_tx_eq_params *params, 1373 + u32 gear); 1529 1374 1530 1375 /* UIC command interfaces for DME primitives */ 1531 1376 #define DME_LOCAL 0
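ufshcd_change_power_mode() is now exported with an explicit policy argument, and UFSHCD_PMC_POLICY_FORCE lets callers such as the Qualcomm TX EQTR code re-issue a PMC even when the target mode equals the current one (replacing the removed force_pmc flag). A hedged sketch of a caller (illustrative, not from the patch):

        /* Hypothetical variant-driver helper: re-apply the current power mode
         * with a forced PMC, e.g. after retraining TX equalization. */
        static int example_force_reapply_pwr_mode(struct ufs_hba *hba)
        {
                struct ufs_pa_layer_attr pwr = hba->pwr_info;
                int ret;

                ret = ufshcd_change_power_mode(hba, &pwr, UFSHCD_PMC_POLICY_FORCE);
                if (ret)
                        dev_err(hba->dev, "forced PMC to HS-G%u Rate-%s failed: %d\n",
                                pwr.gear_tx, ufs_hs_rate_to_str(pwr.hs_rate), ret);
                return ret;
        }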
+3
include/ufs/ufshci.h
··· 115 115 enum { 116 116 REG_CQIS = 0x0, 117 117 REG_CQIE = 0x4, 118 + REG_MCQIACR = 0x8, 118 119 }; 119 120 120 121 enum { ··· 189 188 #define SYSTEM_BUS_FATAL_ERROR 0x20000 190 189 #define CRYPTO_ENGINE_FATAL_ERROR 0x40000 191 190 #define MCQ_CQ_EVENT_STATUS 0x100000 191 + #define MCQ_IAG_EVENT_STATUS 0x200000 192 192 193 193 #define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\ 194 194 UIC_HIBERNATE_EXIT) ··· 273 271 /* UECDME - Host UIC Error Code DME 48h */ 274 272 #define UIC_DME_ERROR 0x80000000 275 273 #define UIC_DME_ERROR_CODE_MASK 0x1 274 + #define UIC_DME_QOS_MASK 0xE 276 275 277 276 /* UTRIACR - Interrupt Aggregation control register - 0x4Ch */ 278 277 #define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
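UIC_DME_QOS_MASK carves bits 3:1 out of the UECDME register; per the struct ufs_hba documentation earlier, those bits feed the dme_qos_notification bitfield, whose bit 0 is reserved for the host-side "monitor reset" flag. A small illustrative helper showing how the mask could be consumed (an assumption, not the core implementation):

        static u32 example_pending_dme_qos(u32 uecdme)
        {
                if (!(uecdme & UIC_DME_ERROR))
                        return 0;
                /* Bits 3:1 map directly into the dme_qos_notification bitfield. */
                return uecdme & UIC_DME_QOS_MASK;
        }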
+139 -2
include/ufs/unipro.h
··· 10 10 * M-TX Configuration Attributes 11 11 */ 12 12 #define TX_HIBERN8TIME_CAPABILITY 0x000F 13 + #define TX_HS_DEEMPHASIS_SETTING_CAP 0x0012 14 + #define TX_HS_PRESHOOT_SETTING_CAP 0x0015 13 15 #define TX_MODE 0x0021 14 16 #define TX_HSRATE_SERIES 0x0022 15 17 #define TX_HSGEAR 0x0023 ··· 32 30 #define TX_LCC_SEQUENCER 0x0032 33 31 #define TX_MIN_ACTIVATETIME 0x0033 34 32 #define TX_PWM_G6_G7_SYNC_LENGTH 0x0034 33 + #define TX_HS_DEEMPHASIS_SETTING 0x0037 34 + #define TX_HS_PRESHOOT_SETTING 0x003B 35 35 #define TX_REFCLKFREQ 0x00EB 36 36 #define TX_CFGCLKFREQVAL 0x00EC 37 37 #define CFGEXTRATTR 0x00F0 ··· 42 38 /* 43 39 * M-RX Configuration Attributes 44 40 */ 41 + #define RX_HS_G5_ADAPT_INITIAL_CAP 0x0074 42 + #define RX_HS_G6_ADAPT_INITIAL_CAP 0x007B 43 + #define RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP 0x007D 45 44 #define RX_HS_G1_SYNC_LENGTH_CAP 0x008B 46 45 #define RX_HS_G1_PREP_LENGTH_CAP 0x008C 47 46 #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F ··· 57 50 #define RX_HIBERN8TIME_CAP 0x0092 58 51 #define RX_ADV_HIBERN8TIME_CAP 0x0099 59 52 #define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A 53 + #define RX_HS_G4_ADAPT_INITIAL_CAP 0x009F 60 54 #define RX_MODE 0x00A1 61 55 #define RX_HSRATE_SERIES 0x00A2 62 56 #define RX_HSGEAR 0x00A3 ··· 72 64 #define CFGRXCDR8 0x00BA 73 65 #define CFGRXOVR8 0x00BD 74 66 #define CFGRXOVR6 0x00BF 67 + #define RX_FOM 0x00C2 75 68 #define RXDIRECTCTRL2 0x00C7 76 69 #define CFGRXOVR4 0x00E9 77 70 #define RX_REFCLKFREQ 0x00EB 78 71 #define RX_CFGCLKFREQVAL 0x00EC 79 72 #define CFGWIDEINLN 0x00F0 73 + #define RX_EYEMON_CAP 0x00F1 74 + #define RX_EYEMON_TIMING_MAX_STEPS_CAP 0x00F2 75 + #define RX_EYEMON_TIMING_MAX_OFFSET_CAP 0x00F3 76 + #define RX_EYEMON_VOLTAGE_MAX_STEPS_CAP 0x00F4 77 + #define RX_EYEMON_VOLTAGE_MAX_OFFSET_CAP 0x00F5 78 + #define RX_EYEMON_ENABLE 0x00F6 79 + #define RX_EYEMON_TIMING_STEPS 0x00F7 80 + #define RX_EYEMON_VOLTAGE_STEPS 0x00F8 81 + #define RX_EYEMON_TARGET_TEST_COUNT 0x00F9 82 + #define RX_EYEMON_TESTED_COUNT 0x00FA 83 + #define RX_EYEMON_ERROR_COUNT 0x00FB 84 + #define RX_EYEMON_START 0x00FC 85 + #define RX_EYEMON_EXTENDED_ERROR_COUNT 0x00FD 86 + 80 87 #define ENARXDIRECTCFG4 0x00F2 81 88 #define ENARXDIRECTCFG3 0x00F3 82 89 #define ENARXDIRECTCFG2 0x00F4 83 90 91 + #define RX_EYEMON_NEGATIVE_STEP_BIT BIT(6) 92 + #define RX_EYEMON_EXTENDED_VRANGE_BIT BIT(6) 84 93 85 94 #define is_mphy_tx_attr(attr) (attr < RX_MODE) 86 95 #define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1) ··· 123 98 #define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB) 124 99 125 100 #define UNIPRO_CB_OFFSET(x) (0x8000 | x) 101 + 102 + #define ADAPT_LENGTH_MASK 0x7F 103 + #define ADAPT_RANGE_BIT BIT(7) 104 + #define IS_ADAPT_RANGE_COARSE(x) ((x) & ADAPT_RANGE_BIT) 105 + 106 + /* Adapt definitions */ 107 + #define ADAPT_LENGTH_MAX 0x91 108 + #define ADAPT_L0L3_LENGTH_MAX 0x90 109 + #define ADAPT_L0L1L2L3_LENGTH_MAX 0x8C 110 + #define TADAPT_FACTOR 650 111 + #define TADAPT_L0L3_FACTOR (1 << 9) 112 + #define TADAPT_L0L1L2L3_FACTOR (1 << 15) 126 113 127 114 /* 128 115 * PHY Adapter attributes ··· 201 164 #define PA_PACPERRORCOUNT 0x15C1 202 165 #define PA_PHYTESTCONTROL 0x15C2 203 166 #define PA_TXHSG4SYNCLENGTH 0x15D0 167 + #define PA_PEERRXHSG4ADAPTINITIAL 0x15D3 204 168 #define PA_TXHSADAPTTYPE 0x15D4 205 169 #define PA_TXHSG5SYNCLENGTH 0x15D6 170 + #define PA_PEERRXHSG5ADAPTINITIAL 0x15D9 171 + #define PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3 0x15DE 172 + #define PA_PEERRXHSG6ADAPTINITIALL0L3 0x15DF 173 + #define PA_PEERRXHSG6ADAPTINITIALL0L1L2L3 0x15E0 174 + #define 
PA_TXEQG1SETTING 0x15E1 175 + #define PA_TXEQG2SETTING 0x15E2 176 + #define PA_TXEQG3SETTING 0x15E3 177 + #define PA_TXEQG4SETTING 0x15E4 178 + #define PA_TXEQG5SETTING 0x15E5 179 + #define PA_TXEQG6SETTING 0x15E6 180 + #define PA_TXEQTRSETTING 0x15E7 181 + #define PA_PEERTXEQTRSETTING 0x15E8 182 + #define PA_PRECODEEN 0x15E9 183 + #define PA_EQTR_GEAR 0x15EA 184 + #define PA_TXADAPTLENGTH_EQTR 0x15EB 206 185 207 - /* Adpat type for PA_TXHSADAPTTYPE attribute */ 186 + /* Adapt type for PA_TXHSADAPTTYPE attribute */ 208 187 #define PA_REFRESH_ADAPT 0x00 209 188 #define PA_INITIAL_ADAPT 0x01 210 189 #define PA_NO_ADAPT 0x03 ··· 239 186 240 187 /* PHY Adapter Protocol Constants */ 241 188 #define PA_MAXDATALANES 4 189 + 190 + /* 191 + * TX EQTR's minimum TAdapt should not be less than 10us. 192 + * This value is rounded up into the nearest Unit Intervals (UI) 193 + */ 194 + #define TX_EQTR_HS_G4_MIN_T_ADAPT 166400 195 + #define TX_EQTR_HS_G5_MIN_T_ADAPT 332800 196 + #define TX_EQTR_HS_G6_MIN_T_ADAPT 262144 197 + 198 + #define TX_EQTR_HS_G4_ADAPT_DEFAULT 0x88 199 + #define TX_EQTR_HS_G5_ADAPT_DEFAULT 0x89 200 + #define TX_EQTR_HS_G6_ADAPT_DEFAULT 0x89 201 + 202 + #define TX_EQTR_CAP_MASK 0x7F 203 + 204 + #define TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT 8 205 + #define TX_EQTR_ADAPT_RESERVED 0xFF 206 + 207 + #define TX_HS_NUM_PRESHOOT 8 208 + #define TX_HS_NUM_DEEMPHASIS 8 209 + #define TX_HS_PRESHOOT_SHIFT 4 210 + #define TX_HS_DEEMPHASIS_SHIFT 4 211 + #define TX_HS_PRESHOOT_OFFSET 0 212 + #define TX_HS_DEEMPHASIS_OFFSET 16 213 + 214 + #define TX_HS_PRESHOOT_LANE_SHIFT(lane) \ 215 + (TX_HS_PRESHOOT_OFFSET + (lane) * TX_HS_PRESHOOT_SHIFT) 216 + #define TX_HS_DEEMPHASIS_LANE_SHIFT(lane) \ 217 + (TX_HS_DEEMPHASIS_OFFSET + (lane) * TX_HS_DEEMPHASIS_SHIFT) 218 + 219 + #define TX_HS_PRESHOOT_BITS(lane, val) \ 220 + ((val) << TX_HS_PRESHOOT_LANE_SHIFT(lane)) 221 + #define TX_HS_DEEMPHASIS_BITS(lane, val) \ 222 + ((val) << TX_HS_DEEMPHASIS_LANE_SHIFT(lane)) 223 + 224 + #define RX_FOM_VALUE_MASK 0x7F 225 + #define RX_FOM_PRECODING_EN_BIT BIT(7) 226 + 227 + #define PRECODEEN_TX_OFFSET 0 228 + #define PRECODEEN_RX_OFFSET 4 229 + #define PRECODEEN_TX_BIT(lane) (1 << (PRECODEEN_TX_OFFSET + (lane))) 230 + #define PRECODEEN_RX_BIT(lane) (1 << (PRECODEEN_RX_OFFSET + (lane))) 231 + 232 + enum ufs_tx_eq_preset { 233 + UFS_TX_EQ_PRESET_P0, 234 + UFS_TX_EQ_PRESET_P1, 235 + UFS_TX_EQ_PRESET_P2, 236 + UFS_TX_EQ_PRESET_P3, 237 + UFS_TX_EQ_PRESET_P4, 238 + UFS_TX_EQ_PRESET_P5, 239 + UFS_TX_EQ_PRESET_P6, 240 + UFS_TX_EQ_PRESET_P7, 241 + UFS_TX_EQ_PRESET_MAX, 242 + }; 243 + 244 + enum ufs_tx_hs_preshoot { 245 + UFS_TX_HS_PRESHOOT_DB_0P0, 246 + UFS_TX_HS_PRESHOOT_DB_0P4, 247 + UFS_TX_HS_PRESHOOT_DB_0P8, 248 + UFS_TX_HS_PRESHOOT_DB_1P2, 249 + UFS_TX_HS_PRESHOOT_DB_1P6, 250 + UFS_TX_HS_PRESHOOT_DB_2P5, 251 + UFS_TX_HS_PRESHOOT_DB_3P5, 252 + UFS_TX_HS_PRESHOOT_DB_4P7, 253 + }; 254 + 255 + enum ufs_tx_hs_deemphasis { 256 + UFS_TX_HS_DEEMPHASIS_DB_0P0, 257 + UFS_TX_HS_DEEMPHASIS_DB_0P8, 258 + UFS_TX_HS_DEEMPHASIS_DB_1P6, 259 + UFS_TX_HS_DEEMPHASIS_DB_2P5, 260 + UFS_TX_HS_DEEMPHASIS_DB_3P5, 261 + UFS_TX_HS_DEEMPHASIS_DB_4P7, 262 + UFS_TX_HS_DEEMPHASIS_DB_6P0, 263 + UFS_TX_HS_DEEMPHASIS_DB_7P6, 264 + }; 265 + 266 + enum ufs_eom_eye_mask { 267 + UFS_EOM_EYE_MASK_M, 268 + UFS_EOM_EYE_MASK_L, 269 + UFS_EOM_EYE_MASK_U, 270 + }; 242 271 243 272 #define DL_FC0ProtectionTimeOutVal_Default 8191 244 273 #define DL_TC0ReplayTimeOutVal_Default 65535 ··· 368 233 UFS_HS_G2, /* HS Gear 2 */ 369 234 UFS_HS_G3, /* HS Gear 3 */ 370 235 UFS_HS_G4, 
/* HS Gear 4 */ 371 - UFS_HS_G5 /* HS Gear 5 */ 236 + UFS_HS_G5, /* HS Gear 5 */ 237 + UFS_HS_G6, /* HS Gear 6 */ 238 + UFS_HS_GEAR_MAX = UFS_HS_G6, 372 239 }; 373 240 374 241 enum ufs_lanes {
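The per-lane packing macros above place a 4-bit preshoot code per lane starting at bit 0 and a 4-bit de-emphasis code per lane starting at bit 16, which is how ufs-qcom.c assembles PA_TXEQG1SETTING in ufs_qcom_apply_tx_eqtr_settings(). A worked, illustrative example (the function name is hypothetical):

        /* Program 2.5 dB preshoot and 3.5 dB de-emphasis on each TX lane. */
        static u32 example_txeq_g1_setting(int lane_tx)
        {
                u32 setting = 0;
                int lane;

                for (lane = 0; lane < lane_tx; lane++) {
                        setting |= TX_HS_PRESHOOT_BITS(lane, UFS_TX_HS_PRESHOOT_DB_2P5);
                        setting |= TX_HS_DEEMPHASIS_BITS(lane, UFS_TX_HS_DEEMPHASIS_DB_3P5);
                }

                /* Lane 0 lands in bits 3:0 (preshoot) and 19:16 (de-emphasis),
                 * lane 1 in bits 7:4 and 23:20, so two lanes yield 0x00440055. */
                return setting;
        }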