Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 2917 lines 86 kB view raw
1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 ********************************************************************/ 23#include <linux/pci.h> 24#include <linux/slab.h> 25#include <linux/interrupt.h> 26#include <linux/delay.h> 27#include <linux/unaligned.h> 28#include <linux/crc-t10dif.h> 29#include <net/checksum.h> 30 31#include <scsi/scsi.h> 32#include <scsi/scsi_device.h> 33#include <scsi/scsi_eh.h> 34#include <scsi/scsi_host.h> 35#include <scsi/scsi_tcq.h> 36#include <scsi/scsi_transport_fc.h> 37#include <scsi/fc/fc_fs.h> 38 39#include "lpfc_version.h" 40#include "lpfc_hw4.h" 41#include "lpfc_hw.h" 42#include "lpfc_sli.h" 43#include "lpfc_sli4.h" 44#include "lpfc_nl.h" 45#include "lpfc_disc.h" 46#include "lpfc.h" 47#include "lpfc_nvme.h" 48#include "lpfc_scsi.h" 49#include "lpfc_logmsg.h" 50#include "lpfc_crtn.h" 51#include "lpfc_vport.h" 52#include "lpfc_debugfs.h" 53 54/* NVME initiator-based functions */ 55 56static struct lpfc_io_buf * 57lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 58 int idx, int expedite); 59 60static void 61lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); 62 63static struct nvme_fc_port_template lpfc_nvme_template; 64 65/** 66 * lpfc_nvme_create_queue - 67 * @pnvme_lport: Transport localport that LS is to be issued from 68 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 69 * @qsize: Size of the queue in bytes 70 * @handle: An opaque driver handle used in follow-up calls. 71 * 72 * Driver registers this routine to preallocate and initialize any 73 * internal data structures to bind the @qidx to its internal IO queues. 74 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. 75 * 76 * Return value : 77 * 0 - Success 78 * -EINVAL - Unsupported input value. 79 * -ENOMEM - Could not alloc necessary memory 80 **/ 81static int 82lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, 83 unsigned int qidx, u16 qsize, 84 void **handle) 85{ 86 struct lpfc_nvme_lport *lport; 87 struct lpfc_vport *vport; 88 struct lpfc_nvme_qhandle *qhandle; 89 char *str; 90 91 if (!pnvme_lport->private) 92 return -ENOMEM; 93 94 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 95 vport = lport->vport; 96 97 if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) || 98 test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 99 return -ENODEV; 100 101 qhandle = kzalloc_obj(struct lpfc_nvme_qhandle); 102 if (qhandle == NULL) 103 return -ENOMEM; 104 105 qhandle->cpu_id = raw_smp_processor_id(); 106 qhandle->qidx = qidx; 107 /* 108 * NVME qidx == 0 is the admin queue, so both admin queue 109 * and first IO queue will use MSI-X vector and associated 110 * EQ/CQ/WQ at index 0. After that they are sequentially assigned. 111 */ 112 if (qidx) { 113 str = "IO "; /* IO queue */ 114 qhandle->index = ((qidx - 1) % 115 lpfc_nvme_template.max_hw_queues); 116 } else { 117 str = "ADM"; /* Admin queue */ 118 qhandle->index = qidx; 119 } 120 121 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 122 "6073 Binding %s HdwQueue %d (cpu %d) to " 123 "hdw_queue %d qhandle x%px\n", str, 124 qidx, qhandle->cpu_id, qhandle->index, qhandle); 125 *handle = (void *)qhandle; 126 return 0; 127} 128 129/** 130 * lpfc_nvme_delete_queue - 131 * @pnvme_lport: Transport localport that LS is to be issued from 132 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 133 * @handle: An opaque driver handle from lpfc_nvme_create_queue 134 * 135 * Driver registers this routine to free 136 * any internal data structures to bind the @qidx to its internal 137 * IO queues. 138 * 139 * Return value : 140 * 0 - Success 141 * TODO: What are the failure codes. 142 **/ 143static void 144lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, 145 unsigned int qidx, 146 void *handle) 147{ 148 struct lpfc_nvme_lport *lport; 149 struct lpfc_vport *vport; 150 151 if (!pnvme_lport->private) 152 return; 153 154 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 155 vport = lport->vport; 156 157 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 158 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", 159 lport, qidx, handle); 160 kfree(handle); 161} 162 163static void 164lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) 165{ 166 struct lpfc_nvme_lport *lport = localport->private; 167 168 lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, 169 "6173 localport x%px delete complete\n", 170 lport); 171 172 /* release any threads waiting for the unreg to complete */ 173 if (lport->vport->localport) 174 complete(lport->lport_unreg_cmp); 175} 176 177/* lpfc_nvme_remoteport_delete 178 * 179 * @remoteport: Pointer to an nvme transport remoteport instance. 180 * 181 * This is a template downcall. NVME transport calls this function 182 * when it has completed the unregistration of a previously 183 * registered remoteport. 184 * 185 * Return value : 186 * None 187 */ 188static void 189lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) 190{ 191 struct lpfc_nvme_rport *rport = remoteport->private; 192 struct lpfc_vport *vport; 193 struct lpfc_nodelist *ndlp; 194 u32 fc4_xpt_flags; 195 196 ndlp = rport->ndlp; 197 if (!ndlp) { 198 pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n", 199 __func__, rport, remoteport); 200 goto rport_err; 201 } 202 203 vport = ndlp->vport; 204 if (!vport) { 205 pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n", 206 __func__, ndlp, ndlp->nlp_state, rport); 207 goto rport_err; 208 } 209 210 fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD; 211 212 /* Remove this rport from the lport's list - memory is owned by the 213 * transport. Remove the ndlp reference for the NVME transport before 214 * calling state machine to remove the node. 215 */ 216 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 217 "6146 remoteport delete of remoteport x%px, ndlp x%px " 218 "DID x%x xflags x%x\n", 219 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags); 220 spin_lock_irq(&ndlp->lock); 221 222 /* The register rebind might have occurred before the delete 223 * downcall. Guard against this race. 224 */ 225 if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) 226 ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); 227 228 spin_unlock_irq(&ndlp->lock); 229 230 /* On a devloss timeout event, one more put is executed provided the 231 * NVME and SCSI rport unregister requests are complete. 232 */ 233 if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags)) 234 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 235 236 rport_err: 237 return; 238} 239 240/** 241 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request 242 * @phba: pointer to lpfc hba data structure. 243 * @axchg: pointer to exchange context for the NVME LS request 244 * 245 * This routine is used for processing an asynchronously received NVME LS 246 * request. Any remaining validation is done and the LS is then forwarded 247 * to the nvme-fc transport via nvme_fc_rcv_ls_req(). 248 * 249 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing) 250 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done. 251 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. 252 * 253 * Returns 0 if LS was handled and delivered to the transport 254 * Returns 1 if LS failed to be handled and should be dropped 255 */ 256int 257lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, 258 struct lpfc_async_xchg_ctx *axchg) 259{ 260#if (IS_ENABLED(CONFIG_NVME_FC)) 261 struct lpfc_vport *vport; 262 struct lpfc_nvme_rport *lpfc_rport; 263 struct nvme_fc_remote_port *remoteport; 264 struct lpfc_nvme_lport *lport; 265 uint32_t *payload = axchg->payload; 266 int rc; 267 268 vport = axchg->ndlp->vport; 269 lpfc_rport = axchg->ndlp->nrport; 270 if (!lpfc_rport) 271 return -EINVAL; 272 273 remoteport = lpfc_rport->remoteport; 274 if (!vport->localport || 275 test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 276 return -EINVAL; 277 278 lport = vport->localport->private; 279 if (!lport) 280 return -EINVAL; 281 282 rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload, 283 axchg->size); 284 285 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 286 "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x " 287 "%08x %08x %08x\n", 288 axchg->size, rc, 289 *payload, *(payload+1), *(payload+2), 290 *(payload+3), *(payload+4), *(payload+5)); 291 292 if (!rc) 293 return 0; 294#endif 295 return 1; 296} 297 298/** 299 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME 300 * LS request. 301 * @phba: Pointer to HBA context object 302 * @vport: The local port that issued the LS 303 * @cmdwqe: Pointer to driver command WQE object. 304 * @wcqe: Pointer to driver response CQE object. 305 * 306 * This function is the generic completion handler for NVME LS requests. 307 * The function updates any states and statistics, calls the transport 308 * ls_req done() routine, then tears down the command and buffers used 309 * for the LS request. 310 **/ 311void 312__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, 313 struct lpfc_iocbq *cmdwqe, 314 struct lpfc_wcqe_complete *wcqe) 315{ 316 struct nvmefc_ls_req *pnvme_lsreq; 317 struct lpfc_dmabuf *buf_ptr; 318 struct lpfc_nodelist *ndlp; 319 int status; 320 321 pnvme_lsreq = cmdwqe->context_un.nvme_lsreq; 322 ndlp = cmdwqe->ndlp; 323 buf_ptr = cmdwqe->bpl_dmabuf; 324 325 status = bf_get(lpfc_wcqe_c_status, wcqe); 326 327 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 328 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " 329 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px " 330 "ndlp:x%px\n", 331 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 332 cmdwqe->sli4_xritag, status, 333 (wcqe->parameter & 0xffff), 334 cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf, 335 ndlp); 336 337 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n", 338 cmdwqe->sli4_xritag, status, wcqe->parameter); 339 340 if (buf_ptr) { 341 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 342 kfree(buf_ptr); 343 cmdwqe->bpl_dmabuf = NULL; 344 } 345 if (pnvme_lsreq->done) { 346 if (status != CQE_STATUS_SUCCESS) 347 status = -ENXIO; 348 pnvme_lsreq->done(pnvme_lsreq, status); 349 } else { 350 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 351 "6046 NVMEx cmpl without done call back? " 352 "Data x%px DID %x Xri: %x status %x\n", 353 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 354 cmdwqe->sli4_xritag, status); 355 } 356 if (ndlp) { 357 lpfc_nlp_put(ndlp); 358 cmdwqe->ndlp = NULL; 359 } 360 lpfc_sli_release_iocbq(phba, cmdwqe); 361} 362 363static void 364lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 365 struct lpfc_iocbq *rspwqe) 366{ 367 struct lpfc_vport *vport = cmdwqe->vport; 368 struct lpfc_nvme_lport *lport; 369 uint32_t status; 370 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; 371 372 status = bf_get(lpfc_wcqe_c_status, wcqe); 373 374 if (vport->localport) { 375 lport = (struct lpfc_nvme_lport *)vport->localport->private; 376 if (lport) { 377 atomic_inc(&lport->fc4NvmeLsCmpls); 378 if (status) { 379 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 380 atomic_inc(&lport->cmpl_ls_xb); 381 atomic_inc(&lport->cmpl_ls_err); 382 } 383 } 384 } 385 386 __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe); 387} 388 389static int 390lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 391 struct lpfc_dmabuf *inp, 392 struct nvmefc_ls_req *pnvme_lsreq, 393 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, 394 struct lpfc_iocbq *), 395 struct lpfc_nodelist *ndlp, uint32_t num_entry, 396 uint32_t tmo, uint8_t retry) 397{ 398 struct lpfc_hba *phba = vport->phba; 399 union lpfc_wqe128 *wqe; 400 struct lpfc_iocbq *genwqe; 401 struct ulp_bde64 *bpl; 402 struct ulp_bde64 bde; 403 int i, rc, xmit_len, first_len; 404 405 /* Allocate buffer for command WQE */ 406 genwqe = lpfc_sli_get_iocbq(phba); 407 if (genwqe == NULL) 408 return 1; 409 410 wqe = &genwqe->wqe; 411 /* Initialize only 64 bytes */ 412 memset(wqe, 0, sizeof(union lpfc_wqe)); 413 414 genwqe->bpl_dmabuf = bmp; 415 genwqe->cmd_flag |= LPFC_IO_NVME_LS; 416 417 /* Save for completion so we can release these resources */ 418 genwqe->ndlp = lpfc_nlp_get(ndlp); 419 if (!genwqe->ndlp) { 420 dev_warn(&phba->pcidev->dev, 421 "Warning: Failed node ref, not sending LS_REQ\n"); 422 lpfc_sli_release_iocbq(phba, genwqe); 423 return 1; 424 } 425 426 genwqe->context_un.nvme_lsreq = pnvme_lsreq; 427 /* Fill in payload, bp points to frame payload */ 428 429 if (!tmo) 430 /* FC spec states we need 3 * ratov for CT requests */ 431 tmo = (3 * phba->fc_ratov); 432 433 /* For this command calculate the xmit length of the request bde. */ 434 xmit_len = 0; 435 first_len = 0; 436 bpl = (struct ulp_bde64 *)bmp->virt; 437 for (i = 0; i < num_entry; i++) { 438 bde.tus.w = bpl[i].tus.w; 439 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 440 break; 441 xmit_len += bde.tus.f.bdeSize; 442 if (i == 0) 443 first_len = xmit_len; 444 } 445 446 genwqe->num_bdes = num_entry; 447 genwqe->hba_wqidx = 0; 448 449 /* Words 0 - 2 */ 450 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 451 wqe->generic.bde.tus.f.bdeSize = first_len; 452 wqe->generic.bde.addrLow = bpl[0].addrLow; 453 wqe->generic.bde.addrHigh = bpl[0].addrHigh; 454 455 /* Word 3 */ 456 wqe->gen_req.request_payload_len = first_len; 457 458 /* Word 4 */ 459 460 /* Word 5 */ 461 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); 462 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); 463 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); 464 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ); 465 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); 466 467 /* Word 6 */ 468 bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, 469 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 470 bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); 471 472 /* Word 7 */ 473 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo); 474 bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); 475 bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); 476 bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); 477 478 /* Word 8 */ 479 wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; 480 481 /* Word 9 */ 482 bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); 483 484 /* Word 10 */ 485 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 486 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 487 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 488 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 489 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 490 491 /* Word 11 */ 492 bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 493 bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); 494 495 496 /* Issue GEN REQ WQE for NPORT <did> */ 497 genwqe->cmd_cmpl = cmpl; 498 genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; 499 genwqe->vport = vport; 500 genwqe->retry = retry; 501 502 lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", 503 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); 504 505 rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); 506 if (rc) { 507 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 508 "6045 Issue GEN REQ WQE to NPORT x%x " 509 "Data: x%x x%x rc x%x\n", 510 ndlp->nlp_DID, genwqe->iotag, 511 vport->port_state, rc); 512 lpfc_nlp_put(ndlp); 513 lpfc_sli_release_iocbq(phba, genwqe); 514 return 1; 515 } 516 517 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS, 518 "6050 Issue GEN REQ WQE to NPORT x%x " 519 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px " 520 "bmp:x%px xmit:%d 1st:%d\n", 521 ndlp->nlp_DID, genwqe->sli4_xritag, 522 vport->port_state, 523 genwqe, pnvme_lsreq, bmp, xmit_len, first_len); 524 return 0; 525} 526 527 528/** 529 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request 530 * @vport: The local port issuing the LS 531 * @ndlp: The remote port to send the LS to 532 * @pnvme_lsreq: Pointer to LS request structure from the transport 533 * @gen_req_cmp: Completion call-back 534 * 535 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST 536 * WQE to perform the LS operation. 537 * 538 * Return value : 539 * 0 - Success 540 * non-zero: various error codes, in form of -Exxx 541 **/ 542int 543__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 544 struct nvmefc_ls_req *pnvme_lsreq, 545 void (*gen_req_cmp)(struct lpfc_hba *phba, 546 struct lpfc_iocbq *cmdwqe, 547 struct lpfc_iocbq *rspwqe)) 548{ 549 struct lpfc_dmabuf *bmp; 550 struct ulp_bde64 *bpl; 551 int ret; 552 uint16_t ntype, nstate; 553 554 if (!ndlp) { 555 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 556 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " 557 "LS Req\n", 558 ndlp); 559 return -ENODEV; 560 } 561 562 ntype = ndlp->nlp_type; 563 nstate = ndlp->nlp_state; 564 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || 565 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { 566 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 567 "6088 NVMEx LS REQ: Fail DID x%06x not " 568 "ready for IO. Type x%x, State x%x\n", 569 ndlp->nlp_DID, ntype, nstate); 570 return -ENODEV; 571 } 572 if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 573 return -ENODEV; 574 575 if (!vport->phba->sli4_hba.nvmels_wq) 576 return -ENOMEM; 577 578 /* 579 * there are two dma buf in the request, actually there is one and 580 * the second one is just the start address + cmd size. 581 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped 582 * in a lpfc_dmabuf struct. When freeing we just free the wrapper 583 * because the nvem layer owns the data bufs. 584 * We do not have to break these packets open, we don't care what is 585 * in them. And we do not have to look at the resonse data, we only 586 * care that we got a response. All of the caring is going to happen 587 * in the nvme-fc layer. 588 */ 589 590 bmp = kmalloc_obj(*bmp); 591 if (!bmp) { 592 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 593 "6044 NVMEx LS REQ: Could not alloc LS buf " 594 "for DID %x\n", 595 ndlp->nlp_DID); 596 return -ENOMEM; 597 } 598 599 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); 600 if (!bmp->virt) { 601 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 602 "6042 NVMEx LS REQ: Could not alloc mbuf " 603 "for DID %x\n", 604 ndlp->nlp_DID); 605 kfree(bmp); 606 return -ENOMEM; 607 } 608 609 INIT_LIST_HEAD(&bmp->list); 610 611 bpl = (struct ulp_bde64 *)bmp->virt; 612 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); 613 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); 614 bpl->tus.f.bdeFlags = 0; 615 bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; 616 bpl->tus.w = le32_to_cpu(bpl->tus.w); 617 bpl++; 618 619 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); 620 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); 621 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 622 bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; 623 bpl->tus.w = le32_to_cpu(bpl->tus.w); 624 625 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 626 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, " 627 "rqstlen:%d rsplen:%d %pad %pad\n", 628 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen, 629 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 630 &pnvme_lsreq->rspdma); 631 632 ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, 633 pnvme_lsreq, gen_req_cmp, ndlp, 2, 634 pnvme_lsreq->timeout, 0); 635 if (ret != WQE_SUCCESS) { 636 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 637 "6052 NVMEx REQ: EXIT. issue ls wqe failed " 638 "lsreq x%px Status %x DID %x\n", 639 pnvme_lsreq, ret, ndlp->nlp_DID); 640 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); 641 kfree(bmp); 642 return -EIO; 643 } 644 645 return 0; 646} 647 648/** 649 * lpfc_nvme_ls_req - Issue an NVME Link Service request 650 * @pnvme_lport: Transport localport that LS is to be issued from. 651 * @pnvme_rport: Transport remoteport that LS is to be sent to. 652 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS 653 * 654 * Driver registers this routine to handle any link service request 655 * from the nvme_fc transport to a remote nvme-aware port. 656 * 657 * Return value : 658 * 0 - Success 659 * non-zero: various error codes, in form of -Exxx 660 **/ 661static int 662lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, 663 struct nvme_fc_remote_port *pnvme_rport, 664 struct nvmefc_ls_req *pnvme_lsreq) 665{ 666 struct lpfc_nvme_lport *lport; 667 struct lpfc_nvme_rport *rport; 668 struct lpfc_vport *vport; 669 int ret; 670 671 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 672 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 673 if (unlikely(!lport) || unlikely(!rport)) 674 return -EINVAL; 675 676 vport = lport->vport; 677 if (test_bit(FC_UNLOADING, &vport->load_flag) || 678 test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) 679 return -ENODEV; 680 681 atomic_inc(&lport->fc4NvmeLsRequests); 682 683 ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq, 684 lpfc_nvme_ls_req_cmp); 685 if (ret) 686 atomic_inc(&lport->xmt_ls_err); 687 688 return ret; 689} 690 691/** 692 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior 693 * NVME LS request 694 * @vport: The local port that issued the LS 695 * @ndlp: The remote port the LS was sent to 696 * @pnvme_lsreq: Pointer to LS request structure from the transport 697 * 698 * The driver validates the ndlp, looks for the LS, and aborts the 699 * LS if found. 700 * 701 * Returns: 702 * 0 : if LS found and aborted 703 * non-zero: various error conditions in form -Exxx 704 **/ 705int 706__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 707 struct nvmefc_ls_req *pnvme_lsreq) 708{ 709 struct lpfc_hba *phba = vport->phba; 710 struct lpfc_sli_ring *pring; 711 struct lpfc_iocbq *wqe, *next_wqe; 712 bool foundit = false; 713 714 if (!ndlp) { 715 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 716 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " 717 "x%06x, Failing LS Req\n", 718 ndlp, ndlp ? ndlp->nlp_DID : 0); 719 return -EINVAL; 720 } 721 722 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, 723 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq " 724 "x%px rqstlen:%d rsplen:%d %pad %pad\n", 725 pnvme_lsreq, pnvme_lsreq->rqstlen, 726 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 727 &pnvme_lsreq->rspdma); 728 729 /* 730 * Lock the ELS ring txcmplq and look for the wqe that matches 731 * this ELS. If found, issue an abort on the wqe. 732 */ 733 pring = phba->sli4_hba.nvmels_wq->pring; 734 spin_lock_irq(&phba->hbalock); 735 spin_lock(&pring->ring_lock); 736 list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { 737 if (wqe->context_un.nvme_lsreq == pnvme_lsreq) { 738 wqe->cmd_flag |= LPFC_DRIVER_ABORTED; 739 foundit = true; 740 break; 741 } 742 } 743 spin_unlock(&pring->ring_lock); 744 745 if (foundit) 746 lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL); 747 spin_unlock_irq(&phba->hbalock); 748 749 if (foundit) 750 return 0; 751 752 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, 753 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n", 754 pnvme_lsreq); 755 return -EINVAL; 756} 757 758static int 759lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport, 760 struct nvme_fc_remote_port *remoteport, 761 struct nvmefc_ls_rsp *ls_rsp) 762{ 763 struct lpfc_async_xchg_ctx *axchg = 764 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); 765 struct lpfc_nvme_lport *lport; 766 int rc; 767 768 if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag)) 769 return -ENODEV; 770 771 lport = (struct lpfc_nvme_lport *)localport->private; 772 773 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp); 774 775 if (rc) { 776 /* 777 * unless the failure is due to having already sent 778 * the response, an abort will be generated for the 779 * exchange if the rsp can't be sent. 780 */ 781 if (rc != -EALREADY) 782 atomic_inc(&lport->xmt_ls_abort); 783 return rc; 784 } 785 786 return 0; 787} 788 789/** 790 * lpfc_nvme_ls_abort - Abort a prior NVME LS request 791 * @pnvme_lport: Transport localport that LS is to be issued from. 792 * @pnvme_rport: Transport remoteport that LS is to be sent to. 793 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS 794 * 795 * Driver registers this routine to abort a NVME LS request that is 796 * in progress (from the transports perspective). 797 **/ 798static void 799lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, 800 struct nvme_fc_remote_port *pnvme_rport, 801 struct nvmefc_ls_req *pnvme_lsreq) 802{ 803 struct lpfc_nvme_lport *lport; 804 struct lpfc_vport *vport; 805 struct lpfc_nodelist *ndlp; 806 int ret; 807 808 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 809 if (unlikely(!lport)) 810 return; 811 vport = lport->vport; 812 813 if (test_bit(FC_UNLOADING, &vport->load_flag)) 814 return; 815 816 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 817 818 ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq); 819 if (!ret) 820 atomic_inc(&lport->xmt_ls_abort); 821} 822 823/* Fix up the existing sgls for NVME IO. */ 824static inline void 825lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, 826 struct lpfc_io_buf *lpfc_ncmd, 827 struct nvmefc_fcp_req *nCmd) 828{ 829 struct lpfc_hba *phba = vport->phba; 830 struct sli4_sge *sgl; 831 union lpfc_wqe128 *wqe; 832 uint32_t *wptr, *dptr; 833 834 /* 835 * Get a local pointer to the built-in wqe and correct 836 * the cmd size to match NVME's 96 bytes and fix 837 * the dma address. 838 */ 839 840 wqe = &lpfc_ncmd->cur_iocbq.wqe; 841 842 /* 843 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to 844 * match NVME. NVME sends 96 bytes. Also, use the 845 * nvme commands command and response dma addresses 846 * rather than the virtual memory to ease the restore 847 * operation. 848 */ 849 sgl = lpfc_ncmd->dma_sgl; 850 sgl->sge_len = cpu_to_le32(nCmd->cmdlen); 851 if (phba->cfg_nvme_embed_cmd) { 852 sgl->addr_hi = 0; 853 sgl->addr_lo = 0; 854 855 /* Word 0-2 - NVME CMND IU (embedded payload) */ 856 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; 857 wqe->generic.bde.tus.f.bdeSize = 56; 858 wqe->generic.bde.addrHigh = 0; 859 wqe->generic.bde.addrLow = 64; /* Word 16 */ 860 861 /* Word 10 - dbde is 0, wqes is 1 in template */ 862 863 /* 864 * Embed the payload in the last half of the WQE 865 * WQE words 16-30 get the NVME CMD IU payload 866 * 867 * WQE words 16-19 get payload Words 1-4 868 * WQE words 20-21 get payload Words 6-7 869 * WQE words 22-29 get payload Words 16-23 870 */ 871 wptr = &wqe->words[16]; /* WQE ptr */ 872 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ 873 dptr++; /* Skip Word 0 in payload */ 874 875 *wptr++ = *dptr++; /* Word 1 */ 876 *wptr++ = *dptr++; /* Word 2 */ 877 *wptr++ = *dptr++; /* Word 3 */ 878 *wptr++ = *dptr++; /* Word 4 */ 879 dptr++; /* Skip Word 5 in payload */ 880 *wptr++ = *dptr++; /* Word 6 */ 881 *wptr++ = *dptr++; /* Word 7 */ 882 dptr += 8; /* Skip Words 8-15 in payload */ 883 *wptr++ = *dptr++; /* Word 16 */ 884 *wptr++ = *dptr++; /* Word 17 */ 885 *wptr++ = *dptr++; /* Word 18 */ 886 *wptr++ = *dptr++; /* Word 19 */ 887 *wptr++ = *dptr++; /* Word 20 */ 888 *wptr++ = *dptr++; /* Word 21 */ 889 *wptr++ = *dptr++; /* Word 22 */ 890 *wptr = *dptr; /* Word 23 */ 891 } else { 892 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma)); 893 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma)); 894 895 /* Word 0-2 - NVME CMND IU Inline BDE */ 896 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 897 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen; 898 wqe->generic.bde.addrHigh = sgl->addr_hi; 899 wqe->generic.bde.addrLow = sgl->addr_lo; 900 901 /* Word 10 */ 902 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); 903 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); 904 } 905 906 sgl++; 907 908 /* Setup the physical region for the FCP RSP */ 909 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); 910 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma)); 911 sgl->word2 = le32_to_cpu(sgl->word2); 912 if (nCmd->sg_cnt) 913 bf_set(lpfc_sli4_sge_last, sgl, 0); 914 else 915 bf_set(lpfc_sli4_sge_last, sgl, 1); 916 sgl->word2 = cpu_to_le32(sgl->word2); 917 sgl->sge_len = cpu_to_le32(nCmd->rsplen); 918} 919 920 921/* 922 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO 923 * 924 * Driver registers this routine as it io request handler. This 925 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 926 * data structure to the rport indicated in @lpfc_nvme_rport. 927 * 928 * Return value : 929 * 0 - Success 930 * TODO: What are the failure codes. 931 **/ 932static void 933lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 934 struct lpfc_iocbq *pwqeOut) 935{ 936 struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf; 937 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; 938 struct lpfc_vport *vport = pwqeIn->vport; 939 struct nvmefc_fcp_req *nCmd; 940 struct nvme_fc_ersp_iu *ep; 941 struct nvme_fc_cmd_iu *cp; 942 struct lpfc_nodelist *ndlp; 943 struct lpfc_nvme_fcpreq_priv *freqpriv; 944 struct lpfc_nvme_lport *lport; 945 uint32_t code, status, idx; 946 uint16_t cid, sqhd, data; 947 uint32_t *ptr; 948 uint32_t lat; 949 bool call_done = false; 950#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 951 int cpu; 952#endif 953 bool offline = false; 954 955 /* Sanity check on return of outstanding command */ 956 if (!lpfc_ncmd) { 957 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 958 "6071 Null lpfc_ncmd pointer. No " 959 "release, skip completion\n"); 960 return; 961 } 962 963 /* Guard against abort handler being called at same time */ 964 spin_lock(&lpfc_ncmd->buf_lock); 965 966 if (!lpfc_ncmd->nvmeCmd) { 967 spin_unlock(&lpfc_ncmd->buf_lock); 968 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 969 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " 970 "nvmeCmd x%px\n", 971 lpfc_ncmd, lpfc_ncmd->nvmeCmd); 972 973 /* Release the lpfc_ncmd regardless of the missing elements. */ 974 lpfc_release_nvme_buf(phba, lpfc_ncmd); 975 return; 976 } 977 nCmd = lpfc_ncmd->nvmeCmd; 978 status = bf_get(lpfc_wcqe_c_status, wcqe); 979 980 idx = lpfc_ncmd->cur_iocbq.hba_wqidx; 981 phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; 982 983 if (unlikely(status && vport->localport)) { 984 lport = (struct lpfc_nvme_lport *)vport->localport->private; 985 if (lport) { 986 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 987 atomic_inc(&lport->cmpl_fcp_xb); 988 atomic_inc(&lport->cmpl_fcp_err); 989 } 990 } 991 992 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", 993 lpfc_ncmd->cur_iocbq.sli4_xritag, 994 status, wcqe->parameter); 995 /* 996 * Catch race where our node has transitioned, but the 997 * transport is still transitioning. 998 */ 999 ndlp = lpfc_ncmd->ndlp; 1000 if (!ndlp) { 1001 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1002 "6062 Ignoring NVME cmpl. No ndlp\n"); 1003 goto out_err; 1004 } 1005 1006 code = bf_get(lpfc_wcqe_c_code, wcqe); 1007 if (code == CQE_CODE_NVME_ERSP) { 1008 /* For this type of CQE, we need to rebuild the rsp */ 1009 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; 1010 1011 /* 1012 * Get Command Id from cmd to plug into response. This 1013 * code is not needed in the next NVME Transport drop. 1014 */ 1015 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; 1016 cid = cp->sqe.common.command_id; 1017 1018 /* 1019 * RSN is in CQE word 2 1020 * SQHD is in CQE Word 3 bits 15:0 1021 * Cmd Specific info is in CQE Word 1 1022 * and in CQE Word 0 bits 15:0 1023 */ 1024 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); 1025 1026 /* Now lets build the NVME ERSP IU */ 1027 ep->iu_len = cpu_to_be16(8); 1028 ep->rsn = wcqe->parameter; 1029 ep->xfrd_len = cpu_to_be32(nCmd->payload_length); 1030 ep->rsvd12 = 0; 1031 ptr = (uint32_t *)&ep->cqe.result.u64; 1032 *ptr++ = wcqe->total_data_placed; 1033 data = bf_get(lpfc_wcqe_c_ersp0, wcqe); 1034 *ptr = (uint32_t)data; 1035 ep->cqe.sq_head = sqhd; 1036 ep->cqe.sq_id = nCmd->sqid; 1037 ep->cqe.command_id = cid; 1038 ep->cqe.status = 0; 1039 1040 lpfc_ncmd->status = IOSTAT_SUCCESS; 1041 lpfc_ncmd->result = 0; 1042 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; 1043 nCmd->transferred_length = nCmd->payload_length; 1044 } else { 1045 lpfc_ncmd->status = status; 1046 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 1047 1048 /* For NVME, the only failure path that results in an 1049 * IO error is when the adapter rejects it. All other 1050 * conditions are a success case and resolved by the 1051 * transport. 1052 * IOSTAT_FCP_RSP_ERROR means: 1053 * 1. Length of data received doesn't match total 1054 * transfer length in WQE 1055 * 2. If the RSP payload does NOT match these cases: 1056 * a. RSP length 12/24 bytes and all zeros 1057 * b. NVME ERSP 1058 */ 1059 switch (lpfc_ncmd->status) { 1060 case IOSTAT_SUCCESS: 1061 nCmd->transferred_length = wcqe->total_data_placed; 1062 nCmd->rcv_rsplen = 0; 1063 nCmd->status = 0; 1064 break; 1065 case IOSTAT_FCP_RSP_ERROR: 1066 nCmd->transferred_length = wcqe->total_data_placed; 1067 nCmd->rcv_rsplen = wcqe->parameter; 1068 nCmd->status = 0; 1069 1070 /* Get the NVME cmd details for this unique error. */ 1071 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; 1072 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; 1073 1074 /* Check if this is really an ERSP */ 1075 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { 1076 lpfc_ncmd->status = IOSTAT_SUCCESS; 1077 lpfc_ncmd->result = 0; 1078 1079 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 1080 "6084 NVME FCP_ERR ERSP: " 1081 "xri %x placed x%x opcode x%x cmd_id " 1082 "x%x cqe_status x%x\n", 1083 lpfc_ncmd->cur_iocbq.sli4_xritag, 1084 wcqe->total_data_placed, 1085 cp->sqe.common.opcode, 1086 cp->sqe.common.command_id, 1087 ep->cqe.status); 1088 break; 1089 } 1090 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1091 "6081 NVME Completion Protocol Error: " 1092 "xri %x status x%x result x%x " 1093 "placed x%x opcode x%x cmd_id x%x, " 1094 "cqe_status x%x\n", 1095 lpfc_ncmd->cur_iocbq.sli4_xritag, 1096 lpfc_ncmd->status, lpfc_ncmd->result, 1097 wcqe->total_data_placed, 1098 cp->sqe.common.opcode, 1099 cp->sqe.common.command_id, 1100 ep->cqe.status); 1101 break; 1102 case IOSTAT_LOCAL_REJECT: 1103 /* Let fall through to set command final state. */ 1104 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) 1105 lpfc_printf_vlog(vport, KERN_INFO, 1106 LOG_NVME_IOERR, 1107 "6032 Delay Aborted cmd x%px " 1108 "nvme cmd x%px, xri x%x, " 1109 "xb %d\n", 1110 lpfc_ncmd, nCmd, 1111 lpfc_ncmd->cur_iocbq.sli4_xritag, 1112 bf_get(lpfc_wcqe_c_xb, wcqe)); 1113 fallthrough; 1114 default: 1115out_err: 1116 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1117 "6072 NVME Completion Error: xri %x " 1118 "status x%x result x%x [x%x] " 1119 "placed x%x\n", 1120 lpfc_ncmd->cur_iocbq.sli4_xritag, 1121 lpfc_ncmd->status, lpfc_ncmd->result, 1122 wcqe->parameter, 1123 wcqe->total_data_placed); 1124 nCmd->transferred_length = 0; 1125 nCmd->rcv_rsplen = 0; 1126 nCmd->status = NVME_SC_INTERNAL; 1127 if (pci_channel_offline(vport->phba->pcidev) || 1128 lpfc_ncmd->result == IOERR_SLI_DOWN) 1129 offline = true; 1130 } 1131 } 1132 1133 /* pick up SLI4 exhange busy condition */ 1134 if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline) 1135 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; 1136 else 1137 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; 1138 1139 /* Update stats and complete the IO. There is 1140 * no need for dma unprep because the nvme_transport 1141 * owns the dma address. 1142 */ 1143#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1144 if (lpfc_ncmd->ts_cmd_start) { 1145 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; 1146 lpfc_ncmd->ts_data_io = ktime_get_ns(); 1147 phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; 1148 lpfc_io_ktime(phba, lpfc_ncmd); 1149 } 1150 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { 1151 cpu = raw_smp_processor_id(); 1152 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 1153 if (lpfc_ncmd->cpu != cpu) 1154 lpfc_printf_vlog(vport, 1155 KERN_INFO, LOG_NVME_IOERR, 1156 "6701 CPU Check cmpl: " 1157 "cpu %d expect %d\n", 1158 cpu, lpfc_ncmd->cpu); 1159 } 1160#endif 1161 1162 /* NVME targets need completion held off until the abort exchange 1163 * completes unless the NVME Rport is getting unregistered. 1164 */ 1165 1166 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { 1167 freqpriv = nCmd->private; 1168 freqpriv->nvme_buf = NULL; 1169 lpfc_ncmd->nvmeCmd = NULL; 1170 call_done = true; 1171 } 1172 spin_unlock(&lpfc_ncmd->buf_lock); 1173 1174 /* Check if IO qualified for CMF */ 1175 if (phba->cmf_active_mode != LPFC_CFG_OFF && 1176 nCmd->io_dir == NVMEFC_FCP_READ && 1177 nCmd->payload_length) { 1178 /* Used when calculating average latency */ 1179 lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; 1180 lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); 1181 } 1182 1183 if (call_done) 1184 nCmd->done(nCmd); 1185 1186 /* Call release with XB=1 to queue the IO into the abort list. */ 1187 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1188} 1189 1190 1191/** 1192 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO 1193 * @vport: pointer to a host virtual N_Port data structure 1194 * @lpfc_ncmd: Pointer to lpfc scsi command 1195 * @pnode: pointer to a node-list data structure 1196 * @cstat: pointer to the control status structure 1197 * 1198 * Driver registers this routine as it io request handler. This 1199 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1200 * data structure to the rport indicated in @lpfc_nvme_rport. 1201 * 1202 * Return value : 1203 * 0 - Success 1204 * TODO: What are the failure codes. 1205 **/ 1206static int 1207lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, 1208 struct lpfc_io_buf *lpfc_ncmd, 1209 struct lpfc_nodelist *pnode, 1210 struct lpfc_fc4_ctrl_stat *cstat) 1211{ 1212 struct lpfc_hba *phba = vport->phba; 1213 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1214 struct nvme_common_command *sqe; 1215 struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; 1216 union lpfc_wqe128 *wqe = &pwqeq->wqe; 1217 uint32_t req_len; 1218 1219 /* 1220 * There are three possibilities here - use scatter-gather segment, use 1221 * the single mapping, or neither. 1222 */ 1223 if (nCmd->sg_cnt) { 1224 if (nCmd->io_dir == NVMEFC_FCP_WRITE) { 1225 /* From the iwrite template, initialize words 7 - 11 */ 1226 memcpy(&wqe->words[7], 1227 &lpfc_iwrite_cmd_template.words[7], 1228 sizeof(uint32_t) * 5); 1229 1230 /* Word 4 */ 1231 wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length; 1232 1233 /* Word 5 */ 1234 if ((phba->cfg_nvme_enable_fb) && 1235 test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { 1236 req_len = lpfc_ncmd->nvmeCmd->payload_length; 1237 wqe->fcp_iwrite.initial_xfer_len = min(req_len, 1238 pnode->nvme_fb_size); 1239 } else { 1240 wqe->fcp_iwrite.initial_xfer_len = 0; 1241 } 1242 cstat->output_requests++; 1243 } else { 1244 /* From the iread template, initialize words 7 - 11 */ 1245 memcpy(&wqe->words[7], 1246 &lpfc_iread_cmd_template.words[7], 1247 sizeof(uint32_t) * 5); 1248 1249 /* Word 4 */ 1250 wqe->fcp_iread.total_xfer_len = nCmd->payload_length; 1251 1252 /* Word 5 */ 1253 wqe->fcp_iread.rsrvd5 = 0; 1254 1255 /* For a CMF Managed port, iod must be zero'ed */ 1256 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 1257 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, 1258 LPFC_WQE_IOD_NONE); 1259 cstat->input_requests++; 1260 } 1261 } else { 1262 /* From the icmnd template, initialize words 4 - 11 */ 1263 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 1264 sizeof(uint32_t) * 8); 1265 cstat->control_requests++; 1266 } 1267 1268 if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { 1269 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 1270 sqe = &((struct nvme_fc_cmd_iu *) 1271 nCmd->cmdaddr)->sqe.common; 1272 if (sqe->opcode == nvme_admin_async_event) 1273 bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); 1274 } 1275 1276 /* 1277 * Finish initializing those WQE fields that are independent 1278 * of the nvme_cmnd request_buffer 1279 */ 1280 1281 /* Word 3 */ 1282 bf_set(payload_offset_len, &wqe->fcp_icmd, 1283 (nCmd->rsplen + nCmd->cmdlen)); 1284 1285 /* Word 6 */ 1286 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 1287 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 1288 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 1289 1290 /* Word 8 */ 1291 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 1292 1293 /* Word 9 */ 1294 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 1295 1296 /* Word 10 */ 1297 bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG); 1298 1299 /* add the VMID tags as per switch response */ 1300 if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) { 1301 if (phba->pport->vmid_priority_tagging) { 1302 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 1303 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 1304 lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid); 1305 } else { 1306 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1); 1307 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 1308 wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id; 1309 } 1310 } 1311 1312 pwqeq->vport = vport; 1313 return 0; 1314} 1315 1316 1317/** 1318 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO 1319 * @vport: pointer to a host virtual N_Port data structure 1320 * @lpfc_ncmd: Pointer to lpfc scsi command 1321 * 1322 * Driver registers this routine as it io request handler. This 1323 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1324 * data structure to the rport indicated in @lpfc_nvme_rport. 1325 * 1326 * Return value : 1327 * 0 - Success 1328 * TODO: What are the failure codes. 1329 **/ 1330static int 1331lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, 1332 struct lpfc_io_buf *lpfc_ncmd) 1333{ 1334 struct lpfc_hba *phba = vport->phba; 1335 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1336 struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; 1337 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1338 struct scatterlist *data_sg; 1339 dma_addr_t physaddr = 0; 1340 uint32_t dma_len = 0; 1341 uint32_t dma_offset = 0; 1342 int nseg, i, j, k; 1343 bool lsp_just_set = false; 1344 1345 /* Fix up the command and response DMA stuff. */ 1346 lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); 1347 1348 /* 1349 * There are three possibilities here - use scatter-gather segment, use 1350 * the single mapping, or neither. 1351 */ 1352 if (nCmd->sg_cnt) { 1353 /* 1354 * Jump over the cmd and rsp SGEs. The fix routine 1355 * has already adjusted for this. 1356 */ 1357 sgl += 2; 1358 1359 lpfc_ncmd->seg_cnt = nCmd->sg_cnt; 1360 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { 1361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1362 "6058 Too many sg segments from " 1363 "NVME Transport. Max %d, " 1364 "nvmeIO sg_cnt %d\n", 1365 phba->cfg_nvme_seg_cnt + 1, 1366 lpfc_ncmd->seg_cnt); 1367 lpfc_ncmd->seg_cnt = 0; 1368 return 1; 1369 } 1370 1371 /* 1372 * The driver established a maximum scatter-gather segment count 1373 * during probe that limits the number of sg elements in any 1374 * single nvme command. Just run through the seg_cnt and format 1375 * the sge's. 1376 */ 1377 nseg = nCmd->sg_cnt; 1378 data_sg = nCmd->first_sgl; 1379 1380 /* for tracking the segment boundaries */ 1381 j = 2; 1382 k = 5; 1383 if (unlikely(!phba->cfg_xpsgl)) 1384 k = 1; 1385 for (i = 0; i < nseg; i++) { 1386 if (data_sg == NULL) { 1387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1388 "6059 dptr err %d, nseg %d\n", 1389 i, nseg); 1390 lpfc_ncmd->seg_cnt = 0; 1391 return 1; 1392 } 1393 1394 sgl->word2 = 0; 1395 if (nseg == 1) { 1396 bf_set(lpfc_sli4_sge_last, sgl, 1); 1397 bf_set(lpfc_sli4_sge_type, sgl, 1398 LPFC_SGE_TYPE_DATA); 1399 } else { 1400 bf_set(lpfc_sli4_sge_last, sgl, 0); 1401 1402 /* expand the segment */ 1403 if (!lsp_just_set && (nseg != (i + k)) && 1404 !((j + k) % phba->border_sge_num)) { 1405 /* set LSP type */ 1406 bf_set(lpfc_sli4_sge_type, sgl, 1407 LPFC_SGE_TYPE_LSP); 1408 1409 sgl_xtra = lpfc_get_sgl_per_hdwq( 1410 phba, lpfc_ncmd); 1411 1412 if (unlikely(!sgl_xtra)) { 1413 lpfc_ncmd->seg_cnt = 0; 1414 return 1; 1415 } 1416 sgl->addr_lo = cpu_to_le32(putPaddrLow( 1417 sgl_xtra->dma_phys_sgl)); 1418 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 1419 sgl_xtra->dma_phys_sgl)); 1420 1421 } else { 1422 bf_set(lpfc_sli4_sge_type, sgl, 1423 LPFC_SGE_TYPE_DATA); 1424 } 1425 } 1426 1427 if (bf_get(lpfc_sli4_sge_type, sgl) != 1428 LPFC_SGE_TYPE_LSP) { 1429 if ((nseg - 1) == i) 1430 bf_set(lpfc_sli4_sge_last, sgl, 1); 1431 1432 physaddr = sg_dma_address(data_sg); 1433 dma_len = sg_dma_len(data_sg); 1434 sgl->addr_lo = cpu_to_le32( 1435 putPaddrLow(physaddr)); 1436 sgl->addr_hi = cpu_to_le32( 1437 putPaddrHigh(physaddr)); 1438 1439 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1440 sgl->word2 = cpu_to_le32(sgl->word2); 1441 sgl->sge_len = cpu_to_le32(dma_len); 1442 1443 dma_offset += dma_len; 1444 data_sg = sg_next(data_sg); 1445 1446 sgl++; 1447 1448 lsp_just_set = false; 1449 j++; 1450 } else { 1451 sgl->word2 = cpu_to_le32(sgl->word2); 1452 /* will remaining SGEs fill the next SGL? */ 1453 if ((nseg - i) < phba->border_sge_num) 1454 sgl->sge_len = 1455 cpu_to_le32((nseg - i) * 1456 sizeof(*sgl)); 1457 else 1458 sgl->sge_len = 1459 cpu_to_le32(phba->cfg_sg_dma_buf_size); 1460 1461 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 1462 i = i - 1; 1463 1464 lsp_just_set = true; 1465 j += k; 1466 k = 1; 1467 } 1468 } 1469 } else { 1470 lpfc_ncmd->seg_cnt = 0; 1471 1472 /* For this clause to be valid, the payload_length 1473 * and sg_cnt must zero. 1474 */ 1475 if (nCmd->payload_length != 0) { 1476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1477 "6063 NVME DMA Prep Err: sg_cnt %d " 1478 "payload_length x%x\n", 1479 nCmd->sg_cnt, nCmd->payload_length); 1480 return 1; 1481 } 1482 } 1483 return 0; 1484} 1485 1486/** 1487 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO 1488 * @pnvme_lport: Pointer to the driver's local port data 1489 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 1490 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue 1491 * @pnvme_fcreq: IO request from nvme fc to driver. 1492 * 1493 * Driver registers this routine as it io request handler. This 1494 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1495 * data structure to the rport indicated in @lpfc_nvme_rport. 1496 * 1497 * Return value : 1498 * 0 - Success 1499 * TODO: What are the failure codes. 1500 **/ 1501static int 1502lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, 1503 struct nvme_fc_remote_port *pnvme_rport, 1504 void *hw_queue_handle, 1505 struct nvmefc_fcp_req *pnvme_fcreq) 1506{ 1507 int ret = 0; 1508 int expedite = 0; 1509 int idx, cpu; 1510 struct lpfc_nvme_lport *lport; 1511 struct lpfc_fc4_ctrl_stat *cstat; 1512 struct lpfc_vport *vport; 1513 struct lpfc_hba *phba; 1514 struct lpfc_nodelist *ndlp; 1515 struct lpfc_io_buf *lpfc_ncmd; 1516 struct lpfc_nvme_rport *rport; 1517 struct lpfc_nvme_qhandle *lpfc_queue_info; 1518 struct lpfc_nvme_fcpreq_priv *freqpriv; 1519 struct nvme_common_command *sqe; 1520 uint64_t start = 0; 1521#if (IS_ENABLED(CONFIG_NVME_FC)) 1522 u8 *uuid = NULL; 1523 int err; 1524 enum dma_data_direction iodir; 1525#endif 1526 1527 /* Validate pointers. LLDD fault handling with transport does 1528 * have timing races. 1529 */ 1530 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1531 if (unlikely(!lport)) { 1532 ret = -EINVAL; 1533 goto out_fail; 1534 } 1535 1536 vport = lport->vport; 1537 1538 if (unlikely(!hw_queue_handle)) { 1539 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1540 "6117 Fail IO, NULL hw_queue_handle\n"); 1541 atomic_inc(&lport->xmt_fcp_err); 1542 ret = -EBUSY; 1543 goto out_fail; 1544 } 1545 1546 phba = vport->phba; 1547 1548 if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) || 1549 test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { 1550 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1551 "6124 Fail IO, Driver unload\n"); 1552 atomic_inc(&lport->xmt_fcp_err); 1553 ret = -ENODEV; 1554 goto out_fail; 1555 } 1556 1557 freqpriv = pnvme_fcreq->private; 1558 if (unlikely(!freqpriv)) { 1559 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1560 "6158 Fail IO, NULL request data\n"); 1561 atomic_inc(&lport->xmt_fcp_err); 1562 ret = -EINVAL; 1563 goto out_fail; 1564 } 1565 1566#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1567 if (phba->ktime_on) 1568 start = ktime_get_ns(); 1569#endif 1570 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 1571 lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; 1572 1573 /* 1574 * Catch race where our node has transitioned, but the 1575 * transport is still transitioning. 1576 */ 1577 ndlp = rport->ndlp; 1578 if (!ndlp) { 1579 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1580 "6053 Busy IO, ndlp not ready: rport x%px " 1581 "ndlp x%px, DID x%06x\n", 1582 rport, ndlp, pnvme_rport->port_id); 1583 atomic_inc(&lport->xmt_fcp_err); 1584 ret = -EBUSY; 1585 goto out_fail; 1586 } 1587 1588 /* The remote node has to be a mapped target or it's an error. */ 1589 if ((ndlp->nlp_type & NLP_NVME_TARGET) && 1590 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 1591 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1592 "6036 Fail IO, DID x%06x not ready for " 1593 "IO. State x%x, Type x%x Flg x%x\n", 1594 pnvme_rport->port_id, 1595 ndlp->nlp_state, ndlp->nlp_type, 1596 ndlp->fc4_xpt_flags); 1597 atomic_inc(&lport->xmt_fcp_bad_ndlp); 1598 ret = -EBUSY; 1599 goto out_fail; 1600 1601 } 1602 1603 /* Currently only NVME Keep alive commands should be expedited 1604 * if the driver runs out of a resource. These should only be 1605 * issued on the admin queue, qidx 0 1606 */ 1607 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { 1608 sqe = &((struct nvme_fc_cmd_iu *) 1609 pnvme_fcreq->cmdaddr)->sqe.common; 1610 if (sqe->opcode == nvme_admin_keep_alive) 1611 expedite = 1; 1612 } 1613 1614 /* Check if IO qualifies for CMF */ 1615 if (phba->cmf_active_mode != LPFC_CFG_OFF && 1616 pnvme_fcreq->io_dir == NVMEFC_FCP_READ && 1617 pnvme_fcreq->payload_length) { 1618 ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); 1619 if (ret) { 1620 ret = -EBUSY; 1621 goto out_fail; 1622 } 1623 /* Get start time for IO latency */ 1624 start = ktime_get_ns(); 1625 } 1626 1627 /* The node is shared with FCP IO, make sure the IO pending count does 1628 * not exceed the programmed depth. 1629 */ 1630 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 1631 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && 1632 !expedite) { 1633 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1634 "6174 Fail IO, ndlp qdepth exceeded: " 1635 "idx %d DID %x pend %d qdepth %d\n", 1636 lpfc_queue_info->index, ndlp->nlp_DID, 1637 atomic_read(&ndlp->cmd_pending), 1638 ndlp->cmd_qdepth); 1639 atomic_inc(&lport->xmt_fcp_qdepth); 1640 ret = -EBUSY; 1641 goto out_fail1; 1642 } 1643 } 1644 1645 /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ 1646 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 1647 idx = lpfc_queue_info->index; 1648 } else { 1649 cpu = raw_smp_processor_id(); 1650 idx = phba->sli4_hba.cpu_map[cpu].hdwq; 1651 } 1652 1653 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); 1654 if (lpfc_ncmd == NULL) { 1655 atomic_inc(&lport->xmt_fcp_noxri); 1656 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1657 "6065 Fail IO, driver buffer pool is empty: " 1658 "idx %d DID %x\n", 1659 lpfc_queue_info->index, ndlp->nlp_DID); 1660 ret = -EBUSY; 1661 goto out_fail1; 1662 } 1663#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1664 if (start) { 1665 lpfc_ncmd->ts_cmd_start = start; 1666 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; 1667 } else { 1668 lpfc_ncmd->ts_cmd_start = 0; 1669 } 1670#endif 1671 lpfc_ncmd->rx_cmd_start = start; 1672 1673 /* 1674 * Store the data needed by the driver to issue, abort, and complete 1675 * an IO. 1676 * Do not let the IO hang out forever. There is no midlayer issuing 1677 * an abort so inform the FW of the maximum IO pending time. 1678 */ 1679 freqpriv->nvme_buf = lpfc_ncmd; 1680 lpfc_ncmd->nvmeCmd = pnvme_fcreq; 1681 lpfc_ncmd->ndlp = ndlp; 1682 lpfc_ncmd->qidx = lpfc_queue_info->qidx; 1683 1684#if (IS_ENABLED(CONFIG_NVME_FC)) 1685 /* check the necessary and sufficient condition to support VMID */ 1686 if (lpfc_is_vmid_enabled(phba) && 1687 (ndlp->vmid_support || 1688 phba->pport->vmid_priority_tagging == 1689 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { 1690 /* is the I/O generated by a VM, get the associated virtual */ 1691 /* entity id */ 1692 uuid = nvme_fc_io_getuuid(pnvme_fcreq); 1693 1694 if (uuid) { 1695 if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE) 1696 iodir = DMA_TO_DEVICE; 1697 else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ) 1698 iodir = DMA_FROM_DEVICE; 1699 else 1700 iodir = DMA_NONE; 1701 1702 err = lpfc_vmid_get_appid(vport, uuid, iodir, 1703 (union lpfc_vmid_io_tag *) 1704 &lpfc_ncmd->cur_iocbq.vmid_tag); 1705 if (!err) 1706 lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID; 1707 } 1708 } 1709#endif 1710 1711 /* 1712 * Issue the IO on the WQ indicated by index in the hw_queue_handle. 1713 * This identfier was create in our hardware queue create callback 1714 * routine. The driver now is dependent on the IO queue steering from 1715 * the transport. We are trusting the upper NVME layers know which 1716 * index to use and that they have affinitized a CPU to this hardware 1717 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 1718 */ 1719 lpfc_ncmd->cur_iocbq.hba_wqidx = idx; 1720 cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; 1721 1722 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); 1723 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); 1724 if (ret) { 1725 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1726 "6175 Fail IO, Prep DMA: " 1727 "idx %d DID %x\n", 1728 lpfc_queue_info->index, ndlp->nlp_DID); 1729 atomic_inc(&lport->xmt_fcp_err); 1730 ret = -ENOMEM; 1731 goto out_free_nvme_buf; 1732 } 1733 1734 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", 1735 lpfc_ncmd->cur_iocbq.sli4_xritag, 1736 lpfc_queue_info->index, ndlp->nlp_DID); 1737 1738 ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); 1739 if (ret) { 1740 atomic_inc(&lport->xmt_fcp_wqerr); 1741 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1742 "6113 Fail IO, Could not issue WQE err %x " 1743 "sid: x%x did: x%x oxid: x%x\n", 1744 ret, vport->fc_myDID, ndlp->nlp_DID, 1745 lpfc_ncmd->cur_iocbq.sli4_xritag); 1746 goto out_free_nvme_buf; 1747 } 1748 1749 if (phba->cfg_xri_rebalancing) 1750 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); 1751 1752#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1753 if (lpfc_ncmd->ts_cmd_start) 1754 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); 1755 1756 if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { 1757 cpu = raw_smp_processor_id(); 1758 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 1759 lpfc_ncmd->cpu = cpu; 1760 if (idx != cpu) 1761 lpfc_printf_vlog(vport, 1762 KERN_INFO, LOG_NVME_IOERR, 1763 "6702 CPU Check cmd: " 1764 "cpu %d wq %d\n", 1765 lpfc_ncmd->cpu, 1766 lpfc_queue_info->index); 1767 } 1768#endif 1769 return 0; 1770 1771 out_free_nvme_buf: 1772 if (lpfc_ncmd->nvmeCmd->sg_cnt) { 1773 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) 1774 cstat->output_requests--; 1775 else 1776 cstat->input_requests--; 1777 } else 1778 cstat->control_requests--; 1779 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1780 out_fail1: 1781 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, 1782 pnvme_fcreq->payload_length, NULL); 1783 out_fail: 1784 return ret; 1785} 1786 1787/** 1788 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. 1789 * @phba: Pointer to HBA context object 1790 * @cmdiocb: Pointer to command iocb object. 1791 * @rspiocb: Pointer to response iocb object. 1792 * 1793 * This is the callback function for any NVME FCP IO that was aborted. 1794 * 1795 * Return value: 1796 * None 1797 **/ 1798void 1799lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1800 struct lpfc_iocbq *rspiocb) 1801{ 1802 struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl; 1803 1804 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 1805 "6145 ABORT_XRI_CN completing on rpi x%x " 1806 "original iotag x%x, abort cmd iotag x%x " 1807 "req_tag x%x, status x%x, hwstatus x%x\n", 1808 bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com), 1809 get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag, 1810 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), 1811 bf_get(lpfc_wcqe_c_status, abts_cmpl), 1812 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); 1813 lpfc_sli_release_iocbq(phba, cmdiocb); 1814} 1815 1816/** 1817 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS 1818 * @pnvme_lport: Pointer to the driver's local port data 1819 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 1820 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue 1821 * @pnvme_fcreq: IO request from nvme fc to driver. 1822 * 1823 * Driver registers this routine as its nvme request io abort handler. This 1824 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq 1825 * data structure to the rport indicated in @lpfc_nvme_rport. This routine 1826 * is executed asynchronously - one the target is validated as "MAPPED" and 1827 * ready for IO, the driver issues the abort request and returns. 1828 * 1829 * Return value: 1830 * None 1831 **/ 1832static void 1833lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, 1834 struct nvme_fc_remote_port *pnvme_rport, 1835 void *hw_queue_handle, 1836 struct nvmefc_fcp_req *pnvme_fcreq) 1837{ 1838 struct lpfc_nvme_lport *lport; 1839 struct lpfc_vport *vport; 1840 struct lpfc_hba *phba; 1841 struct lpfc_io_buf *lpfc_nbuf; 1842 struct lpfc_iocbq *nvmereq_wqe; 1843 struct lpfc_nvme_fcpreq_priv *freqpriv; 1844 unsigned long flags; 1845 int ret_val; 1846 1847 /* Validate pointers. LLDD fault handling with transport does 1848 * have timing races. 1849 */ 1850 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1851 if (unlikely(!lport)) 1852 return; 1853 1854 vport = lport->vport; 1855 1856 if (unlikely(!hw_queue_handle)) { 1857 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1858 "6129 Fail Abort, HW Queue Handle NULL.\n"); 1859 return; 1860 } 1861 1862 phba = vport->phba; 1863 freqpriv = pnvme_fcreq->private; 1864 1865 if (unlikely(!freqpriv)) 1866 return; 1867 if (test_bit(FC_UNLOADING, &vport->load_flag)) 1868 return; 1869 1870 /* Announce entry to new IO submit field. */ 1871 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1872 "6002 Abort Request to rport DID x%06x " 1873 "for nvme_fc_req x%px\n", 1874 pnvme_rport->port_id, 1875 pnvme_fcreq); 1876 1877 lpfc_nbuf = freqpriv->nvme_buf; 1878 if (!lpfc_nbuf) { 1879 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1880 "6140 NVME IO req has no matching lpfc nvme " 1881 "io buffer. Skipping abort req.\n"); 1882 return; 1883 } else if (!lpfc_nbuf->nvmeCmd) { 1884 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1885 "6141 lpfc NVME IO req has no nvme_fcreq " 1886 "io buffer. Skipping abort req.\n"); 1887 return; 1888 } 1889 1890 /* driver queued commands are in process of being flushed */ 1891 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { 1892 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1893 "6139 Driver in reset cleanup - flushing " 1894 "NVME Req now. hba_flag x%lx\n", 1895 phba->hba_flag); 1896 return; 1897 } 1898 1899 /* Guard against IO completion being called at same time */ 1900 spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); 1901 spin_lock(&phba->hbalock); 1902 1903 nvmereq_wqe = &lpfc_nbuf->cur_iocbq; 1904 1905 /* 1906 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's 1907 * state must match the nvme_fcreq passed by the nvme 1908 * transport. If they don't match, it is likely the driver 1909 * has already completed the NVME IO and the nvme transport 1910 * has not seen it yet. 1911 */ 1912 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { 1913 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1914 "6143 NVME req mismatch: " 1915 "lpfc_nbuf x%px nvmeCmd x%px, " 1916 "pnvme_fcreq x%px. Skipping Abort xri x%x\n", 1917 lpfc_nbuf, lpfc_nbuf->nvmeCmd, 1918 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1919 goto out_unlock; 1920 } 1921 1922 /* Don't abort IOs no longer on the pending queue. */ 1923 if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 1924 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1925 "6142 NVME IO req x%px not queued - skipping " 1926 "abort req xri x%x\n", 1927 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1928 goto out_unlock; 1929 } 1930 1931 atomic_inc(&lport->xmt_fcp_abort); 1932 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", 1933 nvmereq_wqe->sli4_xritag, 1934 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); 1935 1936 /* Outstanding abort is in progress */ 1937 if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) { 1938 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1939 "6144 Outstanding NVME I/O Abort Request " 1940 "still pending on nvme_fcreq x%px, " 1941 "lpfc_ncmd x%px xri x%x\n", 1942 pnvme_fcreq, lpfc_nbuf, 1943 nvmereq_wqe->sli4_xritag); 1944 goto out_unlock; 1945 } 1946 1947 ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, 1948 lpfc_nvme_abort_fcreq_cmpl); 1949 1950 spin_unlock(&phba->hbalock); 1951 spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); 1952 1953 /* Make sure HBA is alive */ 1954 lpfc_issue_hb_tmo(phba); 1955 1956 if (ret_val != WQE_SUCCESS) { 1957 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1958 "6137 Failed abts issue_wqe with status x%x " 1959 "for nvme_fcreq x%px.\n", 1960 ret_val, pnvme_fcreq); 1961 return; 1962 } 1963 1964 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1965 "6138 Transport Abort NVME Request Issued for " 1966 "ox_id x%x\n", 1967 nvmereq_wqe->sli4_xritag); 1968 return; 1969 1970out_unlock: 1971 spin_unlock(&phba->hbalock); 1972 spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); 1973 return; 1974} 1975 1976/* Declare and initialization an instance of the FC NVME template. */ 1977static struct nvme_fc_port_template lpfc_nvme_template = { 1978 /* initiator-based functions */ 1979 .localport_delete = lpfc_nvme_localport_delete, 1980 .remoteport_delete = lpfc_nvme_remoteport_delete, 1981 .create_queue = lpfc_nvme_create_queue, 1982 .delete_queue = lpfc_nvme_delete_queue, 1983 .ls_req = lpfc_nvme_ls_req, 1984 .fcp_io = lpfc_nvme_fcp_io_submit, 1985 .ls_abort = lpfc_nvme_ls_abort, 1986 .fcp_abort = lpfc_nvme_fcp_abort, 1987 .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp, 1988 1989 .max_hw_queues = 1, 1990 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1991 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1992 .dma_boundary = 0xFFFFFFFF, 1993 1994 /* Sizes of additional private data for data structures. 1995 * No use for the last two sizes at this time. 1996 */ 1997 .local_priv_sz = sizeof(struct lpfc_nvme_lport), 1998 .remote_priv_sz = sizeof(struct lpfc_nvme_rport), 1999 .lsrqst_priv_sz = 0, 2000 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv), 2001}; 2002 2003/* 2004 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA 2005 * 2006 * This routine removes a nvme buffer from head of @hdwq io_buf_list 2007 * and returns to caller. 2008 * 2009 * Return codes: 2010 * NULL - Error 2011 * Pointer to lpfc_nvme_buf - Success 2012 **/ 2013static struct lpfc_io_buf * 2014lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 2015 int idx, int expedite) 2016{ 2017 struct lpfc_io_buf *lpfc_ncmd; 2018 struct lpfc_sli4_hdw_queue *qp; 2019 struct sli4_sge *sgl; 2020 struct lpfc_iocbq *pwqeq; 2021 union lpfc_wqe128 *wqe; 2022 2023 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); 2024 2025 if (lpfc_ncmd) { 2026 pwqeq = &(lpfc_ncmd->cur_iocbq); 2027 wqe = &pwqeq->wqe; 2028 2029 /* Setup key fields in buffer that may have been changed 2030 * if other protocols used this buffer. 2031 */ 2032 pwqeq->cmd_flag = LPFC_IO_NVME; 2033 pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl; 2034 lpfc_ncmd->start_time = jiffies; 2035 lpfc_ncmd->flags = 0; 2036 2037 /* Rsp SGE will be filled in when we rcv an IO 2038 * from the NVME Layer to be sent. 2039 * The cmd is going to be embedded so we need a SKIP SGE. 2040 */ 2041 sgl = lpfc_ncmd->dma_sgl; 2042 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 2043 bf_set(lpfc_sli4_sge_last, sgl, 0); 2044 sgl->word2 = cpu_to_le32(sgl->word2); 2045 /* Fill in word 3 / sgl_len during cmd submission */ 2046 2047 /* Initialize 64 bytes only */ 2048 memset(wqe, 0, sizeof(union lpfc_wqe)); 2049 2050 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 2051 atomic_inc(&ndlp->cmd_pending); 2052 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 2053 } 2054 2055 } else { 2056 qp = &phba->sli4_hba.hdwq[idx]; 2057 qp->empty_io_bufs++; 2058 } 2059 2060 return lpfc_ncmd; 2061} 2062 2063/** 2064 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. 2065 * @phba: The Hba for which this call is being executed. 2066 * @lpfc_ncmd: The nvme buffer which is being released. 2067 * 2068 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba 2069 * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer 2070 * and cannot be reused for at least RA_TOV amount of time if it was 2071 * aborted. 2072 **/ 2073static void 2074lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) 2075{ 2076 struct lpfc_sli4_hdw_queue *qp; 2077 unsigned long iflag = 0; 2078 2079 if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) 2080 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); 2081 2082 lpfc_ncmd->ndlp = NULL; 2083 lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 2084 2085 qp = lpfc_ncmd->hdwq; 2086 if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { 2087 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2088 "6310 XB release deferred for " 2089 "ox_id x%x on reqtag x%x\n", 2090 lpfc_ncmd->cur_iocbq.sli4_xritag, 2091 lpfc_ncmd->cur_iocbq.iotag); 2092 2093 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); 2094 list_add_tail(&lpfc_ncmd->list, 2095 &qp->lpfc_abts_io_buf_list); 2096 qp->abts_nvme_io_bufs++; 2097 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); 2098 } else 2099 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); 2100} 2101 2102/** 2103 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. 2104 * @vport: the lpfc_vport instance requesting a localport. 2105 * 2106 * This routine is invoked to create an nvme localport instance to bind 2107 * to the nvme_fc_transport. It is called once during driver load 2108 * like lpfc_create_shost after all other services are initialized. 2109 * It requires a vport, vpi, and wwns at call time. Other localport 2110 * parameters are modified as the driver's FCID and the Fabric WWN 2111 * are established. 2112 * 2113 * Return codes 2114 * 0 - successful 2115 * -ENOMEM - no heap memory available 2116 * other values - from nvme registration upcall 2117 **/ 2118int 2119lpfc_nvme_create_localport(struct lpfc_vport *vport) 2120{ 2121 int ret = 0; 2122 struct lpfc_hba *phba = vport->phba; 2123 struct nvme_fc_port_info nfcp_info; 2124 struct nvme_fc_local_port *localport; 2125 struct lpfc_nvme_lport *lport; 2126 2127 /* Initialize this localport instance. The vport wwn usage ensures 2128 * that NPIV is accounted for. 2129 */ 2130 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); 2131 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; 2132 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 2133 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 2134 2135 /* We need to tell the transport layer + 1 because it takes page 2136 * alignment into account. When space for the SGL is allocated we 2137 * allocate + 3, one for cmd, one for rsp and one for this alignment 2138 */ 2139 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 2140 2141 /* Advertise how many hw queues we support based on cfg_hdw_queue, 2142 * which will not exceed cpu count. 2143 */ 2144 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; 2145 2146 if (!IS_ENABLED(CONFIG_NVME_FC)) 2147 return ret; 2148 2149 /* localport is allocated from the stack, but the registration 2150 * call allocates heap memory as well as the private area. 2151 */ 2152 2153 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2154 &vport->phba->pcidev->dev, &localport); 2155 if (!ret) { 2156 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, 2157 "6005 Successfully registered local " 2158 "NVME port num %d, localP x%px, private " 2159 "x%px, sg_seg %d\n", 2160 localport->port_num, localport, 2161 localport->private, 2162 lpfc_nvme_template.max_sgl_segments); 2163 2164 /* Private is our lport size declared in the template. */ 2165 lport = (struct lpfc_nvme_lport *)localport->private; 2166 vport->localport = localport; 2167 lport->vport = vport; 2168 vport->nvmei_support = 1; 2169 2170 atomic_set(&lport->xmt_fcp_noxri, 0); 2171 atomic_set(&lport->xmt_fcp_bad_ndlp, 0); 2172 atomic_set(&lport->xmt_fcp_qdepth, 0); 2173 atomic_set(&lport->xmt_fcp_err, 0); 2174 atomic_set(&lport->xmt_fcp_wqerr, 0); 2175 atomic_set(&lport->xmt_fcp_abort, 0); 2176 atomic_set(&lport->xmt_ls_abort, 0); 2177 atomic_set(&lport->xmt_ls_err, 0); 2178 atomic_set(&lport->cmpl_fcp_xb, 0); 2179 atomic_set(&lport->cmpl_fcp_err, 0); 2180 atomic_set(&lport->cmpl_ls_xb, 0); 2181 atomic_set(&lport->cmpl_ls_err, 0); 2182 2183 atomic_set(&lport->fc4NvmeLsRequests, 0); 2184 atomic_set(&lport->fc4NvmeLsCmpls, 0); 2185 } 2186 2187 return ret; 2188} 2189 2190#if (IS_ENABLED(CONFIG_NVME_FC)) 2191/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. 2192 * 2193 * The driver has to wait for the host nvme transport to callback 2194 * indicating the localport has successfully unregistered all 2195 * resources. Since this is an uninterruptible wait, loop every ten 2196 * seconds and print a message indicating no progress. 2197 * 2198 * An uninterruptible wait is used because of the risk of transport-to- 2199 * driver state mismatch. 2200 */ 2201static void 2202lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, 2203 struct lpfc_nvme_lport *lport, 2204 struct completion *lport_unreg_cmp) 2205{ 2206 u32 wait_tmo; 2207 int ret, i, pending = 0; 2208 struct lpfc_sli_ring *pring; 2209 struct lpfc_hba *phba = vport->phba; 2210 struct lpfc_sli4_hdw_queue *qp; 2211 int abts_scsi, abts_nvme; 2212 u16 nvmels_cnt; 2213 2214 /* Host transport has to clean up and confirm requiring an indefinite 2215 * wait. Print a message if a 10 second wait expires and renew the 2216 * wait. This is unexpected. 2217 */ 2218 wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO); 2219 while (true) { 2220 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); 2221 if (unlikely(!ret)) { 2222 pending = 0; 2223 abts_scsi = 0; 2224 abts_nvme = 0; 2225 nvmels_cnt = 0; 2226 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2227 qp = &phba->sli4_hba.hdwq[i]; 2228 if (!vport->localport || !qp || !qp->io_wq) 2229 return; 2230 2231 pring = qp->io_wq->pring; 2232 if (!pring) 2233 continue; 2234 pending += pring->txcmplq_cnt; 2235 abts_scsi += qp->abts_scsi_io_bufs; 2236 abts_nvme += qp->abts_nvme_io_bufs; 2237 } 2238 if (phba->sli4_hba.nvmels_wq) { 2239 pring = phba->sli4_hba.nvmels_wq->pring; 2240 if (pring) 2241 nvmels_cnt = pring->txcmplq_cnt; 2242 } 2243 if (!vport->localport || 2244 test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || 2245 phba->link_state == LPFC_HBA_ERROR || 2246 test_bit(FC_UNLOADING, &vport->load_flag)) 2247 return; 2248 2249 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2250 "6176 Lport x%px Localport x%px wait " 2251 "timed out. Pending %d [%d:%d:%d]. " 2252 "Renewing.\n", 2253 lport, vport->localport, pending, 2254 abts_scsi, abts_nvme, nvmels_cnt); 2255 continue; 2256 } 2257 break; 2258 } 2259 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 2260 "6177 Lport x%px Localport x%px Complete Success\n", 2261 lport, vport->localport); 2262} 2263#endif 2264 2265/** 2266 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. 2267 * @vport: pointer to a host virtual N_Port data structure 2268 * 2269 * This routine is invoked to destroy all lports bound to the phba. 2270 * The lport memory was allocated by the nvme fc transport and is 2271 * released there. This routine ensures all rports bound to the 2272 * lport have been disconnected. 2273 * 2274 **/ 2275void 2276lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2277{ 2278#if (IS_ENABLED(CONFIG_NVME_FC)) 2279 struct nvme_fc_local_port *localport; 2280 struct lpfc_nvme_lport *lport; 2281 int ret; 2282 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); 2283 2284 if (vport->nvmei_support == 0) 2285 return; 2286 2287 localport = vport->localport; 2288 if (!localport) 2289 return; 2290 lport = (struct lpfc_nvme_lport *)localport->private; 2291 2292 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2293 "6011 Destroying NVME localport x%px\n", 2294 localport); 2295 2296 /* lport's rport list is clear. Unregister 2297 * lport and release resources. 2298 */ 2299 lport->lport_unreg_cmp = &lport_unreg_cmp; 2300 ret = nvme_fc_unregister_localport(localport); 2301 2302 /* Wait for completion. This either blocks 2303 * indefinitely or succeeds 2304 */ 2305 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); 2306 vport->localport = NULL; 2307 2308 /* Regardless of the unregister upcall response, clear 2309 * nvmei_support. All rports are unregistered and the 2310 * driver will clean up. 2311 */ 2312 vport->nvmei_support = 0; 2313 if (ret == 0) { 2314 lpfc_printf_vlog(vport, 2315 KERN_INFO, LOG_NVME_DISC, 2316 "6009 Unregistered lport Success\n"); 2317 } else { 2318 lpfc_printf_vlog(vport, 2319 KERN_INFO, LOG_NVME_DISC, 2320 "6010 Unregistered lport " 2321 "Failed, status x%x\n", 2322 ret); 2323 } 2324#endif 2325} 2326 2327void 2328lpfc_nvme_update_localport(struct lpfc_vport *vport) 2329{ 2330#if (IS_ENABLED(CONFIG_NVME_FC)) 2331 struct nvme_fc_local_port *localport; 2332 struct lpfc_nvme_lport *lport; 2333 2334 localport = vport->localport; 2335 if (!localport) { 2336 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2337 "6710 Update NVME fail. No localport\n"); 2338 return; 2339 } 2340 lport = (struct lpfc_nvme_lport *)localport->private; 2341 if (!lport) { 2342 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2343 "6171 Update NVME fail. localP x%px, No lport\n", 2344 localport); 2345 return; 2346 } 2347 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2348 "6012 Update NVME lport x%px did x%x\n", 2349 localport, vport->fc_myDID); 2350 2351 localport->port_id = vport->fc_myDID; 2352 if (localport->port_id == 0) 2353 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; 2354 else 2355 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; 2356 2357 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2358 "6030 bound lport x%px to DID x%06x\n", 2359 lport, localport->port_id); 2360#endif 2361} 2362 2363int 2364lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2365{ 2366#if (IS_ENABLED(CONFIG_NVME_FC)) 2367 int ret = 0; 2368 struct nvme_fc_local_port *localport; 2369 struct lpfc_nvme_lport *lport; 2370 struct lpfc_nvme_rport *rport; 2371 struct lpfc_nvme_rport *oldrport; 2372 struct nvme_fc_remote_port *remote_port; 2373 struct nvme_fc_port_info rpinfo; 2374 struct lpfc_nodelist *prev_ndlp = NULL; 2375 struct fc_rport *srport = ndlp->rport; 2376 2377 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, 2378 "6006 Register NVME PORT. DID x%06x nlptype x%x\n", 2379 ndlp->nlp_DID, ndlp->nlp_type); 2380 2381 localport = vport->localport; 2382 if (!localport) 2383 return 0; 2384 2385 lport = (struct lpfc_nvme_lport *)localport->private; 2386 2387 /* NVME rports are not preserved across devloss. 2388 * Just register this instance. Note, rpinfo->dev_loss_tmo 2389 * is left 0 to indicate accept transport defaults. The 2390 * driver communicates port role capabilities consistent 2391 * with the PRLI response data. 2392 */ 2393 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); 2394 rpinfo.port_id = ndlp->nlp_DID; 2395 if (ndlp->nlp_type & NLP_NVME_TARGET) 2396 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; 2397 if (ndlp->nlp_type & NLP_NVME_INITIATOR) 2398 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; 2399 2400 if (ndlp->nlp_type & NLP_NVME_DISCOVERY) 2401 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; 2402 2403 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2404 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2405 if (srport) 2406 rpinfo.dev_loss_tmo = srport->dev_loss_tmo; 2407 else 2408 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; 2409 2410 spin_lock_irq(&ndlp->lock); 2411 2412 /* If an oldrport exists, so does the ndlp reference. If not 2413 * a new reference is needed because either the node has never 2414 * been registered or it's been unregistered and getting deleted. 2415 */ 2416 oldrport = lpfc_ndlp_get_nrport(ndlp); 2417 if (oldrport) { 2418 prev_ndlp = oldrport->ndlp; 2419 spin_unlock_irq(&ndlp->lock); 2420 } else { 2421 spin_unlock_irq(&ndlp->lock); 2422 if (!lpfc_nlp_get(ndlp)) { 2423 dev_warn(&vport->phba->pcidev->dev, 2424 "Warning - No node ref - exit register\n"); 2425 return 0; 2426 } 2427 } 2428 2429 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); 2430 if (!ret) { 2431 /* If the ndlp already has an nrport, this is just 2432 * a resume of the existing rport. Else this is a 2433 * new rport. 2434 */ 2435 /* Guard against an unregister/reregister 2436 * race that leaves the WAIT flag set. 2437 */ 2438 spin_lock_irq(&ndlp->lock); 2439 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; 2440 ndlp->fc4_xpt_flags |= NVME_XPT_REGD; 2441 spin_unlock_irq(&ndlp->lock); 2442 rport = remote_port->private; 2443 if (oldrport) { 2444 2445 /* Sever the ndlp<->rport association 2446 * before dropping the ndlp ref from 2447 * register. 2448 */ 2449 spin_lock_irq(&ndlp->lock); 2450 ndlp->nrport = NULL; 2451 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; 2452 spin_unlock_irq(&ndlp->lock); 2453 rport->ndlp = NULL; 2454 rport->remoteport = NULL; 2455 2456 /* Reference only removed if previous NDLP is no longer 2457 * active. It might be just a swap and removing the 2458 * reference would cause a premature cleanup. 2459 */ 2460 if (prev_ndlp && prev_ndlp != ndlp) { 2461 if (!prev_ndlp->nrport) 2462 lpfc_nlp_put(prev_ndlp); 2463 } 2464 } 2465 2466 /* Clean bind the rport to the ndlp. */ 2467 rport->remoteport = remote_port; 2468 rport->lport = lport; 2469 rport->ndlp = ndlp; 2470 spin_lock_irq(&ndlp->lock); 2471 ndlp->nrport = rport; 2472 spin_unlock_irq(&ndlp->lock); 2473 lpfc_printf_vlog(vport, KERN_INFO, 2474 LOG_NVME_DISC | LOG_NODE, 2475 "6022 Bind lport x%px to remoteport x%px " 2476 "rport x%px WWNN 0x%llx, " 2477 "Rport WWPN 0x%llx DID " 2478 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", 2479 lport, remote_port, rport, 2480 rpinfo.node_name, rpinfo.port_name, 2481 rpinfo.port_id, rpinfo.port_role, 2482 ndlp, prev_ndlp); 2483 } else { 2484 lpfc_printf_vlog(vport, KERN_ERR, 2485 LOG_TRACE_EVENT, 2486 "6031 RemotePort Registration failed " 2487 "err: %d, DID x%06x ref %u\n", 2488 ret, ndlp->nlp_DID, kref_read(&ndlp->kref)); 2489 2490 /* Only release reference if one was taken for this request */ 2491 if (!oldrport) 2492 lpfc_nlp_put(ndlp); 2493 } 2494 2495 return ret; 2496#else 2497 return 0; 2498#endif 2499} 2500 2501/* 2502 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport 2503 * 2504 * If the ndlp represents an NVME Target, that we are logged into, 2505 * ping the NVME FC Transport layer to initiate a device rescan 2506 * on this remote NPort. 2507 */ 2508void 2509lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2510{ 2511#if (IS_ENABLED(CONFIG_NVME_FC)) 2512 struct lpfc_nvme_rport *nrport; 2513 struct nvme_fc_remote_port *remoteport = NULL; 2514 2515 spin_lock_irq(&ndlp->lock); 2516 nrport = lpfc_ndlp_get_nrport(ndlp); 2517 if (nrport) 2518 remoteport = nrport->remoteport; 2519 spin_unlock_irq(&ndlp->lock); 2520 2521 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2522 "6170 Rescan NPort DID x%06x type x%x " 2523 "state x%x nrport x%px remoteport x%px\n", 2524 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, 2525 nrport, remoteport); 2526 2527 if (!nrport || !remoteport) 2528 goto rescan_exit; 2529 2530 /* Rescan an NVME target in MAPPED state with DISCOVERY role set */ 2531 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && 2532 ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 2533 nvme_fc_rescan_remoteport(remoteport); 2534 2535 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2536 "6172 NVME rescanned DID x%06x " 2537 "port_state x%x\n", 2538 ndlp->nlp_DID, remoteport->port_state); 2539 } 2540 return; 2541 rescan_exit: 2542 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2543 "6169 Skip NVME Rport Rescan, NVME remoteport " 2544 "unregistered\n"); 2545#endif 2546} 2547 2548/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. 2549 * 2550 * There is no notion of Devloss or rport recovery from the current 2551 * nvme_transport perspective. Loss of an rport just means IO cannot 2552 * be sent and recovery is completely up to the initator. 2553 * For now, the driver just unbinds the DID and port_role so that 2554 * no further IO can be issued. 2555 */ 2556void 2557lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2558{ 2559#if (IS_ENABLED(CONFIG_NVME_FC)) 2560 int ret; 2561 struct nvme_fc_local_port *localport; 2562 struct lpfc_nvme_lport *lport; 2563 struct lpfc_nvme_rport *rport; 2564 struct nvme_fc_remote_port *remoteport = NULL; 2565 2566 localport = vport->localport; 2567 2568 /* This is fundamental error. The localport is always 2569 * available until driver unload. Just exit. 2570 */ 2571 if (!localport) 2572 return; 2573 2574 lport = (struct lpfc_nvme_lport *)localport->private; 2575 if (!lport) 2576 goto input_err; 2577 2578 spin_lock_irq(&ndlp->lock); 2579 rport = lpfc_ndlp_get_nrport(ndlp); 2580 if (rport) 2581 remoteport = rport->remoteport; 2582 spin_unlock_irq(&ndlp->lock); 2583 if (!remoteport) 2584 goto input_err; 2585 2586 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2587 "6033 Unreg nvme remoteport x%px, portname x%llx, " 2588 "port_id x%06x, portstate x%x port type x%x " 2589 "refcnt %d\n", 2590 remoteport, remoteport->port_name, 2591 remoteport->port_id, remoteport->port_state, 2592 ndlp->nlp_type, kref_read(&ndlp->kref)); 2593 2594 /* Sanity check ndlp type. Only call for NVME ports. Don't 2595 * clear any rport state until the transport calls back. 2596 */ 2597 2598 if ((ndlp->nlp_type & NLP_NVME_TARGET) || 2599 (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) { 2600 /* No concern about the role change on the nvme remoteport. 2601 * The transport will update it. 2602 */ 2603 spin_lock_irq(&ndlp->lock); 2604 ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; 2605 spin_unlock_irq(&ndlp->lock); 2606 2607 /* Don't let the host nvme transport keep sending keep-alives 2608 * on this remoteport. Vport is unloading, no recovery. The 2609 * return values is ignored. The upcall is a courtesy to the 2610 * transport. 2611 */ 2612 if (test_bit(FC_UNLOADING, &vport->load_flag) || 2613 unlikely(vport->phba->link_state == LPFC_HBA_ERROR)) 2614 (void)nvme_fc_set_remoteport_devloss(remoteport, 0); 2615 2616 ret = nvme_fc_unregister_remoteport(remoteport); 2617 2618 /* The driver no longer knows if the nrport memory is valid. 2619 * because the controller teardown process has begun and 2620 * is asynchronous. Break the binding in the ndlp. Also 2621 * remove the register ndlp reference to setup node release. 2622 */ 2623 ndlp->nrport = NULL; 2624 lpfc_nlp_put(ndlp); 2625 if (ret != 0) { 2626 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2627 "6167 NVME unregister failed %d " 2628 "port_state x%x\n", 2629 ret, remoteport->port_state); 2630 2631 if (test_bit(FC_UNLOADING, &vport->load_flag)) { 2632 /* Only 1 thread can drop the initial node 2633 * reference. Check if another thread has set 2634 * NLP_DROPPED. 2635 */ 2636 if (!test_and_set_bit(NLP_DROPPED, 2637 &ndlp->nlp_flag)) { 2638 lpfc_nlp_put(ndlp); 2639 return; 2640 } 2641 } 2642 } 2643 } 2644 return; 2645 2646 input_err: 2647#endif 2648 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2649 "6168 State error: lport x%px, rport x%px FCID x%06x\n", 2650 vport->localport, ndlp->rport, ndlp->nlp_DID); 2651} 2652 2653/** 2654 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort 2655 * @phba: pointer to lpfc hba data structure. 2656 * @lpfc_ncmd: The nvme job structure for the request being aborted. 2657 * 2658 * This routine is invoked by the worker thread to process a SLI4 fast-path 2659 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2660 * here. 2661 **/ 2662void 2663lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, 2664 struct lpfc_io_buf *lpfc_ncmd) 2665{ 2666 struct nvmefc_fcp_req *nvme_cmd = NULL; 2667 2668 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2669 "6533 %s nvme_cmd %p tag x%x abort complete and " 2670 "xri released\n", __func__, 2671 lpfc_ncmd->nvmeCmd, 2672 lpfc_ncmd->cur_iocbq.iotag); 2673 2674 /* Aborted NVME commands are required to not complete 2675 * before the abort exchange command fully completes. 2676 * Once completed, it is available via the put list. 2677 */ 2678 if (lpfc_ncmd->nvmeCmd) { 2679 nvme_cmd = lpfc_ncmd->nvmeCmd; 2680 nvme_cmd->transferred_length = 0; 2681 nvme_cmd->rcv_rsplen = 0; 2682 nvme_cmd->status = NVME_SC_INTERNAL; 2683 nvme_cmd->done(nvme_cmd); 2684 lpfc_ncmd->nvmeCmd = NULL; 2685 } 2686 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2687} 2688 2689/** 2690 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort 2691 * @phba: pointer to lpfc hba data structure. 2692 * @axri: pointer to the fcp xri abort wcqe structure. 2693 * @lpfc_ncmd: The nvme job structure for the request being aborted. 2694 * 2695 * This routine is invoked by the worker thread to process a SLI4 fast-path 2696 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2697 * here. 2698 **/ 2699void 2700lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 2701 struct sli4_wcqe_xri_aborted *axri, 2702 struct lpfc_io_buf *lpfc_ncmd) 2703{ 2704 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 2705 struct nvmefc_fcp_req *nvme_cmd = NULL; 2706 struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; 2707 2708 2709 if (ndlp) 2710 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 2711 2712 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2713 "6311 nvme_cmd %p xri x%x tag x%x abort complete and " 2714 "xri released\n", 2715 lpfc_ncmd->nvmeCmd, xri, 2716 lpfc_ncmd->cur_iocbq.iotag); 2717 2718 /* Aborted NVME commands are required to not complete 2719 * before the abort exchange command fully completes. 2720 * Once completed, it is available via the put list. 2721 */ 2722 if (lpfc_ncmd->nvmeCmd) { 2723 nvme_cmd = lpfc_ncmd->nvmeCmd; 2724 nvme_cmd->done(nvme_cmd); 2725 lpfc_ncmd->nvmeCmd = NULL; 2726 } 2727 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2728} 2729 2730/** 2731 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete 2732 * @phba: Pointer to HBA context object. 2733 * 2734 * This function flushes all wqes in the nvme rings and frees all resources 2735 * in the txcmplq. This function does not issue abort wqes for the IO 2736 * commands in txcmplq, they will just be returned with 2737 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 2738 * slot has been permanently disabled. 2739 **/ 2740void 2741lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) 2742{ 2743 struct lpfc_sli_ring *pring; 2744 u32 i, wait_cnt = 0; 2745 2746 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) 2747 return; 2748 2749 /* Cycle through all IO rings and make sure all outstanding 2750 * WQEs have been removed from the txcmplqs. 2751 */ 2752 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2753 if (!phba->sli4_hba.hdwq[i].io_wq) 2754 continue; 2755 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 2756 2757 if (!pring) 2758 continue; 2759 2760 /* Retrieve everything on the txcmplq */ 2761 while (!list_empty(&pring->txcmplq)) { 2762 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 2763 wait_cnt++; 2764 2765 /* The sleep is 10mS. Every ten seconds, 2766 * dump a message. Something is wrong. 2767 */ 2768 if ((wait_cnt % 1000) == 0) { 2769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2770 "6178 NVME IO not empty, " 2771 "cnt %d\n", wait_cnt); 2772 } 2773 } 2774 } 2775 2776 /* Make sure HBA is alive */ 2777 lpfc_issue_hb_tmo(phba); 2778 2779} 2780 2781void 2782lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 2783 uint32_t stat, uint32_t param) 2784{ 2785#if (IS_ENABLED(CONFIG_NVME_FC)) 2786 struct lpfc_io_buf *lpfc_ncmd; 2787 struct nvmefc_fcp_req *nCmd; 2788 struct lpfc_wcqe_complete wcqe; 2789 struct lpfc_wcqe_complete *wcqep = &wcqe; 2790 2791 lpfc_ncmd = pwqeIn->io_buf; 2792 if (!lpfc_ncmd) { 2793 lpfc_sli_release_iocbq(phba, pwqeIn); 2794 return; 2795 } 2796 /* For abort iocb just return, IO iocb will do a done call */ 2797 if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == 2798 CMD_ABORT_XRI_CX) { 2799 lpfc_sli_release_iocbq(phba, pwqeIn); 2800 return; 2801 } 2802 2803 spin_lock(&lpfc_ncmd->buf_lock); 2804 nCmd = lpfc_ncmd->nvmeCmd; 2805 if (!nCmd) { 2806 spin_unlock(&lpfc_ncmd->buf_lock); 2807 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2808 return; 2809 } 2810 spin_unlock(&lpfc_ncmd->buf_lock); 2811 2812 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2813 "6194 NVME Cancel xri %x\n", 2814 lpfc_ncmd->cur_iocbq.sli4_xritag); 2815 2816 wcqep->word0 = 0; 2817 bf_set(lpfc_wcqe_c_status, wcqep, stat); 2818 wcqep->parameter = param; 2819 wcqep->total_data_placed = 0; 2820 wcqep->word3 = 0; /* xb is 0 */ 2821 2822 /* Call release with XB=1 to queue the IO into the abort list. */ 2823 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 2824 bf_set(lpfc_wcqe_c_xb, wcqep, 1); 2825 2826 memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep)); 2827 (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); 2828#endif 2829} 2830 2831/** 2832 * lpfc_nvme_flush_abts_list - Clean up nvme commands from the abts list 2833 * @phba: Pointer to HBA context object. 2834 * 2835 **/ 2836void 2837lpfc_nvme_flush_abts_list(struct lpfc_hba *phba) 2838{ 2839#if (IS_ENABLED(CONFIG_NVME_FC)) 2840 struct lpfc_io_buf *psb, *psb_next; 2841 struct lpfc_sli4_hdw_queue *qp; 2842 LIST_HEAD(aborts); 2843 int i; 2844 2845 /* abts_xxxx_buf_list_lock required because worker thread uses this 2846 * list. 2847 */ 2848 spin_lock_irq(&phba->hbalock); 2849 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2850 qp = &phba->sli4_hba.hdwq[i]; 2851 2852 spin_lock(&qp->abts_io_buf_list_lock); 2853 list_for_each_entry_safe(psb, psb_next, 2854 &qp->lpfc_abts_io_buf_list, list) { 2855 if (!(psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)) 2856 continue; 2857 list_move(&psb->list, &aborts); 2858 qp->abts_nvme_io_bufs--; 2859 } 2860 spin_unlock(&qp->abts_io_buf_list_lock); 2861 } 2862 spin_unlock_irq(&phba->hbalock); 2863 2864 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 2865 list_del_init(&psb->list); 2866 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2867 "6195 %s: lpfc_ncmd x%px flags x%x " 2868 "cmd_flag x%x xri x%x\n", __func__, 2869 psb, psb->flags, 2870 psb->cur_iocbq.cmd_flag, 2871 psb->cur_iocbq.sli4_xritag); 2872 psb->flags &= ~LPFC_SBUF_XBUSY; 2873 psb->status = IOSTAT_SUCCESS; 2874 lpfc_sli4_nvme_pci_offline_aborted(phba, psb); 2875 } 2876#endif 2877} 2878 2879/** 2880 * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port 2881 * @phba: Pointer to HBA context object. 2882 * 2883 **/ 2884void 2885lpfc_nvmels_flush_cmd(struct lpfc_hba *phba) 2886{ 2887#if (IS_ENABLED(CONFIG_NVME_FC)) 2888 LIST_HEAD(cancel_list); 2889 struct lpfc_sli_ring *pring = NULL; 2890 struct lpfc_iocbq *piocb, *tmp_iocb; 2891 unsigned long iflags; 2892 2893 if (phba->sli4_hba.nvmels_wq) 2894 pring = phba->sli4_hba.nvmels_wq->pring; 2895 2896 if (unlikely(!pring)) 2897 return; 2898 2899 spin_lock_irqsave(&phba->hbalock, iflags); 2900 spin_lock(&pring->ring_lock); 2901 list_splice_init(&pring->txq, &cancel_list); 2902 pring->txq_cnt = 0; 2903 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 2904 if (piocb->cmd_flag & LPFC_IO_NVME_LS) { 2905 list_move_tail(&piocb->list, &cancel_list); 2906 pring->txcmplq_cnt--; 2907 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 2908 } 2909 } 2910 spin_unlock(&pring->ring_lock); 2911 spin_unlock_irqrestore(&phba->hbalock, iflags); 2912 2913 if (!list_empty(&cancel_list)) 2914 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, 2915 IOERR_SLI_DOWN); 2916#endif 2917}