/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
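
/*
 * Usage sketch (illustrative only): per-IO prep routines are expected to
 * start from one of the prebuilt template images above and then fill in
 * just the "variable" words called out in the comments. The snippet below
 * is a hypothetical outline of that pattern, not the driver's actual prep
 * routine:
 *
 *	union lpfc_wqe128 *wqe = &nvmewqe->wqe;
 *
 *	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, rpi);    (Word 6)
 *	bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, xri);     (Word 6)
 *	bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, iotag);    (Word 9)
 *	bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, oxid);    (Word 9)
 */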

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
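/*
 * Lookup helpers: scan the target-mode active exchange list under
 * t_active_list_lock and return the context whose XRI, or OXID/SID
 * pair, matches the request; NULL if nothing on the list matches.
 */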
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}

static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif
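/**
 * lpfc_nvmet_defer_release - Move an exchange context to the ABTS list
 * @phba: Pointer to HBA context object.
 * @ctxp: context to defer the release of.
 *
 * Called with ctxp->ctxlock held. Sets LPFC_NVME_CTX_RLS (only once per
 * exchange) and moves the context from the target active list to
 * lpfc_abts_nvmet_ctx_list so that the final release is performed by
 * the abort path rather than the normal release path.
 **/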
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. The function frees memory resources used for the command
 * used to send the NVME LS RSP.
 **/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_iocbq *rspwqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->ndlp);
	cmdwqe->context_un.axchg = NULL;
	cmdwqe->bpl_dmabuf = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
	kfree(axchg);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function updates any states and statistics, then calls the
 * generic completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_iocbq *rspwqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: ctx buffer context
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes ctxp->ctxlock and the SLI4 wait/context list locks
 * internally. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
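	/*
	 * Each segment below is computed as (timestamp - ts_isr_cmd) minus
	 * the running sum of all prior segments (segsum), i.e. the delta
	 * between consecutive timestamps, bailing out if a delta would go
	 * negative.
	 */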
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_iocbq *rspwqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, logerr;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context_un.axchg;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVME_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVME_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVME_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}

/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
 *         an NVME LS rsp for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The response is for a prior NVME LS request that was
 * received and posted to the transport.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
int
__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		       struct nvmefc_ls_rsp *ls_rsp,
		       void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
					      struct lpfc_iocbq *cmdwqe,
					      struct lpfc_iocbq *rspwqe))
{
	struct lpfc_hba *phba = axchg->phba;
	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);

	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6412 NVMEx LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
		return -EALREADY;
	}
	axchg->state = LPFC_NVME_STE_LS_RSP;
	axchg->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
					  ls_rsp->rsplen);
	if (nvmewqeq == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
				axchg->oxid);
		rc = -ENOMEM;
		goto out_free_buf;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->num_bdes = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->bpl_dmabuf = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = ls_rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */

	nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
	nvmewqeq->context_un.axchg = axchg;

	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

	/* clear to be sure there's no reference */
	nvmewqeq->bpl_dmabuf = NULL;

	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
			axchg->oxid, rc);

	rc = -ENXIO;

	lpfc_nlp_put(nvmewqeq->ndlp);

out_free_buf:
	/* Give back resources */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/*
	 * As transport doesn't track completions of responses, if the rsp
	 * fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
	return rc;
}

/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that the NVME LS is to be transmit from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 *
 * Driver registers this routine to transmit responses for received NVME
 * LS requests.
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The ls_rsp is used to reverse-map the LS to the original
 * NVME LS request sequence, which provides addressing information for
 * the remote port the LS is to be sent to, as well as the exchange id
 * that the LS is bound to.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	int rc;

	if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
		return -ENODEV;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);

	if (rc) {
		atomic_inc(&nvmep->xmt_ls_drop);
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&nvmep->xmt_ls_abort);
		return rc;
	}

	atomic_inc(&nvmep->xmt_ls_rsp);
	return 0;
}

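/**
 * lpfc_nvmet_xmt_fcp_op - Transmit an FCP data or response operation
 * @tgtport: pointer to target port that the IO is to be issued from.
 * @rsp: the transport FCP request describing the operation.
 *
 * Driver registers this routine with the transport to carry out FCP
 * read-data, write-XFER_RDY, and response operations for a received
 * command. A WQE is prepared from the exchange context and issued on
 * the hardware queue chosen by the transport; on a full WQ the WQE is
 * parked on the wqfull list and reissued from the WQE-release CQE path.
 *
 * Returns 0 on success or deferral, a negative -Exxx code on failure.
 **/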
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->context_un.axchg = ctxp;
	nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVME_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context_un.axchg = NULL;
	nvmewqeq->bpl_dmabuf = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

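/**
 * lpfc_nvmet_targetport_delete - Transport callback after targetport removal
 * @targetport: the targetport being deleted.
 *
 * Completes tport_unreg_cmp so a thread waiting in the unregister path
 * can proceed.
 **/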
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

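/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding FCP operation
 * @tgtport: pointer to target port that the IO was received on.
 * @req: the transport FCP request to be aborted.
 *
 * Marks the exchange with LPFC_NVME_ABORT_OP, unless an abort is
 * already in flight, then issues either an unsolicited or a solicited
 * abort WQE depending on whether the command has started processing.
 **/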
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

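/**
 * lpfc_nvmet_xmt_fcp_release - Transport is finished with an FCP request
 * @tgtport: pointer to target port that the IO was received on.
 * @rsp: the transport FCP request being released.
 *
 * Recycles the exchange context back to its context list, unless an
 * abort or XRI-busy condition is still outstanding, in which case the
 * release is deferred to the abort path via lpfc_nvmet_defer_release().
 **/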
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVME_STE_DONE &&
		 ctxp->state != LPFC_NVME_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVME_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

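/**
 * lpfc_nvmet_defer_rcv - Transport has consumed a deferred FCP command
 * @tgtport: pointer to target port that the command was received on.
 * @rsp: the transport FCP request whose receive was deferred.
 *
 * Called once the transport has picked up a command it previously
 * deferred. The RQ buffer held across the deferral is freed here, since
 * a replacement buffer was already reposted to the RQ.
 **/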
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	nvmebuf = ctxp->rqb_buffer;
	if (!nvmebuf) {
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
}

/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to driver command WQE object.
 * @rspwqe: Pointer to driver response WQE object.
 *
 * This function is the completion handler for NVME LS requests.
 * The function updates any states and statistics, then calls the
 * generic completion handler to finish completion of the request.
 **/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		      struct lpfc_iocbq *rspwqe)
{
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ls_req - Issue a Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
		  void *hosthandle,
		  struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;
	u32 hstate;

	if (!lpfc_nvmet)
		return -EINVAL;

	phba = lpfc_nvmet->phba;
	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return -EINVAL;

	hstate = atomic_read(&lpfc_nvmet->state);
	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
		return -EACCES;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
				 lpfc_nvmet_ls_req_cmp);

	return ret;
}

/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: Transport targetport, that LS was issued from.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
		    void *hosthandle,
		    struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;

	phba = lpfc_nvmet->phba;
	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}

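/**
 * lpfc_nvmet_host_traddr - Return the WWNN/WWPN bound to a hosthandle
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv
 *              (the ndlp pointer for the remote port).
 * @wwnn: output node name.
 * @wwpn: output port name.
 **/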
static int
lpfc_nvmet_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
	struct lpfc_nodelist *ndlp = hosthandle;

	*wwnn = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	*wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	return 0;
}

static void
lpfc_nvmet_host_release(void *hosthandle)
{
	struct lpfc_nodelist *ndlp = hosthandle;
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_nvmet_tgtport *tgtp;

	if (!phba->targetport || !phba->targetport->private)
		return;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6202 NVMET XPT releasing hosthandle x%px "
			"DID x%x xflags x%x refcnt %d\n",
			hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
			kref_read(&ndlp->kref));
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irq(&ndlp->lock);
	ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);
	lpfc_nlp_put(ndlp);
	atomic_set(&tgtp->state, 0);
}

static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv = lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,
	.ls_req = lpfc_nvmet_ls_req,
	.ls_abort = lpfc_nvmet_ls_abort,
	.host_release = lpfc_nvmet_host_release,
	.host_traddr = lpfc_nvmet_host_traddr,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
	.lsrqst_priv_sz = 0,
};
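
/*
 * lpfc_tgttemplate is handed to nvmet_fc_register_targetport() in
 * lpfc_nvmet_create_targetport() below; max_hw_queues and
 * max_sgl_segments are overwritten there from the HBA configuration
 * before registration.
 */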

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		spin_lock(&phba->hbalock);
		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		spin_unlock(&phba->hbalock);

		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
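	/*
	 * A sketch of the addressing this implies, assuming the
	 * lpfc_get_ctx_list() helper (lpfc.h) indexes the flat
	 * nvmet_ctx_info array allocated above:
	 *
	 *	infop = phba->sli4_hba.nvmet_ctx_info +
	 *		(cpu * phba->cfg_nvmet_mrq) + mrq;
	 *
	 * i.e. each cpu/mrq pair owns exactly one lpfc_nvmet_ctx_info slot.
	 */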
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVME_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(*wqe));

		ctx_buf->iocbq->cmd_dmabuf = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = lpfc_next_present_cpu(cpu);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}

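/**
 * lpfc_nvmet_create_targetport - Register this port as an NVME target
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the per-CPU/MRQ receive context lists, fills in the port
 * and template parameters, and registers the port with the nvmet-fc
 * transport. On failure the context lists are torn down and target
 * mode support is disabled.
 *
 * Returns 0 on success, or the setup/registration error code.
 **/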
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
				       iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
			spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
			list_del_init(&ctxp->list);
			spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVME_XBUSY;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		spin_lock_irqsave(&phba->rrq_list_lock, iflag);
		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1855 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1856 ctxp->state = LPFC_NVME_STE_ABORT;
1857 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1858
1859 lpfc_nvmeio_data(phba,
1860 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1861 xri, raw_smp_processor_id(), 0);
1862
1863 req = &ctxp->hdlrctx.fcp_req;
1864 if (req)
1865 nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1866 }
1867#endif
1868}
1869
1870int
1871lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1872 struct fc_frame_header *fc_hdr)
1873{
1874#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1875 struct lpfc_hba *phba = vport->phba;
1876 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1877 struct nvmefc_tgt_fcp_req *rsp;
1878 uint32_t sid;
1879 uint16_t oxid, xri;
1880 unsigned long iflag = 0;
1881
1882 sid = sli4_sid_from_fc_hdr(fc_hdr);
1883 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1884
1885 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1886 list_for_each_entry_safe(ctxp, next_ctxp,
1887 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1888 list) {
1889 if (ctxp->oxid != oxid || ctxp->sid != sid)
1890 continue;
1891
1892 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1893
1894 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1895 iflag);
1896 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1897 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1898 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1899
1900 lpfc_nvmeio_data(phba,
1901 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1902 xri, raw_smp_processor_id(), 0);
1903
1904 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1905 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1906
1907 rsp = &ctxp->hdlrctx.fcp_req;
1908 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1909
1910 /* Respond with BA_ACC accordingly */
1911 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1912 return 0;
1913 }
1914 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1915 /* check the wait list */
1916 if (phba->sli4_hba.nvmet_io_wait_cnt) {
1917 struct rqb_dmabuf *nvmebuf;
1918 struct fc_frame_header *fc_hdr_tmp;
1919 u32 sid_tmp;
1920 u16 oxid_tmp;
1921 bool found = false;
1922
1923 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1924
1925 /* match by oxid and s_id */
1926 list_for_each_entry(nvmebuf,
1927 &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1928 hbuf.list) {
1929 fc_hdr_tmp = (struct fc_frame_header *)
1930 (nvmebuf->hbuf.virt);
1931 oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1932 sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1933 if (oxid_tmp != oxid || sid_tmp != sid)
1934 continue;
1935
1936 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1937 "6321 NVMET Rcv ABTS oxid x%x from x%x "
1938 "is waiting for a ctxp\n",
1939 oxid, sid);
1940
1941 list_del_init(&nvmebuf->hbuf.list);
1942 phba->sli4_hba.nvmet_io_wait_cnt--;
1943 found = true;
1944 break;
1945 }
1946 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1947 iflag);
1948
1949 /* free buffer since already posted a new DMA buffer to RQ */
1950 if (found) {
1951 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1952 /* Respond with BA_ACC accordingly */
1953 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1954 return 0;
1955 }
1956 }
1957
1958 /* check active list */
1959 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1960 if (ctxp) {
1961 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1962
1963 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1964 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1965 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1966
1967 lpfc_nvmeio_data(phba,
1968 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1969 xri, raw_smp_processor_id(), 0);
1970
1971 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1972 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1973 "flag x%x state x%x\n",
1974 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1975
1976 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1977 /* Notify the transport */
1978 nvmet_fc_rcv_fcp_abort(phba->targetport,
1979 &ctxp->hdlrctx.fcp_req);
1980 } else {
1981 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1982 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1983 lpfc_nvmet_defer_release(phba, ctxp);
1984 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1985 }
1986 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1987 ctxp->oxid);
1988
1989 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1990 return 0;
1991 }
1992
1993 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1994 oxid, raw_smp_processor_id(), 1);
1995
1996 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1997 "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1998
1999 /* Respond with BA_RJT accordingly */
2000 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
2001#endif
2002 return 0;
2003}
2004
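/**
 * lpfc_nvmet_wqfull_flush - Flush deferred WQEs from a WQ's wqfull_list
 * @phba: pointer to lpfc hba data structure.
 * @wq: pointer to the WQ whose wqfull_list is to be flushed.
 * @ctxp: exchange context of a single IO to flush, or NULL to flush all.
 *
 * Completes each flushed WQE through lpfc_nvmet_xmt_fcp_op_cmp() with a
 * faked IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED status so the normal
 * completion path releases the IO resources.
 **/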
2005static void
2006lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2007 struct lpfc_async_xchg_ctx *ctxp)
2008{
2009 struct lpfc_sli_ring *pring;
2010 struct lpfc_iocbq *nvmewqeq;
2011 struct lpfc_iocbq *next_nvmewqeq;
2012 unsigned long iflags;
2013 struct lpfc_wcqe_complete wcqe;
2014 struct lpfc_wcqe_complete *wcqep;
2015
2016 pring = wq->pring;
2017 wcqep = &wcqe;
2018
2019 /* Fake an ABORT error code back to cmpl routine */
2020 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2021 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2022 wcqep->parameter = IOERR_ABORT_REQUESTED;
2023
2024 spin_lock_irqsave(&pring->ring_lock, iflags);
2025 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2026 &wq->wqfull_list, list) {
2027 if (ctxp) {
2028 /* Checking for a specific IO to flush */
2029 if (nvmewqeq->context_un.axchg == ctxp) {
2030 list_del(&nvmewqeq->list);
2031 spin_unlock_irqrestore(&pring->ring_lock,
2032 iflags);
2033 memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
2034 sizeof(*wcqep));
2035 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2036 nvmewqeq);
2037 return;
2038 }
2039 continue;
2040 } else {
2041 /* Flush all IOs */
2042 list_del(&nvmewqeq->list);
2043 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2044 memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
2045 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
2046 spin_lock_irqsave(&pring->ring_lock, iflags);
2047 }
2048 }
2049 if (!ctxp)
2050 wq->q_flag &= ~HBA_NVMET_WQFULL;
2051 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2052}
2053
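/**
 * lpfc_nvmet_wqfull_process - Re-issue WQEs deferred due to a full WQ
 * @phba: pointer to lpfc hba data structure.
 * @wq: pointer to the WQ that now has free WQE slots.
 *
 * Drains wq->wqfull_list in order. If the WQ fills again (-EBUSY), the
 * WQE is put back at the head of the list and processing stops until
 * more slots become available.
 **/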
2054void
2055lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2056 struct lpfc_queue *wq)
2057{
2058#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2059 struct lpfc_sli_ring *pring;
2060 struct lpfc_iocbq *nvmewqeq;
2061 struct lpfc_async_xchg_ctx *ctxp;
2062 unsigned long iflags;
2063 int rc;
2064
2065 /*
2066 * Some WQE slots are available, so try to re-issue anything
2067 * on the WQ wqfull_list.
2068 */
2069 pring = wq->pring;
2070 spin_lock_irqsave(&pring->ring_lock, iflags);
2071 while (!list_empty(&wq->wqfull_list)) {
2072 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2073 list);
2074 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2075 ctxp = nvmewqeq->context_un.axchg;
2076 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2077 spin_lock_irqsave(&pring->ring_lock, iflags);
2078 if (rc == -EBUSY) {
2079 /* WQ was full again, so put it back on the list */
2080 list_add(&nvmewqeq->list, &wq->wqfull_list);
2081 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2082 return;
2083 }
2084 if (rc == WQE_SUCCESS) {
2085#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2086 if (ctxp->ts_cmd_nvme) {
2087 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2088 ctxp->ts_status_wqput = ktime_get_ns();
2089 else
2090 ctxp->ts_data_wqput = ktime_get_ns();
2091 }
2092#endif
2093 } else {
2094 WARN_ON(rc);
2095 }
2096 }
2097 wq->q_flag &= ~HBA_NVMET_WQFULL;
2098 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2099
2100#endif
2101}
2102
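/**
 * lpfc_nvmet_destroy_targetport - Unregister the NVMET targetport
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes any WQEs still deferred on the hardware queue wqfull lists,
 * unregisters the targetport from the nvmet-fc transport, and waits up
 * to LPFC_NVMET_WAIT_TMO for the transport's unregister completion
 * before cleaning up the IO context resources.
 **/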
2103void
2104lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2105{
2106#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2107 struct lpfc_nvmet_tgtport *tgtp;
2108 struct lpfc_queue *wq;
2109 uint32_t qidx;
2110 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2111
2112 if (phba->nvmet_support == 0)
2113 return;
2114 if (phba->targetport) {
2115 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2116 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2117 wq = phba->sli4_hba.hdwq[qidx].io_wq;
2118 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2119 }
2120 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2121 nvmet_fc_unregister_targetport(phba->targetport);
2122 if (!wait_for_completion_timeout(&tport_unreg_cmp,
2123 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2124 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2125 "6179 Unreg targetport x%px timeout "
2126 "reached.\n", phba->targetport);
2127 lpfc_nvmet_cleanup_io_context(phba);
2128 }
2129 phba->targetport = NULL;
2130#endif
2131}
2132
2133/**
2134 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
2135 * @phba: pointer to lpfc hba data structure.
2136 * @axchg: pointer to exchange context for the NVME LS request
2137 *
2138 * This routine is used for processing an asynchronously received NVME LS
2139 * request. Any remaining validation is done and the LS is then forwarded
2140 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2141 *
2142 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2143 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2144 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated axchg.
2145 *
2146 * Returns 0 if LS was handled and delivered to the transport
2147 * Returns 1 if LS failed to be handled and should be dropped
2148 */
2149int
2150lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2151 struct lpfc_async_xchg_ctx *axchg)
2152{
2153#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2154 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2155 uint32_t *payload = axchg->payload;
2156 int rc;
2157
2158 atomic_inc(&tgtp->rcv_ls_req_in);
2159
2160 /*
2161 * The driver passes the ndlp as the hosthandle argument, allowing
2162 * the transport to generate LS requests for any associations
2163 * that are created.
2164 */
2165 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2166 axchg->payload, axchg->size);
2167
2168 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2169 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2170 "%08x %08x %08x\n", axchg->size, rc,
2171 *payload, *(payload+1), *(payload+2),
2172 *(payload+3), *(payload+4), *(payload+5));
2173
2174 if (!rc) {
2175 atomic_inc(&tgtp->rcv_ls_req_out);
2176 return 0;
2177 }
2178
2179 atomic_inc(&tgtp->rcv_ls_req_drop);
2180#endif
2181 return 1;
2182}
2183
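/**
 * lpfc_nvmet_process_rcv_fcp_req - Pass a received FCP command to nvmet-fc
 * @ctx_buf: pointer to the context buffer holding the exchange context.
 *
 * Hands the received command IU to the transport via
 * nvmet_fc_rcv_fcp_req(). On success the receive buffer is freed back
 * to the RQ; on -EOVERFLOW (deferred) a replacement buffer is posted
 * and the receive buffer is held until the transport's defer_rcv
 * callback; any other error drops the IO and aborts the exchange.
 **/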
2184static void
2185lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2186{
2187#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2188 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2189 struct lpfc_hba *phba = ctxp->phba;
2190 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2191 struct lpfc_nvmet_tgtport *tgtp;
2192 uint32_t *payload, qno;
2193 uint32_t rc;
2194 unsigned long iflags;
2195
2196 if (!nvmebuf) {
2197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2198 "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2199 "oxid: x%x flg: x%x state: x%x\n",
2200 ctxp->oxid, ctxp->flag, ctxp->state);
2201 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2202 lpfc_nvmet_defer_release(phba, ctxp);
2203 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2204 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2205 ctxp->oxid);
2206 return;
2207 }
2208
2209 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2211 "6324 IO oxid x%x aborted\n",
2212 ctxp->oxid);
2213 return;
2214 }
2215
2216 payload = (uint32_t *)(nvmebuf->dbuf.virt);
2217 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2218 ctxp->flag |= LPFC_NVME_TNOTIFY;
2219#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2220 if (ctxp->ts_isr_cmd)
2221 ctxp->ts_cmd_nvme = ktime_get_ns();
2222#endif
2223 /*
2224 * The calling sequence should be:
2225 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done.
2226 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2227 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2228 * the NVME command / FC header has been stored.
2229 * A buffer has already been reposted for this IO, so just free
2230 * the nvmebuf.
2231 */
2232 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2233 payload, ctxp->size);
2234 /* Process FCP command */
2235 if (rc == 0) {
2236 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2237 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2238 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2239 (nvmebuf != ctxp->rqb_buffer)) {
2240 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2241 return;
2242 }
2243 ctxp->rqb_buffer = NULL;
2244 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2245 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2246 return;
2247 }
2248
2249 /* Processing of FCP command is deferred */
2250 if (rc == -EOVERFLOW) {
2251 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2252 "from %06x\n",
2253 ctxp->oxid, ctxp->size, ctxp->sid);
2254 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2255 atomic_inc(&tgtp->defer_fod);
2256 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2257 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2258 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2259 return;
2260 }
2261 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2262 /*
2263 * Post a replacement DMA buffer to RQ and defer
2264 * freeing rcv buffer till .defer_rcv callback
2265 */
2266 qno = nvmebuf->idx;
2267 lpfc_post_rq_buffer(
2268 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2269 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2270 return;
2271 }
2272 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2273 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2274 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2275 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2276 ctxp->oxid, rc,
2277 atomic_read(&tgtp->rcv_fcp_cmd_in),
2278 atomic_read(&tgtp->rcv_fcp_cmd_out),
2279 atomic_read(&tgtp->xmt_fcp_release));
2280 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2281 ctxp->oxid, ctxp->size, ctxp->sid);
2282 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2283 lpfc_nvmet_defer_release(phba, ctxp);
2284 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2285 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2286#endif
2287}
2288
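/* Work handler wrapper: processes a received FCP command from the
 * phba->wq workqueue when CQ load caused the IO to be deferred.
 */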
2289static void
2290lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2291{
2292#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2293 struct lpfc_nvmet_ctxbuf *ctx_buf =
2294 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2295
2296 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2297#endif
2298}
2299
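/**
 * lpfc_nvmet_replenish_context - Refill an empty MRQ context list
 * @phba: pointer to lpfc hba data structure.
 * @current_infop: context list info for the CPU/MRQ that ran dry.
 *
 * Takes the entire context list from another CPU's list for this MRQ,
 * returns one context buffer to the caller and leaves the remainder on
 * current_infop. Returns NULL if every list is empty, i.e. all
 * contexts for the MRQ are in-flight.
 **/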
2300static struct lpfc_nvmet_ctxbuf *
2301lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2302 struct lpfc_nvmet_ctx_info *current_infop)
2303{
2304#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2305 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2306 struct lpfc_nvmet_ctx_info *get_infop;
2307 int i;
2308
2309 /*
2310 * The current_infop for the MRQ an NVME command IU was received
2311 * on is empty. Our goal is to replenish this MRQ's context
2312 * list from another CPU's list.
2313 *
2314 * First we need to pick a context list to start looking on.
2315 * nvmet_ctx_start_cpu had available context the last time
2316 * we needed to replenish this CPU, whereas nvmet_ctx_next_cpu
2317 * is just the next sequential CPU for this MRQ.
2318 */
2319 if (current_infop->nvmet_ctx_start_cpu)
2320 get_infop = current_infop->nvmet_ctx_start_cpu;
2321 else
2322 get_infop = current_infop->nvmet_ctx_next_cpu;
2323
2324 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2325 if (get_infop == current_infop) {
2326 get_infop = get_infop->nvmet_ctx_next_cpu;
2327 continue;
2328 }
2329 spin_lock(&get_infop->nvmet_ctx_list_lock);
2330
2331 /* Just take the entire context list, if there are any */
2332 if (get_infop->nvmet_ctx_list_cnt) {
2333 list_splice_init(&get_infop->nvmet_ctx_list,
2334 &current_infop->nvmet_ctx_list);
2335 current_infop->nvmet_ctx_list_cnt =
2336 get_infop->nvmet_ctx_list_cnt - 1;
2337 get_infop->nvmet_ctx_list_cnt = 0;
2338 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2339
2340 current_infop->nvmet_ctx_start_cpu = get_infop;
2341 list_remove_head(&current_infop->nvmet_ctx_list,
2342 ctx_buf, struct lpfc_nvmet_ctxbuf,
2343 list);
2344 return ctx_buf;
2345 }
2346
2347 /* Otherwise, move on to the next CPU for this MRQ */
2348 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2349 get_infop = get_infop->nvmet_ctx_next_cpu;
2350 }
2351
2352#endif
2353 /* Nothing found, all contexts for the MRQ are in-flight */
2354 return NULL;
2355}
2356
2357/**
2358 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2359 * @phba: pointer to lpfc hba data structure.
2360 * @idx: relative index of MRQ vector
2361 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2362 * @isr_timestamp: ISR timestamp in ns, used for IO latency statistics.
2363 * @cqflag: cq processing information regarding workload.
2364 *
2365 * This routine is used for processing an unsolicited FCP command
2366 * received on an NVMET MRQ. It pulls an exchange context from the
2367 * per-CPU context list for this MRQ (replenishing from another CPU if
2368 * the list is empty), initializes the context from the FC header, and
2369 * either processes the command immediately or defers it to a work
2370 * element, depending on the CQ processing load (@cqflag).
2371 **/
2372static void
2373lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2374 uint32_t idx,
2375 struct rqb_dmabuf *nvmebuf,
2376 uint64_t isr_timestamp,
2377 uint8_t cqflag)
2378{
2379 struct lpfc_async_xchg_ctx *ctxp;
2380 struct lpfc_nvmet_tgtport *tgtp;
2381 struct fc_frame_header *fc_hdr;
2382 struct lpfc_nvmet_ctxbuf *ctx_buf;
2383 struct lpfc_nvmet_ctx_info *current_infop;
2384 uint32_t size, oxid, sid, qno;
2385 unsigned long iflag;
2386 int current_cpu;
2387
2388 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2389 return;
2390
2391 ctx_buf = NULL;
2392 if (!nvmebuf || !phba->targetport) {
2393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2394 "6157 NVMET FCP Drop IO\n");
2395 if (nvmebuf)
2396 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2397 return;
2398 }
2399
2400 /*
2401 * Get a pointer to the context list for this MRQ based on
2402 * the CPU this MRQ IRQ is associated with. If the CPU association
2403 * changes from our initial assumption, the context list could
2404 * be empty, thus it would need to be replenished with the
2405 * context list from another CPU for this MRQ.
2406 */
2407 current_cpu = raw_smp_processor_id();
2408 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2409 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2410 if (current_infop->nvmet_ctx_list_cnt) {
2411 list_remove_head(&current_infop->nvmet_ctx_list,
2412 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2413 current_infop->nvmet_ctx_list_cnt--;
2414 } else {
2415 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2416 }
2417 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2418
2419 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2420 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2421 size = nvmebuf->bytes_recv;
2422
2423#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2424 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2425 this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2426 if (idx != current_cpu)
2427 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2428 "6703 CPU Check rcv: "
2429 "cpu %d expect %d\n",
2430 current_cpu, idx);
2431 }
2432#endif
2433
2434 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2435 oxid, size, raw_smp_processor_id());
2436
2437 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2438
2439 if (!ctx_buf) {
2440 /* Queue this NVME IO to process later */
2441 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2442 list_add_tail(&nvmebuf->hbuf.list,
2443 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2444 phba->sli4_hba.nvmet_io_wait_cnt++;
2445 phba->sli4_hba.nvmet_io_wait_total++;
2446 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2447 iflag);
2448
2449 /* Post a brand new DMA buffer to RQ */
2450 qno = nvmebuf->idx;
2451 lpfc_post_rq_buffer(
2452 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2453 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2454
2455 atomic_inc(&tgtp->defer_ctx);
2456 return;
2457 }
2458
2459 sid = sli4_sid_from_fc_hdr(fc_hdr);
2460
2461 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2462 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2463 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2464 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2465 if (ctxp->state != LPFC_NVME_STE_FREE) {
2466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2467 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2468 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2469 }
2470 ctxp->wqeq = NULL;
2471 ctxp->offset = 0;
2472 ctxp->phba = phba;
2473 ctxp->size = size;
2474 ctxp->oxid = oxid;
2475 ctxp->sid = sid;
2476 ctxp->idx = idx;
2477 ctxp->state = LPFC_NVME_STE_RCV;
2478 ctxp->entry_cnt = 1;
2479 ctxp->flag = 0;
2480 ctxp->ctxbuf = ctx_buf;
2481 ctxp->rqb_buffer = (void *)nvmebuf;
2482 ctxp->hdwq = NULL;
2483 spin_lock_init(&ctxp->ctxlock);
2484
2485#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2486 if (isr_timestamp)
2487 ctxp->ts_isr_cmd = isr_timestamp;
2488 ctxp->ts_cmd_nvme = 0;
2489 ctxp->ts_nvme_data = 0;
2490 ctxp->ts_data_wqput = 0;
2491 ctxp->ts_isr_data = 0;
2492 ctxp->ts_data_nvme = 0;
2493 ctxp->ts_nvme_status = 0;
2494 ctxp->ts_status_wqput = 0;
2495 ctxp->ts_isr_status = 0;
2496 ctxp->ts_status_nvme = 0;
2497#endif
2498
2499 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2500 /* check for cq processing load */
2501 if (!cqflag) {
2502 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2503 return;
2504 }
2505
2506 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2507 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2509 "6325 Unable to queue work for oxid x%x. "
2510 "FCP Drop IO [x%x x%x x%x]\n",
2511 ctxp->oxid,
2512 atomic_read(&tgtp->rcv_fcp_cmd_in),
2513 atomic_read(&tgtp->rcv_fcp_cmd_out),
2514 atomic_read(&tgtp->xmt_fcp_release));
2515
2516 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2517 lpfc_nvmet_defer_release(phba, ctxp);
2518 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2519 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2520 }
2521}
2522
2523/**
2524 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2525 * @phba: pointer to lpfc hba data structure.
2526 * @idx: relative index of MRQ vector
2527 * @nvmebuf: pointer to received nvme data structure.
2528 * @isr_timestamp: ISR timestamp in ns, used for IO latency statistics.
2529 * @cqflag: cq processing information regarding workload.
2530 *
2531 * This routine is used to process an unsolicited event received from a SLI
2532 * (Service Level Interface) ring. The actual processing of the data buffer
2533 * associated with the unsolicited event is done by invoking the routine
2534 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2535 * SLI RQ on which the unsolicited event was received.
2536 **/
2537void
2538lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2539 uint32_t idx,
2540 struct rqb_dmabuf *nvmebuf,
2541 uint64_t isr_timestamp,
2542 uint8_t cqflag)
2543{
2544 if (!nvmebuf) {
2545 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2546 "3167 NVMET FCP Drop IO\n");
2547 return;
2548 }
2549 if (phba->nvmet_support == 0) {
2550 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2551 return;
2552 }
2553 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2554}
2555
2556/**
2557 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2558 * @phba: pointer to lpfc hba data structure.
2559 * @ctxp: Context info for NVME LS Request
2560 * @rspbuf: DMA address of the NVME LS response buffer.
2561 * @rspsize: size of the NVME LS response.
2562 *
2563 * This routine is used for allocating a lpfc-WQE data structure from
2564 * the driver lpfc-WQE free-list and preparing it with the parameters
2565 * passed into the routine, in order to transmit an NVME LS response
2566 * on the exchange identified by @ctxp. The WQE is built as an
2567 * XMIT_SEQUENCE64 WQE: the Buffer Descriptor Entry (BDE) in words
2568 * 0 - 2 describes the response payload at @rspbuf/@rspsize, and the
2569 * sequence/WQE control words identify the exchange (rcvoxid) and the
2570 * remote port (ctxt_tag/xri_tag). The reference count on the
2571 * ndlp is incremented by 1 and the reference to the ndlp is put into
2572 * the WQE's ndlp field for this WQE to hold the ndlp
2573 * reference for the completion handler to access later. If the link
2574 * is down, no WQE is available, or no valid ndlp is found, the
2575 * routine fails and returns NULL.
2576 *
2577 * Return code
2578 * Pointer to the newly allocated/prepared nvme wqe data structure
2579 * NULL - when nvme wqe data structure allocation/preparation failed
2580 **/
2581static struct lpfc_iocbq *
2582lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2583 struct lpfc_async_xchg_ctx *ctxp,
2584 dma_addr_t rspbuf, uint16_t rspsize)
2585{
2586 struct lpfc_nodelist *ndlp;
2587 struct lpfc_iocbq *nvmewqe;
2588 union lpfc_wqe128 *wqe;
2589
2590 if (!lpfc_is_link_up(phba)) {
2591 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2592 "6104 NVMET prep LS wqe: link err: "
2593 "NPORT x%x oxid:x%x ste %d\n",
2594 ctxp->sid, ctxp->oxid, ctxp->state);
2595 return NULL;
2596 }
2597
2598 /* Allocate buffer for command wqe */
2599 nvmewqe = lpfc_sli_get_iocbq(phba);
2600 if (nvmewqe == NULL) {
2601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2602 "6105 NVMET prep LS wqe: No WQE: "
2603 "NPORT x%x oxid x%x ste %d\n",
2604 ctxp->sid, ctxp->oxid, ctxp->state);
2605 return NULL;
2606 }
2607
2608 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2609 if (!ndlp ||
2610 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2611 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2613 "6106 NVMET prep LS wqe: No ndlp: "
2614 "NPORT x%x oxid x%x ste %d\n",
2615 ctxp->sid, ctxp->oxid, ctxp->state);
2616 goto nvme_wqe_free_wqeq_exit;
2617 }
2618 ctxp->wqeq = nvmewqe;
2619
2620 /* prevent preparing wqe with NULL ndlp reference */
2621 nvmewqe->ndlp = lpfc_nlp_get(ndlp);
2622 if (!nvmewqe->ndlp)
2623 goto nvme_wqe_free_wqeq_exit;
2624 nvmewqe->context_un.axchg = ctxp;
2625
2626 wqe = &nvmewqe->wqe;
2627 memset(wqe, 0, sizeof(union lpfc_wqe));
2628
2629 /* Words 0 - 2 */
2630 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2631 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2632 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2633 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2634
2635 /* Word 3 */
2636
2637 /* Word 4 */
2638
2639 /* Word 5 */
2640 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2641 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2642 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2643 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2644 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2645
2646 /* Word 6 */
2647 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2648 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2649 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2650
2651 /* Word 7 */
2652 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2653 CMD_XMIT_SEQUENCE64_WQE);
2654 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2655 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2656 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2657
2658 /* Word 8 */
2659 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2660
2661 /* Word 9 */
2662 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2663 /* Needs to carry the original exchange's oxid */
2664 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2665
2666 /* Word 10 */
2667 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2668 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2669 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2670 LPFC_WQE_LENLOC_WORD12);
2671 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2672
2673 /* Word 11 */
2674 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2675 LPFC_WQE_CQ_ID_DEFAULT);
2676 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2677 OTHER_COMMAND);
2678
2679 /* Word 12 */
2680 wqe->xmit_sequence.xmit_len = rspsize;
2681
2682 nvmewqe->retry = 1;
2683 nvmewqe->vport = phba->pport;
2684 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2685 nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
2686
2687 /* Xmit NVMET response to remote NPORT <did> */
2688 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2689 "6039 Xmit NVMET LS response to remote "
2690 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2691 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2692 rspsize);
2693 return nvmewqe;
2694
2695nvme_wqe_free_wqeq_exit:
2696 nvmewqe->context_un.axchg = NULL;
2697 nvmewqe->ndlp = NULL;
2698 nvmewqe->bpl_dmabuf = NULL;
2699 lpfc_sli_release_iocbq(phba, nvmewqe);
2700 return NULL;
2701}
2702
2703
2704static struct lpfc_iocbq *
2705lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2706 struct lpfc_async_xchg_ctx *ctxp)
2707{
2708 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2709 struct lpfc_nvmet_tgtport *tgtp;
2710 struct sli4_sge *sgl;
2711 struct lpfc_nodelist *ndlp;
2712 struct lpfc_iocbq *nvmewqe;
2713 struct scatterlist *sgel;
2714 union lpfc_wqe128 *wqe;
2715 struct ulp_bde64 *bde;
2716 dma_addr_t physaddr;
2717 int i, cnt, nsegs;
2718 int xc = 1;
2719
2720 if (!lpfc_is_link_up(phba)) {
2721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2722 "6107 NVMET prep FCP wqe: link err:"
2723 "NPORT x%x oxid x%x ste %d\n",
2724 ctxp->sid, ctxp->oxid, ctxp->state);
2725 return NULL;
2726 }
2727
2728 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2729 if (!ndlp ||
2730 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2731 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2732 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2733 "6108 NVMET prep FCP wqe: no ndlp: "
2734 "NPORT x%x oxid x%x ste %d\n",
2735 ctxp->sid, ctxp->oxid, ctxp->state);
2736 return NULL;
2737 }
2738
2739 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2741 "6109 NVMET prep FCP wqe: seg cnt err: "
2742 "NPORT x%x oxid x%x ste %d cnt %d\n",
2743 ctxp->sid, ctxp->oxid, ctxp->state,
2744 phba->cfg_nvme_seg_cnt);
2745 return NULL;
2746 }
2747 nsegs = rsp->sg_cnt;
2748
2749 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2750 nvmewqe = ctxp->wqeq;
2751 if (nvmewqe == NULL) {
2752 /* Allocate buffer for command wqe */
2753 nvmewqe = ctxp->ctxbuf->iocbq;
2754 if (nvmewqe == NULL) {
2755 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2756 "6110 NVMET prep FCP wqe: No "
2757 "WQE: NPORT x%x oxid x%x ste %d\n",
2758 ctxp->sid, ctxp->oxid, ctxp->state);
2759 return NULL;
2760 }
2761 ctxp->wqeq = nvmewqe;
2762 xc = 0; /* create new XRI */
2763 nvmewqe->sli4_lxritag = NO_XRI;
2764 nvmewqe->sli4_xritag = NO_XRI;
2765 }
2766
2767 /* Sanity check */
2768 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2769 (ctxp->entry_cnt == 1)) ||
2770 (ctxp->state == LPFC_NVME_STE_DATA)) {
2771 wqe = &nvmewqe->wqe;
2772 } else {
2773 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2774 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2775 ctxp->state, ctxp->entry_cnt);
2776 return NULL;
2777 }
2778
2779 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
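 /*
 * Map the transport op to its SLI-4 WQE type: READDATA and
 * READDATA_RSP use FCP_TSEND, WRITEDATA uses FCP_TRECEIVE, and
 * RSP uses FCP_TRSP. Words not set explicitly below come from the
 * matching command template. For the data ops the first two SGEs
 * of the exchange SGL are marked SKIP, so data SGEs start at the
 * third entry.
 */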
2780 switch (rsp->op) {
2781 case NVMET_FCOP_READDATA:
2782 case NVMET_FCOP_READDATA_RSP:
2783 /* From the tsend template, initialize words 7 - 11 */
2784 memcpy(&wqe->words[7],
2785 &lpfc_tsend_cmd_template.words[7],
2786 sizeof(uint32_t) * 5);
2787
2788 /* Words 0 - 2 : The first sg segment */
2789 sgel = &rsp->sg[0];
2790 physaddr = sg_dma_address(sgel);
2791 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2792 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2793 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2794 wqe->fcp_tsend.bde.addrHigh =
2795 cpu_to_le32(putPaddrHigh(physaddr));
2796
2797 /* Word 3 */
2798 wqe->fcp_tsend.payload_offset_len = 0;
2799
2800 /* Word 4 */
2801 wqe->fcp_tsend.relative_offset = ctxp->offset;
2802
2803 /* Word 5 */
2804 wqe->fcp_tsend.reserved = 0;
2805
2806 /* Word 6 */
2807 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2808 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2809 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2810 nvmewqe->sli4_xritag);
2811
2812 /* Word 7 - set ar later */
2813
2814 /* Word 8 */
2815 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2816
2817 /* Word 9 */
2818 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2819 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2820
2821 /* Word 10 - set wqes later, in template xc=1 */
2822 if (!xc)
2823 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2824
2825 /* Word 12 */
2826 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2827
2828 /* Setup 2 SKIP SGEs */
2829 sgl->addr_hi = 0;
2830 sgl->addr_lo = 0;
2831 sgl->word2 = 0;
2832 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2833 sgl->word2 = cpu_to_le32(sgl->word2);
2834 sgl->sge_len = 0;
2835 sgl++;
2836 sgl->addr_hi = 0;
2837 sgl->addr_lo = 0;
2838 sgl->word2 = 0;
2839 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2840 sgl->word2 = cpu_to_le32(sgl->word2);
2841 sgl->sge_len = 0;
2842 sgl++;
2843 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2844 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2845
2846 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2847
2848 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2849 if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag))
2850 bf_set(wqe_sup,
2851 &wqe->fcp_tsend.wqe_com, 1);
2852 } else {
2853 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2854 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2855 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2856 ((rsp->rsplen >> 2) - 1));
2857 memcpy(&wqe->words[16], rsp->rspaddr,
2858 rsp->rsplen);
2859 }
2860 } else {
2861 atomic_inc(&tgtp->xmt_fcp_read);
2862
2863 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2864 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2865 }
2866 break;
2867
2868 case NVMET_FCOP_WRITEDATA:
2869 /* From the treceive template, initialize words 3 - 11 */
2870 memcpy(&wqe->words[3],
2871 &lpfc_treceive_cmd_template.words[3],
2872 sizeof(uint32_t) * 9);
2873
2874 /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2875 wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2876 wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2877 wqe->fcp_treceive.bde.addrLow = 0;
2878 wqe->fcp_treceive.bde.addrHigh = 0;
2879
2880 /* Word 4 */
2881 wqe->fcp_treceive.relative_offset = ctxp->offset;
2882
2883 /* Word 6 */
2884 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2885 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2886 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2887 nvmewqe->sli4_xritag);
2888
2889 /* Word 7 */
2890
2891 /* Word 8 */
2892 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2893
2894 /* Word 9 */
2895 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2896 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2897
2898 /* Word 10 - in template xc=1 */
2899 if (!xc)
2900 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2901
2902 /* Word 12 - fcp_data_len (same word in the tsend/treceive union) */
2903 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2904
2905 /* Setup 2 SKIP SGEs */
2906 sgl->addr_hi = 0;
2907 sgl->addr_lo = 0;
2908 sgl->word2 = 0;
2909 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2910 sgl->word2 = cpu_to_le32(sgl->word2);
2911 sgl->sge_len = 0;
2912 sgl++;
2913 sgl->addr_hi = 0;
2914 sgl->addr_lo = 0;
2915 sgl->word2 = 0;
2916 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2917 sgl->word2 = cpu_to_le32(sgl->word2);
2918 sgl->sge_len = 0;
2919 sgl++;
2920 atomic_inc(&tgtp->xmt_fcp_write);
2921 break;
2922
2923 case NVMET_FCOP_RSP:
2924 /* From the trsp template, initialize words 4 - 11 */
2925 memcpy(&wqe->words[4],
2926 &lpfc_trsp_cmd_template.words[4],
2927 sizeof(uint32_t) * 8);
2928
2929 /* Words 0 - 2 */
2930 physaddr = rsp->rspdma;
2931 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2932 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2933 wqe->fcp_trsp.bde.addrLow =
2934 cpu_to_le32(putPaddrLow(physaddr));
2935 wqe->fcp_trsp.bde.addrHigh =
2936 cpu_to_le32(putPaddrHigh(physaddr));
2937
2938 /* Word 3 */
2939 wqe->fcp_trsp.response_len = rsp->rsplen;
2940
2941 /* Word 6 */
2942 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2943 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2944 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2945 nvmewqe->sli4_xritag);
2946
2947 /* Word 7 */
2948
2949 /* Word 8 */
2950 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2951
2952 /* Word 9 */
2953 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2954 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2955
2956 /* Word 10 */
2957 if (xc)
2958 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2959
2960 /* Word 11 */
2961 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2962 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2963 /* Bad response - embed it */
2964 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2965 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2966 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2967 ((rsp->rsplen >> 2) - 1));
2968 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2969 }
2970
2971 /* Word 12 */
2972 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2973
2974 /* Use rspbuf, NOT sg list */
2975 nsegs = 0;
2976 sgl->word2 = 0;
2977 atomic_inc(&tgtp->xmt_fcp_rsp);
2978 break;
2979
2980 default:
2981 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2982 "6064 Unknown Rsp Op %d\n",
2983 rsp->op);
2984 return NULL;
2985 }
2986
2987 nvmewqe->retry = 1;
2988 nvmewqe->vport = phba->pport;
2989 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2990 nvmewqe->ndlp = ndlp;
2991
2992 for_each_sg(rsp->sg, sgel, nsegs, i) {
2993 physaddr = sg_dma_address(sgel);
2994 cnt = sg_dma_len(sgel);
2995 sgl->addr_hi = putPaddrHigh(physaddr);
2996 sgl->addr_lo = putPaddrLow(physaddr);
2997 sgl->word2 = 0;
2998 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2999 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3000 if ((i+1) == rsp->sg_cnt)
3001 bf_set(lpfc_sli4_sge_last, sgl, 1);
3002 sgl->word2 = cpu_to_le32(sgl->word2);
3003 sgl->sge_len = cpu_to_le32(cnt);
3004 sgl++;
3005 ctxp->offset += cnt;
3006 }
3007
3008 bde = (struct ulp_bde64 *)&wqe->words[13];
3009
3010 memset(bde, 0, sizeof(struct ulp_bde64));
3011
3012 ctxp->state = LPFC_NVME_STE_DATA;
3013 ctxp->entry_cnt++;
3014 return nvmewqe;
3015}
3016
3017/**
3018 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
3019 * @phba: Pointer to HBA context object.
3020 * @cmdwqe: Pointer to driver command WQE object.
3021 * @rspwqe: Pointer to driver response WQE object.
3022 *
3023 * The function is called from SLI ring event handler with no
3024 * lock held. This function is the completion handler for NVME ABTS for FCP
3025 * cmds. The function frees memory resources used for the NVME commands.
3026 **/
3027static void
3028lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3029 struct lpfc_iocbq *rspwqe)
3030{
3031 struct lpfc_async_xchg_ctx *ctxp;
3032 struct lpfc_nvmet_tgtport *tgtp;
3033 uint32_t result;
3034 unsigned long flags;
3035 bool released = false;
3036 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3037
3038 ctxp = cmdwqe->context_un.axchg;
3039 result = wcqe->parameter;
3040
3041 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3042 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3043 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3044
3045 spin_lock_irqsave(&ctxp->ctxlock, flags);
3046 ctxp->state = LPFC_NVME_STE_DONE;
3047
3048 /* Check if we already received a free context call
3049 * and we have completed processing an abort situation.
3050 */
3051 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3052 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3053 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3054 list_del_init(&ctxp->list);
3055 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3056 released = true;
3057 }
3058 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3059 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3060 atomic_inc(&tgtp->xmt_abort_rsp);
3061
3062 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3063 "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3064 "WCQE: %08x %08x %08x %08x\n",
3065 ctxp->oxid, ctxp->flag, released,
3066 wcqe->word0, wcqe->total_data_placed,
3067 result, wcqe->word3);
3068
3069 cmdwqe->rsp_dmabuf = NULL;
3070 cmdwqe->bpl_dmabuf = NULL;
3071 /*
3072 * If the transport has released the ctx, it can be reused now.
3073 * Otherwise, it will be recycled by the transport release call.
3074 */
3075 if (released)
3076 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3077
3078 /* This is the iocbq for the abort, not the command */
3079 lpfc_sli_release_iocbq(phba, cmdwqe);
3080
3081 /* Since iaab/iaar are NOT set, there is no work left.
3082 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3083 * should have been called already.
3084 */
3085}
3086
3087/**
3088 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3089 * @phba: Pointer to HBA context object.
3090 * @cmdwqe: Pointer to driver command WQE object.
3091 * @rspwqe: Pointer to driver response WQE object.
3092 *
3093 * The function is called from SLI ring event handler with no
3094 * lock held. This function is the completion handler for NVME ABTS for FCP
3095 * cmds. The function frees memory resources used for the NVME commands.
3096 **/
3097static void
3098lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3099 struct lpfc_iocbq *rspwqe)
3100{
3101 struct lpfc_async_xchg_ctx *ctxp;
3102 struct lpfc_nvmet_tgtport *tgtp;
3103 unsigned long flags;
3104 uint32_t result;
3105 bool released = false;
3106 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3107
3108 ctxp = cmdwqe->context_un.axchg;
3109 result = wcqe->parameter;
3110
3111 if (!ctxp) {
3112 /* if context is cleared, the related io already completed */
3113 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3114 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3115 wcqe->word0, wcqe->total_data_placed,
3116 result, wcqe->word3);
3117 return;
3118 }
3119
3120 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3121 spin_lock_irqsave(&ctxp->ctxlock, flags);
3122 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3123 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3124
3125 /* Sanity check */
3126 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3128 "6112 ABTS Wrong state:%d oxid x%x\n",
3129 ctxp->state, ctxp->oxid);
3130 }
3131
3132 /* Check if we already received a free context call
3133 * and we have completed processing an abort situation.
3134 */
3135 ctxp->state = LPFC_NVME_STE_DONE;
3136 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3137 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3138 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3139 list_del_init(&ctxp->list);
3140 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3141 released = true;
3142 }
3143 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3144 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3145 atomic_inc(&tgtp->xmt_abort_rsp);
3146
3147 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3148 "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3149 "WCQE: %08x %08x %08x %08x\n",
3150 ctxp->oxid, ctxp->flag, released,
3151 wcqe->word0, wcqe->total_data_placed,
3152 result, wcqe->word3);
3153
3154 cmdwqe->rsp_dmabuf = NULL;
3155 cmdwqe->bpl_dmabuf = NULL;
3156 /*
3157 * If the transport has released the ctx, it can be reused now.
3158 * Otherwise, it will be recycled by the transport release call.
3159 */
3160 if (released)
3161 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3162
3163 /* Since iaab/iaar are NOT set, there is no work left.
3164 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3165 * should have been called already.
3166 */
3167}
3168
3169/**
3170 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3171 * @phba: Pointer to HBA context object.
3172 * @cmdwqe: Pointer to driver command WQE object.
3173 * @rspwqe: Pointer to driver response WQE object.
3174 *
3175 * The function is called from SLI ring event handler with no
3176 * lock held. This function is the completion handler for NVME ABTS for LS
3177 * cmds. The function frees memory resources used for the NVME commands.
3178 **/
3179static void
3180lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3181 struct lpfc_iocbq *rspwqe)
3182{
3183 struct lpfc_async_xchg_ctx *ctxp;
3184 struct lpfc_nvmet_tgtport *tgtp;
3185 uint32_t result;
3186 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3187
3188 ctxp = cmdwqe->context_un.axchg;
3189 result = wcqe->parameter;
3190
3191 if (phba->nvmet_support) {
3192 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3193 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3194 }
3195
3196 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3197 "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3198 ctxp, wcqe->word0, wcqe->total_data_placed,
3199 result, wcqe->word3);
3200
3201 if (!ctxp) {
3202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3203 "6415 NVMET LS Abort No ctx: WCQE: "
3204 "%08x %08x %08x %08x\n",
3205 wcqe->word0, wcqe->total_data_placed,
3206 result, wcqe->word3);
3207
3208 lpfc_sli_release_iocbq(phba, cmdwqe);
3209 return;
3210 }
3211
3212 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3214 "6416 NVMET LS abort cmpl state mismatch: "
3215 "oxid x%x: %d %d\n",
3216 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3217 }
3218
3219 cmdwqe->rsp_dmabuf = NULL;
3220 cmdwqe->bpl_dmabuf = NULL;
3221 lpfc_sli_release_iocbq(phba, cmdwqe);
3222 kfree(ctxp);
3223}
3224
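/**
 * lpfc_nvmet_unsol_issue_abort - Build an ABTS WQE for an exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: exchange context for the IO being aborted.
 * @sid: DID of the remote port the ABTS is sent to.
 * @xri: remote exchange id (oxid) targeted by the ABTS.
 *
 * Prepares ctxp->wqeq as an XMIT_SEQUENCE64 WQE carrying a BA_ABTS
 * sequence. Returns 1 if the WQE was prepared; returns 0 when the ABTS
 * is dropped because the ndlp is missing or in the wrong state, which
 * is not treated as a failure.
 **/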
3225static int
3226lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3227 struct lpfc_async_xchg_ctx *ctxp,
3228 uint32_t sid, uint16_t xri)
3229{
3230 struct lpfc_nvmet_tgtport *tgtp = NULL;
3231 struct lpfc_iocbq *abts_wqeq;
3232 union lpfc_wqe128 *wqe_abts;
3233 struct lpfc_nodelist *ndlp;
3234
3235 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3236 "6067 ABTS: sid %x xri x%x/x%x\n",
3237 sid, xri, ctxp->wqeq->sli4_xritag);
3238
3239 if (phba->nvmet_support && phba->targetport)
3240 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3241
3242 ndlp = lpfc_findnode_did(phba->pport, sid);
3243 if (!ndlp ||
3244 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3245 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3246 if (tgtp)
3247 atomic_inc(&tgtp->xmt_abort_rsp_error);
3248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3249 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3250 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3251
3252 /* No failure to an ABTS request. */
3253 return 0;
3254 }
3255
3256 abts_wqeq = ctxp->wqeq;
3257 wqe_abts = &abts_wqeq->wqe;
3258
3259 /*
3260 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3261 * that were initialized in lpfc_sli4_nvmet_alloc.
3262 */
3263 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3264
3265 /* Word 5 */
3266 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3267 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3268 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3269 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3270 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3271
3272 /* Word 6 */
3273 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3274 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3275 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3276 abts_wqeq->sli4_xritag);
3277
3278 /* Word 7 */
3279 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3280 CMD_XMIT_SEQUENCE64_WQE);
3281 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3282 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3283 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3284
3285 /* Word 8 */
3286 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3287
3288 /* Word 9 */
3289 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3290 /* Needs to carry the aborted exchange's oxid */
3291 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3292
3293 /* Word 10 */
3294 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3295 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3296 LPFC_WQE_LENLOC_WORD12);
3297 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3298 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3299
3300 /* Word 11 */
3301 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3302 LPFC_WQE_CQ_ID_DEFAULT);
3303 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3304 OTHER_COMMAND);
3305
3306 abts_wqeq->vport = phba->pport;
3307 abts_wqeq->ndlp = ndlp;
3308 abts_wqeq->context_un.axchg = ctxp;
3309 abts_wqeq->bpl_dmabuf = NULL;
3310 abts_wqeq->num_bdes = 0;
3311 /* hba_wqidx should already be setup from command we are aborting */
3312 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3313 abts_wqeq->iocb.ulpLe = 1;
3314
3315 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3316 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3317 xri, abts_wqeq->iotag);
3318 return 1;
3319}
3320
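/**
 * lpfc_nvmet_sol_fcp_issue_abort - Abort an outstanding solicited FCP WQE
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: exchange context for the IO being aborted.
 * @sid: DID of the remote port for the exchange.
 * @xri: exchange id associated with the IO.
 *
 * Allocates a separate abort WQE, marks the outstanding WQE as driver
 * aborted, and issues the abort on the same hardware queue as the WQE
 * being aborted. Returns 0 when the abort was issued or silently
 * dropped; returns 1 if issuing the abort WQE failed.
 **/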
3321static int
3322lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3323 struct lpfc_async_xchg_ctx *ctxp,
3324 uint32_t sid, uint16_t xri)
3325{
3326 struct lpfc_nvmet_tgtport *tgtp;
3327 struct lpfc_iocbq *abts_wqeq;
3328 struct lpfc_nodelist *ndlp;
3329 unsigned long flags;
3330 bool ia;
3331 int rc;
3332
3333 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3334 if (!ctxp->wqeq) {
3335 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3336 ctxp->wqeq->hba_wqidx = 0;
3337 }
3338
3339 ndlp = lpfc_findnode_did(phba->pport, sid);
3340 if (!ndlp ||
3341 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3342 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3343 atomic_inc(&tgtp->xmt_abort_rsp_error);
3344 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3345 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3346 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3347
3348 /* No failure to an ABTS request. */
3349 spin_lock_irqsave(&ctxp->ctxlock, flags);
3350 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3351 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3352 return 0;
3353 }
3354
3355 /* Issue ABTS for this WQE based on iotag */
3356 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3357 spin_lock_irqsave(&ctxp->ctxlock, flags);
3358 if (!ctxp->abort_wqeq) {
3359 atomic_inc(&tgtp->xmt_abort_rsp_error);
3360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3361 "6161 ABORT failed: No wqeqs: "
3362 "xri: x%x\n", ctxp->oxid);
3363 /* No failure to an ABTS request. */
3364 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3365 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3366 return 0;
3367 }
3368 abts_wqeq = ctxp->abort_wqeq;
3369 ctxp->state = LPFC_NVME_STE_ABORT;
3370 ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
3371 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3372
3373 /* Log entry into the abort request path for this IO. */
3374 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3375 "6162 ABORT Request to rport DID x%06x "
3376 "for xri x%x x%x\n",
3377 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3378
3379 /* If the hba is getting reset, this flag is set. It is
3380 * cleared when the reset is complete and rings reestablished.
3381 */
3382 /* driver queued commands are in process of being flushed */
3383 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
3384 atomic_inc(&tgtp->xmt_abort_rsp_error);
3385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3386 "6163 Driver in reset cleanup - flushing "
3387 "NVME Req now. hba_flag x%lx oxid x%x\n",
3388 phba->hba_flag, ctxp->oxid);
3389 lpfc_sli_release_iocbq(phba, abts_wqeq);
3390 spin_lock_irqsave(&ctxp->ctxlock, flags);
3391 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3392 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3393 return 0;
3394 }
3395
3396 spin_lock_irqsave(&phba->hbalock, flags);
3397 /* Outstanding abort is in progress */
3398 if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
3399 spin_unlock_irqrestore(&phba->hbalock, flags);
3400 atomic_inc(&tgtp->xmt_abort_rsp_error);
3401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3402 "6164 Outstanding NVME I/O Abort Request "
3403 "still pending on oxid x%x\n",
3404 ctxp->oxid);
3405 lpfc_sli_release_iocbq(phba, abts_wqeq);
3406 spin_lock_irqsave(&ctxp->ctxlock, flags);
3407 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3408 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3409 return 0;
3410 }
3411
3412 /* Ready - mark outstanding as aborted by driver. */
3413 abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
3414
3415 lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
3416 abts_wqeq->iotag, CLASS3,
3417 LPFC_WQE_CQ_ID_DEFAULT, ia, true);
3418
3419 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3420 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3421 abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3422 abts_wqeq->cmd_flag |= LPFC_IO_NVME;
3423 abts_wqeq->context_un.axchg = ctxp;
3424 abts_wqeq->vport = phba->pport;
3425 if (!ctxp->hdwq)
3426 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3427
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

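	/* Submit failed: undo the abort marking and release the WQE
	 * before reporting the error.
	 */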
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

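/**
 * lpfc_nvmet_unsol_fcp_issue_abort - issue ABTS for an unsolicited FCP exchange
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the exchange context being aborted
 * @sid: address of the remote port to send the ABTS to
 * @xri: exchange id to abort (other side's exchange id)
 *
 * Builds and submits an ABTS WQE for an exchange received as an
 * unsolicited FCP request.  On failure, the abort flags are cleared
 * and a context pending release is reposted.
 **/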
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
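	/* Reuse the iocbq embedded in the receive context buffer if no
	 * WQE has been associated with this exchange yet.
	 */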
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
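	/* lpfc_nvmet_unsol_issue_abort() returns zero if the ABTS WQE
	 * could not be built.
	 */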
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
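	/* Unable to issue the ABTS: clear the abort flags and, if the
	 * context was pending release, pull it off the aborted-exchange
	 * list so the buffer can be reposted after logging the failure.
	 */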
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}

/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async frame receive where the frame is not handled.
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the asynchronously received sequence
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value for the ABTS (other side's exchange id).
 **/
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

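	/* An LS abort is only expected after an LS receive (entry_cnt 1)
	 * or an LS response (entry_cnt 2); log any state mismatch but
	 * proceed with the abort regardless.
	 */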
	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

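	/* Build the ABTS WQE; a zero return means it could not be
	 * prepared.
	 */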
	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
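	/* The ABTS could not be built or submitted.  Clear stale buffer
	 * pointers before returning the iocbq to the pool, then log the
	 * failure.
	 */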
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->rsp_dmabuf = NULL;
	abts_wqeq->bpl_dmabuf = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}

/**
 * lpfc_nvmet_invalidate_host - invalidate connections for an NVME host
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 */
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	u32 ndlp_has_hh;
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO,
			LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
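	/* Mark an NVME host invalidation in progress on this targetport. */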
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

	spin_lock_irq(&ndlp->lock);
	ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);

	/* Do not invalidate any nodes that do not have a hosthandle.
	 * The host_release callback will cause a node reference
	 * count imbalance and a crash.
	 */
	if (!ndlp_has_hh) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
				"6204 Skip invalidate on node x%px DID x%x\n",
				ndlp, ndlp->nlp_DID);
		return;
	}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* Need to get the nvmet_fc_target_port pointer here. */
	nvmet_fc_invalidate_host(phba->targetport, ndlp);
#endif
}