Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
4 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 */
6#include <linux/mempool.h>
7#include <linux/errno.h>
8#include <linux/init.h>
9#include <linux/workqueue.h>
10#include <linux/pci.h>
11#include <linux/scatterlist.h>
12#include <linux/skbuff.h>
13#include <linux/spinlock.h>
14#include <linux/etherdevice.h>
15#include <linux/if_ether.h>
16#include <linux/if_vlan.h>
17#include <linux/delay.h>
18#include <linux/gfp.h>
19#include <scsi/scsi.h>
20#include <scsi/scsi_host.h>
21#include <scsi/scsi_device.h>
22#include <scsi/scsi_cmnd.h>
23#include <scsi/scsi_tcq.h>
24#include <scsi/fc/fc_els.h>
25#include <scsi/fc/fc_fcoe.h>
26#include <scsi/fc_frame.h>
27#include <scsi/scsi_transport_fc.h>
28#include "fnic_io.h"
29#include "fnic.h"
30
31static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
32
/* Printable names for enum fnic_state values, indexed by state value. */
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
39
/* Printable names for per-IO request states, indexed by state value. */
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
47
48static const char *fcpio_status_str[] = {
49 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
50 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
51 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
52 [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]",
53 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
54 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
55 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
56 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
57 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
58 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
59 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
60 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
61 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
62 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
63 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
64 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
65 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
66 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
67 [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
68};
69
/*
 * Result codes for the terminate-IO path. TERM_SUCCESS is 0 so callers
 * can treat any non-zero value as a failure reason.
 */
enum terminate_io_return {
	TERM_SUCCESS = 0,
	TERM_NO_SC = 1,
	TERM_IO_REQ_NOT_FOUND,
	TERM_ANOTHER_PORT,
	TERM_GSTATE,
	TERM_IO_BLOCKED,
	TERM_OUT_OF_WQ_DESC,
	TERM_TIMED_OUT,
	TERM_MISC,
};
81
82const char *fnic_state_to_str(unsigned int state)
83{
84 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
85 return "unknown";
86
87 return fnic_state_str[state];
88}
89
90static const char *fnic_ioreq_state_to_str(unsigned int state)
91{
92 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
93 !fnic_ioreq_state_str[state])
94 return "unknown";
95
96 return fnic_ioreq_state_str[state];
97}
98
99static const char *fnic_fcpio_status_to_str(unsigned int status)
100{
101 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
102 return "unknown";
103
104 return fcpio_status_str[status];
105}
106
107/*
108 * Unmap the data buffer and sense buffer for an io_req,
109 * also unmap and free the device-private scatter/gather list.
110 */
111static void fnic_release_ioreq_buf(struct fnic *fnic,
112 struct fnic_io_req *io_req,
113 struct scsi_cmnd *sc)
114{
115 if (io_req->sgl_list_pa)
116 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
117 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
118 DMA_TO_DEVICE);
119 scsi_dma_unmap(sc);
120
121 if (io_req->sgl_cnt)
122 mempool_free(io_req->sgl_list_alloc,
123 fnic->io_sgl_pool[io_req->sgl_type]);
124 if (io_req->sense_buf_pa)
125 dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
126 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
127}
128
129static bool
130fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
131 void *data1, void *data2)
132{
133 u32 *portid = data1;
134 unsigned int *count = data2;
135 struct fnic_io_req *io_req = fnic_priv(sc)->io_req;
136
137 if (!io_req || (*portid && (io_req->port_id != *portid)))
138 return true;
139
140 *count += 1;
141 return true;
142}
143
/*
 * Count outstanding I/O requests on this fnic, optionally restricted to
 * one remote port.
 *
 * @portid: FC port id to match; 0 counts I/Os for all ports.
 *
 * Returns the number of commands that currently have an io_req attached.
 */
unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid)
{
	unsigned int count = 0;

	/* iterate over this fnic's commands, tallying via the callback */
	fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter,
			  &portid, &count);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		      "portid = 0x%x count = %u\n", portid, count);
	return count;
}
155
/* Count all outstanding I/O requests; portid 0 matches any port. */
unsigned int fnic_count_all_ioreqs(struct fnic *fnic)
{
	return fnic_count_ioreqs(fnic, 0);
}
160
161static bool
162fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
163 void *data1, void *data2)
164{
165 struct scsi_device *scsi_device = data1;
166 unsigned int *count = data2;
167
168 if (sc->device != scsi_device || !fnic_priv(sc)->io_req)
169 return true;
170
171 *count += 1;
172 return true;
173}
174
/*
 * Count outstanding I/O requests targeted at one scsi_device (LUN).
 * Returns the number of commands on that device that currently have an
 * io_req attached.
 */
unsigned int
fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device)
{
	unsigned int count = 0;

	fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter,
			  scsi_device, &count);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		      "lun = %p count = %u\n", scsi_device, count);
	return count;
}
187
/*
 * Free up Copy Wq descriptors. Called with copy_wq lock held.
 *
 * Returns 1 if no firmware ack has been recorded (nothing to reclaim),
 * 0 after descriptors up to and including fw_ack_index were reclaimed.
 */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[hwq])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[hwq])
		wq->ring.desc_avail += (fnic->fw_ack_index[hwq]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[hwq] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[hwq] = 0;
	return 0;
}
219
220
221/*
222 * __fnic_set_state_flags
223 * Sets/Clears bits in fnic's state_flags
224 **/
225void
226__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
227 unsigned long clearbits)
228{
229 unsigned long flags = 0;
230
231 spin_lock_irqsave(&fnic->fnic_lock, flags);
232
233 if (clearbits)
234 fnic->state_flags &= ~st_flags;
235 else
236 fnic->state_flags |= st_flags;
237
238 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
239
240 return;
241}
242
243
244/*
245 * fnic_fw_reset_handler
246 * Routine to send reset msg to fw
247 */
248int fnic_fw_reset_handler(struct fnic *fnic)
249{
250 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
251 int ret = 0;
252 unsigned long flags;
253 unsigned int ioreq_count;
254
255 /* indicate fwreset to io path */
256 fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
257 ioreq_count = fnic_count_all_ioreqs(fnic);
258
259 /* wait for io cmpl */
260 while (atomic_read(&fnic->in_flight))
261 schedule_timeout(msecs_to_jiffies(1));
262
263 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
264
265 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
266 free_wq_copy_descs(fnic, wq, 0);
267
268 if (!vnic_wq_copy_desc_avail(wq))
269 ret = -EAGAIN;
270 else {
271 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
272 "ioreq_count: %u\n", ioreq_count);
273 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
274 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
275 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
276 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
277 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
278 atomic64_read(
279 &fnic->fnic_stats.fw_stats.active_fw_reqs));
280 }
281
282 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
283
284 if (!ret) {
285 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
286 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
287 "Issued fw reset\n");
288 } else {
289 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
290 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
291 "Failed to issue fw reset\n");
292 }
293
294 return ret;
295}
296
297
298/*
299 * fnic_flogi_reg_handler
300 * Routine to send flogi register msg to fw
301 */
302int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
303{
304 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
305 enum fcpio_flogi_reg_format_type format;
306 u8 gw_mac[ETH_ALEN];
307 int ret = 0;
308 unsigned long flags;
309 struct fnic_iport_s *iport = &fnic->iport;
310
311 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
312
313 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
314 free_wq_copy_descs(fnic, wq, 0);
315
316 if (!vnic_wq_copy_desc_avail(wq)) {
317 ret = -EAGAIN;
318 goto flogi_reg_ioreq_end;
319 }
320
321 memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN);
322 format = FCPIO_FLOGI_REG_GW_DEST;
323
324 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
325 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
326 fc_id, gw_mac,
327 fnic->iport.fpma,
328 iport->r_a_tov, iport->e_d_tov);
329 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
330 "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n",
331 fc_id, fnic->iport.fpma, gw_mac);
332 } else {
333 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
334 format, fc_id, gw_mac);
335 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
336 "FLOGI reg issued fcid 0x%x dest %p\n",
337 fc_id, gw_mac);
338 }
339
340 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
341 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
342 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
343 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
344 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
345
346flogi_reg_ioreq_end:
347 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
348 return ret;
349}
350
351/*
352 * fnic_queue_wq_copy_desc
353 * Routine to enqueue a wq copy desc
354 */
355static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
356 struct vnic_wq_copy *wq,
357 struct fnic_io_req *io_req,
358 struct scsi_cmnd *sc,
359 int sg_count,
360 uint32_t mqtag,
361 uint16_t hwq)
362{
363 struct scatterlist *sg;
364 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
365 struct host_sg_desc *desc;
366 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
367 unsigned int i;
368 int flags;
369 u8 exch_flags;
370 struct scsi_lun fc_lun;
371 struct fnic_tport_s *tport;
372 struct rport_dd_data_s *rdd_data;
373
374 rdd_data = rport->dd_data;
375 tport = rdd_data->tport;
376
377 if (sg_count) {
378 /* For each SGE, create a device desc entry */
379 desc = io_req->sgl_list;
380 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
381 desc->addr = cpu_to_le64(sg_dma_address(sg));
382 desc->len = cpu_to_le32(sg_dma_len(sg));
383 desc->_resvd = 0;
384 desc++;
385 }
386
387 io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
388 io_req->sgl_list,
389 sizeof(io_req->sgl_list[0]) * sg_count,
390 DMA_TO_DEVICE);
391 if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
392 printk(KERN_ERR "DMA mapping failed\n");
393 return SCSI_MLQUEUE_HOST_BUSY;
394 }
395 }
396
397 io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
398 sc->sense_buffer,
399 SCSI_SENSE_BUFFERSIZE,
400 DMA_FROM_DEVICE);
401 if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
402 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
403 sizeof(io_req->sgl_list[0]) * sg_count,
404 DMA_TO_DEVICE);
405 printk(KERN_ERR "DMA mapping failed\n");
406 return SCSI_MLQUEUE_HOST_BUSY;
407 }
408
409 int_to_scsilun(sc->device->lun, &fc_lun);
410
411 /* Enqueue the descriptor in the Copy WQ */
412 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
413 free_wq_copy_descs(fnic, wq, hwq);
414
415 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
416 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
417 "fnic_queue_wq_copy_desc failure - no descriptors\n");
418 atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
419 return SCSI_MLQUEUE_HOST_BUSY;
420 }
421
422 flags = 0;
423 if (sc->sc_data_direction == DMA_FROM_DEVICE)
424 flags = FCPIO_ICMND_RDDATA;
425 else if (sc->sc_data_direction == DMA_TO_DEVICE)
426 flags = FCPIO_ICMND_WRDATA;
427
428 exch_flags = 0;
429 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
430 (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY))
431 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
432
433 fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
434 0, exch_flags, io_req->sgl_cnt,
435 SCSI_SENSE_BUFFERSIZE,
436 io_req->sgl_list_pa,
437 io_req->sense_buf_pa,
438 0, /* scsi cmd ref, always 0 */
439 FCPIO_ICMND_PTA_SIMPLE,
440 /* scsi pri and tag */
441 flags, /* command flags */
442 sc->cmnd, sc->cmd_len,
443 scsi_bufflen(sc),
444 fc_lun.scsi_lun, io_req->port_id,
445 tport->max_payload_size,
446 tport->r_a_tov, tport->e_d_tov);
447
448 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
449 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
450 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
451 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
452 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
453
454 return 0;
455}
456
457enum scsi_qc_status fnic_queuecommand(struct Scsi_Host *shost,
458 struct scsi_cmnd *sc)
459{
460 struct request *const rq = scsi_cmd_to_rq(sc);
461 uint32_t mqtag = 0;
462 void (*done)(struct scsi_cmnd *) = scsi_done;
463 struct fc_rport *rport;
464 struct fnic_io_req *io_req = NULL;
465 struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host));
466 struct fnic_iport_s *iport = NULL;
467 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
468 struct vnic_wq_copy *wq;
469 int ret = 1;
470 u64 cmd_trace;
471 int sg_count = 0;
472 unsigned long flags = 0;
473 unsigned long ptr;
474 uint16_t hwq = 0;
475 struct fnic_tport_s *tport = NULL;
476 struct rport_dd_data_s *rdd_data;
477 uint16_t lun0_delay = 0;
478
479 rport = starget_to_rport(scsi_target(sc->device));
480 if (!rport) {
481 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
482 "returning DID_NO_CONNECT for IO as rport is NULL\n");
483 sc->result = DID_NO_CONNECT << 16;
484 done(sc);
485 return 0;
486 }
487
488 ret = fc_remote_port_chkready(rport);
489 if (ret) {
490 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
491 "rport is not ready\n");
492 atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
493 sc->result = ret;
494 done(sc);
495 return 0;
496 }
497
498 mqtag = blk_mq_unique_tag(rq);
499 spin_lock_irqsave(&fnic->fnic_lock, flags);
500 iport = &fnic->iport;
501
502 if (iport->state != FNIC_IPORT_STATE_READY) {
503 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
504 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
505 "returning DID_NO_CONNECT for IO as iport state: %d\n",
506 iport->state);
507 sc->result = DID_NO_CONNECT << 16;
508 done(sc);
509 return 0;
510 }
511
512 /* fc_remote_port_add() may have added the tport to
513 * fc_transport but dd_data not yet set
514 */
515 rdd_data = rport->dd_data;
516 tport = rdd_data->tport;
517 if (!tport || (rdd_data->iport != iport)) {
518 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
519 "dd_data not yet set in SCSI for rport portid: 0x%x\n",
520 rport->port_id);
521 tport = fnic_find_tport_by_fcid(iport, rport->port_id);
522 if (!tport) {
523 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
524 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
525 "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n",
526 rport->port_id);
527 sc->result = DID_BUS_BUSY << 16;
528 done(sc);
529 return 0;
530 }
531
532 /* Re-assign same params as in fnic_fdls_add_tport */
533 rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
534 rport->supported_classes =
535 FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
536 /* the dd_data is allocated by fctransport of size dd_fcrport_size */
537 rdd_data = rport->dd_data;
538 rdd_data->tport = tport;
539 rdd_data->iport = iport;
540 tport->rport = rport;
541 tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
542 }
543
544 if ((tport->state != FDLS_TGT_STATE_READY)
545 && (tport->state != FDLS_TGT_STATE_ADISC)) {
546 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
547 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
548 "returning DID_NO_CONNECT for IO as tport state: %d\n",
549 tport->state);
550 sc->result = DID_NO_CONNECT << 16;
551 done(sc);
552 return 0;
553 }
554
555 atomic_inc(&fnic->in_flight);
556 atomic_inc(&tport->in_flight);
557
558 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
559 atomic_dec(&fnic->in_flight);
560 atomic_dec(&tport->in_flight);
561 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
562 return SCSI_MLQUEUE_HOST_BUSY;
563 }
564
565 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
566 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
567 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
568 "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
569 fnic->state_flags);
570 return SCSI_MLQUEUE_HOST_BUSY;
571 }
572
573 if (!tport->lun0_delay) {
574 lun0_delay = 1;
575 tport->lun0_delay++;
576 }
577
578 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
579
580 fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
581 fnic_priv(sc)->flags = FNIC_NO_FLAGS;
582
583 /* Get a new io_req for this SCSI IO */
584 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
585 if (!io_req) {
586 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
587 ret = SCSI_MLQUEUE_HOST_BUSY;
588 goto out;
589 }
590 memset(io_req, 0, sizeof(*io_req));
591
592 /* Map the data buffer */
593 sg_count = scsi_dma_map(sc);
594 if (sg_count < 0) {
595 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
596 mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
597 mempool_free(io_req, fnic->io_req_pool);
598 goto out;
599 }
600
601 io_req->tport = tport;
602 /* Determine the type of scatter/gather list we need */
603 io_req->sgl_cnt = sg_count;
604 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
605 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
606 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
607
608 if (sg_count) {
609 io_req->sgl_list =
610 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
611 GFP_ATOMIC);
612 if (!io_req->sgl_list) {
613 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
614 ret = SCSI_MLQUEUE_HOST_BUSY;
615 scsi_dma_unmap(sc);
616 mempool_free(io_req, fnic->io_req_pool);
617 goto out;
618 }
619
620 /* Cache sgl list allocated address before alignment */
621 io_req->sgl_list_alloc = io_req->sgl_list;
622 ptr = (unsigned long) io_req->sgl_list;
623 if (ptr % FNIC_SG_DESC_ALIGN) {
624 io_req->sgl_list = (struct host_sg_desc *)
625 (((unsigned long) ptr
626 + FNIC_SG_DESC_ALIGN - 1)
627 & ~(FNIC_SG_DESC_ALIGN - 1));
628 }
629 }
630
631 /*
632 * Will acquire lock before setting to IO initialized.
633 */
634 hwq = blk_mq_unique_tag_to_hwq(mqtag);
635 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
636
637 /* initialize rest of io_req */
638 io_req->port_id = rport->port_id;
639 io_req->start_time = jiffies;
640 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
641 fnic_priv(sc)->io_req = io_req;
642 fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
643 io_req->sc = sc;
644
645 if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
646 WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
647 fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
648 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
649 return SCSI_MLQUEUE_HOST_BUSY;
650 }
651
652 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req;
653 io_req->tag = mqtag;
654
655 /* create copy wq desc and enqueue it */
656 wq = &fnic->hw_copy_wq[hwq];
657 atomic64_inc(&fnic_stats->io_stats.ios[hwq]);
658 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq);
659 if (ret) {
660 /*
661 * In case another thread cancelled the request,
662 * refetch the pointer under the lock.
663 */
664 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
665 mqtag, sc, 0, 0, 0, fnic_flags_and_state(sc));
666 io_req = fnic_priv(sc)->io_req;
667 fnic_priv(sc)->io_req = NULL;
668 if (io_req)
669 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
670 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
671 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
672 if (io_req) {
673 fnic_release_ioreq_buf(fnic, io_req, sc);
674 mempool_free(io_req, fnic->io_req_pool);
675 }
676 atomic_dec(&fnic->in_flight);
677 atomic_dec(&tport->in_flight);
678 return ret;
679 } else {
680 atomic64_inc(&fnic_stats->io_stats.active_ios);
681 atomic64_inc(&fnic_stats->io_stats.num_ios);
682 if (atomic64_read(&fnic_stats->io_stats.active_ios) >
683 atomic64_read(&fnic_stats->io_stats.max_active_ios))
684 atomic64_set(&fnic_stats->io_stats.max_active_ios,
685 atomic64_read(&fnic_stats->io_stats.active_ios));
686
687 /* REVISIT: Use per IO lock in the final code */
688 fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
689 }
690
691 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
692
693out:
694 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
695 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
696 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
697 sc->cmnd[5]);
698
699 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
700 mqtag, sc, io_req, sg_count, cmd_trace,
701 fnic_flags_and_state(sc));
702
703 atomic_dec(&fnic->in_flight);
704 atomic_dec(&tport->in_flight);
705
706 if (lun0_delay) {
707 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
708 "LUN0 delay\n");
709 mdelay(LUN0_DELAY_TIME);
710 }
711
712 return ret;
713}
714
715
716/*
717 * fnic_fcpio_fw_reset_cmpl_handler
718 * Routine to handle fw reset completion
719 */
720static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
721 struct fcpio_fw_req *desc)
722{
723 u8 type;
724 u8 hdr_status;
725 struct fcpio_tag tag;
726 int ret = 0;
727 unsigned long flags;
728 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
729
730 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
731
732 atomic64_inc(&reset_stats->fw_reset_completions);
733
734 /* Clean up all outstanding io requests */
735 fnic_cleanup_io(fnic, SCSI_NO_TAG);
736
737 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
738 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
739 atomic64_set(&fnic->io_cmpl_skip, 0);
740
741 spin_lock_irqsave(&fnic->fnic_lock, flags);
742
743 /* fnic should be in FC_TRANS_ETH_MODE */
744 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
745 /* Check status of reset completion */
746 if (!hdr_status) {
747 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
748 "reset cmpl success\n");
749 /* Ready to send flogi out */
750 fnic->state = FNIC_IN_ETH_MODE;
751 } else {
752 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
753 "reset failed with header status: %s\n",
754 fnic_fcpio_status_to_str(hdr_status));
755
756 fnic->state = FNIC_IN_FC_MODE;
757 atomic64_inc(&reset_stats->fw_reset_failures);
758 ret = -1;
759 }
760 } else {
761 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
762 "Unexpected state while processing reset completion: %s\n",
763 fnic_state_to_str(fnic->state));
764 atomic64_inc(&reset_stats->fw_reset_failures);
765 ret = -1;
766 }
767
768 if (fnic->fw_reset_done)
769 complete(fnic->fw_reset_done);
770
771 /*
772 * If fnic is being removed, or fw reset failed
773 * free the flogi frame. Else, send it out
774 */
775 if (ret) {
776 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
777 fnic_free_txq(fnic);
778 goto reset_cmpl_handler_end;
779 }
780
781 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
782
783 queue_work(fnic_event_queue, &fnic->flush_work);
784
785 reset_cmpl_handler_end:
786 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
787
788 return ret;
789}
790
791/*
792 * fnic_fcpio_flogi_reg_cmpl_handler
793 * Routine to handle flogi register completion
794 */
795static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
796 struct fcpio_fw_req *desc)
797{
798 u8 type;
799 u8 hdr_status;
800 struct fcpio_tag tag;
801 int ret = 0;
802 unsigned long flags;
803
804 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
805
806 /* Update fnic state based on status of flogi reg completion */
807 spin_lock_irqsave(&fnic->fnic_lock, flags);
808
809 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
810
811 /* Check flogi registration completion status */
812 if (!hdr_status) {
813 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
814 "FLOGI reg succeeded\n");
815 fnic->state = FNIC_IN_FC_MODE;
816 } else {
817 FNIC_SCSI_DBG(KERN_DEBUG,
818 fnic->host, fnic->fnic_num,
819 "fnic flogi reg failed: %s\n",
820 fnic_fcpio_status_to_str(hdr_status));
821 fnic->state = FNIC_IN_ETH_MODE;
822 ret = -1;
823 }
824 } else {
825 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
826 "Unexpected fnic state %s while"
827 " processing flogi reg completion\n",
828 fnic_state_to_str(fnic->state));
829 ret = -1;
830 }
831
832 if (!ret) {
833 if (fnic->stop_rx_link_events) {
834 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
835 goto reg_cmpl_handler_end;
836 }
837 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
838
839 queue_work(fnic_event_queue, &fnic->flush_work);
840 queue_work(fnic_event_queue, &fnic->frame_work);
841 } else {
842 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
843 }
844
845reg_cmpl_handler_end:
846 return ret;
847}
848
849static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
850 u16 request_out)
851{
852 if (wq->to_clean_index <= wq->to_use_index) {
853 /* out of range, stale request_out index */
854 if (request_out < wq->to_clean_index ||
855 request_out >= wq->to_use_index)
856 return 0;
857 } else {
858 /* out of range, stale request_out index */
859 if (request_out < wq->to_clean_index &&
860 request_out >= wq->to_use_index)
861 return 0;
862 }
863 /* request_out index is in range */
864 return 1;
865}
866
867
868/*
869 * Mark that ack received and store the Ack index. If there are multiple
870 * acks received before Tx thread cleans it up, the latest value will be
871 * used which is correct behavior. This state should be in the copy Wq
872 * instead of in the fnic
873 */
874static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
875 unsigned int cq_index,
876 struct fcpio_fw_req *desc)
877{
878 struct vnic_wq_copy *wq;
879 u16 request_out = desc->u.ack.request_out;
880 unsigned long flags;
881 u64 *ox_id_tag = (u64 *)(void *)desc;
882 unsigned int wq_index = cq_index;
883
884 /* mark the ack state */
885 wq = &fnic->hw_copy_wq[cq_index];
886 spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags);
887
888 fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
889 if (is_ack_index_in_range(wq, request_out)) {
890 fnic->fw_ack_index[wq_index] = request_out;
891 fnic->fw_ack_recd[wq_index] = 1;
892 } else
893 atomic64_inc(
894 &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
895
896 spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
897 FNIC_TRACE(fnic_fcpio_ack_handler,
898 fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
899 ox_id_tag[4], ox_id_tag[5]);
900}
901
902/*
903 * fnic_fcpio_icmnd_cmpl_handler
904 * Routine to handle icmnd completions
905 */
906static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
907 struct fcpio_fw_req *desc)
908{
909 u8 type;
910 u8 hdr_status;
911 struct fcpio_tag ftag;
912 u32 id;
913 u64 xfer_len = 0;
914 struct fcpio_icmnd_cmpl *icmnd_cmpl;
915 struct fnic_io_req *io_req;
916 struct scsi_cmnd *sc;
917 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
918 unsigned long flags;
919 u64 cmd_trace;
920 unsigned long start_time;
921 unsigned long io_duration_time;
922 unsigned int hwq = 0;
923 unsigned int mqtag = 0;
924 unsigned int tag = 0;
925
926 /* Decode the cmpl description to get the io_req id */
927 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
928 fcpio_tag_id_dec(&ftag, &id);
929 icmnd_cmpl = &desc->u.icmnd_cmpl;
930
931 mqtag = id;
932 tag = blk_mq_unique_tag_to_tag(mqtag);
933 hwq = blk_mq_unique_tag_to_hwq(mqtag);
934
935 if (hwq != cq_index) {
936 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
937 "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
938 hwq, mqtag, tag, cq_index);
939 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
940 "hdr status: %s icmnd completion on the wrong queue\n",
941 fnic_fcpio_status_to_str(hdr_status));
942 }
943
944 if (tag >= fnic->fnic_max_tag_id) {
945 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
946 "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
947 hwq, mqtag, tag, cq_index);
948 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
949 "hdr status: %s Out of range tag\n",
950 fnic_fcpio_status_to_str(hdr_status));
951 return;
952 }
953 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
954
955 sc = scsi_host_find_tag(fnic->host, id);
956 WARN_ON_ONCE(!sc);
957 if (!sc) {
958 atomic64_inc(&fnic_stats->io_stats.sc_null);
959 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
960 shost_printk(KERN_ERR, fnic->host,
961 "icmnd_cmpl sc is null - "
962 "hdr status = %s tag = 0x%x desc = 0x%p\n",
963 fnic_fcpio_status_to_str(hdr_status), id, desc);
964 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
965 fnic->host->host_no, id,
966 ((u64)icmnd_cmpl->_resvd0[1] << 16 |
967 (u64)icmnd_cmpl->_resvd0[0]),
968 ((u64)hdr_status << 16 |
969 (u64)icmnd_cmpl->scsi_status << 8 |
970 (u64)icmnd_cmpl->flags), desc,
971 (u64)icmnd_cmpl->residual, 0);
972 return;
973 }
974
975 io_req = fnic_priv(sc)->io_req;
976 if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) {
977 WARN(1, "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x io_req tag mismatch\n",
978 __func__, __LINE__, hwq, mqtag, tag);
979 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
980 return;
981 }
982
983 WARN_ON_ONCE(!io_req);
984 if (!io_req) {
985 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
986 fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
987 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
988 shost_printk(KERN_ERR, fnic->host,
989 "icmnd_cmpl io_req is null - "
990 "hdr status = %s tag = 0x%x sc 0x%p\n",
991 fnic_fcpio_status_to_str(hdr_status), id, sc);
992 return;
993 }
994 start_time = io_req->start_time;
995
996 /* firmware completed the io */
997 io_req->io_completed = 1;
998
999 /*
1000 * if SCSI-ML has already issued abort on this command,
1001 * set completion of the IO. The abts path will clean it up
1002 */
1003 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1004
1005 /*
1006 * set the FNIC_IO_DONE so that this doesn't get
1007 * flagged as 'out of order' if it was not aborted
1008 */
1009 fnic_priv(sc)->flags |= FNIC_IO_DONE;
1010 fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
1011 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1012 if(FCPIO_ABORTED == hdr_status)
1013 fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
1014
1015 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1016 "icmnd_cmpl abts pending "
1017 "hdr status = %s tag = 0x%x sc = 0x%p "
1018 "scsi_status = %x residual = %d\n",
1019 fnic_fcpio_status_to_str(hdr_status),
1020 id, sc,
1021 icmnd_cmpl->scsi_status,
1022 icmnd_cmpl->residual);
1023 return;
1024 }
1025
1026 /* Mark the IO as complete */
1027 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
1028
1029 icmnd_cmpl = &desc->u.icmnd_cmpl;
1030
1031 switch (hdr_status) {
1032 case FCPIO_SUCCESS:
1033 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
1034 xfer_len = scsi_bufflen(sc);
1035
1036 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
1037 xfer_len -= icmnd_cmpl->residual;
1038 scsi_set_resid(sc, icmnd_cmpl->residual);
1039 }
1040
1041 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
1042 atomic64_inc(&fnic_stats->misc_stats.check_condition);
1043
1044 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
1045 atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
1046
1047 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1048 "xfer_len: %llu", xfer_len);
1049 break;
1050
1051 case FCPIO_TIMEOUT: /* request was timed out */
1052 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
1053 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
1054 break;
1055
1056 case FCPIO_ABORTED: /* request was aborted */
1057 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
1058 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1059 break;
1060
1061 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
1062 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
1063 scsi_set_resid(sc, icmnd_cmpl->residual);
1064 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1065 break;
1066
1067 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
1068 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
1069 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
1070 break;
1071
1072 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
1073 atomic64_inc(&fnic_stats->io_stats.io_not_found);
1074 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1075 break;
1076
1077 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
1078 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
1079 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1080 break;
1081
1082 case FCPIO_FW_ERR: /* request was terminated due fw error */
1083 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
1084 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1085 break;
1086
1087 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
1088 atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
1089 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1090 break;
1091
1092 case FCPIO_INVALID_HEADER: /* header contains invalid data */
1093 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
1094 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
1095 default:
1096 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1097 break;
1098 }
1099
1100 /* Break link with the SCSI command */
1101 fnic_priv(sc)->io_req = NULL;
1102 io_req->sc = NULL;
1103 fnic_priv(sc)->flags |= FNIC_IO_DONE;
1104 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
1105
1106 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1107
1108 if (hdr_status != FCPIO_SUCCESS) {
1109 atomic64_inc(&fnic_stats->io_stats.io_failures);
1110 shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n",
1111 fnic_fcpio_status_to_str(hdr_status));
1112 }
1113
1114 fnic_release_ioreq_buf(fnic, io_req, sc);
1115
1116 cmd_trace = ((u64)hdr_status << 56) |
1117 (u64)icmnd_cmpl->scsi_status << 48 |
1118 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
1119 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1120 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
1121
1122 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
1123 sc->device->host->host_no, id, sc,
1124 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
1125 (u64)icmnd_cmpl->_resvd0[0] << 48 |
1126 jiffies_to_msecs(jiffies - start_time)),
1127 desc, cmd_trace, fnic_flags_and_state(sc));
1128
1129 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1130 fnic_stats->host_stats.fcp_input_requests++;
1131 fnic->fcp_input_bytes += xfer_len;
1132 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1133 fnic_stats->host_stats.fcp_output_requests++;
1134 fnic->fcp_output_bytes += xfer_len;
1135 } else
1136 fnic_stats->host_stats.fcp_control_requests++;
1137
1138 /* Call SCSI completion function to complete the IO */
1139 scsi_done(sc);
1140
1141 mempool_free(io_req, fnic->io_req_pool);
1142
1143 atomic64_dec(&fnic_stats->io_stats.active_ios);
1144 if (atomic64_read(&fnic->io_cmpl_skip))
1145 atomic64_dec(&fnic->io_cmpl_skip);
1146 else
1147 atomic64_inc(&fnic_stats->io_stats.io_completions);
1148
1149
1150 io_duration_time = jiffies_to_msecs(jiffies) -
1151 jiffies_to_msecs(start_time);
1152
1153 if(io_duration_time <= 10)
1154 atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
1155 else if(io_duration_time <= 100)
1156 atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
1157 else if(io_duration_time <= 500)
1158 atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
1159 else if(io_duration_time <= 5000)
1160 atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
1161 else if(io_duration_time <= 10000)
1162 atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
1163 else if(io_duration_time <= 30000)
1164 atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
1165 else {
1166 atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
1167
1168 if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
1169 atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
1170 }
1171}
1172
/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 *
 * Handles firmware completions for task-management requests.  The tag bits
 * FNIC_TAG_ABORT / FNIC_TAG_DEV_RST encode which kind of request completed:
 * abort of a device reset (both bits), an abort/terminate (ABORT only), or a
 * device reset (DEV_RST only).  Runs with the per-hwq wq_copy_lock held for
 * the state transitions; the lock is always dropped before returning.
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
					struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag ftag;
	u32 id;
	struct scsi_cmnd *sc = NULL;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	unsigned long start_time;
	unsigned int hwq = cq_index;
	unsigned int mqtag;
	unsigned int tag;

	/* Decode status and the driver tag out of the firmware descriptor */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
	fcpio_tag_id_dec(&ftag, &id);

	mqtag = id & FNIC_TAG_MASK;
	tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK);
	hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);

	/* Completion is expected on the queue the request was issued on */
	if (hwq != cq_index) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
			hwq, mqtag, tag, cq_index);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hdr status: %s ITMF completion on the wrong queue\n",
			fnic_fcpio_status_to_str(hdr_status));
	}

	/*
	 * Validate tag range.  tag == fnic_max_tag_id is only legal for the
	 * sg3utils-issued device reset (FNIC_TAG_DEV_RST set), which uses the
	 * reserved slot past the normal tag space.
	 */
	if (tag > fnic->fnic_max_tag_id) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
			hwq, mqtag, tag, cq_index);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hdr status: %s Tag out of range\n",
			fnic_fcpio_status_to_str(hdr_status));
		return;
	} else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
			hwq, mqtag, tag, cq_index);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hdr status: %s Tag out of range\n",
			fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);

	/* If it is sg3utils allocated SC then tag_id
	 * is max_tag_id and SC is retrieved from io_req
	 */
	if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
		io_req = fnic->sw_copy_wq[hwq].io_req_table[tag];
		if (io_req)
			sc = io_req->sc;
	} else {
		sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK);
	}

	/* Guard: no scsi_cmnd associated with this tag */
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		shost_printk(KERN_ERR, fnic->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), tag);
		return;
	}

	/* Guard: scsi_cmnd exists but its io_req was already torn down */
	io_req = fnic_priv(sc)->io_req;
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), tag, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
			hwq, mqtag, tag,
			fnic_fcpio_status_to_str(hdr_status));
		fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
		fnic_priv(sc)->abts_status = hdr_status;
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		/* wake up the device-reset thread blocked on the abort */
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		shost_printk(KERN_DEBUG, fnic->host,
			"hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
			hwq, mqtag, tag,
			fnic_fcpio_status_to_str(hdr_status));
		/* Per-status accounting; abort vs terminate is distinguished
		 * by the FNIC_IO_ABTS_ISSUED flag on the command.
		 */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				"abort reject recd. id %d\n",
				(int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
			return;
		}

		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
		fnic_priv(sc)->abts_status = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			fnic_priv(sc)->abts_status = FCPIO_SUCCESS;

		if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
			shost_printk(KERN_INFO, fnic->host,
				"hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
				hwq, mqtag, tag);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
				hwq, mqtag,
				tag, fnic_fcpio_status_to_str(hdr_status));
			/* Detach io_req from both the command and the tag
			 * table, then release resources outside the lock.
			 */
			fnic_priv(sc)->io_req = NULL;
			sc->result = (DID_ERROR << 16);
			fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id,
				   sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc,
				   (((u64)hdr_status << 40) |
				    (u64)sc->cmnd[0] << 32 |
				    (u64)sc->cmnd[2] << 24 |
				    (u64)sc->cmnd[3] << 16 |
				    (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				   fnic_flags_and_state(sc));
			scsi_done(sc);
			atomic64_dec(&fnic_stats->io_stats.active_ios);
			if (atomic64_read(&fnic->io_cmpl_skip))
				atomic64_dec(&fnic->io_cmpl_skip);
			else
				atomic64_inc(&fnic_stats->io_stats.io_completions);
		}
	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		shost_printk(KERN_INFO, fnic->host,
			"hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
			hwq, mqtag,
			tag, fnic_fcpio_status_to_str(hdr_status));
		fnic_priv(sc)->lr_status = hdr_status;
		/* An abort of this reset is already in flight; the abort
		 * completion path will finish the cleanup.
		 */
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
			fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0, fnic_flags_and_state(sc));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
				hwq, mqtag,
				tag, fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0, fnic_flags_and_state(sc));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
			hwq, mqtag,
			tag, fnic_fcpio_status_to_str(hdr_status));
		/* wake the thread blocked in the device-reset path */
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	} else {
		shost_printk(KERN_ERR, fnic->host,
			"%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
			__func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	}

}
1429
1430/*
1431 * fnic_fcpio_cmpl_handler
1432 * Routine to service the cq for wq_copy
1433 */
1434static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1435 unsigned int cq_index,
1436 struct fcpio_fw_req *desc)
1437{
1438 struct fnic *fnic = vnic_dev_priv(vdev);
1439
1440 switch (desc->hdr.type) {
1441 case FCPIO_ICMND_CMPL: /* fw completed a command */
1442 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1443 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1444 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1445 case FCPIO_RESET_CMPL: /* fw completed reset */
1446 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1447 break;
1448 default:
1449 break;
1450 }
1451
1452 cq_index -= fnic->copy_wq_base;
1453
1454 switch (desc->hdr.type) {
1455 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1456 fnic_fcpio_ack_handler(fnic, cq_index, desc);
1457 break;
1458
1459 case FCPIO_ICMND_CMPL: /* fw completed a command */
1460 fnic_fcpio_icmnd_cmpl_handler(fnic, cq_index, desc);
1461 break;
1462
1463 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1464 fnic_fcpio_itmf_cmpl_handler(fnic, cq_index, desc);
1465 break;
1466
1467 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1468 case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1469 fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1470 break;
1471
1472 case FCPIO_RESET_CMPL: /* fw completed reset */
1473 fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1474 break;
1475
1476 default:
1477 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1478 "firmware completion type %d\n",
1479 desc->hdr.type);
1480 break;
1481 }
1482
1483 return 0;
1484}
1485
1486/*
1487 * fnic_wq_copy_cmpl_handler
1488 * Routine to process wq copy
1489 */
1490int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index)
1491{
1492 unsigned int cur_work_done;
1493 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1494 u64 start_jiffies = 0;
1495 u64 end_jiffies = 0;
1496 u64 delta_jiffies = 0;
1497 u64 delta_ms = 0;
1498
1499 start_jiffies = jiffies;
1500 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1501 fnic_fcpio_cmpl_handler,
1502 copy_work_to_do);
1503 end_jiffies = jiffies;
1504 delta_jiffies = end_jiffies - start_jiffies;
1505 if (delta_jiffies > (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
1506 atomic64_set(&misc_stats->max_isr_jiffies, delta_jiffies);
1507 delta_ms = jiffies_to_msecs(delta_jiffies);
1508 atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
1509 atomic64_set(&misc_stats->corr_work_done, cur_work_done);
1510 }
1511
1512 return cur_work_done;
1513}
1514
/*
 * scsi_host_busy_iter() callback used by fnic_cleanup_io(): fails every
 * outstanding IO back to the SCSI mid-layer with DID_TRANSPORT_DISRUPTED
 * after the firmware stopped sending completions (e.g. fw reset).
 * Device-reset commands are not completed here; their waiters are woken
 * instead.  Always returns true so the iteration continues.
 */
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
	struct request *const rq = scsi_cmd_to_rq(sc);
	struct fnic *fnic = data;
	struct fnic_io_req *io_req;
	unsigned long start_time = 0;
	unsigned long flags;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	uint16_t hwq = 0;
	int tag;
	int mqtag;

	/* Recover hwq and per-queue tag from the block layer tag */
	mqtag = blk_mq_unique_tag(rq);
	hwq = blk_mq_unique_tag_to_hwq(mqtag);
	tag = blk_mq_unique_tag_to_tag(mqtag);

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);

	/* Drop the tag-table reference unconditionally under the lock */
	fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;

	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
			hwq, mqtag, tag, fnic_priv(sc)->flags);
		return true;
	}

	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
		!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
		/*
		 * We will be here only when FW completes reset
		 * without sending completions for outstanding ios.
		 */
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		/* wake whichever thread is blocked on this reset/abort */
		if (io_req && io_req->dr_done)
			complete(io_req->dr_done);
		else if (io_req && io_req->abts_done)
			complete(io_req->abts_done);

		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		/* Reset already marked done; nothing to clean here */
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}

	/* Detach io_req from the command before releasing the lock */
	fnic_priv(sc)->io_req = NULL;
	io_req->sc = NULL;
	start_time = io_req->start_time;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/*
	 * If there is a scsi_cmnd associated with this io_req, then
	 * free the corresponding state
	 */
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	sc->result = DID_TRANSPORT_DISRUPTED << 16;
	FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
		"mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
		mqtag, tag, sc, (jiffies - start_time));

	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	FNIC_TRACE(fnic_cleanup_io,
		   sc->device->host->host_no, tag, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64) sc->cmnd[0] << 32 |
		       (u64) sc->cmnd[2] << 24 |
		       (u64) sc->cmnd[3] << 16 |
		       (u64) sc->cmnd[4] << 8 | sc->cmnd[5]),
		   (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)->
		    state));

	/* Complete the command to SCSI */
	scsi_done(sc);
	return true;
}
1599
1600static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1601{
1602 unsigned int io_count = 0;
1603 unsigned long flags;
1604 struct fnic_io_req *io_req = NULL;
1605 struct scsi_cmnd *sc = NULL;
1606
1607 io_count = fnic_count_all_ioreqs(fnic);
1608 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1609 "Outstanding ioreq count: %d active io count: %lld Waiting\n",
1610 io_count,
1611 atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
1612
1613 scsi_host_busy_iter(fnic->host,
1614 fnic_cleanup_io_iter, fnic);
1615
1616 /* with sg3utils device reset, SC needs to be retrieved from ioreq */
1617 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1618 io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id];
1619 if (io_req) {
1620 sc = io_req->sc;
1621 if (sc) {
1622 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
1623 && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
1624 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1625 if (io_req && io_req->dr_done)
1626 complete(io_req->dr_done);
1627 }
1628 }
1629 }
1630 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1631
1632 while ((io_count = fnic_count_all_ioreqs(fnic))) {
1633 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1634 "Outstanding ioreq count: %d active io count: %lld Waiting\n",
1635 io_count,
1636 atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
1637
1638 schedule_timeout(msecs_to_jiffies(100));
1639 }
1640}
1641
/*
 * Called for each descriptor still sitting on a copy WQ when the queue is
 * cleaned up (the request never reached the firmware).  Tears down the
 * associated io_req, if any, and completes the SCSI command with
 * DID_NO_CONNECT.
 */
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	unsigned long start_time = 0;
	uint16_t hwq;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->host, id);
	if (!sc)
		return;

	hwq = blk_mq_unique_tag_to_hwq(id);
	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);

	/* Get the IO context which this desc refers to */
	io_req = fnic_priv(sc)->io_req;

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		/* NOTE(review): start_time stays 0 on this path, so the
		 * elapsed-time value in the trace below is not meaningful.
		 */
		goto wq_copy_cleanup_scsi_cmd;
	}

	/* Detach io_req from the command and the tag table under the lock */
	fnic_priv(sc)->io_req = NULL;
	io_req->sc = NULL;
	fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL;

	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/* io_req is no longer reachable from the table; safe to free here */
	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	FNIC_TRACE(fnic_wq_copy_cleanup_handler,
		   sc->device->host->host_no, id, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		       (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   fnic_flags_and_state(sc));

	scsi_done(sc);
}
1702
/*
 * Queue an abort/terminate ITMF request to the firmware for the given tag.
 *
 * Returns 0 on success, 1 if IO is blocked or no WQ descriptor is
 * available.
 *
 * NOTE(review): on the IO_BLOCKED path this decrements in_flight counters
 * it did not itself increment — this appears to balance increments done by
 * the callers before invoking this function; verify against the call sites
 * (e.g. fnic_rport_exch_reset()).
 */
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req,
					  unsigned int hwq)
{
	struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	struct fnic_tport_s *tport = io_req->tport;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		atomic_dec(&fnic->in_flight);
		atomic_dec(&tport->in_flight);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);

	/* Reclaim completed descriptors if the WQ is running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
		free_wq_copy_descs(fnic, wq, hwq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		atomic_dec(&fnic->in_flight);
		atomic_dec(&tport->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			"fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	/* Post the ITMF descriptor; FNIC_TAG_ABORT marks it as an abort */
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	/* Track the high-water mark of active firmware requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}
1753
/* Context passed to fnic_rport_abort_io_iter() via scsi_host_busy_iter() */
struct fnic_rport_abort_io_iter_data {
	struct fnic *fnic;	/* adapter whose IOs are being terminated */
	u32 port_id;		/* FC port id of the rport that went away */
	int term_cnt;		/* running count of terminates issued */
};
1759
/*
 * scsi_host_busy_iter() callback for fnic_rport_exch_reset(): for every
 * IO still pending with the firmware that belongs to the departed rport,
 * transition it to ABTS_PENDING and queue a terminate (ITMF abort task
 * term) to the firmware.  Always returns true so iteration continues.
 */
static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
	struct request *const rq = scsi_cmd_to_rq(sc);
	struct fnic_rport_abort_io_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int abt_tag = 0;
	struct fnic_io_req *io_req;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;
	uint16_t hwq = 0;
	unsigned long flags;

	abt_tag = blk_mq_unique_tag(rq);
	hwq = blk_mq_unique_tag_to_hwq(abt_tag);

	if (!sc) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			"sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq);
		return true;
	}

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;
	/* Skip IOs with no io_req or belonging to a different rport */
	if (!io_req || io_req->port_id != iter_data->port_id) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}

	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
		!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
			hwq, abt_tag, fnic_priv(sc)->flags);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to rport that went away
	 */
	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}

	/* Unexpected: someone is already waiting on an abort for this IO */
	if (io_req->abts_done) {
		shost_printk(KERN_ERR, fnic->host,
			"fnic_rport_exch_reset: io_req->abts_done is set state is %s\n",
			fnic_ioreq_state_to_str(fnic_priv(sc)->state));
	}

	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
		shost_printk(KERN_ERR, fnic->host,
			"rport_exch_reset IO not yet issued %p abt_tag 0x%x",
			sc, abt_tag);
		shost_printk(KERN_ERR, fnic->host,
			"flags %x state %d\n", fnic_priv(sc)->flags,
			fnic_priv(sc)->state);
	}
	/* Remember the old state so we can roll back if queueing fails */
	old_ioreq_state = fnic_priv(sc)->state;
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;

	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		atomic64_inc(&reset_stats->device_reset_terminates);
		abt_tag |= FNIC_TAG_DEV_RST;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			"dev reset sc 0x%p\n", sc);
	}
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		"fnic_rport_exch_reset: dev rst sc 0x%p\n", sc);
	WARN_ON_ONCE(io_req->abts_done);
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		"fnic_rport_reset_exch: Issuing abts\n");

	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/* Queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, abt_tag,
		FCPIO_ITMF_ABT_TASK_TERM,
		fc_lun.scsi_lun, io_req, hwq)) {
		/*
		 * Revert the cmd state back to old state, if
		 * it hasn't changed in between. This cmd will get
		 * aborted later by scsi_eh, or cleaned up during
		 * lun reset
		 */
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
			hwq, abt_tag, fnic_priv(sc)->flags);
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	} else {
		/* Terminate queued: record which kind was issued */
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
		else
			fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		atomic64_inc(&term_stats->terminates);
		iter_data->term_cnt++;
	}

	return true;
}
1872
1873void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1874{
1875 unsigned int io_count = 0;
1876 unsigned long flags;
1877 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1878 struct fnic_rport_abort_io_iter_data iter_data = {
1879 .fnic = fnic,
1880 .port_id = port_id,
1881 .term_cnt = 0,
1882 };
1883
1884 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1885 "fnic rport exchange reset for tport: 0x%06x\n",
1886 port_id);
1887
1888 if (fnic->in_remove)
1889 return;
1890
1891 io_count = fnic_count_ioreqs(fnic, port_id);
1892 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1893 "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n",
1894 port_id, io_count,
1895 atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
1896
1897 spin_lock_irqsave(&fnic->fnic_lock, flags);
1898 /* Bump in_flight counter to hold off fnic_fw_reset_handler. */
1899 atomic_inc(&fnic->in_flight);
1900 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
1901 atomic_dec(&fnic->in_flight);
1902 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1903 return;
1904 }
1905 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1906
1907 scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter,
1908 &iter_data);
1909
1910 if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
1911 atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
1912
1913 atomic_dec(&fnic->in_flight);
1914
1915 while ((io_count = fnic_count_ioreqs(fnic, port_id)))
1916 schedule_timeout(msecs_to_jiffies(1000));
1917
1918 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1919 "rport: 0x%x remaining portid-io-count: %d ",
1920 port_id, io_count);
1921}
1922
1923void fnic_terminate_rport_io(struct fc_rport *rport)
1924{
1925 struct fnic_tport_s *tport;
1926 struct rport_dd_data_s *rdd_data;
1927 struct fnic_iport_s *iport = NULL;
1928 struct fnic *fnic = NULL;
1929
1930 if (!rport) {
1931 pr_err("rport is NULL\n");
1932 return;
1933 }
1934
1935 rdd_data = rport->dd_data;
1936 if (rdd_data) {
1937 tport = rdd_data->tport;
1938 if (!tport) {
1939 pr_err(
1940 "term rport io called after tport is deleted. Returning 0x%8x\n",
1941 rport->port_id);
1942 } else {
1943 pr_err(
1944 "term rport io called after tport is set 0x%8x\n",
1945 rport->port_id);
1946 pr_err(
1947 "tport maybe rediscovered\n");
1948
1949 iport = (struct fnic_iport_s *) tport->iport;
1950 fnic = iport->fnic;
1951 fnic_rport_exch_reset(fnic, rport->port_id);
1952 }
1953 }
1954}
1955
1956/*
1957 * FCP-SCSI specific handling for module unload
1958 *
1959 */
1960void fnic_scsi_unload(struct fnic *fnic)
1961{
1962 unsigned long flags;
1963
1964 /*
1965 * Mark state so that the workqueue thread stops forwarding
1966 * received frames and link events to the local port. ISR and
1967 * other threads that can queue work items will also stop
1968 * creating work items on the fnic workqueue
1969 */
1970 spin_lock_irqsave(&fnic->fnic_lock, flags);
1971 fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
1972 fnic->in_remove = 1;
1973 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1974
1975 fnic_fcpio_reset(fnic);
1976
1977 fnic_flush_tport_event_list(fnic);
1978 fnic_delete_fcp_tports(fnic);
1979}
1980
1981void fnic_scsi_unload_cleanup(struct fnic *fnic)
1982{
1983 int hwq = 0;
1984
1985 fc_remove_host(fnic->host);
1986 scsi_remove_host(fnic->host);
1987 for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
1988 kfree(fnic->sw_copy_wq[hwq].io_req_table);
1989}
1990
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 *
 * Returns SUCCESS when the firmware completed the abort (the command is
 * then completed to the midlayer with DID_ABORT), FAILED otherwise.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct request *const rq = scsi_cmd_to_rq(sc);
	struct fnic_iport_s *iport;
	struct fnic_tport_s *tport;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	struct rport_dd_data_s *rdd_data;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	int mqtag;
	unsigned long abt_issued_time;
	uint16_t hwq = 0;

	/* Signaled through io_req->abts_done when fw completes the abort */
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	fnic = *((struct fnic **) shost_priv(sc->device->host));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	iport = &fnic->iport;

	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	/* hwq is derived from the unique tag; it selects the copy WQ/lock */
	mqtag = blk_mq_unique_tag(rq);
	hwq = blk_mq_unique_tag_to_hwq(mqtag);

	fnic_priv(sc)->flags = FNIC_NO_FLAGS;

	rdd_data = rport->dd_data;
	tport = rdd_data->tport;

	/* tport may already have been torn down by rport deletion */
	if (!tport) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Abort cmd called after tport delete! rport fcid: 0x%x",
			rport->port_id);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n",
			sc->device->lun, hwq, mqtag,
			sc->cmnd[0], fnic_priv(sc)->flags);
		ret = FAILED;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto fnic_abort_cmd_end;
	}

	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x",
		rport->port_id, sc->device->lun, hwq, mqtag);

	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"Op: 0x%x flags: 0x%x\n",
		sc->cmnd[0],
		fnic_priv(sc)->flags);

	/* Cannot issue an abort unless the local port is up */
	if (iport->state != FNIC_IPORT_STATE_READY) {
		atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			      "iport NOT in READY state");
		ret = FAILED;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto fnic_abort_cmd_end;
	}

	/* Target must be logged in (READY) or re-validating (ADISC) */
	if ((tport->state != FDLS_TGT_STATE_READY) &&
		(tport->state != FDLS_TGT_STATE_ADISC)) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			      "tport state: %d\n", tport->state);
		ret = FAILED;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto fnic_abort_cmd_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion wont actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * .io_req will not be cleared except while holding io_req_lock.
	 */
	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		/* fw already completed the IO; nothing to abort */
		ret = FAILED;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	/* An abort is already in flight; just wait for its completion */
	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		goto wait_pending;
	}

	/* Bucket the IO age at abort time into the latency histogram stats */
	abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
	if (abt_issued_time <= 6000)
		atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
	else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
		atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
	else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
		atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
	else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
		atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
	else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
		atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
	else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
		atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
	else
		atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		"CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
		sc->cmnd[0], abt_issued_time);
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion wont be done till mid-layer, since abort
	 * has already started.
	 */
	old_ioreq_state = fnic_priv(sc)->state;
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun,
				    io_req, hwq)) {
		/* Could not post to fw: roll back state and fail the abort */
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		io_req = fnic_priv(sc)->io_req;
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
 wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);

	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		if (task_req == FCPIO_ITMF_ABT_TASK) {
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		} else {
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		}
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */

	if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			      "Issuing host reset due to out of order IO\n");

		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;

	start_time = io_req->start_time;
	/*
	 * firmware completed the abort, check the status,
	 * free the io_req if successful. If abort fails,
	 * Device reset will clean the I/O.
	 */
	if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS ||
	    (fnic_priv(sc)->abts_status == FCPIO_ABORTED)) {
		fnic_priv(sc)->io_req = NULL;
		io_req->sc = NULL;
	} else {
		ret = FAILED;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		goto fnic_abort_cmd_end;
	}

	/* Drop the tag -> io_req mapping before freeing the io_req */
	fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	/* Call SCSI completion function to complete the IO */
	sc->result = DID_ABORT << 16;
	scsi_done(sc);
	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

fnic_abort_cmd_end:
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  fnic_flags_and_state(sc));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}
2271
/*
 * fnic_queue_dr_io_req() - post a LUN RESET (FCPIO_ITMF_LUN_RESET) request
 * to the firmware for @sc, on the hardware queue derived from io_req->tag.
 *
 * Bumps the per-fnic and per-tport in_flight counters for the duration of
 * the posting. Returns 0 on success, FAILED when IO is blocked by a pending
 * firmware reset, or -EAGAIN when no copy-WQ descriptor is available.
 */
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long flags;
	uint16_t hwq = 0;
	uint32_t tag = 0;
	struct fnic_tport_s *tport = io_req->tport;

	tag = io_req->tag;
	hwq = blk_mq_unique_tag_to_hwq(tag);
	wq = &fnic->hw_copy_wq[hwq];

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return FAILED;
	} else {
		atomic_inc(&fnic->in_flight);
		atomic_inc(&tport->in_flight);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);

	/* Reclaim completed descriptors when the free pool runs low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
		free_wq_copy_descs(fnic, wq, hwq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			"queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Flag the tag as a device-reset tag before handing it to the fw */
	tag |= FNIC_TAG_DEV_RST;
	fnic_queue_wq_copy_desc_itmf(wq, tag,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	/* Track the high-water mark of outstanding firmware requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	atomic_dec(&fnic->in_flight);
	atomic_dec(&tport->in_flight);

	return ret;
}
2335
/*
 * Context shared with the scsi_host_busy_iter() callbacks that abort or
 * inspect commands still pending on a LUN during device reset.
 */
struct fnic_pending_aborts_iter_data {
	struct fnic *fnic;
	struct scsi_cmnd *lr_sc;	/* the LUN-reset command itself; skipped by iterators */
	struct scsi_device *lun_dev;	/* only commands on this device are considered */
	int ret;			/* SUCCESS, or FAILED if an abort could not complete */
};
2342
/*
 * fnic_pending_aborts_iter() - scsi_host_busy_iter() callback that locally
 * terminates one outstanding command on the LUN being reset.
 *
 * Skips the LUN-reset command itself and commands on other devices. For each
 * remaining command that still has an io_req, issues FCPIO_ITMF_ABT_TASK_TERM
 * to the firmware and waits up to ed_tov for completion. Returns true to keep
 * iterating; returns false (and sets iter_data->ret = FAILED) to stop when a
 * terminate could not be queued or did not complete.
 */
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
	struct request *const rq = scsi_cmd_to_rq(sc);
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	struct scsi_device *lun_dev = iter_data->lun_dev;
	unsigned long abt_tag = 0;
	uint16_t hwq = 0;
	struct fnic_io_req *io_req;
	unsigned long flags;
	struct scsi_lun fc_lun;
	/* Signaled through io_req->abts_done when fw completes the terminate */
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	/* Ignore the device-reset command itself and commands on other LUNs */
	if (sc == iter_data->lr_sc || sc->device != lun_dev)
		return true;

	abt_tag = blk_mq_unique_tag(rq);
	hwq = blk_mq_unique_tag_to_hwq(abt_tag);

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		/* already completed; nothing to terminate */
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to the LUN that we are resetting
	 */
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		      "Found IO in %s on lun\n",
		      fnic_ioreq_state_to_str(fnic_priv(sc)->state));

	/* An abort is already in flight for this command; leave it alone */
	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}
	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
	    (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			      "dev rst not pending sc 0x%p\n", sc);
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return true;
	}

	/* Log diagnostic context first; the BUG_ON below enforces it */
	if (io_req->abts_done)
		shost_printk(KERN_ERR, fnic->host,
			     "%s: io_req->abts_done is set state is %s\n",
			     __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
	old_ioreq_state = fnic_priv(sc)->state;
	/*
	 * Any pending IO issued prior to reset is expected to be
	 * in abts pending state, if not we need to set
	 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
	 * When IO is completed, the IO will be handed over and
	 * handled in this function.
	 */
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;

	BUG_ON(io_req->abts_done);

	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			      "dev rst sc 0x%p\n", sc);
	}

	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
	io_req->abts_done = &tm_done;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, abt_tag,
				    FCPIO_ITMF_ABT_TASK_TERM,
				    fc_lun.scsi_lun, io_req, hwq)) {
		/* Could not post the terminate: undo state, stop iterating */
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		io_req = fnic_priv(sc)->io_req;
		if (io_req)
			io_req->abts_done = NULL;
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		iter_data->ret = FAILED;
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
			hwq, abt_tag);
		return false;
	} else {
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	}
	fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;

	/* Wait for the firmware to complete the terminate */
	wait_for_completion_timeout(&tm_done, msecs_to_jiffies
				    (fnic->config.ed_tov));

	/* Recheck cmd state to check if it is now aborted */
	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		return true;
	}

	io_req->abts_done = NULL;

	/* if abort is still pending with fw, fail */
	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
		iter_data->ret = FAILED;
		return false;
	}
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;

	/* original sc used for lr is handled by dev reset code */
	if (sc != iter_data->lr_sc) {
		fnic_priv(sc)->io_req = NULL;
		fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL;
	}
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/* original sc used for lr is handled by dev reset code */
	if (sc != iter_data->lr_sc) {
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

	/*
	 * Any IO is returned during reset, it needs to call scsi_done
	 * to return the scsi_cmnd to upper layer.
	 */
	/* Set result to let upper SCSI layer retry */
	sc->result = DID_RESET << 16;
	scsi_done(sc);

	return true;
}
2487
2488/*
2489 * Clean up any pending aborts on the lun
2490 * For each outstanding IO on this lun, whose abort is not completed by fw,
2491 * issue a local abort. Wait for abort to complete. Return 0 if all commands
2492 * successfully aborted, 1 otherwise
2493 */
2494static int fnic_clean_pending_aborts(struct fnic *fnic,
2495 struct scsi_cmnd *lr_sc,
2496 bool new_sc)
2497
2498{
2499 int ret = 0;
2500 struct fnic_pending_aborts_iter_data iter_data = {
2501 .fnic = fnic,
2502 .lun_dev = lr_sc->device,
2503 .ret = SUCCESS,
2504 };
2505
2506 iter_data.lr_sc = lr_sc;
2507
2508 scsi_host_busy_iter(fnic->host,
2509 fnic_pending_aborts_iter, &iter_data);
2510 if (iter_data.ret == FAILED) {
2511 ret = iter_data.ret;
2512 goto clean_pending_aborts_end;
2513 }
2514 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2515
2516 /* walk again to check, if IOs are still pending in fw */
2517 if (fnic_is_abts_pending(fnic, lr_sc))
2518 ret = 1;
2519
2520clean_pending_aborts_end:
2521 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2522 "exit status: %d\n", ret);
2523 return ret;
2524}
2525
/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
 * on the LUN.
 *
 * Returns SUCCESS when the LUN reset completed and all remaining I/O on the
 * LUN could be cleaned up, FAILED otherwise.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int count = 0;
	int ret = FAILED;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int mqtag = rq->tag;
	/* Signaled through io_req->dr_done when fw completes the LUN reset */
	DECLARE_COMPLETION_ONSTACK(tm_done);
	bool new_sc = 0;
	uint16_t hwq = 0;
	struct fnic_iport_s *iport = NULL;
	struct rport_dd_data_s *rdd_data;
	struct fnic_tport_s *tport;
	u32 old_soft_reset_count;
	u32 old_link_down_cnt;
	int exit_dr = 0;

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	fnic = *((struct fnic **) shost_priv(sc->device->host));
	iport = &fnic->iport;

	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic_stats->reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	/*
	 * NOTE(review): hwq is printed before it is derived from the unique
	 * tag further down, so this trace always reports hwq 0 -- confirm
	 * whether that is intended.
	 */
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		      "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
		      rport->port_id, sc->device->lun, hwq, mqtag,
		      fnic_priv(sc)->flags);

	rdd_data = rport->dd_data;
	tport = rdd_data->tport;
	if (!tport) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
		      "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n",
			  rport->port_id, sc->device->lun);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto fnic_device_reset_end;
	}

	if (iport->state != FNIC_IPORT_STATE_READY) {
		atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			      "iport NOT in READY state");
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto fnic_device_reset_end;
	}

	/* Target must be logged in (READY) or re-validating (ADISC) */
	if ((tport->state != FDLS_TGT_STATE_READY) &&
		(tport->state != FDLS_TGT_STATE_ADISC)) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			      "tport state: %d\n", tport->state);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto fnic_device_reset_end;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
		goto fnic_device_reset_end;
	}

	fnic_priv(sc)->flags = FNIC_DEVICE_RESET;

	if (unlikely(mqtag < 0)) {
		/*
		 * For device reset issued through sg3utils, we let
		 * only one LUN_RESET to go through and use a special
		 * tag equal to max_tag_id so that we don't have to allocate
		 * or free it. It won't interact with tags
		 * allocated by mid layer.
		 */
		mutex_lock(&fnic->sgreset_mutex);
		mqtag = fnic->fnic_max_tag_id;
		new_sc = 1;
	} else {
		mqtag = blk_mq_unique_tag(rq);
		hwq = blk_mq_unique_tag_to_hwq(mqtag);
	}

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;

	/*
	 * If there is a io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		io_req->tag = mqtag;
		fnic_priv(sc)->io_req = io_req;
		io_req->tport = tport;
		io_req->sc = sc;

		/* The tag slot must be free before we claim it */
		if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
			WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n",
					fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag));

		fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] =
				io_req;
	}
	io_req->dr_done = &tm_done;
	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
	fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		io_req = fnic_priv(sc)->io_req;
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	/* Snapshot link/reset generation counters to detect disruption later */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	old_link_down_cnt = iport->fnic->link_down_cnt;
	old_soft_reset_count = fnic->soft_reset_count;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/*
	 * Wait on the local completion for LUN reset. The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	/*
	 * Wake up can be due to the following reasons:
	 * 1) The device reset completed from target.
	 * 2) Device reset timed out.
	 * 3) A link-down/host_reset may have happened in between.
	 * 4) The device reset was aborted and io_req->dr_done was called.
	 */

	exit_dr = 0;
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if ((old_link_down_cnt != fnic->link_down_cnt) ||
		(fnic->reset_in_progress) ||
		(fnic->soft_reset_count != old_soft_reset_count) ||
		(iport->state != FNIC_IPORT_STATE_READY))
		exit_dr = 1;

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				"io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
		goto fnic_device_reset_end;
	}

	if (exit_dr) {
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			      "Host reset called for fnic. Exit device reset\n");
		io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	io_req->dr_done = NULL;

	status = fnic_priv(sc)->lr_status;

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			      "Device reset timed out\n");
		fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
		int_to_scsilun(sc->device->lun, &fc_lun);
		/* wq_copy_lock[hwq] is still held; _clean expects that */
		goto fnic_device_reset_clean;
	} else {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->host, fnic->fnic_num,
			      "Device reset completed - failed\n");
		io_req = fnic_priv(sc)->io_req;
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds
	 */
	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		io_req = fnic_priv(sc)->io_req;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			      "Device reset failed: Cannot abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	io_req = fnic_priv(sc)->io_req;
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	/* Every path into this label holds wq_copy_lock[hwq] */
	if (io_req) {
		fnic_priv(sc)->io_req = NULL;
		io_req->sc = NULL;
		fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL;
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

	/*
	 * If link-event is seen while LUN reset is issued we need
	 * to complete the LUN reset here
	 */
	if (!new_sc) {
		sc->result = DID_RESET << 16;
		scsi_done(sc);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  fnic_flags_and_state(sc));

	if (new_sc) {
		fnic->sgreset_sc = NULL;
		mutex_unlock(&fnic->sgreset_mutex);
	}

	/*
	 * Poll (up to 3 passes) for remaining I/O on the LUN to drain.
	 * NOTE(review): schedule_timeout() is called without setting the
	 * task state, so it returns immediately -- this loop is effectively
	 * a busy retry; confirm whether an actual 1s sleep was intended.
	 */
	while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) {
		if (count >= 2) {
			ret = FAILED;
			break;
		}
		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			      "Cannot clean up all IOs for the LUN\n");
		schedule_timeout(msecs_to_jiffies(1000));
		count++;
	}

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}
2830
2831static void fnic_post_flogo_linkflap(struct fnic *fnic)
2832{
2833 unsigned long flags;
2834
2835 fnic_fdls_link_status_change(fnic, 0);
2836 spin_lock_irqsave(&fnic->fnic_lock, flags);
2837
2838 if (fnic->link_status) {
2839 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2840 fnic_fdls_link_status_change(fnic, 1);
2841 return;
2842 }
2843 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2844}
2845
2846/* Logout from all the targets and simulate link flap */
2847void fnic_reset(struct Scsi_Host *shost)
2848{
2849 struct fnic *fnic;
2850 struct reset_stats *reset_stats;
2851
2852 fnic = *((struct fnic **) shost_priv(shost));
2853 reset_stats = &fnic->fnic_stats.reset_stats;
2854
2855 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2856 "Issuing fnic reset\n");
2857
2858 atomic64_inc(&reset_stats->fnic_resets);
2859 fnic_post_flogo_linkflap(fnic);
2860
2861 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2862 "Returning from fnic reset");
2863
2864 atomic64_inc(&reset_stats->fnic_reset_completions);
2865}
2866
2867int fnic_issue_fc_host_lip(struct Scsi_Host *shost)
2868{
2869 int ret = 0;
2870 struct fnic *fnic = *((struct fnic **) shost_priv(shost));
2871
2872 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2873 "FC host lip issued");
2874
2875 ret = fnic_host_reset(shost);
2876 return ret;
2877}
2878
/*
 * fnic_host_reset() - SCSI host reset handler.
 *
 * Serializes against concurrent resets via fnic->reset_in_progress and
 * reset_completion_wait, performs a fnic reset (logout plus simulated
 * link flap), then waits up to FNIC_HOST_RESET_SETTLE_TIME seconds for
 * the iport to reach READY so scsi-ml can immediately talk to devices.
 * Returns SUCCESS or FAILED.
 */
int fnic_host_reset(struct Scsi_Host *shost)
{
	int ret = SUCCESS;
	unsigned long wait_host_tmo;
	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
	unsigned long flags;
	struct fnic_iport_s *iport = &fnic->iport;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->reset_in_progress == NOT_IN_PROGRESS) {
		fnic->reset_in_progress = IN_PROGRESS;
	} else {
		/* Another reset is running; wait up to 10s for it to finish */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		wait_for_completion_timeout(&fnic->reset_completion_wait,
					    msecs_to_jiffies(10000));

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->reset_in_progress == IN_PROGRESS) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
			  "Firmware reset in progress. Skipping another host reset\n");
			return SUCCESS;
		}
		fnic->reset_in_progress = IN_PROGRESS;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up
	 */
	fnic_reset(shost);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->reset_in_progress = NOT_IN_PROGRESS;
	complete(&fnic->reset_completion_wait);
	/* Bump the generation counter so waiters (e.g. device reset) notice */
	fnic->soft_reset_count++;

	/* wait till the link is up */
	if (fnic->link_status) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			/*
			 * NOTE(review): the loop also exits with SUCCESS when
			 * link_status drops to 0 -- confirm that is intended.
			 */
			if (iport->state != FNIC_IPORT_STATE_READY
				&& fnic->link_status) {
				/* Drop the lock while sleeping a second */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				ssleep(1);
				spin_lock_irqsave(&fnic->fnic_lock, flags);
			} else {
				ret = SUCCESS;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		      "host reset return status: %d\n", ret);
	return ret;
}
2940
2941static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
2942{
2943 struct request *const rq = scsi_cmd_to_rq(sc);
2944 struct fnic_pending_aborts_iter_data *iter_data = data;
2945 struct fnic *fnic = iter_data->fnic;
2946 int cmd_state;
2947 struct fnic_io_req *io_req;
2948 unsigned long flags;
2949 uint16_t hwq = 0;
2950 int tag;
2951
2952 tag = blk_mq_unique_tag(rq);
2953 hwq = blk_mq_unique_tag_to_hwq(tag);
2954
2955 /*
2956 * ignore this lun reset cmd or cmds that do not belong to
2957 * this lun
2958 */
2959 if (iter_data->lr_sc && sc == iter_data->lr_sc)
2960 return true;
2961 if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
2962 return true;
2963
2964 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2965
2966 io_req = fnic_priv(sc)->io_req;
2967 if (!io_req) {
2968 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2969 return true;
2970 }
2971
2972 /*
2973 * Found IO that is still pending with firmware and
2974 * belongs to the LUN that we are resetting
2975 */
2976 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2977 "hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
2978 hwq, tag,
2979 fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2980 cmd_state = fnic_priv(sc)->state;
2981 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2982 if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
2983 iter_data->ret = 1;
2984
2985 return iter_data->ret ? false : true;
2986}
2987
2988/*
2989 * fnic_is_abts_pending() is a helper function that
2990 * walks through tag map to check if there is any IOs pending,if there is one,
2991 * then it returns 1 (true), otherwise 0 (false)
2992 * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
2993 * otherwise, it checks for all IOs.
2994 */
2995int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2996{
2997 struct fnic_pending_aborts_iter_data iter_data = {
2998 .fnic = fnic,
2999 .lun_dev = NULL,
3000 .ret = 0,
3001 };
3002
3003 if (lr_sc) {
3004 iter_data.lun_dev = lr_sc->device;
3005 iter_data.lr_sc = lr_sc;
3006 }
3007
3008 /* walk again to check, if IOs are still pending in fw */
3009 scsi_host_busy_iter(fnic->host,
3010 fnic_abts_pending_iter, &iter_data);
3011
3012 return iter_data.ret;
3013}
3014
3015/*
3016 * SCSI Error handling calls driver's eh_host_reset if all prior
3017 * error handling levels return FAILED. If host reset completes
3018 * successfully, and if link is up, then Fabric login begins.
3019 *
3020 * Host Reset is the highest level of error recovery. If this fails, then
3021 * host is offlined by SCSI.
3022 *
3023 */
3024int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
3025{
3026 int ret = 0;
3027 struct Scsi_Host *shost = sc->device->host;
3028 struct fnic *fnic = *((struct fnic **) shost_priv(shost));
3029
3030 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
3031 "SCSI error handling: fnic host reset");
3032
3033 ret = fnic_host_reset(shost);
3034 return ret;
3035}