Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
4 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 */
6#include <linux/errno.h>
7#include <linux/pci.h>
8#include <linux/slab.h>
9#include <linux/skbuff.h>
10#include <linux/interrupt.h>
11#include <linux/spinlock.h>
12#include <linux/if_ether.h>
13#include <linux/if_vlan.h>
14#include <linux/workqueue.h>
15#include <scsi/fc/fc_fip.h>
16#include <scsi/fc/fc_els.h>
17#include <scsi/fc_frame.h>
18#include <linux/etherdevice.h>
19#include <scsi/scsi_transport_fc.h>
20#include "fnic_io.h"
21#include "fnic.h"
22#include "fnic_fdls.h"
23#include "fdls_fc.h"
24#include "cq_enet_desc.h"
25#include "cq_exch_desc.h"
26#include "fip.h"
27
/* Bound on the 5s waits in fnic_handle_link() for an in-flight reset */
#define MAX_RESET_WAIT_COUNT 64

/* Workqueue on which received-frame work (fnic->frame_work) is queued */
struct workqueue_struct *fnic_event_queue;

/* Well-known ALL-FCF MAC used as destination before an FCF MAC is learnt */
static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;
33
34/*
35 * Internal Functions
36 * This function will initialize the src_mac address to be
37 * used in outgoing frames
38 */
39static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
40 uint8_t *src_mac)
41{
42 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
43 "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x",
44 src_mac[0], src_mac[1], src_mac[2], src_mac[3],
45 src_mac[4], src_mac[5]);
46
47 memcpy(fnic->iport.fpma, src_mac, 6);
48}
49
50/*
51 * This function will initialize the dst_mac address to be
52 * used in outgoing frames
53 */
54static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
55 uint8_t *dst_mac)
56{
57 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
58 "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x",
59 dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3],
60 dst_mac[4], dst_mac[5]);
61
62 memcpy(fnic->iport.fcfmac, dst_mac, 6);
63}
64
65void fnic_get_host_port_state(struct Scsi_Host *shost)
66{
67 struct fnic *fnic = *((struct fnic **) shost_priv(shost));
68 struct fnic_iport_s *iport = &fnic->iport;
69 unsigned long flags;
70
71 spin_lock_irqsave(&fnic->fnic_lock, flags);
72 if (!fnic->link_status)
73 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
74 else if (iport->state == FNIC_IPORT_STATE_READY)
75 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
76 else
77 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
78 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
79}
80
/*
 * Handle a physical link state transition for this fnic's iport.
 *
 * Link up: start FIP VLAN discovery when usefip is set, otherwise go
 * straight to fabric discovery. Link down: drop the learnt FPMA address
 * from the vnic, clean up FIP state and notify FDLS of the link loss.
 *
 * Takes fnic_lock with the IRQ flags kept in fnic->lock_flags (a field
 * shared by several lock sites in this file), so this must not be
 * entered with fnic_lock already held.
 */
void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
{
	struct fnic_iport_s *iport = &fnic->iport;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "link up: %d, usefip: %d", linkup, iport->usefip);

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (linkup) {
		if (iport->usefip) {
			/* FIP mode: discover the FCoE VLAN before login */
			iport->state = FNIC_IPORT_STATE_FIP;
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "link up: %d, usefip: %d", linkup, iport->usefip);
			fnic_fcoe_send_vlan_req(fnic);
		} else {
			/* Non-FIP: begin fabric discovery immediately */
			iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "iport->state: %d", iport->state);
			fnic_fdls_disc_start(iport);
		}
	} else {
		iport->state = FNIC_IPORT_STATE_LINK_WAIT;
		/* Remove the learnt FPMA address, if one was programmed */
		if (!is_zero_ether_addr(iport->fpma))
			vnic_dev_del_addr(fnic->vdev, iport->fpma);
		fnic_common_fip_cleanup(fnic);
		fnic_fdls_link_down(iport);

	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
112
113
114/*
115 * FPMA can be either taken from ethhdr(dst_mac) or flogi resp
116 * or derive from FC_MAP and FCID combination. While it should be
117 * same, revisit this if there is any possibility of not-correct.
118 */
119void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
120 uint8_t *fcid)
121{
122 struct fnic *fnic = iport->fnic;
123 struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
124 uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
125
126 memcpy(&fcmac[3], fcid, 3);
127
128 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
129 "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x",
130 ethhdr->h_dest[0], ethhdr->h_dest[1],
131 ethhdr->h_dest[2], ethhdr->h_dest[3],
132 ethhdr->h_dest[4], ethhdr->h_dest[5]);
133
134 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
135 "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x",
136 fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
137 fcmac[5]);
138
139 fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
140 fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
141}
142
143void fnic_fdls_init(struct fnic *fnic, int usefip)
144{
145 struct fnic_iport_s *iport = &fnic->iport;
146
147 /* Initialize iPort structure */
148 iport->state = FNIC_IPORT_STATE_INIT;
149 iport->fnic = fnic;
150 iport->usefip = usefip;
151
152 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
153 "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x",
154 iport->hwmac[0], iport->hwmac[1], iport->hwmac[2],
155 iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]);
156
157 INIT_LIST_HEAD(&iport->tport_list);
158 INIT_LIST_HEAD(&iport->tport_list_pending_del);
159
160 fnic_fdls_disc_init(iport);
161}
162
/*
 * Work handler for hardware link events.
 *
 * Re-reads link state and the link-down counter from the device and
 * compares them with the previously recorded values to classify the
 * transition: down->down, up->up, down->up, up->down, plus the bounced
 * up->down->up case detected via a changed link-down counter. The
 * classified transition is forwarded to fnic_fdls_link_status_change().
 *
 * Serializes with reset: waits (up to MAX_RESET_WAIT_COUNT * 5s) for an
 * in-progress reset, then claims reset_in_progress itself and signals
 * reset_completion_wait when done.
 *
 * NOTE(review): the IRQ flags are kept in fnic->lock_flags, a field
 * shared by several lock/unlock sites in this file — confirm all users
 * are serialized on fnic_lock itself.
 */
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	int old_link_status;
	u32 old_link_down_cnt;
	int max_count = 0;

	if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Interrupt mode is not MSI\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Stop link rx events\n");
		return;
	}

	/* Do not process if the fnic is already in transitional state */
	if ((fnic->state != FNIC_IN_ETH_MODE)
		&& (fnic->state != FNIC_IN_FC_MODE)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic in transitional state: %d. link up: %d ignored",
			     fnic->state, vnic_dev_link_status(fnic->vdev));
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Current link status: %d iport state: %d\n",
			     fnic->link_status, fnic->iport.state);
		return;
	}

	/* Snapshot the previous state, then refresh from the hardware */
	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	/* Wait (bounded) for any in-flight reset before claiming the slot */
	while (fnic->reset_in_progress == IN_PROGRESS) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic reset in progress. Link event needs to wait\n");

		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "waiting for reset completion\n");
		wait_for_completion_timeout(&fnic->reset_completion_wait,
					    msecs_to_jiffies(5000));
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "woken up from reset completion wait\n");
		spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

		max_count++;
		if (max_count >= MAX_RESET_WAIT_COUNT) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Rstth waited for too long. Skipping handle link event\n");
			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
			return;
		}
	}
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Marking fnic reset in progress\n");
	fnic->reset_in_progress = IN_PROGRESS;

	if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) ||
		(fnic->link_status != old_link_status)) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "old link status: %d link status: %d\n",
			     old_link_status, (int) fnic->link_status);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "old down count %d down count: %d\n",
			     old_link_down_cnt, (int) fnic->link_down_cnt);
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "down->down\n");
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP: the link bounced while we
				 * were not looking; emit both transitions.
				 */
				spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "up->down. Link down\n");
				fnic_fdls_link_status_change(fnic, 0);

				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "down->up. Link up\n");
				fnic_fdls_link_status_change(fnic, 1);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
					     "up->up\n");
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "down->up. Link up\n");
		fnic_fdls_link_status_change(fnic, 1);
	} else {
		/* UP -> DOWN */
		spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "up->down. Link down\n");
		fnic_fdls_link_status_change(fnic, 0);
	}

	/* Release the reset slot and wake any waiter */
	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	fnic->reset_in_progress = NOT_IN_PROGRESS;
	complete(&fnic->reset_completion_wait);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Marking fnic reset completion\n");
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
282
/*
 * Work handler that drains fnic->frame_queue and hands each received
 * FC frame to the FDLS layer.
 *
 * Frames queued from the FCP RQ arrive with the Ethernet/FCoE headers
 * already stripped (rx_ethhdr_stripped set), so the FC header offset is
 * chosen per frame. Runs under fnic_lock for the whole drain; exits
 * early when rx events are being stopped or the fnic is in a
 * transitional state.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fnic_frame_list *cur_frame, *next;
	int fchdr_offset = 0;

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
		if (fnic->stop_rx_link_events) {
			/* Free only this frame and bail out; the remainder of
			 * the queue is released in fnic_free_rxq().
			 */
			list_del(&cur_frame->links);
			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
			mempool_free(cur_frame, fnic->frame_elem_pool);
			return;
		}

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
			fnic->state != FNIC_IN_ETH_MODE) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Cannot process frame in transitional state\n");
			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
			return;
		}

		list_del(&cur_frame->links);

		/* Frames from FCP_RQ will have ethhdrs stripped off */
		fchdr_offset = (cur_frame->rx_ethhdr_stripped) ?
			0 : FNIC_ETH_FCOE_HDRS_OFFSET;

		/* NOTE(review): fnic_lock is held across this call — confirm
		 * fnic_fdls_recv_frame() expects to be entered locked.
		 */
		fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
				     cur_frame->frame_len, fchdr_offset);

		mempool_free(cur_frame->fp, fnic->frame_recv_pool);
		mempool_free(cur_frame, fnic->frame_elem_pool);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
325
/*
 * Work handler that drains fnic->fip_frame_queue, feeding each frame to
 * the FDLS FIP handler.
 *
 * Exits early when rx events are stopped (freeing only the current
 * frame; fnic_free_rxq() cleans the rest) or when the fnic is in a
 * transitional state (leaving the queue intact for later). Runs under
 * fnic_lock.
 */
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic_frame_list *cur_frame, *next;
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Processing FIP frame\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue,
				 links) {
		if (fnic->stop_rx_link_events) {
			list_del(&cur_frame->links);
			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
			mempool_free(cur_frame, fnic->frame_elem_pool);
			return;
		}

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
			fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
			return;
		}

		list_del(&cur_frame->links);

		/*
		 * A nonzero return means the FIP handler is done with the
		 * buffer, so it is freed here.
		 * NOTE(review): when fdls_fip_recv_frame() returns 0, neither
		 * cur_frame->fp nor cur_frame is freed on this path — confirm
		 * the callee takes ownership of both, otherwise the list
		 * element leaks.
		 */
		if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
			mempool_free(cur_frame->fp, fnic->frame_recv_pool);
			mempool_free(cur_frame, fnic->frame_elem_pool);
		}
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
364
365/**
366 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
367 * @fnic: fnic instance.
368 * @fp: Ethernet Frame.
369 */
370static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
371{
372 struct ethhdr *eh;
373 struct fnic_frame_list *fip_fr_elem;
374 unsigned long flags;
375
376 eh = (struct ethhdr *) fp;
377 if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
378 fip_fr_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
379 if (!fip_fr_elem)
380 return 0;
381 memset(fip_fr_elem, 0, sizeof(struct fnic_frame_list));
382 fip_fr_elem->fp = fp;
383 spin_lock_irqsave(&fnic->fnic_lock, flags);
384 list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
385 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
386 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
387 return 1; /* let caller know packet was used */
388 } else
389 return 0;
390}
391
392/**
393 * fnic_update_mac_locked() - set data MAC address and filters.
394 * @fnic: fnic instance.
395 * @new: newly-assigned FCoE MAC address.
396 *
397 * Called with the fnic lock held.
398 */
399void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
400{
401 struct fnic_iport_s *iport = &fnic->iport;
402 u8 *ctl = iport->hwmac;
403 u8 *data = fnic->data_src_addr;
404
405 if (is_zero_ether_addr(new))
406 new = ctl;
407 if (ether_addr_equal(data, new))
408 return;
409
410 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
411 "Update MAC: %u\n", *new);
412
413 if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
414 vnic_dev_del_addr(fnic->vdev, data);
415
416 memcpy(data, new, ETH_ALEN);
417 if (!ether_addr_equal(new, ctl))
418 vnic_dev_add_addr(fnic->vdev, new);
419}
420
/*
 * RQ buffer completion callback for a received frame.
 *
 * Decodes the completion descriptor: FCP RQ descriptors describe frames
 * whose Ethernet/FCoE headers are already stripped, while ENET RQ
 * descriptors carry full Ethernet frames (which may be FIP or another
 * protocol). Frames failing CRC/encoding checks are dropped, FIP frames
 * are diverted via fnic_import_rq_eth_pkt(), and good FC frames are
 * queued on fnic->frame_queue for fnic_handle_frame().
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	uint8_t *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int ethhdr_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, vlan;
	u32 rss_hash;
	u16 checksum;
	u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	u16 enet_bytes_written = 0;
	u32 bytes_written = 0;
	unsigned long flags;
	struct fnic_frame_list *frame_elem = NULL;
	struct ethhdr *eh;

	/* Take ownership of the receive buffer back from the hardware */
	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	fp = (uint8_t *) buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/* FCP RQ: eth/FCoE headers already stripped by hardware */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type,
				   &color, &q_number, &completed_index, &eop, &sop,
				   &fcoe_fnic_crc_ok, &exchange_id, &tmpl,
				   &fcp_bytes_written, &sof, &eof, &ingress_port,
				   &packet_error, &fcoe_enc_error, &fcs_ok,
				   &vlan_stripped, &vlan);
		ethhdr_stripped = 1;
		bytes_written = fcp_bytes_written;
	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/* ENET RQ: full Ethernet frame */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type,
				    &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop, &rss_type,
				    &csum_not_calc, &rss_hash, &enet_bytes_written,
				    &packet_error, &vlan_stripped, &vlan,
				    &checksum, &fcoe_sof, &fcoe_fnic_crc_ok,
				    &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok,
				    &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);

		ethhdr_stripped = 0;
		bytes_written = enet_bytes_written;

		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "fnic 0x%p fcs error. Dropping packet.\n", fnic);
			goto drop;
		}
		eh = (struct ethhdr *) fp;
		if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) {

			/* FIP frames are consumed by the FIP work path */
			if (fnic_import_rq_eth_pkt(fnic, fp))
				return;

			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Dropping h_proto 0x%x",
				     be16_to_cpu(eh->h_proto));
			goto drop;
		}
	} else {
		/* wrong CQ type */
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	/* Drop frames that failed any hardware integrity check */
	if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fnic_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic->stop_rx_link_events: %d\n",
			     fnic->stop_rx_link_events);
		goto drop;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Queue the good frame for fnic_handle_frame() */
	frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
	if (!frame_elem) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Failed to allocate memory for frame elem");
		goto drop;
	}
	memset(frame_elem, 0, sizeof(struct fnic_frame_list));
	frame_elem->fp = fp;
	frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
	frame_elem->frame_len = bytes_written;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&frame_elem->links, &fnic->frame_queue);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	queue_work(fnic_event_queue, &fnic->frame_work);
	return;

drop:
	mempool_free(fp, fnic->frame_recv_pool);
}
543
544static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
545 struct cq_desc *cq_desc, u8 type,
546 u16 q_number, u16 completed_index,
547 void *opaque)
548{
549 struct fnic *fnic = vnic_dev_priv(vdev);
550
551 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
552 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
553 NULL);
554 return 0;
555}
556
557int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
558{
559 unsigned int tot_rq_work_done = 0, cur_work_done;
560 unsigned int i;
561 int err;
562
563 for (i = 0; i < fnic->rq_count; i++) {
564 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
565 fnic_rq_cmpl_handler_cont,
566 NULL);
567 if (cur_work_done && fnic->stop_rx_link_events != 1) {
568 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
569 if (err)
570 shost_printk(KERN_ERR, fnic->host,
571 "fnic_alloc_rq_frame can't alloc"
572 " frame\n");
573 }
574 tot_rq_work_done += cur_work_done;
575 }
576
577 return tot_rq_work_done;
578}
579
580/*
581 * This function is called once at init time to allocate and fill RQ
582 * buffers. Subsequently, it is called in the interrupt context after RQ
583 * buffer processing to replenish the buffers in the RQ
584 */
585int fnic_alloc_rq_frame(struct vnic_rq *rq)
586{
587 struct fnic *fnic = vnic_dev_priv(rq->vdev);
588 void *buf;
589 u16 len;
590 dma_addr_t pa;
591 int ret;
592
593 len = FNIC_FRAME_HT_ROOM;
594 buf = mempool_alloc(fnic->frame_recv_pool, GFP_ATOMIC);
595 if (!buf) {
596 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
597 "Unable to allocate RQ buffer of size: %d\n", len);
598 return -ENOMEM;
599 }
600
601 pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE);
602 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
603 ret = -ENOMEM;
604 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
605 "PCI mapping failed with error %d\n", ret);
606 goto free_buf;
607 }
608
609 fnic_queue_rq_desc(rq, buf, pa, len);
610 return 0;
611free_buf:
612 mempool_free(buf, fnic->frame_recv_pool);
613 return ret;
614}
615
616void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
617{
618 void *rq_buf = buf->os_buf;
619 struct fnic *fnic = vnic_dev_priv(rq->vdev);
620
621 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
622 DMA_FROM_DEVICE);
623
624 mempool_free(rq_buf, fnic->frame_recv_pool);
625 buf->os_buf = NULL;
626}
627
/*
 * Send FC frame.
 *
 * Maps @frame for DMA, records it in the FC trace buffer (best effort),
 * and posts it on work queue 0 under wq_lock[0]. On success the buffer
 * is owned by the hardware and freed in fnic_wq_complete_frame_send()
 * after transmission. Returns -ENOMEM if the DMA mapping fails, or -1
 * when no WQ descriptor is available (the mapping is undone and the
 * caller keeps ownership of @frame).
 */
static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
{
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	int ret = 0;
	unsigned long flags;

	pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa))
		return -ENOMEM;

	/* Trace failures are logged but do not prevent the send */
	if ((fnic_fc_trace_set_data(fnic->fnic_num,
				FNIC_FC_SEND | 0x80, (char *) frame,
				frame_len)) != 0) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic ctlr frame trace error");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		/* No descriptor: unmap; the caller still owns the frame */
		dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "vnic work queue descriptor is not available");
		ret = -1;
		goto fnic_send_frame_end;
	}

	/* hw inserts cos value */
	fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T,
			   0, fnic->vlan_id, 1, 1, 1);

fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return ret;
}
667
/**
 * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE
 * info. This interface is used only in the non fast path. (login, fabric
 * registrations etc.)
 *
 * @fnic: fnic instance
 * @frame: frame structure with FC payload filled in
 * @frame_size: length of the frame to be sent
 * @srcmac: source mac address
 * @dstmac: destination mac address
 *
 * Returns 0 on success (sent or queued), -ENOMEM when queueing fails,
 * or fnic_send_frame()'s error code.
 *
 * Called with the fnic lock held.
 */
static int
fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
		     uint8_t *srcmac, uint8_t *dstmac)
{
	struct ethhdr *pethhdr;
	struct fcoe_hdr *pfcoe_hdr;
	struct fnic_frame_list *frame_elem;
	int len = frame_size;
	int ret;
	struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame +
		FNIC_ETH_FCOE_HDRS_OFFSET);

	/* Stamp the Ethernet header: FCoE ethertype plus the MAC pair */
	pethhdr = (struct ethhdr *) frame;
	pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE);
	memcpy(pethhdr->h_source, srcmac, ETH_ALEN);
	memcpy(pethhdr->h_dest, dstmac, ETH_ALEN);

	/* FCoE header follows the Ethernet header; set start-of-frame */
	pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr));
	pfcoe_hdr->fcoe_sof = FC_SOF_I3;

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 * Queued frames are sent later by fnic_flush_tx().
	 */
	if ((fnic->state != FNIC_IN_FC_MODE)
		&& (fnic->state != FNIC_IN_ETH_MODE)) {
		frame_elem = mempool_alloc(fnic->frame_elem_pool, GFP_ATOMIC);
		if (!frame_elem) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Failed to allocate memory for frame elem");
			return -ENOMEM;
		}
		memset(frame_elem, 0, sizeof(struct fnic_frame_list));

		FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			     "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
			     ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id),
			     fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
		frame_elem->fp = frame;
		frame_elem->frame_len = len;
		list_add_tail(&frame_elem->links, &fnic->tx_queue);
		return 0;
	}

	fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing");

	ret = fnic_send_frame(fnic, frame, len);
	return ret;
}
730
731void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
732 int frame_size)
733{
734 struct fnic *fnic = iport->fnic;
735 uint8_t *dstmac, *srcmac;
736
737 /* If module unload is in-progress, don't send */
738 if (fnic->in_remove)
739 return;
740
741 if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) {
742 srcmac = iport->fpma;
743 dstmac = iport->fcfmac;
744 } else {
745 srcmac = iport->hwmac;
746 dstmac = FCOE_ALL_FCF_MAC;
747 }
748
749 fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac);
750}
751
752int
753fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame,
754 int frame_size)
755{
756 struct fnic *fnic = iport->fnic;
757
758 if (fnic->in_remove)
759 return -1;
760
761 fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing");
762 return fnic_send_frame(fnic, frame, frame_size);
763}
764
765/**
766 * fnic_flush_tx() - send queued frames.
767 * @work: pointer to work element
768 *
769 * Send frames that were waiting to go out in FC or Ethernet mode.
770 * Whenever changing modes we purge queued frames, so these frames should
771 * be queued for the stable mode that we're in, either FC or Ethernet.
772 *
773 * Called without fnic_lock held.
774 */
775void fnic_flush_tx(struct work_struct *work)
776{
777 struct fnic *fnic = container_of(work, struct fnic, flush_work);
778 struct fc_frame *fp;
779 struct fnic_frame_list *cur_frame, *next;
780
781 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
782 "Flush queued frames");
783
784 list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
785 fp = cur_frame->fp;
786 list_del(&cur_frame->links);
787 fnic_send_frame(fnic, fp, cur_frame->frame_len);
788 mempool_free(cur_frame, fnic->frame_elem_pool);
789 }
790}
791
/*
 * Register the assigned Port_ID with the firmware after FLOGI.
 *
 * If a response frame is supplied, its destination MAC (the granted
 * address) is added to the vnic address filter. The fnic transitions to
 * FNIC_IN_ETH_TRANS_FC_MODE while the firmware processes the FLOGI
 * registration; on failure the state is rolled back to ETH mode.
 * Returns 0 on success, -1 on an unexpected fnic state or registration
 * failure.
 */
int
fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
			  void *fp)
{
	struct fnic *fnic = iport->fnic;
	struct ethhdr *ethhdr;
	int ret;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id,
		     fp, fnic->state);

	if (fp) {
		ethhdr = (struct ethhdr *) fp;
		vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest);
	}

	/* Change state to reflect transition to FC mode */
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Unexpected fnic state while processing FLOGI response\n");
		return -1;
	}

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);
	if (ret < 0) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "FLOGI registration error ret: %d fnic state: %d\n",
			     ret, fnic->state);
		/* Roll back the state transition on failure */
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;

		return -1;
	}
	iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "FLOGI registration success\n");
	return 0;
}
838
839void fnic_free_txq(struct fnic *fnic)
840{
841 struct fnic_frame_list *cur_frame, *next;
842
843 list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
844 list_del(&cur_frame->links);
845 mempool_free(cur_frame->fp, fnic->frame_pool);
846 mempool_free(cur_frame, fnic->frame_elem_pool);
847 }
848}
849
850void fnic_free_rxq(struct fnic *fnic)
851{
852 struct fnic_frame_list *cur_frame, *next;
853
854 list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
855 list_del(&cur_frame->links);
856 mempool_free(cur_frame->fp, fnic->frame_recv_pool);
857 mempool_free(cur_frame, fnic->frame_elem_pool);
858 }
859
860 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
861 list_for_each_entry_safe(cur_frame, next,
862 &fnic->fip_frame_queue, links) {
863 list_del(&cur_frame->links);
864 mempool_free(cur_frame->fp, fnic->frame_recv_pool);
865 mempool_free(cur_frame, fnic->frame_elem_pool);
866 }
867 }
868}
869
870static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
871 struct cq_desc *cq_desc,
872 struct vnic_wq_buf *buf, void *opaque)
873{
874 struct fnic *fnic = vnic_dev_priv(wq->vdev);
875
876 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
877 DMA_TO_DEVICE);
878 mempool_free(buf->os_buf, fnic->frame_pool);
879 buf->os_buf = NULL;
880}
881
882static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
883 struct cq_desc *cq_desc, u8 type,
884 u16 q_number, u16 completed_index,
885 void *opaque)
886{
887 struct fnic *fnic = vnic_dev_priv(vdev);
888 unsigned long flags;
889
890 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
891 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
892 fnic_wq_complete_frame_send, NULL);
893 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
894
895 return 0;
896}
897
898int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
899{
900 unsigned int wq_work_done = 0;
901 unsigned int i;
902
903 for (i = 0; i < fnic->raw_wq_count; i++) {
904 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
905 work_to_do,
906 fnic_wq_cmpl_handler_cont,
907 NULL);
908 }
909
910 return wq_work_done;
911}
912
913
914void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
915{
916 struct fnic *fnic = vnic_dev_priv(wq->vdev);
917
918 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
919 DMA_TO_DEVICE);
920
921 mempool_free(buf->os_buf, fnic->frame_pool);
922 buf->os_buf = NULL;
923}
924
/*
 * Register a discovered target port with the FC transport as a remote
 * port.
 *
 * Called with fnic_lock held (IRQ flags passed in @flags); the lock is
 * dropped around fc_remote_port_add() and re-acquired afterwards. On
 * success the rport's dd_data is wired back to the tport/iport and the
 * tport is marked FNIC_FDLS_SCSI_REGISTERED.
 */
void
fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport,
		    unsigned long flags)
{
	struct fnic *fnic = iport->fnic;
	struct fc_rport *rport;
	struct fc_rport_identifiers ids;
	struct rport_dd_data_s *rdd_data;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Adding rport fcid: 0x%x", tport->fcid);

	ids.node_name = tport->wwnn;
	ids.port_name = tport->wwpn;
	ids.port_id = tport->fcid;
	ids.roles = FC_RPORT_ROLE_FCP_TARGET;

	/* Drop the lock around the FC transport call */
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	rport = fc_remote_port_add(fnic->host, 0, &ids);
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!rport) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Failed to add rport for tport: 0x%x", tport->fcid);
		return;
	}

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Added rport fcid: 0x%x", tport->fcid);

	/* Mimic these assignments in queuecommand to avoid timing issues */
	rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
	rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
	rdd_data = rport->dd_data;
	rdd_data->tport = tport;
	rdd_data->iport = iport;
	tport->rport = rport;
	tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
}
963
/*
 * Take a target port offline and free it.
 *
 * If a remote port was registered with the FC transport, the tport is
 * marked deleted, fnic_lock is dropped around fc_remote_port_delete(),
 * and the rport's dd_data back-pointers are cleared before the tport is
 * unlinked and freed. Without an rport, the tport timer is cancelled
 * and the tport freed directly.
 *
 * Called with fnic_lock held (IRQ flags passed in @flags).
 */
void
fnic_fdls_remove_tport(struct fnic_iport_s *iport,
		       struct fnic_tport_s *tport, unsigned long flags)
{
	struct fnic *fnic = iport->fnic;
	struct rport_dd_data_s *rdd_data;

	struct fc_rport *rport;

	if (!tport)
		return;

	fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE);
	rport = tport->rport;

	if (rport) {
		/* tport resource release will be done
		 * after fnic_terminate_rport_io()
		 */
		tport->flags |= FNIC_FDLS_TPORT_DELETED;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		/* Interface to scsi_fc_transport */
		fc_remote_port_delete(rport);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Deregistered and freed tport fcid: 0x%x from scsi transport fc",
			     tport->fcid);

		/*
		 * the dd_data is allocated by fc transport
		 * of size dd_fcrport_size
		 */
		rdd_data = rport->dd_data;
		rdd_data->tport = NULL;
		rdd_data->iport = NULL;
		list_del(&tport->links);
		kfree(tport);
	} else {
		fnic_del_tport_timer_sync(fnic, tport);
		list_del(&tport->links);
		kfree(tport);
	}
}
1009
/*
 * Offline and remove every target port on this fnic's iport.
 *
 * Runs under fnic_lock; note that fnic_fdls_remove_tport() may
 * temporarily drop and re-acquire the lock around its FC transport
 * call.
 */
void fnic_delete_fcp_tports(struct fnic *fnic)
{
	struct fnic_tport_s *tport, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "removing fcp rport fcid: 0x%x", tport->fcid);
		fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
		/* Cancel any pending tport timer before teardown */
		fnic_del_tport_timer_sync(fnic, tport);
		fnic_fdls_remove_tport(&fnic->iport, tport, flags);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
1025
1026/**
1027 * fnic_tport_event_handler() - Handler for remote port events
1028 * in the tport_event_queue.
1029 *
1030 * @work: Handle to the remote port being dequeued
1031 */
1032void fnic_tport_event_handler(struct work_struct *work)
1033{
1034 struct fnic *fnic = container_of(work, struct fnic, tport_work);
1035 struct fnic_tport_event_s *cur_evt, *next;
1036 unsigned long flags;
1037 struct fnic_tport_s *tport;
1038
1039 spin_lock_irqsave(&fnic->fnic_lock, flags);
1040 list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
1041 tport = cur_evt->arg1;
1042 switch (cur_evt->event) {
1043 case TGT_EV_RPORT_ADD:
1044 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1045 "Add rport event");
1046 if (tport->state == FDLS_TGT_STATE_READY) {
1047 fnic_fdls_add_tport(&fnic->iport,
1048 (struct fnic_tport_s *) cur_evt->arg1, flags);
1049 } else {
1050 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1051 "Target not ready. Add rport event dropped: 0x%x",
1052 tport->fcid);
1053 }
1054 break;
1055 case TGT_EV_RPORT_DEL:
1056 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1057 "Remove rport event");
1058 if (tport->state == FDLS_TGT_STATE_OFFLINING) {
1059 fnic_fdls_remove_tport(&fnic->iport,
1060 (struct fnic_tport_s *) cur_evt->arg1, flags);
1061 } else {
1062 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1063 "remove rport event dropped tport fcid: 0x%x",
1064 tport->fcid);
1065 }
1066 break;
1067 case TGT_EV_TPORT_DELETE:
1068 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1069 "Delete tport event");
1070 fdls_delete_tport(tport->iport, tport);
1071 break;
1072 default:
1073 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1074 "Unknown tport event");
1075 break;
1076 }
1077 list_del(&cur_evt->links);
1078 kfree(cur_evt);
1079 }
1080 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1081}
1082
1083void fnic_flush_tport_event_list(struct fnic *fnic)
1084{
1085 struct fnic_tport_event_s *cur_evt, *next;
1086 unsigned long flags;
1087
1088 spin_lock_irqsave(&fnic->fnic_lock, flags);
1089 list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
1090 list_del(&cur_evt->links);
1091 kfree(cur_evt);
1092 }
1093 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1094}
1095
/*
 * fnic_reset_work_handler() - Work handler that host-resets every fnic
 * queued on the module-wide reset_fnic_list.
 *
 * Pops each fnic off the list (dropping the list lock around the
 * potentially slow fnic_host_reset() call) and clears its PC-RSCN
 * handling state afterwards.
 */
void fnic_reset_work_handler(struct work_struct *work)
{
	struct fnic *cur_fnic, *next_fnic;
	unsigned long reset_fnic_list_lock_flags;
	int host_reset_ret_code;

	/*
	 * This is a single thread. It is per fnic module, not per fnic
	 * All the fnics that need to be reset
	 * have been serialized via the reset fnic list.
	 */
	spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
	list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
		/* Dequeue before dropping the lock so no one else sees it */
		list_del(&cur_fnic->links);
		spin_unlock_irqrestore(&reset_fnic_list_lock,
				       reset_fnic_list_lock_flags);

		dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n",
			cur_fnic->fnic_num);
		host_reset_ret_code = fnic_host_reset(cur_fnic->host);
		dev_err(&cur_fnic->pdev->dev,
			"fnic: <%d>: returned from host reset with status: %d\n",
			cur_fnic->fnic_num, host_reset_ret_code);

		/* Allow subsequent PC-RSCN events to be handled again */
		spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
		cur_fnic->pc_rscn_handling_status =
			PC_RSCN_HANDLING_NOT_IN_PROGRESS;
		spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags);

		/*
		 * NOTE(review): the safe-iterator 'next_fnic' was captured
		 * before the lock was dropped; relies on the single-thread
		 * serialization described above — confirm.
		 */
		spin_lock_irqsave(&reset_fnic_list_lock,
				  reset_fnic_list_lock_flags);
	}
	spin_unlock_irqrestore(&reset_fnic_list_lock,
			       reset_fnic_list_lock_flags);
}
1131
1132void fnic_fcpio_reset(struct fnic *fnic)
1133{
1134 unsigned long flags;
1135 enum fnic_state old_state;
1136 struct fnic_iport_s *iport = &fnic->iport;
1137 DECLARE_COMPLETION_ONSTACK(fw_reset_done);
1138 int time_remain;
1139
1140 /* issue fw reset */
1141 spin_lock_irqsave(&fnic->fnic_lock, flags);
1142 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
1143 /* fw reset is in progress, poll for its completion */
1144 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1145 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1146 "fnic is in unexpected state: %d for fw_reset\n",
1147 fnic->state);
1148 return;
1149 }
1150
1151 old_state = fnic->state;
1152 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1153
1154 fnic_update_mac_locked(fnic, iport->hwmac);
1155 fnic->fw_reset_done = &fw_reset_done;
1156 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1157
1158 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1159 "Issuing fw reset\n");
1160 if (fnic_fw_reset_handler(fnic)) {
1161 spin_lock_irqsave(&fnic->fnic_lock, flags);
1162 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1163 fnic->state = old_state;
1164 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1165 } else {
1166 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1167 "Waiting for fw completion\n");
1168 time_remain = wait_for_completion_timeout(&fw_reset_done,
1169 msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
1170 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1171 "Woken up after fw completion timeout\n");
1172 if (time_remain == 0) {
1173 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1174 "FW reset completion timed out after %d ms\n",
1175 FNIC_FW_RESET_TIMEOUT);
1176 }
1177 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
1178 }
1179 fnic->fw_reset_done = NULL;
1180}