Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
"This is just a couple of drivers (hpsa and lpfc) that got left out for
further testing in linux-next. We also have one fix to a prior
submission (qla2xxx sparse)"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (36 commits)
qla2xxx: fix sparse warnings introduced by previous target mode t10-dif patch
lpfc: Update lpfc version to driver version 10.2.8001.0
lpfc: Fix ExpressLane priority setup
lpfc: mark old devices as obsolete
lpfc: Fix for initializing RRQ bitmap
lpfc: Fix for cleaning up stale ring flag and sp_queue_event entries
lpfc: Update lpfc version to driver version 10.2.8000.0
lpfc: Update Copyright on changed files from 8.3.45 patches
lpfc: Update Copyright on changed files
lpfc: Fixed locking for scsi task management commands
lpfc: Convert runtime references to old xlane cfg param to fof cfg param
lpfc: Fix FW dump using sysfs
lpfc: Fix SLI4 s abort loop to process all FCP rings and under ring_lock
lpfc: Fixed kernel panic in lpfc_abort_handler
lpfc: Fix locking for postbufq when freeing
lpfc: Fix locking for lpfc_hba_down_post
lpfc: Fix dynamic transitions of FirstBurst from on to off
hpsa: fix handling of hpsa_volume_offline return value
hpsa: return -ENOMEM not -1 on kzalloc failure in hpsa_get_device_id
hpsa: remove messages about volume status VPD inquiry page not supported
...

+736 -390
+158 -108
drivers/scsi/hpsa.c
···
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
+#include <linux/percpu.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
···
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
-static void start_io(struct ctlr_info *h);
+static void lock_and_start_io(struct ctlr_info *h);
+static void start_io(struct ctlr_info *h, unsigned long *flags);

 #ifdef CONFIG_COMPAT
 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
···
 static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
     u32 a;
-    struct reply_pool *rq = &h->reply_queue[q];
+    struct reply_queue_buffer *rq = &h->reply_queue[q];
     unsigned long flags;

     if (h->transMethod & CFGTBL_Trans_io_accel1)
···
     spin_lock_irqsave(&h->lock, flags);
     addQ(&h->reqQ, c);
     h->Qdepth++;
+    start_io(h, &flags);
     spin_unlock_irqrestore(&h->lock, flags);
-    start_io(h);
 }

 static inline void removeQ(struct CommandList *c)
···
         dev_warn(&h->pdev->dev,
             "%s: task complete with check condition.\n",
             "HP SSD Smart Path");
+        cmd->result |= SAM_STAT_CHECK_CONDITION;
         if (c2->error_data.data_present !=
-                IOACCEL2_SENSE_DATA_PRESENT)
+                IOACCEL2_SENSE_DATA_PRESENT) {
+            memset(cmd->sense_buffer, 0,
+                SCSI_SENSE_BUFFERSIZE);
             break;
+        }
         /* copy the sense data */
         data_len = c2->error_data.sense_data_len;
         if (data_len > SCSI_SENSE_BUFFERSIZE)
···
                 sizeof(c2->error_data.sense_data_buff);
         memcpy(cmd->sense_buffer,
             c2->error_data.sense_data_buff, data_len);
-        cmd->result |= SAM_STAT_CHECK_CONDITION;
         retry = 1;
         break;
     case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
···
     if (is_logical_dev_addr_mode(dev->scsi3addr) &&
         c2->error_data.serv_response ==
             IOACCEL2_SERV_RESPONSE_FAILURE) {
-        if (c2->error_data.status ==
-            IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
-            dev_warn(&h->pdev->dev,
-                "%s: Path is unavailable, retrying on standard path.\n",
-                "HP SSD Smart Path");
-        else
-            dev_warn(&h->pdev->dev,
-                "%s: Error 0x%02x, retrying on standard path.\n",
-                "HP SSD Smart Path", c2->error_data.status);
-
         dev->offload_enabled = 0;
         h->drv_req_rescan = 1; /* schedule controller for a rescan */
         cmd->result = DID_SOFT_ERROR << 16;
···
     wait_for_completion(&wait);
 }

+static u32 lockup_detected(struct ctlr_info *h)
+{
+    int cpu;
+    u32 rc, *lockup_detected;
+
+    cpu = get_cpu();
+    lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+    rc = *lockup_detected;
+    put_cpu();
+    return rc;
+}
+
 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
     struct CommandList *c)
 {
-    unsigned long flags;
-
     /* If controller lockup detected, fake a hardware error. */
-    spin_lock_irqsave(&h->lock, flags);
-    if (unlikely(h->lockup_detected)) {
-        spin_unlock_irqrestore(&h->lock, flags);
+    if (unlikely(lockup_detected(h)))
         c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-    } else {
-        spin_unlock_irqrestore(&h->lock, flags);
+    else
         hpsa_scsi_do_simple_cmd_core(h, c);
-    }
 }

 #define MAX_DRIVER_CMD_RETRIES 25
···
     buflen = 16;
     buf = kzalloc(64, GFP_KERNEL);
     if (!buf)
-        return -1;
+        return -ENOMEM;
     rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
     if (rc == 0)
         memcpy(device_id, &buf[8], buflen);
···
         return HPSA_VPD_LV_STATUS_UNSUPPORTED;

     /* Does controller have VPD for logical volume status? */
-    if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
-        dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+    if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
         goto exit_failed;
-    }

     /* Get the size of the VPD return buffer */
     rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
                     buf, HPSA_VPD_HEADER_SZ);
-    if (rc != 0) {
-        dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+    if (rc != 0)
         goto exit_failed;
-    }
     size = buf[3];

     /* Now get the whole VPD buffer */
     rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
                     buf, size + HPSA_VPD_HEADER_SZ);
-    if (rc != 0) {
-        dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+    if (rc != 0)
         goto exit_failed;
-    }
     status = buf[4]; /* status byte */

     kfree(buf);
···
 /* Determine offline status of a volume.
  * Return either:
  *  0 (not offline)
- * -1 (offline for unknown reasons)
+ *  0xff (offline for unknown reasons)
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+static int hpsa_volume_offline(struct ctlr_info *h,
     unsigned char scsi3addr[])
 {
     struct CommandList *c;
···

     if (this_device->devtype == TYPE_DISK &&
         is_logical_dev_addr_mode(scsi3addr)) {
+        int volume_offline;
+
         hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
         if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
             hpsa_get_ioaccel_status(h, scsi3addr, this_device);
-        this_device->volume_offline =
-            hpsa_volume_offline(h, scsi3addr);
+        volume_offline = hpsa_volume_offline(h, scsi3addr);
+        if (volume_offline < 0 || volume_offline > 0xff)
+            volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
+        this_device->volume_offline = volume_offline & 0xff;
     } else {
         this_device->raid_level = RAID_UNKNOWN;
         this_device->offload_config = 0;
···
     nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
                             responsesize;

-
     /* find ioaccel2 handle in list of physicals: */
     for (i = 0; i < nphysicals; i++) {
+        struct ext_report_lun_entry *entry = &physicals->LUN[i];
+
         /* handle is in bytes 28-31 of each lun */
-        if (memcmp(&((struct ReportExtendedLUNdata *)
-                physicals)->LUN[i][20], &find, 4) != 0) {
+        if (entry->ioaccel_handle != find)
             continue; /* didn't match */
-        }
         found = 1;
-        memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
-                    physicals)->LUN[i][0], 8);
+        memcpy(scsi3addr, entry->lunid, 8);
         if (h->raid_offload_debug > 0)
             dev_info(&h->pdev->dev,
-                "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+                "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
                 __func__, find,
-                ((struct ReportExtendedLUNdata *)
-                    physicals)->LUN[i][20],
-                scsi3addr[0], scsi3addr[1], scsi3addr[2],
-                scsi3addr[3], scsi3addr[4], scsi3addr[5],
-                scsi3addr[6], scsi3addr[7]);
+                entry->ioaccel_handle, scsi3addr);
         break; /* found it */
     }
···
         return RAID_CTLR_LUNID;

     if (i < logicals_start)
-        return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
+        return &physdev_list->LUN[i -
+                (raid_ctlr_position == 0)].lunid[0];

     if (i < last_device)
         return &logdev_list->LUN[i - nphysicals -
···
         ndev_allocated++;
     }

-    if (unlikely(is_scsi_rev_5(h)))
+    if (is_scsi_rev_5(h))
         raid_ctlr_position = 0;
     else
         raid_ctlr_position = nphysicals + nlogicals;
···
     struct hpsa_scsi_dev_t *dev;
     unsigned char scsi3addr[8];
     struct CommandList *c;
-    unsigned long flags;
     int rc = 0;

     /* Get the ptr to our adapter structure out of cmd->host. */
···
     }
     memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

-    spin_lock_irqsave(&h->lock, flags);
-    if (unlikely(h->lockup_detected)) {
-        spin_unlock_irqrestore(&h->lock, flags);
+    if (unlikely(lockup_detected(h))) {
         cmd->result = DID_ERROR << 16;
         done(cmd);
         return 0;
     }
-    spin_unlock_irqrestore(&h->lock, flags);
     c = cmd_alloc(h);
     if (c == NULL) { /* trouble... */
         dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
···
      * we can prevent new rescan threads from piling up on a
      * locked up controller.
      */
-    spin_lock_irqsave(&h->lock, flags);
-    if (unlikely(h->lockup_detected)) {
-        spin_unlock_irqrestore(&h->lock, flags);
+    if (unlikely(lockup_detected(h))) {
         spin_lock_irqsave(&h->scan_lock, flags);
         h->scan_finished = 1;
         wake_up_all(&h->scan_wait_queue);
         spin_unlock_irqrestore(&h->scan_lock, flags);
         return 1;
     }
-    spin_unlock_irqrestore(&h->lock, flags);
     return 0;
 }
···
     buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
     if (buff == NULL)
         return -EFAULT;
-    if (iocommand.Request.Type.Direction == XFER_WRITE) {
+    if (iocommand.Request.Type.Direction & XFER_WRITE) {
         /* Copy the data into the buffer we created */
         if (copy_from_user(buff, iocommand.buf,
             iocommand.buf_size)) {
···
         rc = -EFAULT;
         goto out;
     }
-    if (iocommand.Request.Type.Direction == XFER_READ &&
+    if ((iocommand.Request.Type.Direction & XFER_READ) &&
         iocommand.buf_size > 0) {
         /* Copy the data out of the buffer we created */
         if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
···
             status = -ENOMEM;
             goto cleanup1;
         }
-        if (ioc->Request.Type.Direction == XFER_WRITE) {
+        if (ioc->Request.Type.Direction & XFER_WRITE) {
             if (copy_from_user(buff[sg_used], data_ptr, sz)) {
                 status = -ENOMEM;
                 goto cleanup1;
···
         status = -EFAULT;
         goto cleanup0;
     }
-    if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
+    if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
         /* Copy the data out of the buffer we created */
         BYTE __user *ptr = ioc->buf;
         for (i = 0; i < sg_used; i++) {
···

 /* Takes cmds off the submission queue and sends them to the hardware,
  * then puts them on the queue of cmds waiting for completion.
+ * Assumes h->lock is held
  */
-static void start_io(struct ctlr_info *h)
+static void start_io(struct ctlr_info *h, unsigned long *flags)
 {
     struct CommandList *c;
-    unsigned long flags;

-    spin_lock_irqsave(&h->lock, flags);
     while (!list_empty(&h->reqQ)) {
         c = list_entry(h->reqQ.next, struct CommandList, list);
         /* can't do anything if fifo is full */
···
          * condition.
          */
         h->commands_outstanding++;
-        if (h->commands_outstanding > h->max_outstanding)
-            h->max_outstanding = h->commands_outstanding;

         /* Tell the controller execute command */
-        spin_unlock_irqrestore(&h->lock, flags);
+        spin_unlock_irqrestore(&h->lock, *flags);
         h->access.submit_command(h, c);
-        spin_lock_irqsave(&h->lock, flags);
+        spin_lock_irqsave(&h->lock, *flags);
     }
+}
+
+static void lock_and_start_io(struct ctlr_info *h)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&h->lock, flags);
+    start_io(h, &flags);
     spin_unlock_irqrestore(&h->lock, flags);
 }
···
     else if (c->cmd_type == CMD_IOCTL_PEND)
         complete(c->waiting);
     if (unlikely(io_may_be_stalled))
-        start_io(h);
+        lock_and_start_io(h);
 }

 static inline u32 hpsa_tag_contains_index(u32 tag)
···
     dev_info(&pdev->dev, "using doorbell to reset controller\n");
     writel(use_doorbell, vaddr + SA5_DOORBELL);

-    /* PMC hardware guys tell us we need a 5 second delay after
+    /* PMC hardware guys tell us we need a 10 second delay after
      * doorbell reset and before any attempt to talk to the board
      * at all to ensure that this actually works and doesn't fall
      * over in some weird corner cases.
      */
-    msleep(5000);
+    msleep(10000);
 } else { /* Try to do it the PCI power state way */

     /* Quoting from the Open CISS Specification: "The Power
···
     if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
         dev_info(&h->pdev->dev, "MSIX\n");
         h->msix_vector = MAX_REPLY_QUEUES;
+        if (h->msix_vector > num_online_cpus())
+            h->msix_vector = num_online_cpus();
         err = pci_enable_msix(h->pdev, hpsa_msix_entries,
                       h->msix_vector);
         if (err > 0) {
···
             h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
 }

+static void hpsa_irq_affinity_hints(struct ctlr_info *h)
+{
+    int i, cpu, rc;
+
+    cpu = cpumask_first(cpu_online_mask);
+    for (i = 0; i < h->msix_vector; i++) {
+        rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+        cpu = cpumask_next(cpu, cpu_online_mask);
+    }
+}
+
 static int hpsa_request_irq(struct ctlr_info *h,
     irqreturn_t (*msixhandler)(int, void *),
     irqreturn_t (*intxhandler)(int, void *))
···
             rc = request_irq(h->intr[i], msixhandler,
                     0, h->devname,
                     &h->q[i]);
+        hpsa_irq_affinity_hints(h);
     } else {
         /* Use single reply pool */
         if (h->msix_vector > 0 || h->msi_vector) {
···
     if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
         /* Single reply queue, only one irq to free */
         i = h->intr_mode;
+        irq_set_affinity_hint(h->intr[i], NULL);
         free_irq(h->intr[i], &h->q[i]);
         return;
     }

-    for (i = 0; i < h->msix_vector; i++)
+    for (i = 0; i < h->msix_vector; i++) {
+        irq_set_affinity_hint(h->intr[i], NULL);
         free_irq(h->intr[i], &h->q[i]);
+    }
 }

 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
···
 #endif /* CONFIG_PCI_MSI */
 }

+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+    int i;
+
+    for (i = 0; i < h->nreply_queues; i++) {
+        if (!h->reply_queue[i].head)
+            continue;
+        pci_free_consistent(h->pdev, h->reply_queue_size,
+            h->reply_queue[i].head, h->reply_queue[i].busaddr);
+        h->reply_queue[i].head = NULL;
+        h->reply_queue[i].busaddr = 0;
+    }
+}
+
 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
     hpsa_free_irqs_and_disable_msix(h);
···
     hpsa_free_cmd_pool(h);
     kfree(h->ioaccel1_blockFetchTable);
     kfree(h->blockFetchTable);
-    pci_free_consistent(h->pdev, h->reply_pool_size,
-        h->reply_pool, h->reply_pool_dhandle);
+    hpsa_free_reply_queues(h);
     if (h->vaddr)
         iounmap(h->vaddr);
     if (h->transtable)
···
     }
 }

+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+    int i, cpu;
+
+    cpu = cpumask_first(cpu_online_mask);
+    for (i = 0; i < num_online_cpus(); i++) {
+        u32 *lockup_detected;
+        lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+        *lockup_detected = value;
+        cpu = cpumask_next(cpu, cpu_online_mask);
+    }
+    wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
 static void controller_lockup_detected(struct ctlr_info *h)
 {
     unsigned long flags;
+    u32 lockup_detected;

     h->access.set_intr_mask(h, HPSA_INTR_OFF);
     spin_lock_irqsave(&h->lock, flags);
-    h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+    lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+    if (!lockup_detected) {
+        /* no heartbeat, but controller gave us a zero. */
+        dev_warn(&h->pdev->dev,
+            "lockup detected but scratchpad register is zero\n");
+        lockup_detected = 0xffffffff;
+    }
+    set_lockup_detected_for_all_cpus(h, lockup_detected);
     spin_unlock_irqrestore(&h->lock, flags);
     dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
-            h->lockup_detected);
+            lockup_detected);
     pci_disable_device(h->pdev);
     spin_lock_irqsave(&h->lock, flags);
     fail_all_cmds_on_list(h, &h->cmpQ);
···
     struct ctlr_info *h = container_of(to_delayed_work(work),
                     struct ctlr_info, monitor_ctlr_work);
     detect_controller_lockup(h);
-    if (h->lockup_detected)
+    if (lockup_detected(h))
         return;

     if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
···
      * the 5 lower bits of the address are used by the hardware. and by
      * the driver.  See comments in hpsa.h for more info.
      */
-#define COMMANDLIST_ALIGNMENT 128
     BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
     h = kzalloc(sizeof(*h), GFP_KERNEL);
     if (!h)
···
     spin_lock_init(&h->offline_device_lock);
     spin_lock_init(&h->scan_lock);
     spin_lock_init(&h->passthru_count_lock);
+
+    /* Allocate and clear per-cpu variable lockup_detected */
+    h->lockup_detected = alloc_percpu(u32);
+    if (!h->lockup_detected)
+        goto clean1;
+    set_lockup_detected_for_all_cpus(h, 0);
+
     rc = hpsa_pci_init(h);
     if (rc != 0)
         goto clean1;
···
     free_irqs(h);
 clean2:
 clean1:
+    if (h->lockup_detected)
+        free_percpu(h->lockup_detected);
     kfree(h);
     return rc;
 }
···
 {
     char *flush_buf;
     struct CommandList *c;
-    unsigned long flags;

     /* Don't bother trying to flush the cache if locked up */
-    spin_lock_irqsave(&h->lock, flags);
-    if (unlikely(h->lockup_detected)) {
-        spin_unlock_irqrestore(&h->lock, flags);
+    if (unlikely(lockup_detected(h)))
         return;
-    }
-    spin_unlock_irqrestore(&h->lock, flags);
-
     flush_buf = kzalloc(4, GFP_KERNEL);
     if (!flush_buf)
         return;
···
     pci_free_consistent(h->pdev,
         h->nr_cmds * sizeof(struct ErrorInfo),
         h->errinfo_pool, h->errinfo_pool_dhandle);
-    pci_free_consistent(h->pdev, h->reply_pool_size,
-        h->reply_pool, h->reply_pool_dhandle);
+    hpsa_free_reply_queues(h);
     kfree(h->cmd_pool_bits);
     kfree(h->blockFetchTable);
     kfree(h->ioaccel1_blockFetchTable);
···
     kfree(h->hba_inquiry_data);
     pci_disable_device(pdev);
     pci_release_regions(pdev);
+    free_percpu(h->lockup_detected);
     kfree(h);
 }
···
      * 10 = 6 s/g entry or 24k
      */

+    /* If the controller supports either ioaccel method then
+     * we can also use the RAID stack submit path that does not
+     * perform the superfluous readl() after each command submission.
+     */
+    if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
+        access = SA5_performant_access_no_read;
+
     /* Controller spec: zero out this buffer. */
-    memset(h->reply_pool, 0, h->reply_pool_size);
+    for (i = 0; i < h->nreply_queues; i++)
+        memset(h->reply_queue[i].head, 0, h->reply_queue_size);

     bft[7] = SG_ENTRIES_IN_CMD + 4;
     calc_bucket_map(bft, ARRAY_SIZE(bft),
···

     for (i = 0; i < h->nreply_queues; i++) {
         writel(0, &h->transtable->RepQAddr[i].upper);
-        writel(h->reply_pool_dhandle +
-            (h->max_commands * sizeof(u64) * i),
+        writel(h->reply_queue[i].busaddr,
             &h->transtable->RepQAddr[i].lower);
     }
···
             h->ioaccel1_blockFetchTable);

         /* initialize all reply queue entries to unused */
-        memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
-                h->reply_pool_size);
+        for (i = 0; i < h->nreply_queues; i++)
+            memset(h->reply_queue[i].head,
+                (u8) IOACCEL_MODE1_REPLY_UNUSED,
+                h->reply_queue_size);

         /* set all the constant fields in the accelerator command
          * frames once at init time to save CPU cycles later.
···
      * because the 7 lower bits of the address are used by the
      * hardware.
      */
-#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
     BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
         IOACCEL1_COMMANDLIST_ALIGNMENT);
     h->ioaccel_cmd_pool =
···
     if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
         h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

-#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
     BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
         IOACCEL2_COMMANDLIST_ALIGNMENT);
     h->ioaccel2_cmd_pool =
···
         }
     }

-    /* TODO, check that this next line h->nreply_queues is correct */
     h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
     hpsa_get_max_perf_mode_cmds(h);
     /* Performant mode ring buffer and supporting data structures */
-    h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
-    h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
-        &(h->reply_pool_dhandle));
+    h->reply_queue_size = h->max_commands * sizeof(u64);

     for (i = 0; i < h->nreply_queues; i++) {
-        h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+        h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+                        h->reply_queue_size,
+                        &(h->reply_queue[i].busaddr));
+        if (!h->reply_queue[i].head)
+            goto clean_up;
         h->reply_queue[i].size = h->max_commands;
         h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
         h->reply_queue[i].current_entry = 0;
···
     /* Need a block fetch table for performant mode */
     h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
                 sizeof(u32)), GFP_KERNEL);
-
-    if ((h->reply_pool == NULL)
-        || (h->blockFetchTable == NULL))
+    if (!h->blockFetchTable)
         goto clean_up;

     hpsa_enter_performant_mode(h, trans_support);
     return;

 clean_up:
-    if (h->reply_pool)
-        pci_free_consistent(h->pdev, h->reply_pool_size,
-            h->reply_pool, h->reply_pool_dhandle);
+    hpsa_free_reply_queues(h);
     kfree(h->blockFetchTable);
 }
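
The centerpiece of the hpsa changes above is replacing the h->lock-protected h->lockup_detected field with a per-cpu copy, so the hot I/O submission path can check for a dead controller without taking a shared spinlock or bouncing one cache line between CPUs. A minimal sketch of that pattern, assuming kernel context; struct my_ctlr is a hypothetical stand-in for struct ctlr_info:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

struct my_ctlr {
    u32 __percpu *lockup_detected;
};

/* hot path: read this CPU's private copy, no shared lock taken */
static u32 my_lockup_detected(struct my_ctlr *h)
{
    int cpu = get_cpu();    /* disables preemption */
    u32 rc = *per_cpu_ptr(h->lockup_detected, cpu);

    put_cpu();
    return rc;
}

/* cold path: the lockup detector marks every CPU's copy exactly once */
static void my_set_lockup_detected(struct my_ctlr *h, u32 value)
{
    int cpu;

    for_each_online_cpu(cpu)
        *per_cpu_ptr(h->lockup_detected, cpu) = value;
    wmb();  /* push the stores out before anyone acts on them */
}

static int my_ctlr_init(struct my_ctlr *h)
{
    h->lockup_detected = alloc_percpu(u32); /* paired with free_percpu() */
    if (!h->lockup_detected)
        return -ENOMEM;
    my_set_lockup_detected(h, 0);
    return 0;
}

Note also that the ioctl paths above now test Request.Type.Direction with & rather than ==, so a request carrying both the read and the write bit still gets its user-space copies performed in both directions.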
+22 -20
drivers/scsi/hpsa.h
···

 };

-struct reply_pool {
+struct reply_queue_buffer {
     u64 *head;
     size_t size;
     u8 wraparound;
     u32 current_entry;
+    dma_addr_t busaddr;
 };

 #pragma pack(1)
···
     int nr_cmds; /* Number of commands allowed on this controller */
     struct CfgTable __iomem *cfgtable;
     int interrupts_enabled;
-    int major;
     int max_commands;
     int commands_outstanding;
-    int max_outstanding; /* Debug */
-    int usage_count; /* number of opens all all minor devices */
 #   define PERF_MODE_INT    0
 #   define DOORBELL_INT     1
 #   define SIMPLE_MODE_INT  2
···
     /*
      * Performant mode completion buffers
      */
-    u64 *reply_pool;
-    size_t reply_pool_size;
-    struct reply_pool reply_queue[MAX_REPLY_QUEUES];
+    size_t reply_queue_size;
+    struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
     u8 nreply_queues;
-    dma_addr_t reply_pool_dhandle;
     u32 *blockFetchTable;
     u32 *ioaccel1_blockFetchTable;
     u32 *ioaccel2_blockFetchTable;
···
     u64 last_heartbeat_timestamp;
     u32 heartbeat_sample_interval;
     atomic_t firmware_flash_in_progress;
-    u32 lockup_detected;
+    u32 *lockup_detected;
     struct delayed_work monitor_ctlr_work;
     int remove_in_progress;
     u32 fifo_recently_full;
···
 #define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)

 #define RESCAN_REQUIRED_EVENT_BITS \
-        (CTLR_STATE_CHANGE_EVENT | \
-        CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+        (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
         CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
         CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
-        CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
         CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
         CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
     spinlock_t offline_device_lock;
···
 static void SA5_submit_command(struct ctlr_info *h,
     struct CommandList *c)
 {
-    dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
-        c->Header.Tag.lower);
     writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
     (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+}
+
+static void SA5_submit_command_no_read(struct ctlr_info *h,
+    struct CommandList *c)
+{
+    writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 }

 static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
     struct CommandList *c)
 {
-    dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
-        c->Header.Tag.lower);
     if (c->cmd_type == CMD_IOACCEL2)
         writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
     else
         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
-    (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 }

 /*
···

 static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 {
-    struct reply_pool *rq = &h->reply_queue[q];
+    struct reply_queue_buffer *rq = &h->reply_queue[q];
     unsigned long flags, register_value = FIFO_EMPTY;

     /* msi auto clears the interrupt pending bit. */
···
 {
     unsigned long register_value =
         readl(h->vaddr + SA5_INTR_STATUS);
-    dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
     return register_value & SA5_INTR_PENDING;
 }
···
 static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 {
     u64 register_value;
-    struct reply_pool *rq = &h->reply_queue[q];
+    struct reply_queue_buffer *rq = &h->reply_queue[q];
     unsigned long flags;

     BUG_ON(q >= h->nreply_queues);
···

 static struct access_method SA5_performant_access = {
     SA5_submit_command,
+    SA5_performant_intr_mask,
+    SA5_fifo_full,
+    SA5_performant_intr_pending,
+    SA5_performant_completed,
+};
+
+static struct access_method SA5_performant_access_no_read = {
+    SA5_submit_command_no_read,
     SA5_performant_intr_mask,
     SA5_fifo_full,
     SA5_performant_intr_pending,
     SA5_performant_completed,
 };
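
The new SA5_submit_command_no_read variant above drops the readl() of the scratchpad register that followed each doorbell write. PCI memory writes are posted, so the read's only job was to force the write out to the device; on controllers that don't need that guarantee it just costs a full MMIO round trip per command. A minimal sketch of the difference, assuming kernel context (the offsets are hypothetical stand-ins; the real ones live in hpsa.h):

#include <linux/io.h>

#define MY_REQUEST_PORT_OFFSET  0x40    /* hypothetical register offsets */
#define MY_SCRATCHPAD_OFFSET    0xB0

static void submit_and_flush(void __iomem *vaddr, u32 busaddr)
{
    writel(busaddr, vaddr + MY_REQUEST_PORT_OFFSET);
    /* read-back flushes the posted write all the way to the device */
    (void) readl(vaddr + MY_SCRATCHPAD_OFFSET);
}

static void submit_no_read(void __iomem *vaddr, u32 busaddr)
{
    /* posted write only; saves an MMIO round trip per command */
    writel(busaddr, vaddr + MY_REQUEST_PORT_OFFSET);
}

The transport setup code in hpsa.c selects the no-read access method only when the controller advertises an ioaccel mode, per the comment added in hpsa_enter_performant_mode().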
+19 -30
drivers/scsi/hpsa_cmd.h
···
 #define HPSA_VPD_HEADER_SZ              4

 /* Logical volume states */
-#define HPSA_VPD_LV_STATUS_UNSUPPORTED                  -1
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED                  0xff
 #define HPSA_LV_OK                                      0x0
 #define HPSA_LV_UNDERGOING_ERASE                        0x0F
 #define HPSA_LV_UNDERGOING_RPI                          0x12
···
     u8 LUN[HPSA_MAX_LUN][8];
 };

+struct ext_report_lun_entry {
+    u8 lunid[8];
+    u8 wwid[8];
+    u8 device_type;
+    u8 device_flags;
+    u8 lun_count; /* multi-lun device, how many luns */
+    u8 redundant_paths;
+    u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
+};
+
 struct ReportExtendedLUNdata {
     u8 LUNListLength[4];
     u8 extended_response_flag;
     u8 reserved[3];
-    u8 LUN[HPSA_MAX_LUN][24];
+    struct ext_report_lun_entry LUN[HPSA_MAX_LUN];
 };

 struct SenseSubsystem_info {
···
  * or a bus address.
  */

+#define COMMANDLIST_ALIGNMENT 128
 struct CommandList {
     struct CommandListHeader Header;
     struct RequestBlock Request;
···
     struct list_head list;
     struct completion *waiting;
     void *scsi_cmd;
-
-/* on 64 bit architectures, to get this to be 32-byte-aligned
- * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
- * we need PAD_32 bytes of padding (see below).  This does that.
- * If it happens that 64 bit and 32 bit systems need different
- * padding, PAD_32 and PAD_64 can be set independently, and.
- * the code below will do the right thing.
- */
-#define IS_32_BIT ((8 - sizeof(long))/4)
-#define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (40)
-#define PAD_64 (12)
-#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
-    u8 pad[COMMANDLIST_PAD];
-};
+} __aligned(COMMANDLIST_ALIGNMENT);

 /* Max S/G elements in I/O accelerator command */
 #define IOACCEL1_MAXSGENTRIES           24
···
  * Structure for I/O accelerator (mode 1) commands.
  * Note that this structure must be 128-byte aligned in size.
  */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
 struct io_accel1_cmd {
     u16 dev_handle;             /* 0x00 - 0x01 */
     u8  reserved1;              /* 0x02 */
···
     struct vals32 host_addr;    /* 0x70 - 0x77 */
     u8  CISS_LUN[8];            /* 0x78 - 0x7F */
     struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-#define IOACCEL1_PAD_64 0
-#define IOACCEL1_PAD_32 0
-#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
-            IS_64_BIT * IOACCEL1_PAD_64)
-    u8 pad[IOACCEL1_PAD];
-};
+} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);

 #define IOACCEL1_FUNCTION_SCSIIO        0x00
 #define IOACCEL1_SGLOFFSET              32
···
     u8 sense_data_buff[32];     /* sense/response data buffer */
 };

-#define IOACCEL2_64_PAD 76
-#define IOACCEL2_32_PAD 76
-#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
-            IS_64_BIT * IOACCEL2_64_PAD)
 /*
  * Structure for I/O accelerator (mode 2 or m2) commands.
  * Note that this structure must be 128-byte aligned in size.
  */
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
 struct io_accel2_cmd {
     u8  IU_type;                /* IU Type */
     u8  direction;              /* direction, memtype, and encryption */
···
     u32 tweak_upper;            /* Encryption tweak, upper 4 bytes */
     struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
     struct io_accel2_scsi_response error_data;
-    u8 pad[IOACCEL2_PAD];
-};
+} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);

 /*
  * defines for Mode 2 command struct
···
     u32     RepQCount;
     u32     RepQCtrAddrLow32;
     u32     RepQCtrAddrHigh32;
-#define MAX_REPLY_QUEUES 8
+#define MAX_REPLY_QUEUES 64
     struct vals32 RepQAddr[MAX_REPLY_QUEUES];
 };
+2 -1
drivers/scsi/lpfc/lpfc.h
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
···
 #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE          0x4000 /* process the rrq active list */
 #define HBA_FCP_IOQ_FLUSH       0x8000 /* FCP I/O queues being flushed */
+#define HBA_FW_DUMP_OP          0x10000 /* Skips fn reset before FW dump */
     uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
     struct lpfc_dmabuf slim2p;
+14 -9
drivers/scsi/lpfc/lpfc_attr.c
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
···
         phba->cfg_sriov_nr_virtfn = 0;
     }

+    if (opcode == LPFC_FW_DUMP)
+        phba->hba_flag |= HBA_FW_DUMP_OP;
+
     status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);

-    if (status != 0)
+    if (status != 0) {
+        phba->hba_flag &= ~HBA_FW_DUMP_OP;
         return status;
+    }

     /* wait for the device to be quiesced before firmware reset */
     msleep(100);
···
     uint8_t wwpn[WWN_SZ];
     int rc;

-    if (!phba->cfg_EnableXLane)
+    if (!phba->cfg_fof)
         return -EPERM;

     /* count may include a LF at end of string */
···
     uint8_t wwpn[WWN_SZ];
     int rc;

-    if (!phba->cfg_EnableXLane)
+    if (!phba->cfg_fof)
         return -EPERM;

     /* count may include a LF at end of string */
···
     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
     int val = 0;

-    if (!phba->cfg_EnableXLane)
+    if (!phba->cfg_fof)
         return -EPERM;

     if (!isdigit(buf[0]))
···

     int rc = 0;

-    if (!phba->cfg_EnableXLane)
+    if (!phba->cfg_fof)
         return -EPERM;

     if (oas_state) {
···
     uint64_t oas_lun;
     int len = 0;

-    if (!phba->cfg_EnableXLane)
+    if (!phba->cfg_fof)
         return -EPERM;

     if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
···
     uint64_t scsi_lun;
     ssize_t rc;

-    if (!phba->cfg_EnableXLane)
+    if (!phba->cfg_fof)
         return -EPERM;

     if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
···
 # 0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
 # Value range is [0x0,0x7f]. Default value is 0
 */
-LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");

 /*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
+1 -1
drivers/scsi/lpfc/lpfc_bsg.c
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2009-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
+1 -1
drivers/scsi/lpfc/lpfc_bsg.h
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2010-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2010-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
+5 -1
drivers/scsi/lpfc/lpfc_crtn.h
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
···
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
 void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
···
 int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
 int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
             uint64_t, lpfc_ctx_cmd);
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+            uint16_t, uint64_t, lpfc_ctx_cmd);

 void lpfc_mbox_timeout(unsigned long);
 void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+2 -2
drivers/scsi/lpfc/lpfc_debugfs.c
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
···
         goto too_big;
     }

-    if (phba->cfg_EnableXLane) {
+    if (phba->cfg_fof) {

         /* OAS CQ */
         qp = phba->sli4_hba.oas_cq;
+1 -1
drivers/scsi/lpfc/lpfc_els.c
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+4 -1
drivers/scsi/lpfc/lpfc_hbadisc.c
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
···
         ndlp->active_rrqs_xri_bitmap =
                 mempool_alloc(vport->phba->active_rrq_pool,
                           GFP_KERNEL);
+        if (ndlp->active_rrqs_xri_bitmap)
+            memset(ndlp->active_rrqs_xri_bitmap, 0,
+                   ndlp->phba->cfg_rrq_xri_bitmap_sz);
     }

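
The one-line fix above ("Fix for initializing RRQ bitmap") exists because mempool_alloc(), unlike kzalloc(), hands back recycled and therefore uninitialized memory; a bitmap of active RRQ XRIs that starts life with stale bits set would make exchanges look busy that aren't. A minimal sketch of the safe pattern, assuming kernel context (helper name hypothetical):

#include <linux/mempool.h>
#include <linux/string.h>

/* mempool memory is recycled, not zeroed: clear it before first use */
static unsigned long *get_clean_bitmap(mempool_t *pool, size_t sz)
{
    unsigned long *bm = mempool_alloc(pool, GFP_KERNEL);

    if (bm)
        memset(bm, 0, sz);
    return bm;
}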
+1 -1
drivers/scsi/lpfc/lpfc_hw.h
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
+1 -1
drivers/scsi/lpfc/lpfc_hw4.h
···
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2009-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
+201 -99
drivers/scsi/lpfc/lpfc_init.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 820 820 } 821 821 822 822 /** 823 + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 824 + * rspiocb which got deferred 825 + * 826 + * @phba: pointer to lpfc HBA data structure. 827 + * 828 + * This routine will cleanup completed slow path events after HBA is reset 829 + * when bringing down the SLI Layer. 830 + * 831 + * 832 + * Return codes 833 + * void. 834 + **/ 835 + static void 836 + lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 837 + { 838 + struct lpfc_iocbq *rspiocbq; 839 + struct hbq_dmabuf *dmabuf; 840 + struct lpfc_cq_event *cq_event; 841 + 842 + spin_lock_irq(&phba->hbalock); 843 + phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 844 + spin_unlock_irq(&phba->hbalock); 845 + 846 + while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 847 + /* Get the response iocb from the head of work queue */ 848 + spin_lock_irq(&phba->hbalock); 849 + list_remove_head(&phba->sli4_hba.sp_queue_event, 850 + cq_event, struct lpfc_cq_event, list); 851 + spin_unlock_irq(&phba->hbalock); 852 + 853 + switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 854 + case CQE_CODE_COMPL_WQE: 855 + rspiocbq = container_of(cq_event, struct lpfc_iocbq, 856 + cq_event); 857 + lpfc_sli_release_iocbq(phba, rspiocbq); 858 + break; 859 + case CQE_CODE_RECEIVE: 860 + case CQE_CODE_RECEIVE_V1: 861 + dmabuf = container_of(cq_event, struct hbq_dmabuf, 862 + cq_event); 863 + lpfc_in_buf_free(phba, &dmabuf->dbuf); 864 + } 865 + } 866 + } 867 + 868 + /** 869 + * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 870 + * @phba: pointer to lpfc HBA data structure. 871 + * 872 + * This routine will cleanup posted ELS buffers after the HBA is reset 873 + * when bringing down the SLI Layer. 874 + * 875 + * 876 + * Return codes 877 + * void. 878 + **/ 879 + static void 880 + lpfc_hba_free_post_buf(struct lpfc_hba *phba) 881 + { 882 + struct lpfc_sli *psli = &phba->sli; 883 + struct lpfc_sli_ring *pring; 884 + struct lpfc_dmabuf *mp, *next_mp; 885 + LIST_HEAD(buflist); 886 + int count; 887 + 888 + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 889 + lpfc_sli_hbqbuf_free_all(phba); 890 + else { 891 + /* Cleanup preposted buffers on the ELS ring */ 892 + pring = &psli->ring[LPFC_ELS_RING]; 893 + spin_lock_irq(&phba->hbalock); 894 + list_splice_init(&pring->postbufq, &buflist); 895 + spin_unlock_irq(&phba->hbalock); 896 + 897 + count = 0; 898 + list_for_each_entry_safe(mp, next_mp, &buflist, list) { 899 + list_del(&mp->list); 900 + count++; 901 + lpfc_mbuf_free(phba, mp->virt, mp->phys); 902 + kfree(mp); 903 + } 904 + 905 + spin_lock_irq(&phba->hbalock); 906 + pring->postbufq_cnt -= count; 907 + spin_unlock_irq(&phba->hbalock); 908 + } 909 + } 910 + 911 + /** 912 + * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 913 + * @phba: pointer to lpfc HBA data structure. 914 + * 915 + * This routine will cleanup the txcmplq after the HBA is reset when bringing 916 + * down the SLI Layer. 
917 + * 918 + * Return codes 919 + * void 920 + **/ 921 + static void 922 + lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 923 + { 924 + struct lpfc_sli *psli = &phba->sli; 925 + struct lpfc_sli_ring *pring; 926 + LIST_HEAD(completions); 927 + int i; 928 + 929 + for (i = 0; i < psli->num_rings; i++) { 930 + pring = &psli->ring[i]; 931 + if (phba->sli_rev >= LPFC_SLI_REV4) 932 + spin_lock_irq(&pring->ring_lock); 933 + else 934 + spin_lock_irq(&phba->hbalock); 935 + /* At this point in time the HBA is either reset or DOA. Either 936 + * way, nothing should be on txcmplq as it will NEVER complete. 937 + */ 938 + list_splice_init(&pring->txcmplq, &completions); 939 + pring->txcmplq_cnt = 0; 940 + 941 + if (phba->sli_rev >= LPFC_SLI_REV4) 942 + spin_unlock_irq(&pring->ring_lock); 943 + else 944 + spin_unlock_irq(&phba->hbalock); 945 + 946 + /* Cancel all the IOCBs from the completions list */ 947 + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 948 + IOERR_SLI_ABORTED); 949 + lpfc_sli_abort_iocb_ring(phba, pring); 950 + } 951 + } 952 + 953 + /** 823 954 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 955 + int i; 824 956 * @phba: pointer to lpfc HBA data structure. 825 957 * 826 958 * This routine will do uninitialization after the HBA is reset when bring ··· 965 833 static int 966 834 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 967 835 { 968 - struct lpfc_sli *psli = &phba->sli; 969 - struct lpfc_sli_ring *pring; 970 - struct lpfc_dmabuf *mp, *next_mp; 971 - LIST_HEAD(completions); 972 - int i; 973 - 974 - if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 975 - lpfc_sli_hbqbuf_free_all(phba); 976 - else { 977 - /* Cleanup preposted buffers on the ELS ring */ 978 - pring = &psli->ring[LPFC_ELS_RING]; 979 - list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 980 - list_del(&mp->list); 981 - pring->postbufq_cnt--; 982 - lpfc_mbuf_free(phba, mp->virt, mp->phys); 983 - kfree(mp); 984 - } 985 - } 986 - 987 - spin_lock_irq(&phba->hbalock); 988 - for (i = 0; i < psli->num_rings; i++) { 989 - pring = &psli->ring[i]; 990 - 991 - /* At this point in time the HBA is either reset or DOA. Either 992 - * way, nothing should be on txcmplq as it will NEVER complete. 993 - */ 994 - list_splice_init(&pring->txcmplq, &completions); 995 - spin_unlock_irq(&phba->hbalock); 996 - 997 - /* Cancel all the IOCBs from the completions list */ 998 - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 999 - IOERR_SLI_ABORTED); 1000 - 1001 - lpfc_sli_abort_iocb_ring(phba, pring); 1002 - spin_lock_irq(&phba->hbalock); 1003 - } 1004 - spin_unlock_irq(&phba->hbalock); 1005 - 836 + lpfc_hba_free_post_buf(phba); 837 + lpfc_hba_clean_txcmplq(phba); 1006 838 return 0; 1007 839 } 1008 840 ··· 986 890 { 987 891 struct lpfc_scsi_buf *psb, *psb_next; 988 892 LIST_HEAD(aborts); 989 - int ret; 990 893 unsigned long iflag = 0; 991 894 struct lpfc_sglq *sglq_entry = NULL; 992 895 993 - ret = lpfc_hba_down_post_s3(phba); 994 - if (ret) 995 - return ret; 896 + lpfc_hba_free_post_buf(phba); 897 + lpfc_hba_clean_txcmplq(phba); 898 + 996 899 /* At this point in time the HBA is either reset or DOA. 
Either 997 900 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 998 901 * on the lpfc_sgl_list so that it can either be freed if the ··· 1027 932 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1028 933 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1029 934 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 935 + 936 + lpfc_sli4_free_sp_events(phba); 1030 937 return 0; 1031 938 } 1032 939 ··· 1347 1250 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1348 1251 { 1349 1252 uint32_t old_host_status = phba->work_hs; 1350 - struct lpfc_sli_ring *pring; 1351 1253 struct lpfc_sli *psli = &phba->sli; 1352 1254 1353 1255 /* If the pci channel is offline, ignore possible errors, ··· 1375 1279 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1376 1280 * SCSI layer retry it after re-establishing link. 1377 1281 */ 1378 - pring = &psli->ring[psli->fcp_ring]; 1379 - lpfc_sli_abort_iocb_ring(phba, pring); 1282 + lpfc_sli_abort_fcp_rings(phba); 1380 1283 1381 1284 /* 1382 1285 * There was a firmware error. Take the hba offline and then ··· 1443 1348 { 1444 1349 struct lpfc_vport *vport = phba->pport; 1445 1350 struct lpfc_sli *psli = &phba->sli; 1446 - struct lpfc_sli_ring *pring; 1447 1351 uint32_t event_data; 1448 1352 unsigned long temperature; 1449 1353 struct temp_event temp_event_data; ··· 1494 1400 * Error iocb (I/O) on txcmplq and let the SCSI layer 1495 1401 * retry it after re-establishing link. 1496 1402 */ 1497 - pring = &psli->ring[psli->fcp_ring]; 1498 - lpfc_sli_abort_iocb_ring(phba, pring); 1403 + lpfc_sli_abort_fcp_rings(phba); 1499 1404 1500 1405 /* 1501 1406 * There was a firmware error. Take the hba offline and then ··· 2033 1940 2034 1941 switch (dev_id) { 2035 1942 case PCI_DEVICE_ID_FIREFLY: 2036 - m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1943 + m = (typeof(m)){"LP6000", "PCI", 1944 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2037 1945 break; 2038 1946 case PCI_DEVICE_ID_SUPERFLY: 2039 1947 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2040 - m = (typeof(m)){"LP7000", "PCI", 2041 - "Fibre Channel Adapter"}; 1948 + m = (typeof(m)){"LP7000", "PCI", ""}; 2042 1949 else 2043 - m = (typeof(m)){"LP7000E", "PCI", 2044 - "Fibre Channel Adapter"}; 1950 + m = (typeof(m)){"LP7000E", "PCI", ""}; 1951 + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2045 1952 break; 2046 1953 case PCI_DEVICE_ID_DRAGONFLY: 2047 1954 m = (typeof(m)){"LP8000", "PCI", 2048 - "Fibre Channel Adapter"}; 1955 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2049 1956 break; 2050 1957 case PCI_DEVICE_ID_CENTAUR: 2051 1958 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2052 - m = (typeof(m)){"LP9002", "PCI", 2053 - "Fibre Channel Adapter"}; 1959 + m = (typeof(m)){"LP9002", "PCI", ""}; 2054 1960 else 2055 - m = (typeof(m)){"LP9000", "PCI", 2056 - "Fibre Channel Adapter"}; 1961 + m = (typeof(m)){"LP9000", "PCI", ""}; 1962 + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2057 1963 break; 2058 1964 case PCI_DEVICE_ID_RFLY: 2059 1965 m = (typeof(m)){"LP952", "PCI", 2060 - "Fibre Channel Adapter"}; 1966 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2061 1967 break; 2062 1968 case PCI_DEVICE_ID_PEGASUS: 2063 1969 m = (typeof(m)){"LP9802", "PCI-X", 2064 - "Fibre Channel Adapter"}; 1970 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2065 1971 break; 2066 1972 case PCI_DEVICE_ID_THOR: 2067 1973 m = (typeof(m)){"LP10000", "PCI-X", 2068 - "Fibre Channel Adapter"}; 1974 + "Obsolete, Unsupported Fibre 
Channel Adapter"}; 2069 1975 break; 2070 1976 case PCI_DEVICE_ID_VIPER: 2071 1977 m = (typeof(m)){"LPX1000", "PCI-X", 2072 - "Fibre Channel Adapter"}; 1978 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2073 1979 break; 2074 1980 case PCI_DEVICE_ID_PFLY: 2075 1981 m = (typeof(m)){"LP982", "PCI-X", 2076 - "Fibre Channel Adapter"}; 1982 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2077 1983 break; 2078 1984 case PCI_DEVICE_ID_TFLY: 2079 1985 m = (typeof(m)){"LP1050", "PCI-X", 2080 - "Fibre Channel Adapter"}; 1986 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2081 1987 break; 2082 1988 case PCI_DEVICE_ID_HELIOS: 2083 1989 m = (typeof(m)){"LP11000", "PCI-X2", 2084 - "Fibre Channel Adapter"}; 1990 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2085 1991 break; 2086 1992 case PCI_DEVICE_ID_HELIOS_SCSP: 2087 1993 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2088 - "Fibre Channel Adapter"}; 1994 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2089 1995 break; 2090 1996 case PCI_DEVICE_ID_HELIOS_DCSP: 2091 1997 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2092 - "Fibre Channel Adapter"}; 1998 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2093 1999 break; 2094 2000 case PCI_DEVICE_ID_NEPTUNE: 2095 - m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 2001 + m = (typeof(m)){"LPe1000", "PCIe", 2002 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2096 2003 break; 2097 2004 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2098 - m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 2005 + m = (typeof(m)){"LPe1000-SP", "PCIe", 2006 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2099 2007 break; 2100 2008 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2101 - m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 2009 + m = (typeof(m)){"LPe1002-SP", "PCIe", 2010 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2102 2011 break; 2103 2012 case PCI_DEVICE_ID_BMID: 2104 2013 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2105 2014 break; 2106 2015 case PCI_DEVICE_ID_BSMB: 2107 - m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 2016 + m = (typeof(m)){"LP111", "PCI-X2", 2017 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2108 2018 break; 2109 2019 case PCI_DEVICE_ID_ZEPHYR: 2110 2020 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; ··· 2126 2030 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2127 2031 break; 2128 2032 case PCI_DEVICE_ID_LP101: 2129 - m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 2033 + m = (typeof(m)){"LP101", "PCI-X", 2034 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2130 2035 break; 2131 2036 case PCI_DEVICE_ID_LP10000S: 2132 - m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 2037 + m = (typeof(m)){"LP10000-S", "PCI", 2038 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2133 2039 break; 2134 2040 case PCI_DEVICE_ID_LP11000S: 2135 - m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 2041 + m = (typeof(m)){"LP11000-S", "PCI-X2", 2042 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2136 2043 break; 2137 2044 case PCI_DEVICE_ID_LPE11000S: 2138 - m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 2045 + m = (typeof(m)){"LPe11000-S", "PCIe", 2046 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2139 2047 break; 2140 2048 case PCI_DEVICE_ID_SAT: 2141 2049 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; ··· 2160 2060 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2161 2061 break; 2162 2062 case PCI_DEVICE_ID_HORNET: 2163 - m = 
(typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 2063 + m = (typeof(m)){"LP21000", "PCIe", 2064 + "Obsolete, Unsupported FCoE Adapter"}; 2164 2065 GE = 1; 2165 2066 break; 2166 2067 case PCI_DEVICE_ID_PROTEUS_VF: 2167 2068 m = (typeof(m)){"LPev12000", "PCIe IOV", 2168 - "Fibre Channel Adapter"}; 2069 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2169 2070 break; 2170 2071 case PCI_DEVICE_ID_PROTEUS_PF: 2171 2072 m = (typeof(m)){"LPev12000", "PCIe IOV", 2172 - "Fibre Channel Adapter"}; 2073 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2173 2074 break; 2174 2075 case PCI_DEVICE_ID_PROTEUS_S: 2175 2076 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2176 - "Fibre Channel Adapter"}; 2077 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2177 2078 break; 2178 2079 case PCI_DEVICE_ID_TIGERSHARK: 2179 2080 oneConnect = 1; ··· 2190 2089 break; 2191 2090 case PCI_DEVICE_ID_BALIUS: 2192 2091 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2193 - "Fibre Channel Adapter"}; 2092 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2194 2093 break; 2195 2094 case PCI_DEVICE_ID_LANCER_FC: 2196 - case PCI_DEVICE_ID_LANCER_FC_VF: 2197 2095 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2198 2096 break; 2097 + case PCI_DEVICE_ID_LANCER_FC_VF: 2098 + m = (typeof(m)){"LPe16000", "PCIe", 2099 + "Obsolete, Unsupported Fibre Channel Adapter"}; 2100 + break; 2199 2101 case PCI_DEVICE_ID_LANCER_FCOE: 2200 - case PCI_DEVICE_ID_LANCER_FCOE_VF: 2201 2102 oneConnect = 1; 2202 2103 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2104 + break; 2105 + case PCI_DEVICE_ID_LANCER_FCOE_VF: 2106 + oneConnect = 1; 2107 + m = (typeof(m)){"OCe15100", "PCIe", 2108 + "Obsolete, Unsupported FCoE"}; 2203 2109 break; 2204 2110 case PCI_DEVICE_ID_SKYHAWK: 2205 2111 case PCI_DEVICE_ID_SKYHAWK_VF: ··· 4722 4614 phba->link_state = LPFC_HBA_ERROR; 4723 4615 return; 4724 4616 } 4725 - lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4617 + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 4618 + lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4619 + else 4620 + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 4726 4621 lpfc_offline(phba); 4727 4622 lpfc_sli_brdrestart(phba); 4728 4623 lpfc_online(phba); ··· 9774 9663 static void 9775 9664 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 9776 9665 { 9777 - struct lpfc_sli *psli = &phba->sli; 9778 - struct lpfc_sli_ring *pring; 9779 - 9780 9666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9781 9667 "2723 PCI channel I/O abort preparing for recovery\n"); 9782 9668 ··· 9781 9673 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 9782 9674 * and let the SCSI mid-layer to retry them to recover. 9783 9675 */ 9784 - pring = &psli->ring[psli->fcp_ring]; 9785 - lpfc_sli_abort_iocb_ring(phba, pring); 9676 + lpfc_sli_abort_fcp_rings(phba); 9786 9677 } 9787 9678 9788 9679 /** ··· 10524 10417 static void 10525 10418 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 10526 10419 { 10527 - struct lpfc_sli *psli = &phba->sli; 10528 - struct lpfc_sli_ring *pring; 10529 - 10530 10420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10531 10421 "2828 PCI channel I/O abort preparing for recovery\n"); 10532 10422 /* 10533 10423 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10534 10424 * and let the SCSI mid-layer to retry them to recover. 
10535 10425 */ 10536 - pring = &psli->ring[psli->fcp_ring]; 10537 - lpfc_sli_abort_iocb_ring(phba, pring); 10426 + lpfc_sli_abort_fcp_rings(phba); 10538 10427 } 10539 10428 10540 10429 /** ··· 11001 10898 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 11002 10899 phba->cfg_fof = 1; 11003 10900 } else { 11004 - phba->cfg_EnableXLane = 0; 10901 + phba->cfg_fof = 0; 11005 10902 if (phba->device_data_mem_pool) 11006 10903 mempool_destroy(phba->device_data_mem_pool); 11007 10904 phba->device_data_mem_pool = NULL; ··· 11031 10928 if (rc) 11032 10929 return -ENOMEM; 11033 10930 11034 - if (phba->cfg_EnableXLane) { 10931 + if (phba->cfg_fof) { 11035 10932 11036 10933 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, 11037 10934 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); ··· 11050 10947 return 0; 11051 10948 11052 10949 out_oas_wq: 11053 - if (phba->cfg_EnableXLane) 11054 - lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); 10950 + lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); 11055 10951 out_oas_cq: 11056 10952 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); 11057 10953 return rc; ··· 11084 10982 11085 10983 phba->sli4_hba.fof_eq = qdesc; 11086 10984 11087 - if (phba->cfg_EnableXLane) { 10985 + if (phba->cfg_fof) { 11088 10986 11089 10987 /* Create OAS CQ */ 11090 10988 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
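Most of the lpfc_init.c churn above is the obsolete-device marking: lpfc_get_hba_model_desc() fills an on-stack descriptor per PCI device ID with a GNU C compound literal, so retiring a board is a one-line string edit. A minimal standalone sketch of the idiom, using a made-up device ID (typeof and compound literals are GNU extensions the kernel builds with):

#include <stdio.h>

int main(void)
{
	struct {
		const char *name;
		const char *bus;
		const char *function;
	} m = {"", "", ""};
	unsigned short dev_id = 0xf0a5;	/* hypothetical PCI device ID */

	switch (dev_id) {
	case 0xf0a5:
		/* one compound literal assigns all three fields at once */
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	default:
		m = (typeof(m)){"Unknown Adapter", "", ""};
		break;
	}
	printf("%s %s %s\n", m.name, m.bus, m.function);
	return 0;
}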
+1 -1
drivers/scsi/lpfc/lpfc_mem.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2012 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+44 -16
drivers/scsi/lpfc/lpfc_scsi.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 73 73 { 74 74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 75 75 76 - if (vport->phba->cfg_EnableXLane) 76 + if (vport->phba->cfg_fof) 77 77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 78 78 else 79 79 return (struct lpfc_rport_data *)sdev->hostdata; ··· 3462 3462 * If the OAS driver feature is enabled and the lun is enabled for 3463 3463 * OAS, set the oas iocb related flags. 3464 3464 */ 3465 - if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *) 3465 + if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3466 3466 scsi_cmnd->device->hostdata)->oas_enabled) 3467 3467 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; 3468 3468 return 0; ··· 4314 4314 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4315 4315 4316 4316 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 4317 + piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4317 4318 4318 4319 /* 4319 4320 * There are three possibilities here - use scatter-gather segment, use ··· 4783 4782 struct lpfc_scsi_buf *lpfc_cmd; 4784 4783 IOCB_t *cmd, *icmd; 4785 4784 int ret = SUCCESS, status = 0; 4786 - unsigned long flags; 4785 + struct lpfc_sli_ring *pring_s4; 4786 + int ring_number, ret_val; 4787 + unsigned long flags, iflags; 4787 4788 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4788 4789 4789 4790 status = fc_block_scsi_eh(cmnd); ··· 4836 4833 4837 4834 BUG_ON(iocb->context1 != lpfc_cmd); 4838 4835 4836 + /* abort issued in recovery is still in progress */ 4837 + if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 4838 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4839 + "3389 SCSI Layer I/O Abort Request is pending\n"); 4840 + spin_unlock_irqrestore(&phba->hbalock, flags); 4841 + goto wait_for_cmpl; 4842 + } 4843 + 4839 4844 abtsiocb = __lpfc_sli_get_iocbq(phba); 4840 4845 if (abtsiocb == NULL) { 4841 4846 ret = FAILED; ··· 4882 4871 4883 4872 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4884 4873 abtsiocb->vport = vport; 4874 + if (phba->sli_rev == LPFC_SLI_REV4) { 4875 + ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; 4876 + pring_s4 = &phba->sli.ring[ring_number]; 4877 + /* Note: both hbalock and ring_lock must be set here */ 4878 + spin_lock_irqsave(&pring_s4->ring_lock, iflags); 4879 + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 4880 + abtsiocb, 0); 4881 + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 4882 + } else { 4883 + ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 4884 + abtsiocb, 0); 4885 + } 4885 4886 /* no longer need the lock after this point */ 4886 4887 spin_unlock_irqrestore(&phba->hbalock, flags); 4887 4888 4888 - if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 4889 - IOCB_ERROR) { 4889 + 4890 + if (ret_val == IOCB_ERROR) { 4890 4891 lpfc_sli_release_iocbq(phba, abtsiocb); 4891 4892 ret = FAILED; 4892 4893 goto out; ··· 4908 4885 lpfc_sli_handle_fast_ring_event(phba, 4909 4886 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 4910 4887 4888 + wait_for_cmpl: 4911 4889 lpfc_cmd->waitq = &waitq; 4912 4890 /* Wait for abort to complete */ 4913 4891 wait_event_timeout(waitq, 4914 4892 (lpfc_cmd->pCmd != cmnd), 4915 4893 
msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 4894 + 4895 + spin_lock_irqsave(shost->host_lock, flags); 4916 4896 lpfc_cmd->waitq = NULL; 4897 + spin_unlock_irqrestore(shost->host_lock, flags); 4917 4898 4918 4899 if (lpfc_cmd->pCmd == cmnd) { 4919 4900 ret = FAILED; ··· 5199 5172 5200 5173 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5201 5174 if (cnt) 5202 - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 5203 - tgt_id, lun_id, context); 5175 + lpfc_sli_abort_taskmgmt(vport, 5176 + &phba->sli.ring[phba->sli.fcp_ring], 5177 + tgt_id, lun_id, context); 5204 5178 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5205 5179 while (time_after(later, jiffies) && cnt) { 5206 5180 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); ··· 5519 5491 if (!rport || fc_remote_port_chkready(rport)) 5520 5492 return -ENXIO; 5521 5493 5522 - if (phba->cfg_EnableXLane) { 5494 + if (phba->cfg_fof) { 5523 5495 5524 5496 /* 5525 5497 * Check to see if the device data structure for the lun ··· 5644 5616 struct lpfc_device_data *device_data = sdev->hostdata; 5645 5617 5646 5618 atomic_dec(&phba->sdev_cnt); 5647 - if ((phba->cfg_EnableXLane) && (device_data)) { 5619 + if ((phba->cfg_fof) && (device_data)) { 5648 5620 spin_lock_irqsave(&phba->devicelock, flags); 5649 5621 device_data->available = false; 5650 5622 if (!device_data->oas_enabled) ··· 5683 5655 int memory_flags; 5684 5656 5685 5657 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5686 - !(phba->cfg_EnableXLane)) 5658 + !(phba->cfg_fof)) 5687 5659 return NULL; 5688 5660 5689 5661 /* Attempt to create the device data to contain lun info */ ··· 5721 5693 { 5722 5694 5723 5695 if (unlikely(!phba) || !lun_info || 5724 - !(phba->cfg_EnableXLane)) 5696 + !(phba->cfg_fof)) 5725 5697 return; 5726 5698 5727 5699 if (!list_empty(&lun_info->listentry)) ··· 5755 5727 struct lpfc_device_data *lun_info; 5756 5728 5757 5729 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 5758 - !phba->cfg_EnableXLane) 5730 + !phba->cfg_fof) 5759 5731 return NULL; 5760 5732 5761 5733 /* Check to see if the lun is already enabled for OAS. */ ··· 5817 5789 !starting_lun || !found_vport_wwpn || 5818 5790 !found_target_wwpn || !found_lun || !found_lun_status || 5819 5791 (*starting_lun == NO_MORE_OAS_LUN) || 5820 - !phba->cfg_EnableXLane) 5792 + !phba->cfg_fof) 5821 5793 return false; 5822 5794 5823 5795 lun = *starting_lun; ··· 5901 5873 unsigned long flags; 5902 5874 5903 5875 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5904 - !phba->cfg_EnableXLane) 5876 + !phba->cfg_fof) 5905 5877 return false; 5906 5878 5907 5879 spin_lock_irqsave(&phba->devicelock, flags); ··· 5958 5930 unsigned long flags; 5959 5931 5960 5932 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5961 - !phba->cfg_EnableXLane) 5933 + !phba->cfg_fof) 5962 5934 return false; 5963 5935 5964 5936 spin_lock_irqsave(&phba->devicelock, flags);
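One of the subtler lpfc_scsi.c fixes above is taking shost->host_lock around lpfc_cmd->waitq = NULL: the wait queue lives on the abort handler's stack, and the completion path tests-and-wakes the same pointer, so it must be published and retired under the lock the waker holds. A userspace sketch of the same discipline, under stated assumptions (hypothetical names; a pthread mutex and condvar stand in for the host lock and the on-stack wait queue):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct cmd {
	pthread_mutex_t lock;	/* plays the role of shost->host_lock */
	pthread_cond_t *waitq;	/* NULL unless an aborter is waiting */
	bool done;
};

static void *completion_path(void *arg)
{
	struct cmd *c = arg;

	pthread_mutex_lock(&c->lock);
	c->done = true;
	if (c->waitq)		/* wake only a waiter that still exists */
		pthread_cond_signal(c->waitq);
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

int main(void)
{
	struct cmd c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_cond_t waitq = PTHREAD_COND_INITIALIZER; /* "on stack" */
	struct timespec deadline;
	pthread_t t;

	pthread_create(&t, NULL, completion_path, &c);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;	/* bounded, like wait_event_timeout() */

	pthread_mutex_lock(&c.lock);
	c.waitq = &waitq;	/* publish under the lock */
	while (!c.done && !pthread_cond_timedwait(&waitq, &c.lock, &deadline))
		;
	c.waitq = NULL;		/* retire under the same lock: after this a
				 * late completion cannot signal a condvar
				 * that is about to go out of scope */
	pthread_mutex_unlock(&c.lock);

	pthread_join(t, NULL);
	printf("command %s\n", c.done ? "completed" : "timed out");
	return 0;
}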
+1 -1
drivers/scsi/lpfc/lpfc_scsi.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * *
+227 -70
drivers/scsi/lpfc/lpfc_sli.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 3532 3532 /* Error everything on txq and txcmplq 3533 3533 * First do the txq. 3534 3534 */ 3535 - spin_lock_irq(&phba->hbalock); 3536 - list_splice_init(&pring->txq, &completions); 3535 + if (phba->sli_rev >= LPFC_SLI_REV4) { 3536 + spin_lock_irq(&pring->ring_lock); 3537 + list_splice_init(&pring->txq, &completions); 3538 + pring->txq_cnt = 0; 3539 + spin_unlock_irq(&pring->ring_lock); 3537 3540 3538 - /* Next issue ABTS for everything on the txcmplq */ 3539 - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3540 - lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3541 + spin_lock_irq(&phba->hbalock); 3542 + /* Next issue ABTS for everything on the txcmplq */ 3543 + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3544 + lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3545 + spin_unlock_irq(&phba->hbalock); 3546 + } else { 3547 + spin_lock_irq(&phba->hbalock); 3548 + list_splice_init(&pring->txq, &completions); 3549 + pring->txq_cnt = 0; 3541 3550 3542 - spin_unlock_irq(&phba->hbalock); 3551 + /* Next issue ABTS for everything on the txcmplq */ 3552 + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3553 + lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3554 + spin_unlock_irq(&phba->hbalock); 3555 + } 3543 3556 3544 3557 /* Cancel all the IOCBs from the completions list */ 3545 3558 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3546 3559 IOERR_SLI_ABORTED); 3547 3560 } 3561 + 3562 + /** 3563 + * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3564 + * @phba: Pointer to HBA context object. 3565 + * @pring: Pointer to driver SLI ring object. 3566 + * 3567 + * This function aborts all iocbs in FCP rings and frees all the iocb 3568 + * objects in txq. This function issues an abort iocb for all the iocb commands 3569 + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3570 + * the return of this function. The caller is not required to hold any locks. 
3571 + **/ 3572 + void 3573 + lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3574 + { 3575 + struct lpfc_sli *psli = &phba->sli; 3576 + struct lpfc_sli_ring *pring; 3577 + uint32_t i; 3578 + 3579 + /* Look on all the FCP Rings for the iotag */ 3580 + if (phba->sli_rev >= LPFC_SLI_REV4) { 3581 + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3582 + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; 3583 + lpfc_sli_abort_iocb_ring(phba, pring); 3584 + } 3585 + } else { 3586 + pring = &psli->ring[psli->fcp_ring]; 3587 + lpfc_sli_abort_iocb_ring(phba, pring); 3588 + } 3589 + } 3590 + 3548 3591 3549 3592 /** 3550 3593 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring ··· 3606 3563 LIST_HEAD(txcmplq); 3607 3564 struct lpfc_sli *psli = &phba->sli; 3608 3565 struct lpfc_sli_ring *pring; 3609 - 3610 - /* Currently, only one fcp ring */ 3611 - pring = &psli->ring[psli->fcp_ring]; 3566 + uint32_t i; 3612 3567 3613 3568 spin_lock_irq(&phba->hbalock); 3614 - /* Retrieve everything on txq */ 3615 - list_splice_init(&pring->txq, &txq); 3616 - 3617 - /* Retrieve everything on the txcmplq */ 3618 - list_splice_init(&pring->txcmplq, &txcmplq); 3619 - 3620 3569 /* Indicate the I/O queues are flushed */ 3621 3570 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3622 3571 spin_unlock_irq(&phba->hbalock); 3623 3572 3624 - /* Flush the txq */ 3625 - lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3626 - IOERR_SLI_DOWN); 3573 + /* Look on all the FCP Rings for the iotag */ 3574 + if (phba->sli_rev >= LPFC_SLI_REV4) { 3575 + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3576 + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; 3627 3577 3628 - /* Flush the txcmpq */ 3629 - lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3630 - IOERR_SLI_DOWN); 3578 + spin_lock_irq(&pring->ring_lock); 3579 + /* Retrieve everything on txq */ 3580 + list_splice_init(&pring->txq, &txq); 3581 + /* Retrieve everything on the txcmplq */ 3582 + list_splice_init(&pring->txcmplq, &txcmplq); 3583 + pring->txq_cnt = 0; 3584 + pring->txcmplq_cnt = 0; 3585 + spin_unlock_irq(&pring->ring_lock); 3586 + 3587 + /* Flush the txq */ 3588 + lpfc_sli_cancel_iocbs(phba, &txq, 3589 + IOSTAT_LOCAL_REJECT, 3590 + IOERR_SLI_DOWN); 3591 + /* Flush the txcmpq */ 3592 + lpfc_sli_cancel_iocbs(phba, &txcmplq, 3593 + IOSTAT_LOCAL_REJECT, 3594 + IOERR_SLI_DOWN); 3595 + } 3596 + } else { 3597 + pring = &psli->ring[psli->fcp_ring]; 3598 + 3599 + spin_lock_irq(&phba->hbalock); 3600 + /* Retrieve everything on txq */ 3601 + list_splice_init(&pring->txq, &txq); 3602 + /* Retrieve everything on the txcmplq */ 3603 + list_splice_init(&pring->txcmplq, &txcmplq); 3604 + pring->txq_cnt = 0; 3605 + pring->txcmplq_cnt = 0; 3606 + spin_unlock_irq(&phba->hbalock); 3607 + 3608 + /* Flush the txq */ 3609 + lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3610 + IOERR_SLI_DOWN); 3611 + /* Flush the txcmpq */ 3612 + lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3613 + IOERR_SLI_DOWN); 3614 + } 3631 3615 } 3632 3616 3633 3617 /** ··· 4057 3987 { 4058 3988 struct lpfc_sli *psli = &phba->sli; 4059 3989 uint16_t cfg_value; 4060 - int rc; 3990 + int rc = 0; 4061 3991 4062 3992 /* Reset HBA */ 4063 3993 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4064 - "0295 Reset HBA Data: x%x x%x\n", 4065 - phba->pport->port_state, psli->sli_flag); 3994 + "0295 Reset HBA Data: x%x x%x x%x\n", 3995 + phba->pport->port_state, psli->sli_flag, 3996 + phba->hba_flag); 4066 3997 4067 3998 /* perform board reset */ 4068 3999 phba->fc_eventTag = 0; ··· 4075 4004 
psli->sli_flag &= ~(LPFC_PROCESS_LA); 4076 4005 phba->fcf.fcf_flag = 0; 4077 4006 spin_unlock_irq(&phba->hbalock); 4007 + 4008 + /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4009 + if (phba->hba_flag & HBA_FW_DUMP_OP) { 4010 + phba->hba_flag &= ~HBA_FW_DUMP_OP; 4011 + return rc; 4012 + } 4078 4013 4079 4014 /* Now physically reset the device */ 4080 4015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, ··· 5079 5002 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 5080 5003 } 5081 5004 5082 - if (phba->cfg_EnableXLane) 5005 + if (phba->cfg_fof) 5083 5006 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 5084 5007 5085 5008 if (phba->sli4_hba.hba_eq) { ··· 6799 6722 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6800 6723 MAILBOX_t *mb = &pmbox->u.mb; 6801 6724 struct lpfc_sli *psli = &phba->sli; 6802 - struct lpfc_sli_ring *pring; 6803 6725 6804 6726 /* If the mailbox completed, process the completion and return */ 6805 6727 if (lpfc_sli4_process_missed_mbox_completions(phba)) ··· 6840 6764 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6841 6765 spin_unlock_irq(&phba->hbalock); 6842 6766 6843 - pring = &psli->ring[psli->fcp_ring]; 6844 - lpfc_sli_abort_iocb_ring(phba, pring); 6767 + lpfc_sli_abort_fcp_rings(phba); 6845 6768 6846 6769 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6847 6770 "0345 Resetting board due to mailbox timeout\n"); ··· 8208 8133 abort_tag = (uint32_t) iocbq->iotag; 8209 8134 xritag = iocbq->sli4_xritag; 8210 8135 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8136 + wqe->generic.wqe_com.word10 = 0; 8211 8137 /* words0-2 bpl convert bde */ 8212 8138 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8213 8139 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / ··· 8715 8639 8716 8640 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8717 8641 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8718 - if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8719 - LPFC_IO_OAS))) { 8642 + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { 8720 8643 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; 8721 8644 } else { 8722 8645 wq = phba->sli4_hba.oas_wq; ··· 8810 8735 8811 8736 if (phba->sli_rev == LPFC_SLI_REV4) { 8812 8737 if (piocb->iocb_flag & LPFC_IO_FCP) { 8813 - if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8738 + if (!phba->cfg_fof || (!(piocb->iocb_flag & 8814 8739 LPFC_IO_OAS))) { 8815 8740 if (unlikely(!phba->sli4_hba.fcp_wq)) 8816 8741 return IOCB_ERROR; ··· 9245 9170 pring->sli.sli3.next_cmdidx = 0; 9246 9171 pring->sli.sli3.local_getidx = 0; 9247 9172 pring->sli.sli3.cmdidx = 0; 9173 + pring->flag = 0; 9248 9174 INIT_LIST_HEAD(&pring->txq); 9249 9175 INIT_LIST_HEAD(&pring->txcmplq); 9250 9176 INIT_LIST_HEAD(&pring->iocb_continueq); ··· 9881 9805 } 9882 9806 9883 9807 /** 9884 - * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 9885 - * @phba: Pointer to HBA context object. 9886 - * @pring: Pointer to driver SLI ring object. 9887 - * 9888 - * This function aborts all iocbs in the given ring and frees all the iocb 9889 - * objects in txq. This function issues abort iocbs unconditionally for all 9890 - * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed 9891 - * to complete before the return of this function. The caller is not required 9892 - * to hold any locks. 
9893 - **/ 9894 - static void 9895 - lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 9896 - { 9897 - LIST_HEAD(completions); 9898 - struct lpfc_iocbq *iocb, *next_iocb; 9899 - 9900 - if (pring->ringno == LPFC_ELS_RING) 9901 - lpfc_fabric_abort_hba(phba); 9902 - 9903 - spin_lock_irq(&phba->hbalock); 9904 - 9905 - /* Take off all the iocbs on txq for cancelling */ 9906 - list_splice_init(&pring->txq, &completions); 9907 - pring->txq_cnt = 0; 9908 - 9909 - /* Next issue ABTS for everything on the txcmplq */ 9910 - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 9911 - lpfc_sli_abort_iotag_issue(phba, pring, iocb); 9912 - 9913 - spin_unlock_irq(&phba->hbalock); 9914 - 9915 - /* Cancel all the IOCBs from the completions list */ 9916 - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9917 - IOERR_SLI_ABORTED); 9918 - } 9919 - 9920 - /** 9921 9808 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9922 9809 * @phba: pointer to lpfc HBA data structure. 9923 9810 * ··· 9895 9856 9896 9857 for (i = 0; i < psli->num_rings; i++) { 9897 9858 pring = &psli->ring[i]; 9898 - lpfc_sli_iocb_ring_abort(phba, pring); 9859 + lpfc_sli_abort_iocb_ring(phba, pring); 9899 9860 } 9900 9861 } 9901 9862 ··· 10117 10078 } 10118 10079 10119 10080 return errcnt; 10081 + } 10082 + 10083 + /** 10084 + * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 10085 + * @vport: Pointer to virtual port. 10086 + * @pring: Pointer to driver SLI ring object. 10087 + * @tgt_id: SCSI ID of the target. 10088 + * @lun_id: LUN ID of the scsi device. 10089 + * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10090 + * 10091 + * This function sends an abort command for every SCSI command 10092 + * associated with the given virtual port pending on the ring 10093 + * filtered by lpfc_sli_validate_fcp_iocb function. 10094 + * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 10095 + * FCP iocbs associated with lun specified by tgt_id and lun_id 10096 + * parameters 10097 + * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 10098 + * FCP iocbs associated with SCSI target specified by tgt_id parameter. 10099 + * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 10100 + * FCP iocbs associated with virtual port. 10101 + * This function returns number of iocbs it aborted . 10102 + * This function is called with no locks held right after a taskmgmt 10103 + * command is sent. 10104 + **/ 10105 + int 10106 + lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 10107 + uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 10108 + { 10109 + struct lpfc_hba *phba = vport->phba; 10110 + struct lpfc_iocbq *abtsiocbq; 10111 + struct lpfc_iocbq *iocbq; 10112 + IOCB_t *icmd; 10113 + int sum, i, ret_val; 10114 + unsigned long iflags; 10115 + struct lpfc_sli_ring *pring_s4; 10116 + uint32_t ring_number; 10117 + 10118 + spin_lock_irq(&phba->hbalock); 10119 + 10120 + /* all I/Os are in process of being flushed */ 10121 + if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 10122 + spin_unlock_irq(&phba->hbalock); 10123 + return 0; 10124 + } 10125 + sum = 0; 10126 + 10127 + for (i = 1; i <= phba->sli.last_iotag; i++) { 10128 + iocbq = phba->sli.iocbq_lookup[i]; 10129 + 10130 + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 10131 + cmd) != 0) 10132 + continue; 10133 + 10134 + /* 10135 + * If the iocbq is already being aborted, don't take a second 10136 + * action, but do count it. 
10137 + */ 10138 + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 10139 + continue; 10140 + 10141 + /* issue ABTS for this IOCB based on iotag */ 10142 + abtsiocbq = __lpfc_sli_get_iocbq(phba); 10143 + if (abtsiocbq == NULL) 10144 + continue; 10145 + 10146 + icmd = &iocbq->iocb; 10147 + abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 10148 + abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 10149 + if (phba->sli_rev == LPFC_SLI_REV4) 10150 + abtsiocbq->iocb.un.acxri.abortIoTag = 10151 + iocbq->sli4_xritag; 10152 + else 10153 + abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 10154 + abtsiocbq->iocb.ulpLe = 1; 10155 + abtsiocbq->iocb.ulpClass = icmd->ulpClass; 10156 + abtsiocbq->vport = vport; 10157 + 10158 + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10159 + abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; 10160 + if (iocbq->iocb_flag & LPFC_IO_FCP) 10161 + abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 10162 + 10163 + if (lpfc_is_link_up(phba)) 10164 + abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 10165 + else 10166 + abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 10167 + 10168 + /* Setup callback routine and issue the command. */ 10169 + abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 10170 + 10171 + /* 10172 + * Indicate the IO is being aborted by the driver and set 10173 + * the caller's flag into the aborted IO. 10174 + */ 10175 + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 10176 + 10177 + if (phba->sli_rev == LPFC_SLI_REV4) { 10178 + ring_number = MAX_SLI3_CONFIGURED_RINGS + 10179 + iocbq->fcp_wqidx; 10180 + pring_s4 = &phba->sli.ring[ring_number]; 10181 + /* Note: both hbalock and ring_lock must be set here */ 10182 + spin_lock_irqsave(&pring_s4->ring_lock, iflags); 10183 + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 10184 + abtsiocbq, 0); 10185 + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 10186 + } else { 10187 + ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 10188 + abtsiocbq, 0); 10189 + } 10190 + 10191 + 10192 + if (ret_val == IOCB_ERROR) 10193 + __lpfc_sli_release_iocbq(phba, abtsiocbq); 10194 + else 10195 + sum++; 10196 + } 10197 + spin_unlock_irq(&phba->hbalock); 10198 + return sum; 10120 10199 } 10121 10200 10122 10201 /**
+1 -1
drivers/scsi/lpfc/lpfc_sli.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * *
+1 -1
drivers/scsi/lpfc/lpfc_sli4.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2009-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * *
+3 -3
drivers/scsi/lpfc/lpfc_version.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.3.45" 21 + #define LPFC_DRIVER_VERSION "10.2.8001.0." 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 24 24 /* Used for SLI 2/3 */ ··· 30 30 31 31 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 32 32 LPFC_DRIVER_VERSION 33 - #define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex. All rights reserved." 33 + #define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
+8 -8
drivers/scsi/qla2xxx/qla_def.h
··· 1648 1648 */ 1649 1649 struct crc_context { 1650 1650 uint32_t handle; /* System handle. */ 1651 - uint32_t ref_tag; 1652 - uint16_t app_tag; 1651 + __le32 ref_tag; 1652 + __le16 app_tag; 1653 1653 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ 1654 1654 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ 1655 - uint16_t guard_seed; /* Initial Guard Seed */ 1656 - uint16_t prot_opts; /* Requested Data Protection Mode */ 1657 - uint16_t blk_size; /* Data size in bytes */ 1655 + __le16 guard_seed; /* Initial Guard Seed */ 1656 + __le16 prot_opts; /* Requested Data Protection Mode */ 1657 + __le16 blk_size; /* Data size in bytes */ 1658 1658 uint16_t runt_blk_guard; /* Guard value for runt block (tape 1659 1659 * only) */ 1660 - uint32_t byte_count; /* Total byte count/ total data 1660 + __le32 byte_count; /* Total byte count/ total data 1661 1661 * transfer count */ 1662 1662 union { 1663 1663 struct { ··· 1671 1671 uint32_t reserved_6; 1672 1672 } nobundling; 1673 1673 struct { 1674 - uint32_t dif_byte_count; /* Total DIF byte 1674 + __le32 dif_byte_count; /* Total DIF byte 1675 1675 * count */ 1676 1676 uint16_t reserved_1; 1677 - uint16_t dseg_count; /* Data segment count */ 1677 + __le16 dseg_count; /* Data segment count */ 1678 1678 uint32_t reserved_2; 1679 1679 uint32_t data_address[2]; 1680 1680 uint32_t data_length;
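The qla_def.h hunk above is the data-structure half of the qla2xxx sparse fix: fields holding little-endian wire values are re-typed from plain integers to __le16/__le32, which are ordinary integers to the compiler but distinct "bitwise" types to sparse, so any assignment that skips cpu_to_le*()/le*_to_cpu() gets flagged. A miniature of that machinery, assuming a little-endian host for the conversion helper (in-tree code gets the real definitions from <linux/types.h> and <asm/byteorder.h>):

/* Under sparse (__CHECKER__) the bitwise attribute makes le16 a type of
 * its own; under a normal compiler the macros vanish and le16 is just
 * an unsigned short. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned short u16;
typedef u16 __bitwise le16;

/* Sketch only: assumes a little-endian host, so no byte swap is
 * needed; the kernel's cpu_to_le16() swaps on big-endian machines. */
static inline le16 demo_cpu_to_le16(u16 v)
{
	return (__force le16)v;
}

struct crc_ctx_demo {
	le16 app_tag;		/* wire format: little endian */
};

int main(void)
{
	struct crc_ctx_demo ctx;

	ctx.app_tag = demo_cpu_to_le16(0x1234);	/* clean */
	/* ctx.app_tag = 0x1234; */	/* sparse: incorrect type in assignment */
	return 0;
}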
+10 -5
drivers/scsi/qla2xxx/qla_target.c
··· 1996 1996 * have been immplemented by TCM, before AppTag is avail. 1997 1997 * Look for modesense_handlers[] 1998 1998 */ 1999 - ctx->app_tag = __constant_cpu_to_le16(0); 1999 + ctx->app_tag = 0; 2000 2000 ctx->app_tag_mask[0] = 0x0; 2001 2001 ctx->app_tag_mask[1] = 0x0; 2002 2002 ··· 2078 2078 struct se_cmd *se_cmd = &cmd->se_cmd; 2079 2079 uint32_t h; 2080 2080 struct atio_from_isp *atio = &prm->cmd->atio; 2081 + uint16_t t16; 2081 2082 2082 2083 sgc = 0; 2083 2084 ha = vha->hw; ··· 2175 2174 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2176 2175 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2177 2176 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2178 - pkt->ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2179 - pkt->flags |= (atio->u.isp24.attr << 9); 2177 + 2178 + /* silence compile warning */ 2179 + t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2180 + pkt->ox_id = cpu_to_le16(t16); 2181 + 2182 + t16 = (atio->u.isp24.attr << 9); 2183 + pkt->flags |= cpu_to_le16(t16); 2180 2184 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 2181 2185 2182 2186 /* Set transfer direction */ ··· 2256 2250 2257 2251 if (bundling && prm->prot_seg_cnt) { 2258 2252 /* Walks dif segments */ 2259 - pkt->add_flags |= 2260 - __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA); 2253 + pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 2261 2254 2262 2255 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 2263 2256 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
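In the qla_target.c hunk above, ox_id comes out of the FC header in big-endian wire order and must land in the CTIO in the little-endian order the firmware expects. The old swab16() produced the right bytes, but said nothing a checker could verify; the replacement spells out both halves — be16_to_cpu() to host order, then cpu_to_le16() into the typed field. A userspace equivalent, using the glibc <endian.h> names as stand-ins for the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire_be = htobe16(0xabcd);	/* ox_id as carried in the FC header */
	uint16_t host, fw_le;

	host  = be16toh(wire_be);	/* wire (big endian) -> CPU order */
	fw_le = htole16(host);		/* CPU order -> firmware field (LE) */

	/* On a little-endian host the two steps compose to the old single
	 * byte swap; writing them separately keeps every intermediate
	 * value in a well-defined byte order that sparse can type-check. */
	printf("host=0x%04x fw_le(raw)=0x%04x\n", host, fw_le);
	return 0;
}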
+8 -8
drivers/scsi/qla2xxx/qla_target.h
··· 316 316 uint8_t seq_id; 317 317 uint8_t df_ctl; 318 318 uint16_t seq_cnt; 319 - uint16_t ox_id; 319 + __be16 ox_id; 320 320 uint16_t rx_id; 321 321 uint32_t parameter; 322 322 } __packed; ··· 441 441 union { 442 442 struct { 443 443 uint16_t reserved1; 444 - uint16_t flags; 444 + __le16 flags; 445 445 uint32_t residual; 446 446 uint16_t ox_id; 447 447 uint16_t scsi_status; ··· 527 527 528 528 uint32_t handle; /* System handle. */ 529 529 uint16_t nport_handle; /* N_PORT handle. */ 530 - uint16_t timeout; /* Command timeout. */ 530 + __le16 timeout; /* Command timeout. */ 531 531 532 532 uint16_t dseg_count; /* Data segment count. */ 533 533 uint8_t vp_index; ··· 538 538 uint8_t reserved1; 539 539 uint32_t exchange_addr; /* rcv exchange address */ 540 540 uint16_t reserved2; 541 - uint16_t flags; /* refer to CTIO7 flags values */ 541 + __le16 flags; /* refer to CTIO7 flags values */ 542 542 uint32_t residual; 543 - uint16_t ox_id; 543 + __le16 ox_id; 544 544 uint16_t scsi_status; 545 - uint32_t relative_offset; 545 + __le32 relative_offset; 546 546 uint32_t reserved5; 547 - uint32_t transfer_length; /* total fc transfer length */ 547 + __le32 transfer_length; /* total fc transfer length */ 548 548 uint32_t reserved6; 549 - uint32_t crc_context_address[2];/* Data segment address. */ 549 + __le32 crc_context_address[2];/* Data segment address. */ 550 550 uint16_t crc_context_len; /* Data segment length. */ 551 551 uint16_t reserved_1; /* MUST be set to 0. */ 552 552 } __packed;