Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev:
[PCI] Add JMicron PCI ID constants
[PATCH] ahci: Ensure that we don't grab both functions
[PATCH] libata-core.c: restore configuration boot messages in ata_dev_configure(), v2
[PATCH] sata_sil24: add suspend/sleep support
[PATCH] sata_sil24: separate out sil24_init_controller()
[PATCH] sata_sil: add suspend/sleep support
[PATCH] sata_sil: separate out sil_init_controller()
[PATCH] libata: reimplement controller-wide PM
[PATCH] libata: reimplement per-dev PM
[PATCH] libata: implement PM EH actions
[PATCH] libata: separate out __ata_ehi_hotplugged()
[PATCH] libata: implement ATA_EHI_NO_AUTOPSY and QUIET
[PATCH] libata: clean up debounce parameters and improve parameter selection
[PATCH] libata: implement ATA_EHI_RESUME_LINK
[PATCH] libata: replace ap_lock w/ ap->lock in ata_scsi_error()
[PATCH] libata: fix ehc->i.action setting in ata_eh_autopsy()
[PATCH] libata: add ap->pflags and move core dynamic flags to it
[PATCH] libata: Conditionally set host->max_cmd_len
[PATCH] sata_vsc: data_xfer should use mmio

+901 -275
+12 -5
drivers/scsi/ahci.c
··· 1052 1052 1053 1053 static void ahci_error_handler(struct ata_port *ap) 1054 1054 { 1055 - if (!(ap->flags & ATA_FLAG_FROZEN)) { 1055 + if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1056 1056 /* restart engine */ 1057 1057 ahci_stop_engine(ap); 1058 1058 ahci_start_engine(ap); ··· 1323 1323 if (!printed_version++) 1324 1324 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1325 1325 1326 + /* JMicron-specific fixup: make sure we're in AHCI mode */ 1327 + /* This is protected from races with ata_jmicron by the pci probe 1328 + locking */ 1329 + if (pdev->vendor == PCI_VENDOR_ID_JMICRON) { 1330 + /* AHCI enable, AHCI on function 0 */ 1331 + pci_write_config_byte(pdev, 0x41, 0xa1); 1332 + /* Function 1 is the PATA controller */ 1333 + if (PCI_FUNC(pdev->devfn)) 1334 + return -ENODEV; 1335 + } 1336 + 1326 1337 rc = pci_enable_device(pdev); 1327 1338 if (rc) 1328 1339 return rc; ··· 1388 1377 1389 1378 if (have_msi) 1390 1379 hpriv->flags |= AHCI_FLAG_MSI; 1391 - 1392 - /* JMicron-specific fixup: make sure we're in AHCI mode */ 1393 - if (pdev->vendor == 0x197b) 1394 - pci_write_config_byte(pdev, 0x41, 0xa1); 1395 1380 1396 1381 /* initialize adapter */ 1397 1382 rc = ahci_host_init(probe_ent);
+185 -104
drivers/scsi/libata-core.c
··· 61 61 #include "libata.h" 62 62 63 63 /* debounce timing parameters in msecs { interval, duration, timeout } */ 64 - const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 }; 65 - const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 }; 66 - const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 }; 64 + const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 65 + const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 66 + const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 67 67 68 68 static unsigned int ata_dev_init_params(struct ata_device *dev, 69 69 u16 heads, u16 sectors); ··· 907 907 { 908 908 int rc; 909 909 910 - if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK) 910 + if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) 911 911 return; 912 912 913 913 PREPARE_WORK(&ap->port_task, fn, data); ··· 938 938 DPRINTK("ENTER\n"); 939 939 940 940 spin_lock_irqsave(ap->lock, flags); 941 - ap->flags |= ATA_FLAG_FLUSH_PORT_TASK; 941 + ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK; 942 942 spin_unlock_irqrestore(ap->lock, flags); 943 943 944 944 DPRINTK("flush #1\n"); ··· 957 957 } 958 958 959 959 spin_lock_irqsave(ap->lock, flags); 960 - ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK; 960 + ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK; 961 961 spin_unlock_irqrestore(ap->lock, flags); 962 962 963 963 if (ata_msg_ctl(ap)) ··· 1009 1009 spin_lock_irqsave(ap->lock, flags); 1010 1010 1011 1011 /* no internal command while frozen */ 1012 - if (ap->flags & ATA_FLAG_FROZEN) { 1012 + if (ap->pflags & ATA_PFLAG_FROZEN) { 1013 1013 spin_unlock_irqrestore(ap->lock, flags); 1014 1014 return AC_ERR_SYSTEM; 1015 1015 } ··· 1325 1325 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 1326 1326 } 1327 1327 1328 + static void ata_set_port_max_cmd_len(struct ata_port *ap) 1329 + { 1330 + int i; 1331 + 1332 + if (ap->host) { 1333 + ap->host->max_cmd_len = 0; 1334 + for (i = 0; i < ATA_MAX_DEVICES; i++) 1335 + ap->host->max_cmd_len = max_t(unsigned 
int, 1336 + ap->host->max_cmd_len, 1337 + ap->device[i].cdb_len); 1338 + } 1339 + } 1340 + 1328 1341 /** 1329 1342 * ata_dev_configure - Configure the specified ATA/ATAPI device 1330 1343 * @dev: Target device to configure ··· 1357 1344 struct ata_port *ap = dev->ap; 1358 1345 const u16 *id = dev->id; 1359 1346 unsigned int xfer_mask; 1360 - int i, rc; 1347 + int rc; 1361 1348 1362 1349 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 1363 1350 ata_dev_printk(dev, KERN_INFO, ··· 1417 1404 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 1418 1405 1419 1406 /* print device info to dmesg */ 1420 - if (ata_msg_info(ap)) 1407 + if (ata_msg_drv(ap) && print_info) 1421 1408 ata_dev_printk(dev, KERN_INFO, "ATA-%d, " 1422 1409 "max %s, %Lu sectors: %s %s\n", 1423 1410 ata_id_major_version(id), ··· 1440 1427 } 1441 1428 1442 1429 /* print device info to dmesg */ 1443 - if (ata_msg_info(ap)) 1430 + if (ata_msg_drv(ap) && print_info) 1444 1431 ata_dev_printk(dev, KERN_INFO, "ATA-%d, " 1445 1432 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1446 1433 ata_id_major_version(id), ··· 1452 1439 1453 1440 if (dev->id[59] & 0x100) { 1454 1441 dev->multi_count = dev->id[59] & 0xff; 1455 - if (ata_msg_info(ap)) 1442 + if (ata_msg_drv(ap) && print_info) 1456 1443 ata_dev_printk(dev, KERN_INFO, 1457 1444 "ata%u: dev %u multi count %u\n", 1458 1445 ap->id, dev->devno, dev->multi_count); ··· 1481 1468 } 1482 1469 1483 1470 /* print device info to dmesg */ 1484 - if (ata_msg_info(ap)) 1471 + if (ata_msg_drv(ap) && print_info) 1485 1472 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", 1486 1473 ata_mode_string(xfer_mask), 1487 1474 cdb_intr_string); 1488 1475 } 1489 1476 1490 - ap->host->max_cmd_len = 0; 1491 - for (i = 0; i < ATA_MAX_DEVICES; i++) 1492 - ap->host->max_cmd_len = max_t(unsigned int, 1493 - ap->host->max_cmd_len, 1494 - ap->device[i].cdb_len); 1477 + ata_set_port_max_cmd_len(ap); 1495 1478 1496 1479 /* limit bridge transfers to udma5, 200 sectors */ 1497 1480 if 
(ata_dev_knobble(dev)) { 1498 - if (ata_msg_info(ap)) 1481 + if (ata_msg_drv(ap) && print_info) 1499 1482 ata_dev_printk(dev, KERN_INFO, 1500 1483 "applying bridge limits\n"); 1501 1484 dev->udma_mask &= ATA_UDMA5; ··· 2146 2137 * return error code and failing device on failure. 2147 2138 */ 2148 2139 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2149 - if (ata_dev_enabled(&ap->device[i])) { 2140 + if (ata_dev_ready(&ap->device[i])) { 2150 2141 ap->ops->set_mode(ap); 2151 2142 break; 2152 2143 } ··· 2212 2203 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2213 2204 dev = &ap->device[i]; 2214 2205 2215 - if (!ata_dev_enabled(dev)) 2206 + /* don't udpate suspended devices' xfer mode */ 2207 + if (!ata_dev_ready(dev)) 2216 2208 continue; 2217 2209 2218 2210 rc = ata_dev_set_mode(dev); ··· 2589 2579 2590 2580 /* first, debounce phy if SATA */ 2591 2581 if (ap->cbl == ATA_CBL_SATA) { 2592 - rc = sata_phy_debounce(ap, sata_deb_timing_eh); 2582 + rc = sata_phy_debounce(ap, sata_deb_timing_hotplug); 2593 2583 2594 2584 /* if debounced successfully and offline, no need to wait */ 2595 2585 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap)) ··· 2625 2615 int ata_std_prereset(struct ata_port *ap) 2626 2616 { 2627 2617 struct ata_eh_context *ehc = &ap->eh_context; 2628 - const unsigned long *timing; 2618 + const unsigned long *timing = sata_ehc_deb_timing(ehc); 2629 2619 int rc; 2630 2620 2631 - /* hotplug? 
*/ 2632 - if (ehc->i.flags & ATA_EHI_HOTPLUGGED) { 2633 - if (ap->flags & ATA_FLAG_HRST_TO_RESUME) 2634 - ehc->i.action |= ATA_EH_HARDRESET; 2635 - if (ap->flags & ATA_FLAG_SKIP_D2H_BSY) 2636 - ata_wait_spinup(ap); 2637 - } 2621 + /* handle link resume & hotplug spinup */ 2622 + if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && 2623 + (ap->flags & ATA_FLAG_HRST_TO_RESUME)) 2624 + ehc->i.action |= ATA_EH_HARDRESET; 2625 + 2626 + if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) && 2627 + (ap->flags & ATA_FLAG_SKIP_D2H_BSY)) 2628 + ata_wait_spinup(ap); 2638 2629 2639 2630 /* if we're about to do hardreset, nothing more to do */ 2640 2631 if (ehc->i.action & ATA_EH_HARDRESET) ··· 2643 2632 2644 2633 /* if SATA, resume phy */ 2645 2634 if (ap->cbl == ATA_CBL_SATA) { 2646 - if (ap->flags & ATA_FLAG_LOADING) 2647 - timing = sata_deb_timing_boot; 2648 - else 2649 - timing = sata_deb_timing_eh; 2650 - 2651 2635 rc = sata_phy_resume(ap, timing); 2652 2636 if (rc && rc != -EOPNOTSUPP) { 2653 2637 /* phy resume failed */ ··· 2730 2724 */ 2731 2725 int sata_std_hardreset(struct ata_port *ap, unsigned int *class) 2732 2726 { 2727 + struct ata_eh_context *ehc = &ap->eh_context; 2728 + const unsigned long *timing = sata_ehc_deb_timing(ehc); 2733 2729 u32 scontrol; 2734 2730 int rc; 2735 2731 ··· 2769 2761 msleep(1); 2770 2762 2771 2763 /* bring phy back */ 2772 - sata_phy_resume(ap, sata_deb_timing_eh); 2764 + sata_phy_resume(ap, timing); 2773 2765 2774 2766 /* TODO: phy layer with polling, timeouts, etc. */ 2775 2767 if (ata_port_offline(ap)) { ··· 4293 4285 unsigned int i; 4294 4286 4295 4287 /* no command while frozen */ 4296 - if (unlikely(ap->flags & ATA_FLAG_FROZEN)) 4288 + if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4297 4289 return NULL; 4298 4290 4299 4291 /* the last tag is reserved for internal command. */ ··· 4415 4407 * taken care of. 
4416 4408 */ 4417 4409 if (ap->ops->error_handler) { 4418 - WARN_ON(ap->flags & ATA_FLAG_FROZEN); 4410 + WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); 4419 4411 4420 4412 if (unlikely(qc->err_mask)) 4421 4413 qc->flags |= ATA_QCFLAG_FAILED; ··· 5009 5001 return 0; 5010 5002 } 5011 5003 5012 - static int ata_standby_drive(struct ata_device *dev) 5004 + static int ata_host_set_request_pm(struct ata_host_set *host_set, 5005 + pm_message_t mesg, unsigned int action, 5006 + unsigned int ehi_flags, int wait) 5013 5007 { 5014 - unsigned int err_mask; 5008 + unsigned long flags; 5009 + int i, rc; 5015 5010 5016 - err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); 5017 - if (err_mask) { 5018 - ata_dev_printk(dev, KERN_ERR, "failed to standby drive " 5019 - "(err_mask=0x%x)\n", err_mask); 5020 - return -EIO; 5021 - } 5011 + for (i = 0; i < host_set->n_ports; i++) { 5012 + struct ata_port *ap = host_set->ports[i]; 5022 5013 5023 - return 0; 5024 - } 5014 + /* Previous resume operation might still be in 5015 + * progress. Wait for PM_PENDING to clear. 
5016 + */ 5017 + if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5018 + ata_port_wait_eh(ap); 5019 + WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5020 + } 5025 5021 5026 - static int ata_start_drive(struct ata_device *dev) 5027 - { 5028 - unsigned int err_mask; 5022 + /* request PM ops to EH */ 5023 + spin_lock_irqsave(ap->lock, flags); 5029 5024 5030 - err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE); 5031 - if (err_mask) { 5032 - ata_dev_printk(dev, KERN_ERR, "failed to start drive " 5033 - "(err_mask=0x%x)\n", err_mask); 5034 - return -EIO; 5025 + ap->pm_mesg = mesg; 5026 + if (wait) { 5027 + rc = 0; 5028 + ap->pm_result = &rc; 5029 + } 5030 + 5031 + ap->pflags |= ATA_PFLAG_PM_PENDING; 5032 + ap->eh_info.action |= action; 5033 + ap->eh_info.flags |= ehi_flags; 5034 + 5035 + ata_port_schedule_eh(ap); 5036 + 5037 + spin_unlock_irqrestore(ap->lock, flags); 5038 + 5039 + /* wait and check result */ 5040 + if (wait) { 5041 + ata_port_wait_eh(ap); 5042 + WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5043 + if (rc) 5044 + return rc; 5045 + } 5035 5046 } 5036 5047 5037 5048 return 0; 5038 5049 } 5039 5050 5040 5051 /** 5041 - * ata_device_resume - wakeup a previously suspended devices 5042 - * @dev: the device to resume 5052 + * ata_host_set_suspend - suspend host_set 5053 + * @host_set: host_set to suspend 5054 + * @mesg: PM message 5043 5055 * 5044 - * Kick the drive back into action, by sending it an idle immediate 5045 - * command and making sure its transfer mode matches between drive 5046 - * and host. 5056 + * Suspend @host_set. Actual operation is performed by EH. This 5057 + * function requests EH to perform PM operations and waits for EH 5058 + * to finish. 5047 5059 * 5060 + * LOCKING: 5061 + * Kernel thread context (may sleep). 5062 + * 5063 + * RETURNS: 5064 + * 0 on success, -errno on failure. 
5048 5065 */ 5049 - int ata_device_resume(struct ata_device *dev) 5066 + int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg) 5050 5067 { 5051 - struct ata_port *ap = dev->ap; 5068 + int i, j, rc; 5052 5069 5053 - if (ap->flags & ATA_FLAG_SUSPENDED) { 5054 - struct ata_device *failed_dev; 5070 + rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1); 5071 + if (rc) 5072 + goto fail; 5055 5073 5056 - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 5057 - ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 5074 + /* EH is quiescent now. Fail if we have any ready device. 5075 + * This happens if hotplug occurs between completion of device 5076 + * suspension and here. 5077 + */ 5078 + for (i = 0; i < host_set->n_ports; i++) { 5079 + struct ata_port *ap = host_set->ports[i]; 5058 5080 5059 - ap->flags &= ~ATA_FLAG_SUSPENDED; 5060 - while (ata_set_mode(ap, &failed_dev)) 5061 - ata_dev_disable(failed_dev); 5081 + for (j = 0; j < ATA_MAX_DEVICES; j++) { 5082 + struct ata_device *dev = &ap->device[j]; 5083 + 5084 + if (ata_dev_ready(dev)) { 5085 + ata_port_printk(ap, KERN_WARNING, 5086 + "suspend failed, device %d " 5087 + "still active\n", dev->devno); 5088 + rc = -EBUSY; 5089 + goto fail; 5090 + } 5091 + } 5062 5092 } 5063 - if (!ata_dev_enabled(dev)) 5064 - return 0; 5065 - if (dev->class == ATA_DEV_ATA) 5066 - ata_start_drive(dev); 5067 5093 5094 + host_set->dev->power.power_state = mesg; 5068 5095 return 0; 5096 + 5097 + fail: 5098 + ata_host_set_resume(host_set); 5099 + return rc; 5069 5100 } 5070 5101 5071 5102 /** 5072 - * ata_device_suspend - prepare a device for suspend 5073 - * @dev: the device to suspend 5074 - * @state: target power management state 5103 + * ata_host_set_resume - resume host_set 5104 + * @host_set: host_set to resume 5075 5105 * 5076 - * Flush the cache on the drive, if appropriate, then issue a 5077 - * standbynow command. 5106 + * Resume @host_set. Actual operation is performed by EH. 
This 5107 + * function requests EH to perform PM operations and returns. 5108 + * Note that all resume operations are performed parallely. 5109 + * 5110 + * LOCKING: 5111 + * Kernel thread context (may sleep). 5078 5112 */ 5079 - int ata_device_suspend(struct ata_device *dev, pm_message_t state) 5113 + void ata_host_set_resume(struct ata_host_set *host_set) 5080 5114 { 5081 - struct ata_port *ap = dev->ap; 5082 - 5083 - if (!ata_dev_enabled(dev)) 5084 - return 0; 5085 - if (dev->class == ATA_DEV_ATA) 5086 - ata_flush_cache(dev); 5087 - 5088 - if (state.event != PM_EVENT_FREEZE) 5089 - ata_standby_drive(dev); 5090 - ap->flags |= ATA_FLAG_SUSPENDED; 5091 - return 0; 5115 + ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET, 5116 + ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5117 + host_set->dev->power.power_state = PMSG_ON; 5092 5118 } 5093 5119 5094 5120 /** ··· 5482 5440 } 5483 5441 5484 5442 if (ap->ops->error_handler) { 5443 + struct ata_eh_info *ehi = &ap->eh_info; 5485 5444 unsigned long flags; 5486 5445 5487 5446 ata_port_probe(ap); ··· 5490 5447 /* kick EH for boot probing */ 5491 5448 spin_lock_irqsave(ap->lock, flags); 5492 5449 5493 - ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1; 5494 - ap->eh_info.action |= ATA_EH_SOFTRESET; 5450 + ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1; 5451 + ehi->action |= ATA_EH_SOFTRESET; 5452 + ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 5495 5453 5496 - ap->flags |= ATA_FLAG_LOADING; 5454 + ap->pflags |= ATA_PFLAG_LOADING; 5497 5455 ata_port_schedule_eh(ap); 5498 5456 5499 5457 spin_unlock_irqrestore(ap->lock, flags); ··· 5562 5518 5563 5519 /* tell EH we're leaving & flush EH */ 5564 5520 spin_lock_irqsave(ap->lock, flags); 5565 - ap->flags |= ATA_FLAG_UNLOADING; 5521 + ap->pflags |= ATA_PFLAG_UNLOADING; 5566 5522 spin_unlock_irqrestore(ap->lock, flags); 5567 5523 5568 5524 ata_port_wait_eh(ap); ··· 5767 5723 return (tmp == bits->val) ? 
1 : 0; 5768 5724 } 5769 5725 5770 - int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) 5726 + void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state) 5771 5727 { 5772 5728 pci_save_state(pdev); 5773 - pci_disable_device(pdev); 5774 - pci_set_power_state(pdev, PCI_D3hot); 5775 - return 0; 5729 + 5730 + if (state.event == PM_EVENT_SUSPEND) { 5731 + pci_disable_device(pdev); 5732 + pci_set_power_state(pdev, PCI_D3hot); 5733 + } 5776 5734 } 5777 5735 5778 - int ata_pci_device_resume(struct pci_dev *pdev) 5736 + void ata_pci_device_do_resume(struct pci_dev *pdev) 5779 5737 { 5780 5738 pci_set_power_state(pdev, PCI_D0); 5781 5739 pci_restore_state(pdev); 5782 5740 pci_enable_device(pdev); 5783 5741 pci_set_master(pdev); 5742 + } 5743 + 5744 + int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) 5745 + { 5746 + struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 5747 + int rc = 0; 5748 + 5749 + rc = ata_host_set_suspend(host_set, state); 5750 + if (rc) 5751 + return rc; 5752 + 5753 + if (host_set->next) { 5754 + rc = ata_host_set_suspend(host_set->next, state); 5755 + if (rc) { 5756 + ata_host_set_resume(host_set); 5757 + return rc; 5758 + } 5759 + } 5760 + 5761 + ata_pci_device_do_suspend(pdev, state); 5762 + 5763 + return 0; 5764 + } 5765 + 5766 + int ata_pci_device_resume(struct pci_dev *pdev) 5767 + { 5768 + struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 5769 + 5770 + ata_pci_device_do_resume(pdev); 5771 + ata_host_set_resume(host_set); 5772 + if (host_set->next) 5773 + ata_host_set_resume(host_set->next); 5774 + 5784 5775 return 0; 5785 5776 } 5786 5777 #endif /* CONFIG_PCI */ ··· 5921 5842 * Do not depend on ABI/API stability. 
5922 5843 */ 5923 5844 5924 - EXPORT_SYMBOL_GPL(sata_deb_timing_boot); 5925 - EXPORT_SYMBOL_GPL(sata_deb_timing_eh); 5926 - EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst); 5845 + EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 5846 + EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 5847 + EXPORT_SYMBOL_GPL(sata_deb_timing_long); 5927 5848 EXPORT_SYMBOL_GPL(ata_std_bios_param); 5928 5849 EXPORT_SYMBOL_GPL(ata_std_ports); 5929 5850 EXPORT_SYMBOL_GPL(ata_device_add); ··· 5995 5916 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 5996 5917 EXPORT_SYMBOL_GPL(ata_port_online); 5997 5918 EXPORT_SYMBOL_GPL(ata_port_offline); 5919 + EXPORT_SYMBOL_GPL(ata_host_set_suspend); 5920 + EXPORT_SYMBOL_GPL(ata_host_set_resume); 5998 5921 EXPORT_SYMBOL_GPL(ata_id_string); 5999 5922 EXPORT_SYMBOL_GPL(ata_id_c_string); 6000 5923 EXPORT_SYMBOL_GPL(ata_scsi_simulate); ··· 6011 5930 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 6012 5931 EXPORT_SYMBOL_GPL(ata_pci_init_one); 6013 5932 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 5933 + EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 5934 + EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6014 5935 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6015 5936 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6016 5937 EXPORT_SYMBOL_GPL(ata_pci_default_filter); 6017 5938 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 6018 5939 #endif /* CONFIG_PCI */ 6019 5940 6020 - EXPORT_SYMBOL_GPL(ata_device_suspend); 6021 - EXPORT_SYMBOL_GPL(ata_device_resume); 6022 5941 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 6023 5942 EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 6024 5943
+357 -48
drivers/scsi/libata-eh.c
··· 47 47 48 48 static void __ata_port_freeze(struct ata_port *ap); 49 49 static void ata_eh_finish(struct ata_port *ap); 50 + static void ata_eh_handle_port_suspend(struct ata_port *ap); 51 + static void ata_eh_handle_port_resume(struct ata_port *ap); 50 52 51 53 static void ata_ering_record(struct ata_ering *ering, int is_io, 52 54 unsigned int err_mask) ··· 192 190 void ata_scsi_error(struct Scsi_Host *host) 193 191 { 194 192 struct ata_port *ap = ata_shost_to_port(host); 195 - spinlock_t *ap_lock = ap->lock; 196 193 int i, repeat_cnt = ATA_EH_MAX_REPEAT; 197 194 unsigned long flags; 198 195 ··· 218 217 struct scsi_cmnd *scmd, *tmp; 219 218 int nr_timedout = 0; 220 219 221 - spin_lock_irqsave(ap_lock, flags); 220 + spin_lock_irqsave(ap->lock, flags); 222 221 223 222 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 224 223 struct ata_queued_cmd *qc; ··· 257 256 if (nr_timedout) 258 257 __ata_port_freeze(ap); 259 258 260 - spin_unlock_irqrestore(ap_lock, flags); 259 + spin_unlock_irqrestore(ap->lock, flags); 261 260 } else 262 - spin_unlock_wait(ap_lock); 261 + spin_unlock_wait(ap->lock); 263 262 264 263 repeat: 265 264 /* invoke error handler */ 266 265 if (ap->ops->error_handler) { 266 + /* process port resume request */ 267 + ata_eh_handle_port_resume(ap); 268 + 267 269 /* fetch & clear EH info */ 268 - spin_lock_irqsave(ap_lock, flags); 270 + spin_lock_irqsave(ap->lock, flags); 269 271 270 272 memset(&ap->eh_context, 0, sizeof(ap->eh_context)); 271 273 ap->eh_context.i = ap->eh_info; 272 274 memset(&ap->eh_info, 0, sizeof(ap->eh_info)); 273 275 274 - ap->flags |= ATA_FLAG_EH_IN_PROGRESS; 275 - ap->flags &= ~ATA_FLAG_EH_PENDING; 276 + ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 277 + ap->pflags &= ~ATA_PFLAG_EH_PENDING; 276 278 277 - spin_unlock_irqrestore(ap_lock, flags); 279 + spin_unlock_irqrestore(ap->lock, flags); 278 280 279 - /* invoke EH. 
if unloading, just finish failed qcs */ 280 - if (!(ap->flags & ATA_FLAG_UNLOADING)) 281 + /* invoke EH, skip if unloading or suspended */ 282 + if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 281 283 ap->ops->error_handler(ap); 282 284 else 283 285 ata_eh_finish(ap); 286 + 287 + /* process port suspend request */ 288 + ata_eh_handle_port_suspend(ap); 284 289 285 290 /* Exception might have happend after ->error_handler 286 291 * recovered the port but before this point. Repeat 287 292 * EH in such case. 288 293 */ 289 - spin_lock_irqsave(ap_lock, flags); 294 + spin_lock_irqsave(ap->lock, flags); 290 295 291 - if (ap->flags & ATA_FLAG_EH_PENDING) { 296 + if (ap->pflags & ATA_PFLAG_EH_PENDING) { 292 297 if (--repeat_cnt) { 293 298 ata_port_printk(ap, KERN_INFO, 294 299 "EH pending after completion, " 295 300 "repeating EH (cnt=%d)\n", repeat_cnt); 296 - spin_unlock_irqrestore(ap_lock, flags); 301 + spin_unlock_irqrestore(ap->lock, flags); 297 302 goto repeat; 298 303 } 299 304 ata_port_printk(ap, KERN_ERR, "EH pending after %d " ··· 309 302 /* this run is complete, make sure EH info is clear */ 310 303 memset(&ap->eh_info, 0, sizeof(ap->eh_info)); 311 304 312 - /* Clear host_eh_scheduled while holding ap_lock such 305 + /* Clear host_eh_scheduled while holding ap->lock such 313 306 * that if exception occurs after this point but 314 307 * before EH completion, SCSI midlayer will 315 308 * re-initiate EH. 
316 309 */ 317 310 host->host_eh_scheduled = 0; 318 311 319 - spin_unlock_irqrestore(ap_lock, flags); 312 + spin_unlock_irqrestore(ap->lock, flags); 320 313 } else { 321 314 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); 322 315 ap->ops->eng_timeout(ap); ··· 328 321 scsi_eh_flush_done_q(&ap->eh_done_q); 329 322 330 323 /* clean up */ 331 - spin_lock_irqsave(ap_lock, flags); 324 + spin_lock_irqsave(ap->lock, flags); 332 325 333 - if (ap->flags & ATA_FLAG_LOADING) { 334 - ap->flags &= ~ATA_FLAG_LOADING; 335 - } else { 336 - if (ap->flags & ATA_FLAG_SCSI_HOTPLUG) 337 - queue_work(ata_aux_wq, &ap->hotplug_task); 338 - if (ap->flags & ATA_FLAG_RECOVERED) 339 - ata_port_printk(ap, KERN_INFO, "EH complete\n"); 340 - } 326 + if (ap->pflags & ATA_PFLAG_LOADING) 327 + ap->pflags &= ~ATA_PFLAG_LOADING; 328 + else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 329 + queue_work(ata_aux_wq, &ap->hotplug_task); 341 330 342 - ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED); 331 + if (ap->pflags & ATA_PFLAG_RECOVERED) 332 + ata_port_printk(ap, KERN_INFO, "EH complete\n"); 333 + 334 + ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 343 335 344 336 /* tell wait_eh that we're done */ 345 - ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS; 337 + ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 346 338 wake_up_all(&ap->eh_wait_q); 347 339 348 - spin_unlock_irqrestore(ap_lock, flags); 340 + spin_unlock_irqrestore(ap->lock, flags); 349 341 350 342 DPRINTK("EXIT\n"); 351 343 } ··· 366 360 retry: 367 361 spin_lock_irqsave(ap->lock, flags); 368 362 369 - while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) { 363 + while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 370 364 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 371 365 spin_unlock_irqrestore(ap->lock, flags); 372 366 schedule(); ··· 495 489 WARN_ON(!ap->ops->error_handler); 496 490 497 491 qc->flags |= ATA_QCFLAG_FAILED; 498 - qc->ap->flags |= ATA_FLAG_EH_PENDING; 492 + 
qc->ap->pflags |= ATA_PFLAG_EH_PENDING; 499 493 500 494 /* The following will fail if timeout has already expired. 501 495 * ata_scsi_error() takes care of such scmds on EH entry. ··· 519 513 { 520 514 WARN_ON(!ap->ops->error_handler); 521 515 522 - ap->flags |= ATA_FLAG_EH_PENDING; 516 + ap->pflags |= ATA_PFLAG_EH_PENDING; 523 517 scsi_schedule_eh(ap->host); 524 518 525 519 DPRINTK("port EH scheduled\n"); ··· 584 578 if (ap->ops->freeze) 585 579 ap->ops->freeze(ap); 586 580 587 - ap->flags |= ATA_FLAG_FROZEN; 581 + ap->pflags |= ATA_PFLAG_FROZEN; 588 582 589 583 DPRINTK("ata%u port frozen\n", ap->id); 590 584 } ··· 652 646 653 647 spin_lock_irqsave(ap->lock, flags); 654 648 655 - ap->flags &= ~ATA_FLAG_FROZEN; 649 + ap->pflags &= ~ATA_PFLAG_FROZEN; 656 650 657 651 if (ap->ops->thaw) 658 652 ap->ops->thaw(ap); ··· 737 731 738 732 if (ata_scsi_offline_dev(dev)) { 739 733 dev->flags |= ATA_DFLAG_DETACHED; 740 - ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 734 + ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 741 735 } 742 736 743 737 /* clear per-dev EH actions */ ··· 766 760 unsigned long flags; 767 761 768 762 spin_lock_irqsave(ap->lock, flags); 763 + 769 764 ata_eh_clear_action(dev, &ap->eh_info, action); 770 - ap->flags |= ATA_FLAG_RECOVERED; 765 + 766 + if (!(ap->eh_context.i.flags & ATA_EHI_QUIET)) 767 + ap->pflags |= ATA_PFLAG_RECOVERED; 768 + 771 769 spin_unlock_irqrestore(ap->lock, flags); 772 770 } 773 771 ··· 1037 1027 int tag, rc; 1038 1028 1039 1029 /* if frozen, we can't do much */ 1040 - if (ap->flags & ATA_FLAG_FROZEN) 1030 + if (ap->pflags & ATA_PFLAG_FROZEN) 1041 1031 return; 1042 1032 1043 1033 /* is it NCQ device error? 
*/ ··· 1285 1275 1286 1276 DPRINTK("ENTER\n"); 1287 1277 1278 + if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1279 + return; 1280 + 1288 1281 /* obtain and analyze SError */ 1289 1282 rc = sata_scr_read(ap, SCR_ERROR, &serror); 1290 1283 if (rc == 0) { ··· 1340 1327 } 1341 1328 1342 1329 /* enforce default EH actions */ 1343 - if (ap->flags & ATA_FLAG_FROZEN || 1330 + if (ap->pflags & ATA_PFLAG_FROZEN || 1344 1331 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1345 1332 action |= ATA_EH_SOFTRESET; 1346 1333 else if (all_err_mask) ··· 1359 1346 1360 1347 /* record autopsy result */ 1361 1348 ehc->i.dev = failed_dev; 1362 - ehc->i.action = action; 1349 + ehc->i.action |= action; 1363 1350 1364 1351 DPRINTK("EXIT\n"); 1365 1352 } ··· 1398 1385 return; 1399 1386 1400 1387 frozen = ""; 1401 - if (ap->flags & ATA_FLAG_FROZEN) 1388 + if (ap->pflags & ATA_PFLAG_FROZEN) 1402 1389 frozen = " frozen"; 1403 1390 1404 1391 if (ehc->i.dev) { ··· 1478 1465 struct ata_eh_context *ehc = &ap->eh_context; 1479 1466 unsigned int *classes = ehc->classes; 1480 1467 int tries = ATA_EH_RESET_TRIES; 1481 - int verbose = !(ap->flags & ATA_FLAG_LOADING); 1468 + int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 1482 1469 unsigned int action; 1483 1470 ata_reset_fn_t reset; 1484 1471 int i, did_followup_srst, rc; ··· 1618 1605 dev = &ap->device[i]; 1619 1606 action = ata_eh_dev_action(dev); 1620 1607 1621 - if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { 1608 + if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) { 1622 1609 if (ata_port_offline(ap)) { 1623 1610 rc = -EIO; 1624 1611 break; ··· 1649 1636 } 1650 1637 1651 1638 spin_lock_irqsave(ap->lock, flags); 1652 - ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 1639 + ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1653 1640 spin_unlock_irqrestore(ap->lock, flags); 1654 1641 } 1655 1642 } ··· 1659 1646 1660 1647 DPRINTK("EXIT\n"); 1661 1648 return rc; 1649 + } 1650 + 1651 + /** 1652 + * ata_eh_suspend - handle suspend EH action 1653 + * @ap: target host port 
1654 + * @r_failed_dev: result parameter to indicate failing device 1655 + * 1656 + * Handle suspend EH action. Disk devices are spinned down and 1657 + * other types of devices are just marked suspended. Once 1658 + * suspended, no EH action to the device is allowed until it is 1659 + * resumed. 1660 + * 1661 + * LOCKING: 1662 + * Kernel thread context (may sleep). 1663 + * 1664 + * RETURNS: 1665 + * 0 on success, -errno otherwise 1666 + */ 1667 + static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev) 1668 + { 1669 + struct ata_device *dev; 1670 + int i, rc = 0; 1671 + 1672 + DPRINTK("ENTER\n"); 1673 + 1674 + for (i = 0; i < ATA_MAX_DEVICES; i++) { 1675 + unsigned long flags; 1676 + unsigned int action, err_mask; 1677 + 1678 + dev = &ap->device[i]; 1679 + action = ata_eh_dev_action(dev); 1680 + 1681 + if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND)) 1682 + continue; 1683 + 1684 + WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED); 1685 + 1686 + ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND); 1687 + 1688 + if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { 1689 + /* flush cache */ 1690 + rc = ata_flush_cache(dev); 1691 + if (rc) 1692 + break; 1693 + 1694 + /* spin down */ 1695 + err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); 1696 + if (err_mask) { 1697 + ata_dev_printk(dev, KERN_ERR, "failed to " 1698 + "spin down (err_mask=0x%x)\n", 1699 + err_mask); 1700 + rc = -EIO; 1701 + break; 1702 + } 1703 + } 1704 + 1705 + spin_lock_irqsave(ap->lock, flags); 1706 + dev->flags |= ATA_DFLAG_SUSPENDED; 1707 + spin_unlock_irqrestore(ap->lock, flags); 1708 + 1709 + ata_eh_done(ap, dev, ATA_EH_SUSPEND); 1710 + } 1711 + 1712 + if (rc) 1713 + *r_failed_dev = dev; 1714 + 1715 + DPRINTK("EXIT\n"); 1716 + return 0; 1717 + } 1718 + 1719 + /** 1720 + * ata_eh_prep_resume - prep for resume EH action 1721 + * @ap: target host port 1722 + * 1723 + * Clear SUSPENDED in preparation for scheduled resume actions. 
1724 + * This allows other parts of EH to access the devices being 1725 + * resumed. 1726 + * 1727 + * LOCKING: 1728 + * Kernel thread context (may sleep). 1729 + */ 1730 + static void ata_eh_prep_resume(struct ata_port *ap) 1731 + { 1732 + struct ata_device *dev; 1733 + unsigned long flags; 1734 + int i; 1735 + 1736 + DPRINTK("ENTER\n"); 1737 + 1738 + for (i = 0; i < ATA_MAX_DEVICES; i++) { 1739 + unsigned int action; 1740 + 1741 + dev = &ap->device[i]; 1742 + action = ata_eh_dev_action(dev); 1743 + 1744 + if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) 1745 + continue; 1746 + 1747 + spin_lock_irqsave(ap->lock, flags); 1748 + dev->flags &= ~ATA_DFLAG_SUSPENDED; 1749 + spin_unlock_irqrestore(ap->lock, flags); 1750 + } 1751 + 1752 + DPRINTK("EXIT\n"); 1753 + } 1754 + 1755 + /** 1756 + * ata_eh_resume - handle resume EH action 1757 + * @ap: target host port 1758 + * @r_failed_dev: result parameter to indicate failing device 1759 + * 1760 + * Handle resume EH action. Target devices are already reset and 1761 + * revalidated. Spinning up is the only operation left. 1762 + * 1763 + * LOCKING: 1764 + * Kernel thread context (may sleep). 
1765 + * 1766 + * RETURNS: 1767 + * 0 on success, -errno otherwise 1768 + */ 1769 + static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev) 1770 + { 1771 + struct ata_device *dev; 1772 + int i, rc = 0; 1773 + 1774 + DPRINTK("ENTER\n"); 1775 + 1776 + for (i = 0; i < ATA_MAX_DEVICES; i++) { 1777 + unsigned int action, err_mask; 1778 + 1779 + dev = &ap->device[i]; 1780 + action = ata_eh_dev_action(dev); 1781 + 1782 + if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) 1783 + continue; 1784 + 1785 + ata_eh_about_to_do(ap, dev, ATA_EH_RESUME); 1786 + 1787 + if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { 1788 + err_mask = ata_do_simple_cmd(dev, 1789 + ATA_CMD_IDLEIMMEDIATE); 1790 + if (err_mask) { 1791 + ata_dev_printk(dev, KERN_ERR, "failed to " 1792 + "spin up (err_mask=0x%x)\n", 1793 + err_mask); 1794 + rc = -EIO; 1795 + break; 1796 + } 1797 + } 1798 + 1799 + ata_eh_done(ap, dev, ATA_EH_RESUME); 1800 + } 1801 + 1802 + if (rc) 1803 + *r_failed_dev = dev; 1804 + 1805 + DPRINTK("EXIT\n"); 1806 + return 0; 1662 1807 } 1663 1808 1664 1809 static int ata_port_nr_enabled(struct ata_port *ap) ··· 1844 1673 struct ata_eh_context *ehc = &ap->eh_context; 1845 1674 int i; 1846 1675 1847 - if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap)) 1676 + /* skip if all possible devices are suspended */ 1677 + for (i = 0; i < ata_port_max_devices(ap); i++) { 1678 + struct ata_device *dev = &ap->device[i]; 1679 + 1680 + if (ata_dev_absent(dev) || ata_dev_ready(dev)) 1681 + break; 1682 + } 1683 + 1684 + if (i == ata_port_max_devices(ap)) 1685 + return 1; 1686 + 1687 + /* always thaw frozen port and recover failed devices */ 1688 + if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap)) 1848 1689 return 0; 1849 1690 1850 1691 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ ··· 1927 1744 rc = 0; 1928 1745 1929 1746 /* if UNLOADING, finish immediately */ 1930 - if (ap->flags & ATA_FLAG_UNLOADING) 1747 + if (ap->pflags 
& ATA_PFLAG_UNLOADING) 1931 1748 goto out; 1749 + 1750 + /* prep for resume */ 1751 + ata_eh_prep_resume(ap); 1932 1752 1933 1753 /* skip EH if possible. */ 1934 1754 if (ata_eh_skip_recovery(ap)) ··· 1960 1774 if (rc) 1961 1775 goto dev_fail; 1962 1776 1777 + /* resume devices */ 1778 + rc = ata_eh_resume(ap, &dev); 1779 + if (rc) 1780 + goto dev_fail; 1781 + 1963 1782 /* configure transfer mode if the port has been reset */ 1964 1783 if (ehc->i.flags & ATA_EHI_DID_RESET) { 1965 1784 rc = ata_set_mode(ap, &dev); ··· 1973 1782 goto dev_fail; 1974 1783 } 1975 1784 } 1785 + 1786 + /* suspend devices */ 1787 + rc = ata_eh_suspend(ap, &dev); 1788 + if (rc) 1789 + goto dev_fail; 1976 1790 1977 1791 goto out; 1978 1792 ··· 2104 1908 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2105 1909 ata_postreset_fn_t postreset) 2106 1910 { 2107 - if (!(ap->flags & ATA_FLAG_LOADING)) { 2108 - ata_eh_autopsy(ap); 2109 - ata_eh_report(ap); 2110 - } 2111 - 1911 + ata_eh_autopsy(ap); 1912 + ata_eh_report(ap); 2112 1913 ata_eh_recover(ap, prereset, softreset, hardreset, postreset); 2113 1914 ata_eh_finish(ap); 1915 + } 1916 + 1917 + /** 1918 + * ata_eh_handle_port_suspend - perform port suspend operation 1919 + * @ap: port to suspend 1920 + * 1921 + * Suspend @ap. 1922 + * 1923 + * LOCKING: 1924 + * Kernel thread context (may sleep). 1925 + */ 1926 + static void ata_eh_handle_port_suspend(struct ata_port *ap) 1927 + { 1928 + unsigned long flags; 1929 + int rc = 0; 1930 + 1931 + /* are we suspending? 
*/ 1932 + spin_lock_irqsave(ap->lock, flags); 1933 + if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 1934 + ap->pm_mesg.event == PM_EVENT_ON) { 1935 + spin_unlock_irqrestore(ap->lock, flags); 1936 + return; 1937 + } 1938 + spin_unlock_irqrestore(ap->lock, flags); 1939 + 1940 + WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 1941 + 1942 + /* suspend */ 1943 + ata_eh_freeze_port(ap); 1944 + 1945 + if (ap->ops->port_suspend) 1946 + rc = ap->ops->port_suspend(ap, ap->pm_mesg); 1947 + 1948 + /* report result */ 1949 + spin_lock_irqsave(ap->lock, flags); 1950 + 1951 + ap->pflags &= ~ATA_PFLAG_PM_PENDING; 1952 + if (rc == 0) 1953 + ap->pflags |= ATA_PFLAG_SUSPENDED; 1954 + else 1955 + ata_port_schedule_eh(ap); 1956 + 1957 + if (ap->pm_result) { 1958 + *ap->pm_result = rc; 1959 + ap->pm_result = NULL; 1960 + } 1961 + 1962 + spin_unlock_irqrestore(ap->lock, flags); 1963 + 1964 + return; 1965 + } 1966 + 1967 + /** 1968 + * ata_eh_handle_port_resume - perform port resume operation 1969 + * @ap: port to resume 1970 + * 1971 + * Resume @ap. 1972 + * 1973 + * This function also waits up to one second until all devices 1974 + * hanging off this port request resume EH action. This is to 1975 + * prevent invoking EH and thus reset multiple times on resume. 1976 + * 1977 + * On DPM resume, where some of the devices might not be resumed 1978 + * together, this may delay port resume up to one second, but such 1979 + * DPM resumes are rare and 1 sec delay isn't too bad. 1980 + * 1981 + * LOCKING: 1982 + * Kernel thread context (may sleep). 1983 + */ 1984 + static void ata_eh_handle_port_resume(struct ata_port *ap) 1985 + { 1986 + unsigned long timeout; 1987 + unsigned long flags; 1988 + int i, rc = 0; 1989 + 1990 + /* are we resuming? 
*/ 1991 + spin_lock_irqsave(ap->lock, flags); 1992 + if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 1993 + ap->pm_mesg.event != PM_EVENT_ON) { 1994 + spin_unlock_irqrestore(ap->lock, flags); 1995 + return; 1996 + } 1997 + spin_unlock_irqrestore(ap->lock, flags); 1998 + 1999 + /* spurious? */ 2000 + if (!(ap->pflags & ATA_PFLAG_SUSPENDED)) 2001 + goto done; 2002 + 2003 + if (ap->ops->port_resume) 2004 + rc = ap->ops->port_resume(ap); 2005 + 2006 + /* give devices time to request EH */ 2007 + timeout = jiffies + HZ; /* 1s max */ 2008 + while (1) { 2009 + for (i = 0; i < ATA_MAX_DEVICES; i++) { 2010 + struct ata_device *dev = &ap->device[i]; 2011 + unsigned int action = ata_eh_dev_action(dev); 2012 + 2013 + if ((dev->flags & ATA_DFLAG_SUSPENDED) && 2014 + !(action & ATA_EH_RESUME)) 2015 + break; 2016 + } 2017 + 2018 + if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout)) 2019 + break; 2020 + msleep(10); 2021 + } 2022 + 2023 + done: 2024 + spin_lock_irqsave(ap->lock, flags); 2025 + ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 2026 + if (ap->pm_result) { 2027 + *ap->pm_result = rc; 2028 + ap->pm_result = NULL; 2029 + } 2030 + spin_unlock_irqrestore(ap->lock, flags); 2114 2031 }
+121 -11
drivers/scsi/libata-scsi.c
··· 397 397 } 398 398 } 399 399 400 - int ata_scsi_device_resume(struct scsi_device *sdev) 401 - { 402 - struct ata_port *ap = ata_shost_to_port(sdev->host); 403 - struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 404 - 405 - return ata_device_resume(dev); 406 - } 407 - 400 + /** 401 + * ata_scsi_device_suspend - suspend ATA device associated with sdev 402 + * @sdev: the SCSI device to suspend 403 + * @state: target power management state 404 + * 405 + * Request suspend EH action on the ATA device associated with 406 + * @sdev and wait for the operation to complete. 407 + * 408 + * LOCKING: 409 + * Kernel thread context (may sleep). 410 + * 411 + * RETURNS: 412 + * 0 on success, -errno otherwise. 413 + */ 408 414 int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 409 415 { 410 416 struct ata_port *ap = ata_shost_to_port(sdev->host); 411 - struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 417 + struct ata_device *dev = ata_scsi_find_dev(ap, sdev); 418 + unsigned long flags; 419 + unsigned int action; 420 + int rc = 0; 412 421 413 - return ata_device_suspend(dev, state); 422 + if (!dev) 423 + goto out; 424 + 425 + spin_lock_irqsave(ap->lock, flags); 426 + 427 + /* wait for the previous resume to complete */ 428 + while (dev->flags & ATA_DFLAG_SUSPENDED) { 429 + spin_unlock_irqrestore(ap->lock, flags); 430 + ata_port_wait_eh(ap); 431 + spin_lock_irqsave(ap->lock, flags); 432 + } 433 + 434 + /* if @sdev is already detached, nothing to do */ 435 + if (sdev->sdev_state == SDEV_OFFLINE || 436 + sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) 437 + goto out_unlock; 438 + 439 + /* request suspend */ 440 + action = ATA_EH_SUSPEND; 441 + if (state.event != PM_EVENT_SUSPEND) 442 + action |= ATA_EH_PM_FREEZE; 443 + ap->eh_info.dev_action[dev->devno] |= action; 444 + ap->eh_info.flags |= ATA_EHI_QUIET; 445 + ata_port_schedule_eh(ap); 446 + 447 + spin_unlock_irqrestore(ap->lock, flags); 448 + 449 + /* wait for EH to do the job 
*/ 450 + ata_port_wait_eh(ap); 451 + 452 + spin_lock_irqsave(ap->lock, flags); 453 + 454 + /* If @sdev is still attached but the associated ATA device 455 + * isn't suspended, the operation failed. 456 + */ 457 + if (sdev->sdev_state != SDEV_OFFLINE && 458 + sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL && 459 + !(dev->flags & ATA_DFLAG_SUSPENDED)) 460 + rc = -EIO; 461 + 462 + out_unlock: 463 + spin_unlock_irqrestore(ap->lock, flags); 464 + out: 465 + if (rc == 0) 466 + sdev->sdev_gendev.power.power_state = state; 467 + return rc; 468 + } 469 + 470 + /** 471 + * ata_scsi_device_resume - resume ATA device associated with sdev 472 + * @sdev: the SCSI device to resume 473 + * 474 + * Request resume EH action on the ATA device associated with 475 + * @sdev and return immediately. This enables parallel 476 + * wakeup/spinup of devices. 477 + * 478 + * LOCKING: 479 + * Kernel thread context (may sleep). 480 + * 481 + * RETURNS: 482 + * 0. 483 + */ 484 + int ata_scsi_device_resume(struct scsi_device *sdev) 485 + { 486 + struct ata_port *ap = ata_shost_to_port(sdev->host); 487 + struct ata_device *dev = ata_scsi_find_dev(ap, sdev); 488 + struct ata_eh_info *ehi = &ap->eh_info; 489 + unsigned long flags; 490 + unsigned int action; 491 + 492 + if (!dev) 493 + goto out; 494 + 495 + spin_lock_irqsave(ap->lock, flags); 496 + 497 + /* if @sdev is already detached, nothing to do */ 498 + if (sdev->sdev_state == SDEV_OFFLINE || 499 + sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) 500 + goto out_unlock; 501 + 502 + /* request resume */ 503 + action = ATA_EH_RESUME; 504 + if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND) 505 + __ata_ehi_hotplugged(ehi); 506 + else 507 + action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET; 508 + ehi->dev_action[dev->devno] |= action; 509 + 510 + /* We don't want autopsy and verbose EH messages. Disable 511 + * those if we're the only device on this link. 
512 + */ 513 + if (ata_port_max_devices(ap) == 1) 514 + ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 515 + 516 + ata_port_schedule_eh(ap); 517 + 518 + out_unlock: 519 + spin_unlock_irqrestore(ap->lock, flags); 520 + out: 521 + sdev->sdev_gendev.power.power_state = PMSG_ON; 522 + return 0; 414 523 } 415 524 416 525 /** ··· 3039 2930 struct ata_port *ap = data; 3040 2931 int i; 3041 2932 3042 - if (ap->flags & ATA_FLAG_UNLOADING) { 2933 + if (ap->pflags & ATA_PFLAG_UNLOADING) { 3043 2934 DPRINTK("ENTER/EXIT - unloading\n"); 3044 2935 return; 3045 2936 } ··· 3120 3011 if (dev) { 3121 3012 ap->eh_info.probe_mask |= 1 << dev->devno; 3122 3013 ap->eh_info.action |= ATA_EH_SOFTRESET; 3014 + ap->eh_info.flags |= ATA_EHI_RESUME_LINK; 3123 3015 } else 3124 3016 rc = -EINVAL; 3125 3017 }
+66 -39
drivers/scsi/sata_sil.c
··· 109 109 }; 110 110 111 111 static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 112 + static int sil_pci_device_resume(struct pci_dev *pdev); 112 113 static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); 113 114 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 114 115 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); ··· 161 160 .id_table = sil_pci_tbl, 162 161 .probe = sil_init_one, 163 162 .remove = ata_pci_remove_one, 163 + .suspend = ata_pci_device_suspend, 164 + .resume = sil_pci_device_resume, 164 165 }; 165 166 166 167 static struct scsi_host_template sil_sht = { ··· 181 178 .slave_configure = ata_scsi_slave_config, 182 179 .slave_destroy = ata_scsi_slave_destroy, 183 180 .bios_param = ata_std_bios_param, 181 + .suspend = ata_scsi_device_suspend, 182 + .resume = ata_scsi_device_resume, 184 183 }; 185 184 186 185 static const struct ata_port_operations sil_ops = { ··· 375 370 * during hardreset makes controllers with broken SIEN 376 371 * repeat probing needlessly. 377 372 */ 378 - if (!(ap->flags & ATA_FLAG_FROZEN)) { 373 + if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 379 374 ata_ehi_hotplugged(&ap->eh_info); 380 375 ap->eh_info.serror |= serror; 381 376 } ··· 566 561 } 567 562 } 568 563 564 + static void sil_init_controller(struct pci_dev *pdev, 565 + int n_ports, unsigned long host_flags, 566 + void __iomem *mmio_base) 567 + { 568 + u8 cls; 569 + u32 tmp; 570 + int i; 571 + 572 + /* Initialize FIFO PCI bus arbitration */ 573 + cls = sil_get_device_cache_line(pdev); 574 + if (cls) { 575 + cls >>= 3; 576 + cls++; /* cls = (line_size/8)+1 */ 577 + for (i = 0; i < n_ports; i++) 578 + writew(cls << 8 | cls, 579 + mmio_base + sil_port[i].fifo_cfg); 580 + } else 581 + dev_printk(KERN_WARNING, &pdev->dev, 582 + "cache line size not set. 
Driver may not function\n"); 583 + 584 + /* Apply R_ERR on DMA activate FIS errata workaround */ 585 + if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 586 + int cnt; 587 + 588 + for (i = 0, cnt = 0; i < n_ports; i++) { 589 + tmp = readl(mmio_base + sil_port[i].sfis_cfg); 590 + if ((tmp & 0x3) != 0x01) 591 + continue; 592 + if (!cnt) 593 + dev_printk(KERN_INFO, &pdev->dev, 594 + "Applying R_ERR on DMA activate " 595 + "FIS errata fix\n"); 596 + writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg); 597 + cnt++; 598 + } 599 + } 600 + 601 + if (n_ports == 4) { 602 + /* flip the magic "make 4 ports work" bit */ 603 + tmp = readl(mmio_base + sil_port[2].bmdma); 604 + if ((tmp & SIL_INTR_STEERING) == 0) 605 + writel(tmp | SIL_INTR_STEERING, 606 + mmio_base + sil_port[2].bmdma); 607 + } 608 + } 609 + 569 610 static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 570 611 { 571 612 static int printed_version; ··· 621 570 int rc; 622 571 unsigned int i; 623 572 int pci_dev_busy = 0; 624 - u32 tmp; 625 - u8 cls; 626 573 627 574 if (!printed_version++) 628 575 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); ··· 679 630 ata_std_ports(&probe_ent->port[i]); 680 631 } 681 632 682 - /* Initialize FIFO PCI bus arbitration */ 683 - cls = sil_get_device_cache_line(pdev); 684 - if (cls) { 685 - cls >>= 3; 686 - cls++; /* cls = (line_size/8)+1 */ 687 - for (i = 0; i < probe_ent->n_ports; i++) 688 - writew(cls << 8 | cls, 689 - mmio_base + sil_port[i].fifo_cfg); 690 - } else 691 - dev_printk(KERN_WARNING, &pdev->dev, 692 - "cache line size not set. 
Driver may not function\n"); 693 - 694 - /* Apply R_ERR on DMA activate FIS errata workaround */ 695 - if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 696 - int cnt; 697 - 698 - for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) { 699 - tmp = readl(mmio_base + sil_port[i].sfis_cfg); 700 - if ((tmp & 0x3) != 0x01) 701 - continue; 702 - if (!cnt) 703 - dev_printk(KERN_INFO, &pdev->dev, 704 - "Applying R_ERR on DMA activate " 705 - "FIS errata fix\n"); 706 - writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg); 707 - cnt++; 708 - } 709 - } 710 - 711 - if (ent->driver_data == sil_3114) { 712 - /* flip the magic "make 4 ports work" bit */ 713 - tmp = readl(mmio_base + sil_port[2].bmdma); 714 - if ((tmp & SIL_INTR_STEERING) == 0) 715 - writel(tmp | SIL_INTR_STEERING, 716 - mmio_base + sil_port[2].bmdma); 717 - } 633 + sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, 634 + mmio_base); 718 635 719 636 pci_set_master(pdev); 720 637 ··· 698 683 if (!pci_dev_busy) 699 684 pci_disable_device(pdev); 700 685 return rc; 686 + } 687 + 688 + static int sil_pci_device_resume(struct pci_dev *pdev) 689 + { 690 + struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 691 + 692 + ata_pci_device_do_resume(pdev); 693 + sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags, 694 + host_set->mmio_base); 695 + ata_host_set_resume(host_set); 696 + 697 + return 0; 701 698 } 702 699 703 700 static int __init sil_init(void)
+88 -46
drivers/scsi/sata_sil24.c
··· 92 92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */ 93 93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */ 94 94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */ 95 + HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */ 95 96 96 97 /* 97 98 * Port registers ··· 339 338 static void sil24_port_stop(struct ata_port *ap); 340 339 static void sil24_host_stop(struct ata_host_set *host_set); 341 340 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 341 + static int sil24_pci_device_resume(struct pci_dev *pdev); 342 342 343 343 static const struct pci_device_id sil24_pci_tbl[] = { 344 344 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, ··· 355 353 .id_table = sil24_pci_tbl, 356 354 .probe = sil24_init_one, 357 355 .remove = ata_pci_remove_one, /* safe? */ 356 + .suspend = ata_pci_device_suspend, 357 + .resume = sil24_pci_device_resume, 358 358 }; 359 359 360 360 static struct scsi_host_template sil24_sht = { ··· 376 372 .slave_configure = ata_scsi_slave_config, 377 373 .slave_destroy = ata_scsi_slave_destroy, 378 374 .bios_param = ata_std_bios_param, 375 + .suspend = ata_scsi_device_suspend, 376 + .resume = ata_scsi_device_resume, 379 377 }; 380 378 381 379 static const struct ata_port_operations sil24_ops = { ··· 613 607 /* SStatus oscillates between zero and valid status after 614 608 * DEV_RST, debounce it. 
615 609 */ 616 - rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst); 610 + rc = sata_phy_debounce(ap, sata_deb_timing_long); 617 611 if (rc) { 618 612 reason = "PHY debouncing failed"; 619 613 goto err; ··· 994 988 kfree(hpriv); 995 989 } 996 990 991 + static void sil24_init_controller(struct pci_dev *pdev, int n_ports, 992 + unsigned long host_flags, 993 + void __iomem *host_base, 994 + void __iomem *port_base) 995 + { 996 + u32 tmp; 997 + int i; 998 + 999 + /* GPIO off */ 1000 + writel(0, host_base + HOST_FLASH_CMD); 1001 + 1002 + /* clear global reset & mask interrupts during initialization */ 1003 + writel(0, host_base + HOST_CTRL); 1004 + 1005 + /* init ports */ 1006 + for (i = 0; i < n_ports; i++) { 1007 + void __iomem *port = port_base + i * PORT_REGS_SIZE; 1008 + 1009 + /* Initial PHY setting */ 1010 + writel(0x20c, port + PORT_PHY_CFG); 1011 + 1012 + /* Clear port RST */ 1013 + tmp = readl(port + PORT_CTRL_STAT); 1014 + if (tmp & PORT_CS_PORT_RST) { 1015 + writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1016 + tmp = ata_wait_register(port + PORT_CTRL_STAT, 1017 + PORT_CS_PORT_RST, 1018 + PORT_CS_PORT_RST, 10, 100); 1019 + if (tmp & PORT_CS_PORT_RST) 1020 + dev_printk(KERN_ERR, &pdev->dev, 1021 + "failed to clear port RST\n"); 1022 + } 1023 + 1024 + /* Configure IRQ WoC */ 1025 + if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC) 1026 + writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); 1027 + else 1028 + writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); 1029 + 1030 + /* Zero error counters. 
*/ 1031 + writel(0x8000, port + PORT_DECODE_ERR_THRESH); 1032 + writel(0x8000, port + PORT_CRC_ERR_THRESH); 1033 + writel(0x8000, port + PORT_HSHK_ERR_THRESH); 1034 + writel(0x0000, port + PORT_DECODE_ERR_CNT); 1035 + writel(0x0000, port + PORT_CRC_ERR_CNT); 1036 + writel(0x0000, port + PORT_HSHK_ERR_CNT); 1037 + 1038 + /* Always use 64bit activation */ 1039 + writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 1040 + 1041 + /* Clear port multiplier enable and resume bits */ 1042 + writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1043 + } 1044 + 1045 + /* Turn on interrupts */ 1046 + writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); 1047 + } 1048 + 997 1049 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 998 1050 { 999 1051 static int printed_version = 0; ··· 1140 1076 } 1141 1077 } 1142 1078 1143 - /* GPIO off */ 1144 - writel(0, host_base + HOST_FLASH_CMD); 1145 - 1146 1079 /* Apply workaround for completion IRQ loss on PCI-X errata */ 1147 1080 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) { 1148 1081 tmp = readl(host_base + HOST_CTRL); ··· 1151 1090 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; 1152 1091 } 1153 1092 1154 - /* clear global reset & mask interrupts during initialization */ 1155 - writel(0, host_base + HOST_CTRL); 1156 - 1157 1093 for (i = 0; i < probe_ent->n_ports; i++) { 1158 - void __iomem *port = port_base + i * PORT_REGS_SIZE; 1159 - unsigned long portu = (unsigned long)port; 1094 + unsigned long portu = 1095 + (unsigned long)port_base + i * PORT_REGS_SIZE; 1160 1096 1161 1097 probe_ent->port[i].cmd_addr = portu; 1162 1098 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; 1163 1099 1164 1100 ata_std_ports(&probe_ent->port[i]); 1165 - 1166 - /* Initial PHY setting */ 1167 - writel(0x20c, port + PORT_PHY_CFG); 1168 - 1169 - /* Clear port RST */ 1170 - tmp = readl(port + PORT_CTRL_STAT); 1171 - if (tmp & PORT_CS_PORT_RST) { 1172 - writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1173 - tmp = 
ata_wait_register(port + PORT_CTRL_STAT, 1174 - PORT_CS_PORT_RST, 1175 - PORT_CS_PORT_RST, 10, 100); 1176 - if (tmp & PORT_CS_PORT_RST) 1177 - dev_printk(KERN_ERR, &pdev->dev, 1178 - "failed to clear port RST\n"); 1179 - } 1180 - 1181 - /* Configure IRQ WoC */ 1182 - if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) 1183 - writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); 1184 - else 1185 - writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); 1186 - 1187 - /* Zero error counters. */ 1188 - writel(0x8000, port + PORT_DECODE_ERR_THRESH); 1189 - writel(0x8000, port + PORT_CRC_ERR_THRESH); 1190 - writel(0x8000, port + PORT_HSHK_ERR_THRESH); 1191 - writel(0x0000, port + PORT_DECODE_ERR_CNT); 1192 - writel(0x0000, port + PORT_CRC_ERR_CNT); 1193 - writel(0x0000, port + PORT_HSHK_ERR_CNT); 1194 - 1195 - /* Always use 64bit activation */ 1196 - writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 1197 - 1198 - /* Clear port multiplier enable and resume bits */ 1199 - writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1200 1101 } 1201 1102 1202 - /* Turn on interrupts */ 1203 - writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); 1103 + sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, 1104 + host_base, port_base); 1204 1105 1205 1106 pci_set_master(pdev); 1206 1107 ··· 1183 1160 out_disable: 1184 1161 pci_disable_device(pdev); 1185 1162 return rc; 1163 + } 1164 + 1165 + static int sil24_pci_device_resume(struct pci_dev *pdev) 1166 + { 1167 + struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 1168 + struct sil24_host_priv *hpriv = host_set->private_data; 1169 + 1170 + ata_pci_device_do_resume(pdev); 1171 + 1172 + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) 1173 + writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL); 1174 + 1175 + sil24_init_controller(pdev, host_set->n_ports, 1176 + host_set->ports[0]->flags, 1177 + hpriv->host_base, hpriv->port_base); 1178 + 1179 + ata_host_set_resume(host_set); 1180 + 1181 + return 0; 1186 
1182 } 1187 1183 1188 1184 static int __init sil24_init(void)
+1 -1
drivers/scsi/sata_vsc.c
··· 297 297 .bmdma_status = ata_bmdma_status, 298 298 .qc_prep = ata_qc_prep, 299 299 .qc_issue = ata_qc_issue_prot, 300 - .data_xfer = ata_pio_data_xfer, 300 + .data_xfer = ata_mmio_data_xfer, 301 301 .freeze = ata_bmdma_freeze, 302 302 .thaw = ata_bmdma_thaw, 303 303 .error_handler = ata_bmdma_error_handler,
+64 -21
include/linux/libata.h
··· 131 131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 132 132 133 133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ 134 + ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */ 134 135 ATA_DFLAG_INIT_MASK = (1 << 16) - 1, 135 136 136 137 ATA_DFLAG_DETACH = (1 << 16), ··· 161 160 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */ 162 161 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H 163 162 * Register FIS clearing BSY */ 164 - 165 163 ATA_FLAG_DEBUGMSG = (1 << 13), 166 - ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */ 167 164 168 - ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */ 169 - ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */ 170 - ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */ 171 - ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */ 172 - ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */ 173 - ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */ 174 - ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */ 165 + /* The following flag belongs to ap->pflags but is kept in 166 + * ap->flags because it's referenced in many LLDs and will be 167 + * removed in not-too-distant future. 
168 + */ 169 + ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */ 175 170 176 - ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */ 177 - ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */ 171 + /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 178 172 179 - /* bits 24:31 of ap->flags are reserved for LLDD specific flags */ 173 + /* struct ata_port pflags */ 174 + ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ 175 + ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ 176 + ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ 177 + ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ 178 + ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ 179 + ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */ 180 + ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ 181 + 182 + ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */ 183 + ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ 184 + ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ 180 185 181 186 /* struct ata_queued_cmd flags */ 182 187 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ ··· 255 248 ATA_EH_REVALIDATE = (1 << 0), 256 249 ATA_EH_SOFTRESET = (1 << 1), 257 250 ATA_EH_HARDRESET = (1 << 2), 251 + ATA_EH_SUSPEND = (1 << 3), 252 + ATA_EH_RESUME = (1 << 4), 253 + ATA_EH_PM_FREEZE = (1 << 5), 258 254 259 255 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 260 - ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, 256 + ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND | 257 + ATA_EH_RESUME | ATA_EH_PM_FREEZE, 261 258 262 259 /* ata_eh_info->flags */ 263 260 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 261 + ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */ 262 + ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 263 + ATA_EHI_QUIET = (1 << 3), /* be quiet */ 264 264 265 265 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 266 266 ··· 
500 486 const struct ata_port_operations *ops; 501 487 spinlock_t *lock; 502 488 unsigned long flags; /* ATA_FLAG_xxx */ 489 + unsigned int pflags; /* ATA_PFLAG_xxx */ 503 490 unsigned int id; /* unique id req'd by scsi midlyr */ 504 491 unsigned int port_no; /* unique port #; from zero */ 505 492 unsigned int hard_port_no; /* hardware port #; from zero */ ··· 549 534 u32 msg_enable; 550 535 struct list_head eh_done_q; 551 536 wait_queue_head_t eh_wait_q; 537 + 538 + pm_message_t pm_mesg; 539 + int *pm_result; 552 540 553 541 void *private_data; 554 542 ··· 607 589 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg, 608 590 u32 val); 609 591 592 + int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); 593 + int (*port_resume) (struct ata_port *ap); 594 + 610 595 int (*port_start) (struct ata_port *ap); 611 596 void (*port_stop) (struct ata_port *ap); 612 597 ··· 643 622 644 623 #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 645 624 646 - extern const unsigned long sata_deb_timing_boot[]; 647 - extern const unsigned long sata_deb_timing_eh[]; 648 - extern const unsigned long sata_deb_timing_before_fsrst[]; 625 + extern const unsigned long sata_deb_timing_normal[]; 626 + extern const unsigned long sata_deb_timing_hotplug[]; 627 + extern const unsigned long sata_deb_timing_long[]; 628 + 629 + static inline const unsigned long * 630 + sata_ehc_deb_timing(struct ata_eh_context *ehc) 631 + { 632 + if (ehc->i.flags & ATA_EHI_HOTPLUGGED) 633 + return sata_deb_timing_hotplug; 634 + else 635 + return sata_deb_timing_normal; 636 + } 649 637 650 638 extern void ata_port_probe(struct ata_port *); 651 639 extern void __sata_phy_reset(struct ata_port *ap); ··· 674 644 extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 675 645 unsigned int n_ports); 676 646 extern void ata_pci_remove_one (struct pci_dev *pdev); 647 + extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state); 648 + extern void 
ata_pci_device_do_resume(struct pci_dev *pdev); 677 649 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state); 678 650 extern int ata_pci_device_resume(struct pci_dev *pdev); 679 651 extern int ata_pci_clear_simplex(struct pci_dev *pdev); ··· 696 664 extern int ata_port_offline(struct ata_port *ap); 697 665 extern int ata_scsi_device_resume(struct scsi_device *); 698 666 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 699 - extern int ata_device_resume(struct ata_device *); 700 - extern int ata_device_suspend(struct ata_device *, pm_message_t state); 667 + extern int ata_host_set_suspend(struct ata_host_set *host_set, 668 + pm_message_t mesg); 669 + extern void ata_host_set_resume(struct ata_host_set *host_set); 701 670 extern int ata_ratelimit(void); 702 671 extern unsigned int ata_busy_sleep(struct ata_port *ap, 703 672 unsigned long timeout_pat, ··· 858 825 (ehi)->desc_len = 0; \ 859 826 } while (0) 860 827 861 - static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) 828 + static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi) 862 829 { 863 830 if (ehi->flags & ATA_EHI_HOTPLUGGED) 864 831 return; 865 832 866 - ehi->flags |= ATA_EHI_HOTPLUGGED; 833 + ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK; 867 834 ehi->hotplug_timestamp = jiffies; 868 835 869 - ehi->err_mask |= AC_ERR_ATA_BUS; 870 836 ehi->action |= ATA_EH_SOFTRESET; 871 837 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; 838 + } 839 + 840 + static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) 841 + { 842 + __ata_ehi_hotplugged(ehi); 843 + ehi->err_mask |= AC_ERR_ATA_BUS; 872 844 } 873 845 874 846 /* ··· 957 919 static inline unsigned int ata_dev_absent(const struct ata_device *dev) 958 920 { 959 921 return ata_class_absent(dev->class); 922 + } 923 + 924 + static inline unsigned int ata_dev_ready(const struct ata_device *dev) 925 + { 926 + return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED); 960 927 } 961 
928 962 929 /*
+7
include/linux/pci_ids.h
··· 2019 2019 #define PCI_VENDOR_ID_TDI 0x192E 2020 2020 #define PCI_DEVICE_ID_TDI_EHCI 0x0101 2021 2021 2022 + #define PCI_VENDOR_ID_JMICRON 0x197B 2023 + #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 2024 + #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 2025 + #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 2026 + #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 2027 + #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 2028 + #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 2022 2029 2023 2030 #define PCI_VENDOR_ID_TEKRAM 0x1de1 2024 2031 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29