| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
| commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
| tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/ata/libata-core.c | |
| parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
| parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r-- | drivers/ata/libata-core.c | 479 |
1 file changed, 220 insertions, 259 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 932eaee50245..000d03ae6653 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -68,7 +68,7 @@ | |||
68 | #include <linux/ratelimit.h> | 68 | #include <linux/ratelimit.h> |
69 | 69 | ||
70 | #include "libata.h" | 70 | #include "libata.h" |
71 | 71 | #include "libata-transport.h" | |
72 | 72 | ||
73 | /* debounce timing parameters in msecs { interval, duration, timeout } */ | 73 | /* debounce timing parameters in msecs { interval, duration, timeout } */ |
74 | const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; | 74 | const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; |
@@ -91,8 +91,6 @@ const struct ata_port_operations sata_port_ops = { | |||
91 | static unsigned int ata_dev_init_params(struct ata_device *dev, | 91 | static unsigned int ata_dev_init_params(struct ata_device *dev, |
92 | u16 heads, u16 sectors); | 92 | u16 heads, u16 sectors); |
93 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); | 93 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); |
94 | static unsigned int ata_dev_set_feature(struct ata_device *dev, | ||
95 | u8 enable, u8 feature); | ||
96 | static void ata_dev_xfermask(struct ata_device *dev); | 94 | static void ata_dev_xfermask(struct ata_device *dev); |
97 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev); | 95 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev); |
98 | 96 | ||
@@ -1017,7 +1015,7 @@ const char *ata_mode_string(unsigned long xfer_mask) | |||
1017 | return "<n/a>"; | 1015 | return "<n/a>"; |
1018 | } | 1016 | } |
1019 | 1017 | ||
1020 | static const char *sata_spd_string(unsigned int spd) | 1018 | const char *sata_spd_string(unsigned int spd) |
1021 | { | 1019 | { |
1022 | static const char * const spd_str[] = { | 1020 | static const char * const spd_str[] = { |
1023 | "1.5 Gbps", | 1021 | "1.5 Gbps", |
@@ -1030,182 +1028,6 @@ static const char *sata_spd_string(unsigned int spd) | |||
1030 | return spd_str[spd - 1]; | 1028 | return spd_str[spd - 1]; |
1031 | } | 1029 | } |
1032 | 1030 | ||
1033 | static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) | ||
1034 | { | ||
1035 | struct ata_link *link = dev->link; | ||
1036 | struct ata_port *ap = link->ap; | ||
1037 | u32 scontrol; | ||
1038 | unsigned int err_mask; | ||
1039 | int rc; | ||
1040 | |||
1041 | /* | ||
1042 | * disallow DIPM for drivers which haven't set | ||
1043 | * ATA_FLAG_IPM. This is because when DIPM is enabled, | ||
1044 | * phy ready will be set in the interrupt status on | ||
1045 | * state changes, which will cause some drivers to | ||
1046 | * think there are errors - additionally drivers will | ||
1047 | * need to disable hot plug. | ||
1048 | */ | ||
1049 | if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { | ||
1050 | ap->pm_policy = NOT_AVAILABLE; | ||
1051 | return -EINVAL; | ||
1052 | } | ||
1053 | |||
1054 | /* | ||
1055 | * For DIPM, we will only enable it for the | ||
1056 | * min_power setting. | ||
1057 | * | ||
1058 | * Why? Because Disks are too stupid to know that | ||
1059 | * If the host rejects a request to go to SLUMBER | ||
1060 | * they should retry at PARTIAL, and instead it | ||
1061 | * just would give up. So, for medium_power to | ||
1062 | * work at all, we need to only allow HIPM. | ||
1063 | */ | ||
1064 | rc = sata_scr_read(link, SCR_CONTROL, &scontrol); | ||
1065 | if (rc) | ||
1066 | return rc; | ||
1067 | |||
1068 | switch (policy) { | ||
1069 | case MIN_POWER: | ||
1070 | /* no restrictions on IPM transitions */ | ||
1071 | scontrol &= ~(0x3 << 8); | ||
1072 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
1073 | if (rc) | ||
1074 | return rc; | ||
1075 | |||
1076 | /* enable DIPM */ | ||
1077 | if (dev->flags & ATA_DFLAG_DIPM) | ||
1078 | err_mask = ata_dev_set_feature(dev, | ||
1079 | SETFEATURES_SATA_ENABLE, SATA_DIPM); | ||
1080 | break; | ||
1081 | case MEDIUM_POWER: | ||
1082 | /* allow IPM to PARTIAL */ | ||
1083 | scontrol &= ~(0x1 << 8); | ||
1084 | scontrol |= (0x2 << 8); | ||
1085 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
1086 | if (rc) | ||
1087 | return rc; | ||
1088 | |||
1089 | /* | ||
1090 | * we don't have to disable DIPM since IPM flags | ||
1091 | * disallow transitions to SLUMBER, which effectively | ||
1092 | * disable DIPM if it does not support PARTIAL | ||
1093 | */ | ||
1094 | break; | ||
1095 | case NOT_AVAILABLE: | ||
1096 | case MAX_PERFORMANCE: | ||
1097 | /* disable all IPM transitions */ | ||
1098 | scontrol |= (0x3 << 8); | ||
1099 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
1100 | if (rc) | ||
1101 | return rc; | ||
1102 | |||
1103 | /* | ||
1104 | * we don't have to disable DIPM since IPM flags | ||
1105 | * disallow all transitions which effectively | ||
1106 | * disable DIPM anyway. | ||
1107 | */ | ||
1108 | break; | ||
1109 | } | ||
1110 | |||
1111 | /* FIXME: handle SET FEATURES failure */ | ||
1112 | (void) err_mask; | ||
1113 | |||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1117 | /** | ||
1118 | * ata_dev_enable_pm - enable SATA interface power management | ||
1119 | * @dev: device to enable power management | ||
1120 | * @policy: the link power management policy | ||
1121 | * | ||
1122 | * Enable SATA Interface power management. This will enable | ||
1123 | * Device Interface Power Management (DIPM) for min_power | ||
1124 | * policy, and then call driver specific callbacks for | ||
1125 | * enabling Host Initiated Power management. | ||
1126 | * | ||
1127 | * Locking: Caller. | ||
1128 | * Returns: -EINVAL if IPM is not supported, 0 otherwise. | ||
1129 | */ | ||
1130 | void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) | ||
1131 | { | ||
1132 | int rc = 0; | ||
1133 | struct ata_port *ap = dev->link->ap; | ||
1134 | |||
1135 | /* set HIPM first, then DIPM */ | ||
1136 | if (ap->ops->enable_pm) | ||
1137 | rc = ap->ops->enable_pm(ap, policy); | ||
1138 | if (rc) | ||
1139 | goto enable_pm_out; | ||
1140 | rc = ata_dev_set_dipm(dev, policy); | ||
1141 | |||
1142 | enable_pm_out: | ||
1143 | if (rc) | ||
1144 | ap->pm_policy = MAX_PERFORMANCE; | ||
1145 | else | ||
1146 | ap->pm_policy = policy; | ||
1147 | return /* rc */; /* hopefully we can use 'rc' eventually */ | ||
1148 | } | ||
1149 | |||
1150 | #ifdef CONFIG_PM | ||
1151 | /** | ||
1152 | * ata_dev_disable_pm - disable SATA interface power management | ||
1153 | * @dev: device to disable power management | ||
1154 | * | ||
1155 | * Disable SATA Interface power management. This will disable | ||
1156 | * Device Interface Power Management (DIPM) without changing | ||
1157 | * policy, call driver specific callbacks for disabling Host | ||
1158 | * Initiated Power management. | ||
1159 | * | ||
1160 | * Locking: Caller. | ||
1161 | * Returns: void | ||
1162 | */ | ||
1163 | static void ata_dev_disable_pm(struct ata_device *dev) | ||
1164 | { | ||
1165 | struct ata_port *ap = dev->link->ap; | ||
1166 | |||
1167 | ata_dev_set_dipm(dev, MAX_PERFORMANCE); | ||
1168 | if (ap->ops->disable_pm) | ||
1169 | ap->ops->disable_pm(ap); | ||
1170 | } | ||
1171 | #endif /* CONFIG_PM */ | ||
1172 | |||
1173 | void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy) | ||
1174 | { | ||
1175 | ap->pm_policy = policy; | ||
1176 | ap->link.eh_info.action |= ATA_EH_LPM; | ||
1177 | ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY; | ||
1178 | ata_port_schedule_eh(ap); | ||
1179 | } | ||
1180 | |||
1181 | #ifdef CONFIG_PM | ||
1182 | static void ata_lpm_enable(struct ata_host *host) | ||
1183 | { | ||
1184 | struct ata_link *link; | ||
1185 | struct ata_port *ap; | ||
1186 | struct ata_device *dev; | ||
1187 | int i; | ||
1188 | |||
1189 | for (i = 0; i < host->n_ports; i++) { | ||
1190 | ap = host->ports[i]; | ||
1191 | ata_for_each_link(link, ap, EDGE) { | ||
1192 | ata_for_each_dev(dev, link, ALL) | ||
1193 | ata_dev_disable_pm(dev); | ||
1194 | } | ||
1195 | } | ||
1196 | } | ||
1197 | |||
1198 | static void ata_lpm_disable(struct ata_host *host) | ||
1199 | { | ||
1200 | int i; | ||
1201 | |||
1202 | for (i = 0; i < host->n_ports; i++) { | ||
1203 | struct ata_port *ap = host->ports[i]; | ||
1204 | ata_lpm_schedule(ap, ap->pm_policy); | ||
1205 | } | ||
1206 | } | ||
1207 | #endif /* CONFIG_PM */ | ||
1208 | |||
1209 | /** | 1031 | /** |
1210 | * ata_dev_classify - determine device type based on ATA-spec signature | 1032 | * ata_dev_classify - determine device type based on ATA-spec signature |
1211 | * @tf: ATA taskfile register set for device to be identified | 1033 | * @tf: ATA taskfile register set for device to be identified |
@@ -1806,8 +1628,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1806 | } | 1628 | } |
1807 | } | 1629 | } |
1808 | 1630 | ||
1631 | if (ap->ops->error_handler) | ||
1632 | ata_eh_release(ap); | ||
1633 | |||
1809 | rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); | 1634 | rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); |
1810 | 1635 | ||
1636 | if (ap->ops->error_handler) | ||
1637 | ata_eh_acquire(ap); | ||
1638 | |||
1811 | ata_sff_flush_pio_task(ap); | 1639 | ata_sff_flush_pio_task(ap); |
1812 | 1640 | ||
1813 | if (!rc) { | 1641 | if (!rc) { |
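The hunk above brackets the potentially long wait_for_completion_timeout() with ata_eh_release()/ata_eh_acquire() so that sibling ports sharing the same host can run their EH while this internal command is in flight. A minimal sketch of that pattern, assuming the calling task already owns the port's EH and has access to the internal libata.h declarations; the wrapper name is invented:

```c
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/libata.h>
#include "libata.h"	/* internal: ata_eh_acquire()/ata_eh_release() */

/* Sketch only: drop EH ownership around a long sleep so other ports on
 * the same host are not starved, then take it back before touching the
 * port again.  Assumes the calling task owns @ap's EH on entry.
 */
static int example_wait_with_eh_released(struct ata_port *ap,
					 struct completion *done,
					 unsigned int timeout_msecs)
{
	unsigned long left;

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	left = wait_for_completion_timeout(done,
					   msecs_to_jiffies(timeout_msecs));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	return left ? 0 : -ETIMEDOUT;
}
```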
@@ -2412,7 +2240,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2412 | if (id[ATA_ID_CFA_KEY_MGMT] & 1) | 2240 | if (id[ATA_ID_CFA_KEY_MGMT] & 1) |
2413 | ata_dev_printk(dev, KERN_WARNING, | 2241 | ata_dev_printk(dev, KERN_WARNING, |
2414 | "supports DRM functions and may " | 2242 | "supports DRM functions and may " |
2415 | "not be fully accessable.\n"); | 2243 | "not be fully accessible.\n"); |
2416 | snprintf(revbuf, 7, "CFA"); | 2244 | snprintf(revbuf, 7, "CFA"); |
2417 | } else { | 2245 | } else { |
2418 | snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); | 2246 | snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); |
@@ -2420,7 +2248,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2420 | if (ata_id_has_tpm(id)) | 2248 | if (ata_id_has_tpm(id)) |
2421 | ata_dev_printk(dev, KERN_WARNING, | 2249 | ata_dev_printk(dev, KERN_WARNING, |
2422 | "supports DRM functions and may " | 2250 | "supports DRM functions and may " |
2423 | "not be fully accessable.\n"); | 2251 | "not be fully accessible.\n"); |
2424 | } | 2252 | } |
2425 | 2253 | ||
2426 | dev->n_sectors = ata_id_n_sectors(id); | 2254 | dev->n_sectors = ata_id_n_sectors(id); |
@@ -2564,13 +2392,6 @@ int ata_dev_configure(struct ata_device *dev) | |||
2564 | if (dev->flags & ATA_DFLAG_LBA48) | 2392 | if (dev->flags & ATA_DFLAG_LBA48) |
2565 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; | 2393 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; |
2566 | 2394 | ||
2567 | if (!(dev->horkage & ATA_HORKAGE_IPM)) { | ||
2568 | if (ata_id_has_hipm(dev->id)) | ||
2569 | dev->flags |= ATA_DFLAG_HIPM; | ||
2570 | if (ata_id_has_dipm(dev->id)) | ||
2571 | dev->flags |= ATA_DFLAG_DIPM; | ||
2572 | } | ||
2573 | |||
2574 | /* Limit PATA drive on SATA cable bridge transfers to udma5, | 2395 | /* Limit PATA drive on SATA cable bridge transfers to udma5, |
2575 | 200 sectors */ | 2396 | 200 sectors */ |
2576 | if (ata_dev_knobble(dev)) { | 2397 | if (ata_dev_knobble(dev)) { |
@@ -2591,13 +2412,6 @@ int ata_dev_configure(struct ata_device *dev) | |||
2591 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, | 2412 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, |
2592 | dev->max_sectors); | 2413 | dev->max_sectors); |
2593 | 2414 | ||
2594 | if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { | ||
2595 | dev->horkage |= ATA_HORKAGE_IPM; | ||
2596 | |||
2597 | /* reset link pm_policy for this port to no pm */ | ||
2598 | ap->pm_policy = MAX_PERFORMANCE; | ||
2599 | } | ||
2600 | |||
2601 | if (ap->ops->dev_config) | 2415 | if (ap->ops->dev_config) |
2602 | ap->ops->dev_config(dev); | 2416 | ap->ops->dev_config(dev); |
2603 | 2417 | ||
@@ -3596,7 +3410,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, | |||
3596 | warned = 1; | 3410 | warned = 1; |
3597 | } | 3411 | } |
3598 | 3412 | ||
3599 | msleep(50); | 3413 | ata_msleep(link->ap, 50); |
3600 | } | 3414 | } |
3601 | } | 3415 | } |
3602 | 3416 | ||
@@ -3617,7 +3431,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, | |||
3617 | int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, | 3431 | int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, |
3618 | int (*check_ready)(struct ata_link *link)) | 3432 | int (*check_ready)(struct ata_link *link)) |
3619 | { | 3433 | { |
3620 | msleep(ATA_WAIT_AFTER_RESET); | 3434 | ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); |
3621 | 3435 | ||
3622 | return ata_wait_ready(link, deadline, check_ready); | 3436 | return ata_wait_ready(link, deadline, check_ready); |
3623 | } | 3437 | } |
@@ -3628,7 +3442,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, | |||
3628 | * @params: timing parameters { interval, duration, timeout } in msec | 3442 | * @params: timing parameters { interval, duration, timeout } in msec |
3629 | * @deadline: deadline jiffies for the operation | 3443 | * @deadline: deadline jiffies for the operation |
3630 | * | 3444 | * |
3631 | * Make sure SStatus of @link reaches stable state, determined by | 3445 | * Make sure SStatus of @link reaches stable state, determined by |
3632 | * holding the same value where DET is not 1 for @duration polled | 3446 | * holding the same value where DET is not 1 for @duration polled |
3633 | * every @interval, before @timeout. Timeout constraints the | 3447 | * every @interval, before @timeout. Timeout constraints the |
3634 | * beginning of the stable state. Because DET gets stuck at 1 on | 3448 | * beginning of the stable state. Because DET gets stuck at 1 on |
@@ -3665,7 +3479,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, | |||
3665 | last_jiffies = jiffies; | 3479 | last_jiffies = jiffies; |
3666 | 3480 | ||
3667 | while (1) { | 3481 | while (1) { |
3668 | msleep(interval); | 3482 | ata_msleep(link->ap, interval); |
3669 | if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) | 3483 | if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) |
3670 | return rc; | 3484 | return rc; |
3671 | cur &= 0xf; | 3485 | cur &= 0xf; |
@@ -3730,7 +3544,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, | |||
3730 | * immediately after resuming. Delay 200ms before | 3544 | * immediately after resuming. Delay 200ms before |
3731 | * debouncing. | 3545 | * debouncing. |
3732 | */ | 3546 | */ |
3733 | msleep(200); | 3547 | ata_msleep(link->ap, 200); |
3734 | 3548 | ||
3735 | /* is SControl restored correctly? */ | 3549 | /* is SControl restored correctly? */ |
3736 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) | 3550 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
@@ -3760,6 +3574,78 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, | |||
3760 | } | 3574 | } |
3761 | 3575 | ||
3762 | /** | 3576 | /** |
3577 | * sata_link_scr_lpm - manipulate SControl IPM and SPM fields | ||
3578 | * @link: ATA link to manipulate SControl for | ||
3579 | * @policy: LPM policy to configure | ||
3580 | * @spm_wakeup: initiate LPM transition to active state | ||
3581 | * | ||
3582 | * Manipulate the IPM field of the SControl register of @link | ||
3583 | * according to @policy. If @policy is ATA_LPM_MAX_POWER and | ||
3584 | * @spm_wakeup is %true, the SPM field is manipulated to wake up | ||
3585 | * the link. This function also clears PHYRDY_CHG before | ||
3586 | * returning. | ||
3587 | * | ||
3588 | * LOCKING: | ||
3589 | * EH context. | ||
3590 | * | ||
3591 | * RETURNS: | ||
3592 | * 0 on success, -errno otherwise. | ||
3593 | */ | ||
3594 | int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, | ||
3595 | bool spm_wakeup) | ||
3596 | { | ||
3597 | struct ata_eh_context *ehc = &link->eh_context; | ||
3598 | bool woken_up = false; | ||
3599 | u32 scontrol; | ||
3600 | int rc; | ||
3601 | |||
3602 | rc = sata_scr_read(link, SCR_CONTROL, &scontrol); | ||
3603 | if (rc) | ||
3604 | return rc; | ||
3605 | |||
3606 | switch (policy) { | ||
3607 | case ATA_LPM_MAX_POWER: | ||
3608 | /* disable all LPM transitions */ | ||
3609 | scontrol |= (0x3 << 8); | ||
3610 | /* initiate transition to active state */ | ||
3611 | if (spm_wakeup) { | ||
3612 | scontrol |= (0x4 << 12); | ||
3613 | woken_up = true; | ||
3614 | } | ||
3615 | break; | ||
3616 | case ATA_LPM_MED_POWER: | ||
3617 | /* allow LPM to PARTIAL */ | ||
3618 | scontrol &= ~(0x1 << 8); | ||
3619 | scontrol |= (0x2 << 8); | ||
3620 | break; | ||
3621 | case ATA_LPM_MIN_POWER: | ||
3622 | if (ata_link_nr_enabled(link) > 0) | ||
3623 | /* no restrictions on LPM transitions */ | ||
3624 | scontrol &= ~(0x3 << 8); | ||
3625 | else { | ||
3626 | /* empty port, power off */ | ||
3627 | scontrol &= ~0xf; | ||
3628 | scontrol |= (0x1 << 2); | ||
3629 | } | ||
3630 | break; | ||
3631 | default: | ||
3632 | WARN_ON(1); | ||
3633 | } | ||
3634 | |||
3635 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
3636 | if (rc) | ||
3637 | return rc; | ||
3638 | |||
3639 | /* give the link time to transit out of LPM state */ | ||
3640 | if (woken_up) | ||
3641 | msleep(10); | ||
3642 | |||
3643 | /* clear PHYRDY_CHG from SError */ | ||
3644 | ehc->i.serror &= ~SERR_PHYRDY_CHG; | ||
3645 | return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); | ||
3646 | } | ||
3647 | |||
3648 | /** | ||
3763 | * ata_std_prereset - prepare for reset | 3649 | * ata_std_prereset - prepare for reset |
3764 | * @link: ATA link to be reset | 3650 | * @link: ATA link to be reset |
3765 | * @deadline: deadline jiffies for the operation | 3651 | * @deadline: deadline jiffies for the operation |
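sata_link_scr_lpm(), added above and exported further down in this diff, is the SControl-level replacement for the removed ata_dev_set_dipm() path. A hedged sketch of how an EH-context caller might use it; the wrapper name is invented for illustration:

```c
#include <linux/libata.h>

/* Hypothetical EH-context helper: first allow every LPM transition on
 * an occupied link, later force the link back to full power with an
 * SPM wakeup.  Both calls return 0 or -errno, as documented above.
 */
static int lpm_example_toggle(struct ata_link *link)
{
	int rc;

	/* permit PARTIAL and SLUMBER */
	rc = sata_link_scr_lpm(link, ATA_LPM_MIN_POWER, false);
	if (rc)
		return rc;

	/* ... low-power period ... */

	/* disable LPM transitions and kick the PHY back to active */
	return sata_link_scr_lpm(link, ATA_LPM_MAX_POWER, true);
}
```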
@@ -3868,7 +3754,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, | |||
3868 | /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 | 3754 | /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 |
3869 | * 10.4.2 says at least 1 ms. | 3755 | * 10.4.2 says at least 1 ms. |
3870 | */ | 3756 | */ |
3871 | msleep(1); | 3757 | ata_msleep(link->ap, 1); |
3872 | 3758 | ||
3873 | /* bring link back */ | 3759 | /* bring link back */ |
3874 | rc = sata_link_resume(link, timing, deadline); | 3760 | rc = sata_link_resume(link, timing, deadline); |
@@ -4257,7 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4257 | * Devices which choke on SETXFER. Applies only if both the | 4143 | * Devices which choke on SETXFER. Applies only if both the |
4258 | * device and controller are SATA. | 4144 | * device and controller are SATA. |
4259 | */ | 4145 | */ |
4260 | { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, | 4146 | { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, |
4147 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, | ||
4148 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | ||
4261 | 4149 | ||
4262 | /* End Marker */ | 4150 | /* End Marker */ |
4263 | { } | 4151 | { } |
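The revision string for the DVRTD08 entry is relaxed to NULL here, and a NULL revision matches any firmware, so all three Pioneer drives get ATA_HORKAGE_NOSETXFER regardless of revision. A hedged plain-C illustration of that matching rule; the struct and helper below are invented stand-ins (the real code uses glob_match(), as the next hunk shows):

```c
#include <string.h>

/* Invented stand-ins mirroring the blacklist initializers above:
 * a NULL model_rev means "match any revision".  The table is assumed
 * to end with an all-NULL sentinel entry, like the End Marker above.
 */
struct example_blacklist_entry {
	const char	*model_num;
	const char	*model_rev;
	unsigned long	horkage;
};

static unsigned long example_lookup(const struct example_blacklist_entry *tbl,
				    const char *model, const char *rev)
{
	const struct example_blacklist_entry *ad;

	for (ad = tbl; ad->model_num; ad++) {
		if (strcmp(ad->model_num, model))	/* real code globs */
			continue;
		if (!ad->model_rev || !strcmp(ad->model_rev, rev))
			return ad->horkage;
	}
	return 0;
}
```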
@@ -4329,7 +4217,7 @@ static int glob_match (const char *text, const char *pattern) | |||
4329 | return 0; /* End of both strings: match */ | 4217 | return 0; /* End of both strings: match */ |
4330 | return 1; /* No match */ | 4218 | return 1; /* No match */ |
4331 | } | 4219 | } |
4332 | 4220 | ||
4333 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev) | 4221 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev) |
4334 | { | 4222 | { |
4335 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; | 4223 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; |
@@ -4551,6 +4439,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
4551 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 4439 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4552 | return err_mask; | 4440 | return err_mask; |
4553 | } | 4441 | } |
4442 | |||
4554 | /** | 4443 | /** |
4555 | * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES | 4444 | * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES |
4556 | * @dev: Device to which command will be sent | 4445 | * @dev: Device to which command will be sent |
@@ -4566,8 +4455,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
4566 | * RETURNS: | 4455 | * RETURNS: |
4567 | * 0 on success, AC_ERR_* mask otherwise. | 4456 | * 0 on success, AC_ERR_* mask otherwise. |
4568 | */ | 4457 | */ |
4569 | static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, | 4458 | unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) |
4570 | u8 feature) | ||
4571 | { | 4459 | { |
4572 | struct ata_taskfile tf; | 4460 | struct ata_taskfile tf; |
4573 | unsigned int err_mask; | 4461 | unsigned int err_mask; |
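ata_dev_set_feature() loses its static qualifier here so that code outside libata-core.c can issue SET FEATURES. For illustration, a hedged sketch of a call site; the subfeature values mirror the DIPM usage removed earlier in this diff, and the wrapper name is made up:

```c
#include <linux/libata.h>

/* Sketch: turn on device-initiated power management via SET FEATURES,
 * assuming the SETFEATURES_SATA_ENABLE and SATA_DIPM constants from
 * <linux/ata.h> and a device that advertises DIPM support.
 */
static int example_enable_dipm(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
				       SATA_DIPM);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING,
			       "failed to enable DIPM (err_mask=0x%x)\n",
			       err_mask);
		return -EIO;
	}
	return 0;
}
```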
@@ -4927,9 +4815,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) | |||
4927 | { | 4815 | { |
4928 | struct ata_device *dev = qc->dev; | 4816 | struct ata_device *dev = qc->dev; |
4929 | 4817 | ||
4930 | if (ata_tag_internal(qc->tag)) | ||
4931 | return; | ||
4932 | |||
4933 | if (ata_is_nodata(qc->tf.protocol)) | 4818 | if (ata_is_nodata(qc->tf.protocol)) |
4934 | return; | 4819 | return; |
4935 | 4820 | ||
@@ -4943,8 +4828,13 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) | |||
4943 | * ata_qc_complete - Complete an active ATA command | 4828 | * ata_qc_complete - Complete an active ATA command |
4944 | * @qc: Command to complete | 4829 | * @qc: Command to complete |
4945 | * | 4830 | * |
4946 | * Indicate to the mid and upper layers that an ATA | 4831 | * Indicate to the mid and upper layers that an ATA command has |
4947 | * command has completed, with either an ok or not-ok status. | 4832 | * completed, with either an ok or not-ok status. |
4833 | * | ||
4834 | * Refrain from calling this function multiple times when | ||
4835 | * successfully completing multiple NCQ commands. | ||
4836 | * ata_qc_complete_multiple() should be used instead, which will | ||
4837 | * properly update IRQ expect state. | ||
4948 | * | 4838 | * |
4949 | * LOCKING: | 4839 | * LOCKING: |
4950 | * spin_lock_irqsave(host lock) | 4840 | * spin_lock_irqsave(host lock) |
@@ -4973,14 +4863,23 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4973 | if (unlikely(qc->err_mask)) | 4863 | if (unlikely(qc->err_mask)) |
4974 | qc->flags |= ATA_QCFLAG_FAILED; | 4864 | qc->flags |= ATA_QCFLAG_FAILED; |
4975 | 4865 | ||
4976 | if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { | 4866 | /* |
4977 | /* always fill result TF for failed qc */ | 4867 | * Finish internal commands without any further processing |
4868 | * and always with the result TF filled. | ||
4869 | */ | ||
4870 | if (unlikely(ata_tag_internal(qc->tag))) { | ||
4978 | fill_result_tf(qc); | 4871 | fill_result_tf(qc); |
4872 | __ata_qc_complete(qc); | ||
4873 | return; | ||
4874 | } | ||
4979 | 4875 | ||
4980 | if (!ata_tag_internal(qc->tag)) | 4876 | /* |
4981 | ata_qc_schedule_eh(qc); | 4877 | * Non-internal qc has failed. Fill the result TF and |
4982 | else | 4878 | * summon EH. |
4983 | __ata_qc_complete(qc); | 4879 | */ |
4880 | if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { | ||
4881 | fill_result_tf(qc); | ||
4882 | ata_qc_schedule_eh(qc); | ||
4984 | return; | 4883 | return; |
4985 | } | 4884 | } |
4986 | 4885 | ||
@@ -5037,6 +4936,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
5037 | * requests normally. ap->qc_active and @qc_active is compared | 4936 | * requests normally. ap->qc_active and @qc_active is compared |
5038 | * and commands are completed accordingly. | 4937 | * and commands are completed accordingly. |
5039 | * | 4938 | * |
4939 | * Always use this function when completing multiple NCQ commands | ||
4940 | * from IRQ handlers instead of calling ata_qc_complete() | ||
4941 | * multiple times to keep IRQ expect status properly in sync. | ||
4942 | * | ||
5040 | * LOCKING: | 4943 | * LOCKING: |
5041 | * spin_lock_irqsave(host lock) | 4944 | * spin_lock_irqsave(host lock) |
5042 | * | 4945 | * |
@@ -5422,12 +5325,6 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
5422 | int rc; | 5325 | int rc; |
5423 | 5326 | ||
5424 | /* | 5327 | /* |
5425 | * disable link pm on all ports before requesting | ||
5426 | * any pm activity | ||
5427 | */ | ||
5428 | ata_lpm_enable(host); | ||
5429 | |||
5430 | /* | ||
5431 | * On some hardware, device fails to respond after spun down | 5328 | * On some hardware, device fails to respond after spun down |
5432 | * for suspend. As the device won't be used before being | 5329 | * for suspend. As the device won't be used before being |
5433 | * resumed, we don't need to touch the device. Ask EH to skip | 5330 | * resumed, we don't need to touch the device. Ask EH to skip |
@@ -5450,7 +5347,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
5450 | * | 5347 | * |
5451 | * Resume @host. Actual operation is performed by EH. This | 5348 | * Resume @host. Actual operation is performed by EH. This |
5452 | * function requests EH to perform PM operations and returns. | 5349 | * function requests EH to perform PM operations and returns. |
5453 | * Note that all resume operations are performed parallely. | 5350 | * Note that all resume operations are performed parallelly. |
5454 | * | 5351 | * |
5455 | * LOCKING: | 5352 | * LOCKING: |
5456 | * Kernel thread context (may sleep). | 5353 | * Kernel thread context (may sleep). |
@@ -5460,9 +5357,6 @@ void ata_host_resume(struct ata_host *host) | |||
5460 | ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, | 5357 | ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, |
5461 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 5358 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
5462 | host->dev->power.power_state = PMSG_ON; | 5359 | host->dev->power.power_state = PMSG_ON; |
5463 | |||
5464 | /* reenable link pm */ | ||
5465 | ata_lpm_disable(host); | ||
5466 | } | 5360 | } |
5467 | #endif | 5361 | #endif |
5468 | 5362 | ||
@@ -5517,7 +5411,8 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) | |||
5517 | int i; | 5411 | int i; |
5518 | 5412 | ||
5519 | /* clear everything except for devices */ | 5413 | /* clear everything except for devices */ |
5520 | memset(link, 0, offsetof(struct ata_link, device[0])); | 5414 | memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, |
5415 | ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); | ||
5521 | 5416 | ||
5522 | link->ap = ap; | 5417 | link->ap = ap; |
5523 | link->pmp = pmp; | 5418 | link->pmp = pmp; |
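The memset() above now clears only the byte range between two markers instead of everything up to the device array. A hedged illustration of how ATA_LINK_CLEAR_BEGIN/ATA_LINK_CLEAR_END style markers are typically derived with offsetof(); the struct and field names below are invented for the example:

```c
#include <stddef.h>
#include <string.h>

/* Invented example type: only the fields between 'active_tag' and
 * 'device' (exclusive) are reset, mirroring the clear-range idiom in
 * the hunk above.  Fields outside the range survive reinitialization.
 */
struct example_link {
	void *ap;		/* preserved */
	int active_tag;		/* first field to clear */
	unsigned int flags;
	unsigned int sata_spd;
	int device[2];		/* preserved: everything from here on */
};

#define EXAMPLE_CLEAR_BEGIN	offsetof(struct example_link, active_tag)
#define EXAMPLE_CLEAR_END	offsetof(struct example_link, device)

static void example_link_reinit(struct example_link *link)
{
	memset((char *)link + EXAMPLE_CLEAR_BEGIN, 0,
	       EXAMPLE_CLEAR_END - EXAMPLE_CLEAR_BEGIN);
}
```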
@@ -5592,7 +5487,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) | |||
5592 | if (!ap) | 5487 | if (!ap) |
5593 | return NULL; | 5488 | return NULL; |
5594 | 5489 | ||
5595 | ap->pflags |= ATA_PFLAG_INITIALIZING; | 5490 | ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; |
5596 | ap->lock = &host->lock; | 5491 | ap->lock = &host->lock; |
5597 | ap->print_id = -1; | 5492 | ap->print_id = -1; |
5598 | ap->host = host; | 5493 | ap->host = host; |
@@ -5695,6 +5590,7 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) | |||
5695 | dev_set_drvdata(dev, host); | 5590 | dev_set_drvdata(dev, host); |
5696 | 5591 | ||
5697 | spin_lock_init(&host->lock); | 5592 | spin_lock_init(&host->lock); |
5593 | mutex_init(&host->eh_mutex); | ||
5698 | host->dev = dev; | 5594 | host->dev = dev; |
5699 | host->n_ports = max_ports; | 5595 | host->n_ports = max_ports; |
5700 | 5596 | ||
@@ -5992,26 +5888,15 @@ void ata_host_init(struct ata_host *host, struct device *dev, | |||
5992 | unsigned long flags, struct ata_port_operations *ops) | 5888 | unsigned long flags, struct ata_port_operations *ops) |
5993 | { | 5889 | { |
5994 | spin_lock_init(&host->lock); | 5890 | spin_lock_init(&host->lock); |
5891 | mutex_init(&host->eh_mutex); | ||
5995 | host->dev = dev; | 5892 | host->dev = dev; |
5996 | host->flags = flags; | 5893 | host->flags = flags; |
5997 | host->ops = ops; | 5894 | host->ops = ops; |
5998 | } | 5895 | } |
5999 | 5896 | ||
6000 | 5897 | int ata_port_probe(struct ata_port *ap) | |
6001 | static void async_port_probe(void *data, async_cookie_t cookie) | ||
6002 | { | 5898 | { |
6003 | int rc; | 5899 | int rc = 0; |
6004 | struct ata_port *ap = data; | ||
6005 | |||
6006 | /* | ||
6007 | * If we're not allowed to scan this host in parallel, | ||
6008 | * we need to wait until all previous scans have completed | ||
6009 | * before going further. | ||
6010 | * Jeff Garzik says this is only within a controller, so we | ||
6011 | * don't need to wait for port 0, only for later ports. | ||
6012 | */ | ||
6013 | if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) | ||
6014 | async_synchronize_cookie(cookie); | ||
6015 | 5900 | ||
6016 | /* probe */ | 5901 | /* probe */ |
6017 | if (ap->ops->error_handler) { | 5902 | if (ap->ops->error_handler) { |
@@ -6022,7 +5907,7 @@ static void async_port_probe(void *data, async_cookie_t cookie) | |||
6022 | spin_lock_irqsave(ap->lock, flags); | 5907 | spin_lock_irqsave(ap->lock, flags); |
6023 | 5908 | ||
6024 | ehi->probe_mask |= ATA_ALL_DEVICES; | 5909 | ehi->probe_mask |= ATA_ALL_DEVICES; |
6025 | ehi->action |= ATA_EH_RESET | ATA_EH_LPM; | 5910 | ehi->action |= ATA_EH_RESET; |
6026 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; | 5911 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; |
6027 | 5912 | ||
6028 | ap->pflags &= ~ATA_PFLAG_INITIALIZING; | 5913 | ap->pflags &= ~ATA_PFLAG_INITIALIZING; |
@@ -6037,23 +5922,33 @@ static void async_port_probe(void *data, async_cookie_t cookie) | |||
6037 | DPRINTK("ata%u: bus probe begin\n", ap->print_id); | 5922 | DPRINTK("ata%u: bus probe begin\n", ap->print_id); |
6038 | rc = ata_bus_probe(ap); | 5923 | rc = ata_bus_probe(ap); |
6039 | DPRINTK("ata%u: bus probe end\n", ap->print_id); | 5924 | DPRINTK("ata%u: bus probe end\n", ap->print_id); |
6040 | |||
6041 | if (rc) { | ||
6042 | /* FIXME: do something useful here? | ||
6043 | * Current libata behavior will | ||
6044 | * tear down everything when | ||
6045 | * the module is removed | ||
6046 | * or the h/w is unplugged. | ||
6047 | */ | ||
6048 | } | ||
6049 | } | 5925 | } |
5926 | return rc; | ||
5927 | } | ||
5928 | |||
5929 | |||
5930 | static void async_port_probe(void *data, async_cookie_t cookie) | ||
5931 | { | ||
5932 | struct ata_port *ap = data; | ||
5933 | |||
5934 | /* | ||
5935 | * If we're not allowed to scan this host in parallel, | ||
5936 | * we need to wait until all previous scans have completed | ||
5937 | * before going further. | ||
5938 | * Jeff Garzik says this is only within a controller, so we | ||
5939 | * don't need to wait for port 0, only for later ports. | ||
5940 | */ | ||
5941 | if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) | ||
5942 | async_synchronize_cookie(cookie); | ||
5943 | |||
5944 | (void)ata_port_probe(ap); | ||
6050 | 5945 | ||
6051 | /* in order to keep device order, we need to synchronize at this point */ | 5946 | /* in order to keep device order, we need to synchronize at this point */ |
6052 | async_synchronize_cookie(cookie); | 5947 | async_synchronize_cookie(cookie); |
6053 | 5948 | ||
6054 | ata_scsi_scan_host(ap, 1); | 5949 | ata_scsi_scan_host(ap, 1); |
6055 | |||
6056 | } | 5950 | } |
5951 | |||
6057 | /** | 5952 | /** |
6058 | * ata_host_register - register initialized ATA host | 5953 | * ata_host_register - register initialized ATA host |
6059 | * @host: ATA host to register | 5954 | * @host: ATA host to register |
@@ -6093,9 +5988,18 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6093 | for (i = 0; i < host->n_ports; i++) | 5988 | for (i = 0; i < host->n_ports; i++) |
6094 | host->ports[i]->print_id = ata_print_id++; | 5989 | host->ports[i]->print_id = ata_print_id++; |
6095 | 5990 | ||
5991 | |||
5992 | /* Create associated sysfs transport objects */ | ||
5993 | for (i = 0; i < host->n_ports; i++) { | ||
5994 | rc = ata_tport_add(host->dev,host->ports[i]); | ||
5995 | if (rc) { | ||
5996 | goto err_tadd; | ||
5997 | } | ||
5998 | } | ||
5999 | |||
6096 | rc = ata_scsi_add_hosts(host, sht); | 6000 | rc = ata_scsi_add_hosts(host, sht); |
6097 | if (rc) | 6001 | if (rc) |
6098 | return rc; | 6002 | goto err_tadd; |
6099 | 6003 | ||
6100 | /* associate with ACPI nodes */ | 6004 | /* associate with ACPI nodes */ |
6101 | ata_acpi_associate(host); | 6005 | ata_acpi_associate(host); |
@@ -6136,6 +6040,13 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6136 | } | 6040 | } |
6137 | 6041 | ||
6138 | return 0; | 6042 | return 0; |
6043 | |||
6044 | err_tadd: | ||
6045 | while (--i >= 0) { | ||
6046 | ata_tport_delete(host->ports[i]); | ||
6047 | } | ||
6048 | return rc; | ||
6049 | |||
6139 | } | 6050 | } |
6140 | 6051 | ||
6141 | /** | 6052 | /** |
@@ -6223,9 +6134,16 @@ static void ata_port_detach(struct ata_port *ap) | |||
6223 | /* it better be dead now */ | 6134 | /* it better be dead now */ |
6224 | WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); | 6135 | WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); |
6225 | 6136 | ||
6226 | cancel_rearming_delayed_work(&ap->hotplug_task); | 6137 | cancel_delayed_work_sync(&ap->hotplug_task); |
6227 | 6138 | ||
6228 | skip_eh: | 6139 | skip_eh: |
6140 | if (ap->pmp_link) { | ||
6141 | int i; | ||
6142 | for (i = 0; i < SATA_PMP_MAX_PORTS; i++) | ||
6143 | ata_tlink_delete(&ap->pmp_link[i]); | ||
6144 | } | ||
6145 | ata_tport_delete(ap); | ||
6146 | |||
6229 | /* remove the associated SCSI host */ | 6147 | /* remove the associated SCSI host */ |
6230 | scsi_remove_host(ap->scsi_host); | 6148 | scsi_remove_host(ap->scsi_host); |
6231 | } | 6149 | } |
@@ -6542,7 +6460,7 @@ static void __init ata_parse_force_param(void) | |||
6542 | 6460 | ||
6543 | static int __init ata_init(void) | 6461 | static int __init ata_init(void) |
6544 | { | 6462 | { |
6545 | int rc = -ENOMEM; | 6463 | int rc; |
6546 | 6464 | ||
6547 | ata_parse_force_param(); | 6465 | ata_parse_force_param(); |
6548 | 6466 | ||
@@ -6552,12 +6470,25 @@ static int __init ata_init(void) | |||
6552 | return rc; | 6470 | return rc; |
6553 | } | 6471 | } |
6554 | 6472 | ||
6473 | libata_transport_init(); | ||
6474 | ata_scsi_transport_template = ata_attach_transport(); | ||
6475 | if (!ata_scsi_transport_template) { | ||
6476 | ata_sff_exit(); | ||
6477 | rc = -ENOMEM; | ||
6478 | goto err_out; | ||
6479 | } | ||
6480 | |||
6555 | printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); | 6481 | printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); |
6556 | return 0; | 6482 | return 0; |
6483 | |||
6484 | err_out: | ||
6485 | return rc; | ||
6557 | } | 6486 | } |
6558 | 6487 | ||
6559 | static void __exit ata_exit(void) | 6488 | static void __exit ata_exit(void) |
6560 | { | 6489 | { |
6490 | ata_release_transport(ata_scsi_transport_template); | ||
6491 | libata_transport_exit(); | ||
6561 | ata_sff_exit(); | 6492 | ata_sff_exit(); |
6562 | kfree(ata_force_tbl); | 6493 | kfree(ata_force_tbl); |
6563 | } | 6494 | } |
@@ -6573,7 +6504,35 @@ int ata_ratelimit(void) | |||
6573 | } | 6504 | } |
6574 | 6505 | ||
6575 | /** | 6506 | /** |
6507 | * ata_msleep - ATA EH owner aware msleep | ||
6508 | * @ap: ATA port to attribute the sleep to | ||
6509 | * @msecs: duration to sleep in milliseconds | ||
6510 | * | ||
6511 | * Sleeps @msecs. If the current task is owner of @ap's EH, the | ||
6512 | * ownership is released before going to sleep and reacquired | ||
6513 | * after the sleep is complete. IOW, other ports sharing the | ||
6514 | * @ap->host will be allowed to own the EH while this task is | ||
6515 | * sleeping. | ||
6516 | * | ||
6517 | * LOCKING: | ||
6518 | * Might sleep. | ||
6519 | */ | ||
6520 | void ata_msleep(struct ata_port *ap, unsigned int msecs) | ||
6521 | { | ||
6522 | bool owns_eh = ap && ap->host->eh_owner == current; | ||
6523 | |||
6524 | if (owns_eh) | ||
6525 | ata_eh_release(ap); | ||
6526 | |||
6527 | msleep(msecs); | ||
6528 | |||
6529 | if (owns_eh) | ||
6530 | ata_eh_acquire(ap); | ||
6531 | } | ||
6532 | |||
6533 | /** | ||
6576 | * ata_wait_register - wait until register value changes | 6534 | * ata_wait_register - wait until register value changes |
6535 | * @ap: ATA port to wait register for, can be NULL | ||
6577 | * @reg: IO-mapped register | 6536 | * @reg: IO-mapped register |
6578 | * @mask: Mask to apply to read register value | 6537 | * @mask: Mask to apply to read register value |
6579 | * @val: Wait condition | 6538 | * @val: Wait condition |
@@ -6595,7 +6554,7 @@ int ata_ratelimit(void) | |||
6595 | * RETURNS: | 6554 | * RETURNS: |
6596 | * The final register value. | 6555 | * The final register value. |
6597 | */ | 6556 | */ |
6598 | u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 6557 | u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, |
6599 | unsigned long interval, unsigned long timeout) | 6558 | unsigned long interval, unsigned long timeout) |
6600 | { | 6559 | { |
6601 | unsigned long deadline; | 6560 | unsigned long deadline; |
@@ -6610,7 +6569,7 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | |||
6610 | deadline = ata_deadline(jiffies, timeout); | 6569 | deadline = ata_deadline(jiffies, timeout); |
6611 | 6570 | ||
6612 | while ((tmp & mask) == val && time_before(jiffies, deadline)) { | 6571 | while ((tmp & mask) == val && time_before(jiffies, deadline)) { |
6613 | msleep(interval); | 6572 | ata_msleep(ap, interval); |
6614 | tmp = ioread32(reg); | 6573 | tmp = ioread32(reg); |
6615 | } | 6574 | } |
6616 | 6575 | ||
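ata_wait_register() now takes the port so its polling delay goes through ata_msleep(), releasing EH ownership between polls as the preceding hunk documents. A hedged usage sketch; the status register, bit layout, and timings are assumptions:

```c
#include <linux/libata.h>

/* Sketch: poll a hypothetical status register until an assumed BUSY
 * bit (bit 0) clears.  ata_wait_register() waits while
 * (read & mask) == val, sleeping via ata_msleep(ap, interval).
 */
static int example_wait_not_busy(struct ata_port *ap,
				 void __iomem *status_reg)
{
	u32 tmp;

	/* poll every 10 ms, give up after 500 ms */
	tmp = ata_wait_register(ap, status_reg, 0x1, 0x1, 10, 500);

	return (tmp & 0x1) ? -EBUSY : 0;
}
```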
@@ -6686,6 +6645,7 @@ EXPORT_SYMBOL_GPL(sata_set_spd); | |||
6686 | EXPORT_SYMBOL_GPL(ata_wait_after_reset); | 6645 | EXPORT_SYMBOL_GPL(ata_wait_after_reset); |
6687 | EXPORT_SYMBOL_GPL(sata_link_debounce); | 6646 | EXPORT_SYMBOL_GPL(sata_link_debounce); |
6688 | EXPORT_SYMBOL_GPL(sata_link_resume); | 6647 | EXPORT_SYMBOL_GPL(sata_link_resume); |
6648 | EXPORT_SYMBOL_GPL(sata_link_scr_lpm); | ||
6689 | EXPORT_SYMBOL_GPL(ata_std_prereset); | 6649 | EXPORT_SYMBOL_GPL(ata_std_prereset); |
6690 | EXPORT_SYMBOL_GPL(sata_link_hardreset); | 6650 | EXPORT_SYMBOL_GPL(sata_link_hardreset); |
6691 | EXPORT_SYMBOL_GPL(sata_std_hardreset); | 6651 | EXPORT_SYMBOL_GPL(sata_std_hardreset); |
@@ -6693,6 +6653,7 @@ EXPORT_SYMBOL_GPL(ata_std_postreset); | |||
6693 | EXPORT_SYMBOL_GPL(ata_dev_classify); | 6653 | EXPORT_SYMBOL_GPL(ata_dev_classify); |
6694 | EXPORT_SYMBOL_GPL(ata_dev_pair); | 6654 | EXPORT_SYMBOL_GPL(ata_dev_pair); |
6695 | EXPORT_SYMBOL_GPL(ata_ratelimit); | 6655 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
6656 | EXPORT_SYMBOL_GPL(ata_msleep); | ||
6696 | EXPORT_SYMBOL_GPL(ata_wait_register); | 6657 | EXPORT_SYMBOL_GPL(ata_wait_register); |
6697 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); | 6658 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); |
6698 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); | 6659 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); |