author		Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 18:58:44 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 18:58:44 -0400
commit		6edad161cd4dfe1df772e7a74ab63cab53b5e8c1 (patch)
tree		389d6daa728b2ba1bd8c2180cab705706449f62a /drivers/scsi/libata-core.c
parent		236ee8c33277ab48671995f26dc68a4639936418 (diff)
parent		0dd4b21f517e138ea113db255645fbae1bf5eef3 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (258 commits)
  [libata] conversion to new debug scheme, part 1 of $N
  [PATCH] libata: Add ata_scsi_dev_disabled
  [libata] Add host lock to struct ata_port
  [PATCH] libata: implement per-dev EH action mask eh_info->dev_action[]
  [PATCH] libata-dev: move the CDB-intr DMA blacklisting
  [PATCH] ahci: disable NCQ support on vt8251
  [libata] ahci: add JMicron PCI IDs
  [libata] sata_nv: add PCI IDs
  [libata] ahci: Add NVIDIA PCI IDs.
  [PATCH] libata: convert several bmdma-style controllers to new EH, take #3
  [PATCH] sata_via: convert to new EH, take #3
  [libata] sata_nv: s/spin_lock_irqsave/spin_lock/ in irq handler
  [PATCH] sata_nv: add hotplug support
  [PATCH] sata_nv: convert to new EH
  [PATCH] sata_nv: better irq handlers
  [PATCH] sata_nv: simplify constants
  [PATCH] sata_nv: kill struct nv_host_desc and nv_host
  [PATCH] sata_nv: kill not-working hotplug code
  [libata] Update docs to reflect current driver API
  [PATCH] libata: add host_set->next for legacy two host_sets case, take #3
  ...
Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r--	drivers/scsi/libata-core.c	3042
1 file changed, 1988 insertions(+), 1054 deletions(-)
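
The most pervasive API change in this merge is that ata_exec_internal() now takes the target ata_device directly (the port is reached through dev->ap) and grows a CDB argument for ATAPI packet commands. A minimal sketch of a caller against the post-merge signature, modeled on the IDENTIFY path in ata_dev_read_id() in the diff below; the helper name read_identify() is hypothetical, not part of this commit:

	/* Sketch only: issue IDENTIFY DEVICE via the post-merge internal-
	 * command API.  Mirrors ata_dev_read_id() below; read_identify()
	 * is an illustrative wrapper, not a function from this commit.
	 */
	static int read_identify(struct ata_device *dev, u16 *id)
	{
		struct ata_taskfile tf;
		unsigned int err_mask;

		ata_tf_init(dev, &tf);		/* was ata_tf_init(ap, &tf, dev->devno) */
		tf.command = ATA_CMD_ID_ATA;
		tf.protocol = ATA_PROT_PIO;

		/* NULL cdb: this is not a packet command */
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
					     id, sizeof(id[0]) * ATA_ID_WORDS);
		return err_mask ? -EIO : 0;
	}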
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index de9ba7890b5a..6c66877be2bf 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,22 +61,29 @@
 
 #include "libata.h"
 
-static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev,
-					u16 heads,
-					u16 sectors);
-static void ata_set_mode(struct ata_port *ap);
-static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
-					 struct ata_device *dev);
-static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
+/* debounce timing parameters in msecs { interval, duration, timeout } */
+const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
+const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
+const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
+
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_device *dev);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
 
+struct workqueue_struct *ata_aux_wq;
+
 int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
 
+int atapi_dmadir = 0;
+module_param(atapi_dmadir, int, 0444);
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+
 int libata_fua = 0;
 module_param_named(fua, libata_fua, int, 0444);
 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
@@ -397,11 +404,22 @@ static const char *ata_mode_string(unsigned int xfer_mask)
 	return "<n/a>";
 }
 
-static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
-{
-	if (ata_dev_present(dev)) {
-		printk(KERN_WARNING "ata%u: dev %u disabled\n",
-		       ap->id, dev->devno);
+static const char *sata_spd_string(unsigned int spd)
+{
+	static const char * const spd_str[] = {
+		"1.5 Gbps",
+		"3.0 Gbps",
+	};
+
+	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
+		return "<unknown>";
+	return spd_str[spd - 1];
+}
+
+void ata_dev_disable(struct ata_device *dev)
+{
+	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
+		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
 		dev->class++;
 	}
 }
@@ -759,8 +777,11 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
 void ata_dev_select(struct ata_port *ap, unsigned int device,
 		    unsigned int wait, unsigned int can_sleep)
 {
-	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
-		ap->id, device, wait);
+	if (ata_msg_probe(ap)) {
+		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
+				"device %u, wait %u\n",
+				ap->id, device, wait);
+	}
 
 	if (wait)
 		ata_wait_idle(ap);
@@ -915,9 +936,9 @@ void ata_port_flush_task(struct ata_port *ap)
 
 	DPRINTK("ENTER\n");
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("flush #1\n");
 	flush_workqueue(ata_wq);
@@ -928,30 +949,31 @@ void ata_port_flush_task(struct ata_port *ap)
 	 * Cancel and flush.
 	 */
 	if (!cancel_delayed_work(&ap->port_task)) {
-		DPRINTK("flush #2\n");
+		if (ata_msg_ctl(ap))
+			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
 		flush_workqueue(ata_wq);
 	}
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
-	DPRINTK("EXIT\n");
+	if (ata_msg_ctl(ap))
+		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
 }
 
 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
 {
 	struct completion *waiting = qc->private_data;
 
-	qc->ap->ops->tf_read(qc->ap, &qc->tf);
 	complete(waiting);
 }
 
 /**
  *	ata_exec_internal - execute libata internal command
- *	@ap: Port to which the command is sent
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
  *	@dma_dir: Data tranfer direction of the command
  *	@buf: Data buffer of the command
  *	@buflen: Length of data buffer
@@ -964,25 +986,66 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  *
  *	LOCKING:
  *	None.  Should be called with kernel context, might sleep.
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
  */
-
-static unsigned
-ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
-		  struct ata_taskfile *tf,
-		  int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal(struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen)
 {
+	struct ata_port *ap = dev->ap;
 	u8 command = tf->command;
 	struct ata_queued_cmd *qc;
+	unsigned int tag, preempted_tag;
+	u32 preempted_sactive, preempted_qc_active;
 	DECLARE_COMPLETION(wait);
 	unsigned long flags;
 	unsigned int err_mask;
+	int rc;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* no internal command while frozen */
+	if (ap->flags & ATA_FLAG_FROZEN) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		return AC_ERR_SYSTEM;
+	}
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	/* initialize internal qc */
 
-	qc = ata_qc_new_init(ap, dev);
-	BUG_ON(qc == NULL);
+	/* XXX: Tag 0 is used for drivers with legacy EH as some
+	 * drivers choke if any other tag is given.  This breaks
+	 * ata_tag_internal() test for those drivers.  Don't use new
+	 * EH stuff without converting to it.
+	 */
+	if (ap->ops->error_handler)
+		tag = ATA_TAG_INTERNAL;
+	else
+		tag = 0;
+
+	if (test_and_set_bit(tag, &ap->qc_allocated))
+		BUG();
+	qc = __ata_qc_from_tag(ap, tag);
 
+	qc->tag = tag;
+	qc->scsicmd = NULL;
+	qc->ap = ap;
+	qc->dev = dev;
+	ata_qc_reinit(qc);
+
+	preempted_tag = ap->active_tag;
+	preempted_sactive = ap->sactive;
+	preempted_qc_active = ap->qc_active;
+	ap->active_tag = ATA_TAG_POISON;
+	ap->sactive = 0;
+	ap->qc_active = 0;
+
+	/* prepare & issue qc */
 	qc->tf = *tf;
+	if (cdb)
+		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
 		ata_sg_init_one(qc, buf, buflen);
@@ -994,33 +1057,58 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 
 	ata_qc_issue(qc);
 
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
-	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
-		ata_port_flush_task(ap);
+	ata_port_flush_task(ap);
 
-		spin_lock_irqsave(&ap->host_set->lock, flags);
+	if (!rc) {
+		spin_lock_irqsave(ap->lock, flags);
 
 		/* We're racing with irq here.  If we lose, the
 		 * following test prevents us from completing the qc
-		 * again.  If completion irq occurs after here but
-		 * before the caller cleans up, it will result in a
-		 * spurious interrupt.  We can live with that.
+		 * twice.  If we win, the port is frozen and will be
+		 * cleaned up by ->post_internal_cmd().
 		 */
 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
-			qc->err_mask = AC_ERR_TIMEOUT;
-			ata_qc_complete(qc);
-			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
-			       ap->id, command);
+			qc->err_mask |= AC_ERR_TIMEOUT;
+
+			if (ap->ops->error_handler)
+				ata_port_freeze(ap);
+			else
+				ata_qc_complete(qc);
+
+			if (ata_msg_warn(ap))
+				ata_dev_printk(dev, KERN_WARNING,
+					"qc timeout (cmd 0x%x)\n", command);
 		}
 
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 	}
 
-	*tf = qc->tf;
+	/* do post_internal_cmd */
+	if (ap->ops->post_internal_cmd)
+		ap->ops->post_internal_cmd(qc);
+
+	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
+		if (ata_msg_warn(ap))
+			ata_dev_printk(dev, KERN_WARNING,
+				"zero err_mask for failed "
+				"internal command, assuming AC_ERR_OTHER\n");
+		qc->err_mask |= AC_ERR_OTHER;
+	}
+
+	/* finish up */
+	spin_lock_irqsave(ap->lock, flags);
+
+	*tf = qc->result_tf;
 	err_mask = qc->err_mask;
 
 	ata_qc_free(qc);
+	ap->active_tag = preempted_tag;
+	ap->sactive = preempted_sactive;
+	ap->qc_active = preempted_qc_active;
 
 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
 	 * Until those drivers are fixed, we detect the condition
@@ -1033,11 +1121,13 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	 *
 	 * Kill the following code as soon as those drivers are fixed.
 	 */
-	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
+	if (ap->flags & ATA_FLAG_DISABLED) {
 		err_mask |= AC_ERR_SYSTEM;
 		ata_port_probe(ap);
 	}
 
+	spin_unlock_irqrestore(ap->lock, flags);
+
 	return err_mask;
 }
 
@@ -1076,11 +1166,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 
 /**
  *	ata_dev_read_id - Read ID data from the specified device
- *	@ap: port on which target device resides
  *	@dev: target device
  *	@p_class: pointer to class of the target device (may be changed)
  *	@post_reset: is this read ID post-reset?
- *	@p_id: read IDENTIFY page (newly allocated)
+ *	@id: buffer to read IDENTIFY data into
  *
  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1093,29 +1182,24 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
-			   unsigned int *p_class, int post_reset, u16 **p_id)
+int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+		    int post_reset, u16 *id)
 {
+	struct ata_port *ap = dev->ap;
 	unsigned int class = *p_class;
 	struct ata_taskfile tf;
 	unsigned int err_mask = 0;
-	u16 *id;
 	const char *reason;
 	int rc;
 
-	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
+	if (ata_msg_ctl(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+			       __FUNCTION__, ap->id, dev->devno);
 
 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 
-	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
-	if (id == NULL) {
-		rc = -ENOMEM;
-		reason = "out of memory";
-		goto err_out;
-	}
-
  retry:
-	ata_tf_init(ap, &tf, dev->devno);
+	ata_tf_init(dev, &tf);
 
 	switch (class) {
 	case ATA_DEV_ATA:
@@ -1132,7 +1216,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 
 	tf.protocol = ATA_PROT_PIO;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
 	if (err_mask) {
 		rc = -EIO;
@@ -1159,7 +1243,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 	 * Some drives were very specific about that exact sequence.
 	 */
 	if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
-		err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
+		err_mask = ata_dev_init_params(dev, id[3], id[6]);
 		if (err_mask) {
 			rc = -EIO;
 			reason = "INIT_DEV_PARAMS failed";
@@ -1175,25 +1259,45 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 	}
 
 	*p_class = class;
-	*p_id = id;
+
 	return 0;
 
  err_out:
-	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
-	       ap->id, dev->devno, reason);
-	kfree(id);
+	if (ata_msg_warn(ap))
+		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
+			       "(%s, err_mask=0x%x)\n", reason, err_mask);
 	return rc;
 }
 
-static inline u8 ata_dev_knobble(const struct ata_port *ap,
-				 struct ata_device *dev)
+static inline u8 ata_dev_knobble(struct ata_device *dev)
 {
-	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+}
+
+static void ata_dev_config_ncq(struct ata_device *dev,
+			       char *desc, size_t desc_sz)
+{
+	struct ata_port *ap = dev->ap;
+	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+
+	if (!ata_id_has_ncq(dev->id)) {
+		desc[0] = '\0';
+		return;
+	}
+
+	if (ap->flags & ATA_FLAG_NCQ) {
+		hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
+		dev->flags |= ATA_DFLAG_NCQ;
+	}
+
+	if (hdepth >= ddepth)
+		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+	else
+		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
 }
 
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
- *	@ap: Port on which target device resides
  *	@dev: Target device to configure
  *	@print_info: Enable device info printout
  *
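For a sense of what the ata_dev_config_ncq() added above produces: a typical AHCI host with can_queue 31 (ATA_MAX_QUEUE - 1) driving an NCQ drive that reports queue depth 32 yields the string shown in this standalone sketch (illustrative values, not from the commit):

	#include <stdio.h>

	int main(void)
	{
		/* illustrative inputs: hdepth = min(can_queue, ATA_MAX_QUEUE - 1),
		 * ddepth = ata_id_queue_depth() from IDENTIFY word 75
		 */
		int hdepth = 31, ddepth = 32;
		char desc[20];

		if (hdepth >= ddepth)
			snprintf(desc, sizeof(desc), "NCQ (depth %d)", ddepth);
		else
			snprintf(desc, sizeof(desc), "NCQ (depth %d/%d)", hdepth, ddepth);
		puts(desc);	/* prints: NCQ (depth 31/32) */
		return 0;
	}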
@@ -1206,30 +1310,33 @@ static inline u8 ata_dev_knobble(const struct ata_port *ap,
  *	RETURNS:
  *	0 on success, -errno otherwise
  */
-static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
-			     int print_info)
+int ata_dev_configure(struct ata_device *dev, int print_info)
 {
+	struct ata_port *ap = dev->ap;
 	const u16 *id = dev->id;
 	unsigned int xfer_mask;
 	int i, rc;
 
-	if (!ata_dev_present(dev)) {
-		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
-			ap->id, dev->devno);
+	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
+		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
+			       __FUNCTION__, ap->id, dev->devno);
 		return 0;
 	}
 
-	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+			       __FUNCTION__, ap->id, dev->devno);
 
 	/* print device capabilities */
-	if (print_info)
-		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
-		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
-		       ap->id, dev->devno, id[49], id[82], id[83],
-		       id[84], id[85], id[86], id[87], id[88]);
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
+			       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+			       __FUNCTION__,
+			       id[49], id[82], id[83], id[84],
+			       id[85], id[86], id[87], id[88]);
 
 	/* initialize to-be-configured parameters */
-	dev->flags = 0;
+	dev->flags &= ~ATA_DFLAG_CFG_MASK;
 	dev->max_sectors = 0;
 	dev->cdb_len = 0;
 	dev->n_sectors = 0;
@@ -1244,7 +1351,8 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 	/* find max transfer mode; for printk only */
 	xfer_mask = ata_id_xfermask(id);
 
-	ata_dump_id(id);
+	if (ata_msg_probe(ap))
+		ata_dump_id(id);
 
 	/* ATA-specific feature tests */
 	if (dev->class == ATA_DEV_ATA) {
@@ -1252,6 +1360,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 
 		if (ata_id_has_lba(id)) {
 			const char *lba_desc;
+			char ncq_desc[20];
 
 			lba_desc = "LBA";
 			dev->flags |= ATA_DFLAG_LBA;
@@ -1260,15 +1369,17 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 				lba_desc = "LBA48";
 			}
 
+			/* config NCQ */
+			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+
 			/* print device info to dmesg */
-			if (print_info)
-				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
-				       "max %s, %Lu sectors: %s\n",
-				       ap->id, dev->devno,
-				       ata_id_major_version(id),
-				       ata_mode_string(xfer_mask),
-				       (unsigned long long)dev->n_sectors,
-				       lba_desc);
+			if (ata_msg_info(ap))
+				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+					"max %s, %Lu sectors: %s %s\n",
+					ata_id_major_version(id),
+					ata_mode_string(xfer_mask),
+					(unsigned long long)dev->n_sectors,
+					lba_desc, ncq_desc);
 		} else {
 			/* CHS */
 
@@ -1285,14 +1396,20 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 		}
 
 		/* print device info to dmesg */
-		if (print_info)
-			printk(KERN_INFO "ata%u: dev %u ATA-%d, "
-			       "max %s, %Lu sectors: CHS %u/%u/%u\n",
-			       ap->id, dev->devno,
-			       ata_id_major_version(id),
-			       ata_mode_string(xfer_mask),
-			       (unsigned long long)dev->n_sectors,
-			       dev->cylinders, dev->heads, dev->sectors);
+		if (ata_msg_info(ap))
+			ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+				"max %s, %Lu sectors: CHS %u/%u/%u\n",
+				ata_id_major_version(id),
+				ata_mode_string(xfer_mask),
+				(unsigned long long)dev->n_sectors,
+				dev->cylinders, dev->heads, dev->sectors);
+		}
+
+		if (dev->id[59] & 0x100) {
+			dev->multi_count = dev->id[59] & 0xff;
+			if (ata_msg_info(ap))
+				ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
+					ap->id, dev->devno, dev->multi_count);
 	}
 
 	dev->cdb_len = 16;
@@ -1300,18 +1417,28 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 
 	/* ATAPI-specific feature tests */
 	else if (dev->class == ATA_DEV_ATAPI) {
+		char *cdb_intr_string = "";
+
 		rc = atapi_cdb_len(id);
 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
-			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
+			if (ata_msg_warn(ap))
+				ata_dev_printk(dev, KERN_WARNING,
+					       "unsupported CDB len\n");
 			rc = -EINVAL;
 			goto err_out_nosup;
 		}
 		dev->cdb_len = (unsigned int) rc;
 
+		if (ata_id_cdb_intr(dev->id)) {
+			dev->flags |= ATA_DFLAG_CDB_INTR;
+			cdb_intr_string = ", CDB intr";
+		}
+
 		/* print device info to dmesg */
-		if (print_info)
-			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
-			       ap->id, dev->devno, ata_mode_string(xfer_mask));
+		if (ata_msg_info(ap))
+			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
+				       ata_mode_string(xfer_mask),
+				       cdb_intr_string);
 	}
 
 	ap->host->max_cmd_len = 0;
@@ -1321,10 +1448,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 			ap->device[i].cdb_len);
 
 	/* limit bridge transfers to udma5, 200 sectors */
-	if (ata_dev_knobble(ap, dev)) {
-		if (print_info)
-			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
-			       ap->id, dev->devno);
+	if (ata_dev_knobble(dev)) {
+		if (ata_msg_info(ap))
+			ata_dev_printk(dev, KERN_INFO,
+				       "applying bridge limits\n");
 		dev->udma_mask &= ATA_UDMA5;
 		dev->max_sectors = ATA_MAX_SECTORS;
 	}
@@ -1332,11 +1459,15 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 	if (ap->ops->dev_config)
 		ap->ops->dev_config(ap, dev);
 
-	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
+			       __FUNCTION__, ata_chk_status(ap));
 	return 0;
 
 err_out_nosup:
-	DPRINTK("EXIT, err\n");
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG,
+			       "%s: EXIT, err\n", __FUNCTION__);
 	return rc;
 }
 
@@ -1352,79 +1483,104 @@ err_out_nosup:
  *	PCI/etc. bus probe sem.
  *
  *	RETURNS:
- *	Zero on success, non-zero on error.
+ *	Zero on success, negative errno otherwise.
  */
 
 static int ata_bus_probe(struct ata_port *ap)
 {
 	unsigned int classes[ATA_MAX_DEVICES];
-	unsigned int i, rc, found = 0;
+	int tries[ATA_MAX_DEVICES];
+	int i, rc, down_xfermask;
+	struct ata_device *dev;
 
 	ata_port_probe(ap);
 
-	/* reset and determine device classes */
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		classes[i] = ATA_DEV_UNKNOWN;
+		tries[i] = ATA_PROBE_MAX_TRIES;
 
-	if (ap->ops->probe_reset) {
-		rc = ap->ops->probe_reset(ap, classes);
-		if (rc) {
-			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
-			return rc;
-		}
-	} else {
-		ap->ops->phy_reset(ap);
+ retry:
+	down_xfermask = 0;
 
-		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
-			for (i = 0; i < ATA_MAX_DEVICES; i++)
-				classes[i] = ap->device[i].class;
+	/* reset and determine device classes */
+	ap->ops->phy_reset(ap);
 
-		ata_port_probe(ap);
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+
+		if (!(ap->flags & ATA_FLAG_DISABLED) &&
+		    dev->class != ATA_DEV_UNKNOWN)
+			classes[dev->devno] = dev->class;
+		else
+			classes[dev->devno] = ATA_DEV_NONE;
+
+		dev->class = ATA_DEV_UNKNOWN;
 	}
 
+	ata_port_probe(ap);
+
+	/* after the reset the device state is PIO 0 and the controller
+	   state is undefined. Record the mode */
+
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		if (classes[i] == ATA_DEV_UNKNOWN)
-			classes[i] = ATA_DEV_NONE;
+		ap->device[i].pio_mode = XFER_PIO_0;
 
 	/* read IDENTIFY page and configure devices */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+		dev = &ap->device[i];
 
-		dev->class = classes[i];
+		if (tries[i])
+			dev->class = classes[i];
 
-		if (!ata_dev_present(dev))
+		if (!ata_dev_enabled(dev))
 			continue;
 
-		WARN_ON(dev->id != NULL);
-		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
-			dev->class = ATA_DEV_NONE;
-			continue;
-		}
+		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
+		if (rc)
+			goto fail;
 
-		if (ata_dev_configure(ap, dev, 1)) {
-			ata_dev_disable(ap, dev);
-			continue;
-		}
+		rc = ata_dev_configure(dev, 1);
+		if (rc)
+			goto fail;
+	}
 
-		found = 1;
+	/* configure transfer mode */
+	rc = ata_set_mode(ap, &dev);
+	if (rc) {
+		down_xfermask = 1;
+		goto fail;
 	}
 
-	if (!found)
-		goto err_out_disable;
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		if (ata_dev_enabled(&ap->device[i]))
+			return 0;
 
-	if (ap->ops->set_mode)
-		ap->ops->set_mode(ap);
-	else
-		ata_set_mode(ap);
+	/* no device present, disable port */
+	ata_port_disable(ap);
+	ap->ops->port_disable(ap);
+	return -ENODEV;
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		goto err_out_disable;
+ fail:
+	switch (rc) {
+	case -EINVAL:
+	case -ENODEV:
+		tries[dev->devno] = 0;
+		break;
+	case -EIO:
+		sata_down_spd_limit(ap);
+		/* fall through */
+	default:
+		tries[dev->devno]--;
+		if (down_xfermask &&
+		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
+			tries[dev->devno] = 0;
+	}
 
-	return 0;
+	if (!tries[dev->devno]) {
+		ata_down_xfermask_limit(dev, 1);
+		ata_dev_disable(dev);
+	}
 
-err_out_disable:
-	ap->ops->port_disable(ap);
-	return -1;
+	goto retry;
 }
 
 /**
@@ -1440,7 +1596,7 @@ err_out_disable:
 
 void ata_port_probe(struct ata_port *ap)
 {
-	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+	ap->flags &= ~ATA_FLAG_DISABLED;
 }
 
 /**
@@ -1454,27 +1610,21 @@ void ata_port_probe(struct ata_port *ap)
  */
 static void sata_print_link_status(struct ata_port *ap)
 {
-	u32 sstatus, tmp;
-	const char *speed;
+	u32 sstatus, scontrol, tmp;
 
-	if (!ap->ops->scr_read)
+	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
 		return;
+	sata_scr_read(ap, SCR_CONTROL, &scontrol);
 
-	sstatus = scr_read(ap, SCR_STATUS);
-
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		tmp = (sstatus >> 4) & 0xf;
-		if (tmp & (1 << 0))
-			speed = "1.5";
-		else if (tmp & (1 << 1))
-			speed = "3.0";
-		else
-			speed = "<unknown>";
-		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
-		       ap->id, speed, sstatus);
+		ata_port_printk(ap, KERN_INFO,
+				"SATA link up %s (SStatus %X SControl %X)\n",
+				sata_spd_string(tmp), sstatus, scontrol);
 	} else {
-		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
-		       ap->id, sstatus);
+		ata_port_printk(ap, KERN_INFO,
+				"SATA link down (SStatus %X SControl %X)\n",
+				sstatus, scontrol);
 	}
 }
 
@@ -1497,17 +1647,18 @@ void __sata_phy_reset(struct ata_port *ap)
 
 	if (ap->flags & ATA_FLAG_SATA_RESET) {
 		/* issue phy wake/reset */
-		scr_write_flush(ap, SCR_CONTROL, 0x301);
+		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
 		/* Couldn't find anything in SATA I/II specs, but
 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
 		mdelay(1);
 	}
-	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
+	/* phy wake/clear reset */
+	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
 
 	/* wait for phy to become ready, if necessary */
 	do {
 		msleep(200);
-		sstatus = scr_read(ap, SCR_STATUS);
+		sata_scr_read(ap, SCR_STATUS, &sstatus);
 		if ((sstatus & 0xf) != 1)
 			break;
 	} while (time_before(jiffies, timeout));
@@ -1516,12 +1667,12 @@ void __sata_phy_reset(struct ata_port *ap)
 	sata_print_link_status(ap);
 
 	/* TODO: phy layer with polling, timeouts, etc. */
-	if (sata_dev_present(ap))
+	if (!ata_port_offline(ap))
 		ata_port_probe(ap);
 	else
 		ata_port_disable(ap);
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1546,24 +1697,24 @@ void __sata_phy_reset(struct ata_port *ap)
 void sata_phy_reset(struct ata_port *ap)
 {
 	__sata_phy_reset(ap);
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 	ata_bus_reset(ap);
 }
 
 /**
  *	ata_dev_pair - return other device on cable
- *	@ap: port
  *	@adev: device
  *
  *	Obtain the other device on the same cable, or if none is
  *	present NULL is returned
  */
 
-struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
+struct ata_device *ata_dev_pair(struct ata_device *adev)
 {
+	struct ata_port *ap = adev->ap;
 	struct ata_device *pair = &ap->device[1 - adev->devno];
-	if (!ata_dev_present(pair))
+	if (!ata_dev_enabled(pair))
 		return NULL;
 	return pair;
 }
@@ -1585,7 +1736,122 @@ void ata_port_disable(struct ata_port *ap)
 {
 	ap->device[0].class = ATA_DEV_NONE;
 	ap->device[1].class = ATA_DEV_NONE;
-	ap->flags |= ATA_FLAG_PORT_DISABLED;
+	ap->flags |= ATA_FLAG_DISABLED;
+}
+
+/**
+ *	sata_down_spd_limit - adjust SATA spd limit downward
+ *	@ap: Port to adjust SATA spd limit for
+ *
+ *	Adjust SATA spd limit of @ap downward.  Note that this
+ *	function only adjusts the limit.  The change must be applied
+ *	using sata_set_spd().
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure
+ */
+int sata_down_spd_limit(struct ata_port *ap)
+{
+	u32 sstatus, spd, mask;
+	int rc, highbit;
+
+	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
+	if (rc)
+		return rc;
+
+	mask = ap->sata_spd_limit;
+	if (mask <= 1)
+		return -EINVAL;
+	highbit = fls(mask) - 1;
+	mask &= ~(1 << highbit);
+
+	spd = (sstatus >> 4) & 0xf;
+	if (spd <= 1)
+		return -EINVAL;
+	spd--;
+	mask &= (1 << spd) - 1;
+	if (!mask)
+		return -EINVAL;
+
+	ap->sata_spd_limit = mask;
+
+	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
+			sata_spd_string(fls(mask)));
+
+	return 0;
+}
+
+static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
+{
+	u32 spd, limit;
+
+	if (ap->sata_spd_limit == UINT_MAX)
+		limit = 0;
+	else
+		limit = fls(ap->sata_spd_limit);
+
+	spd = (*scontrol >> 4) & 0xf;
+	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
+
+	return spd != limit;
+}
+
+/**
+ *	sata_set_spd_needed - is SATA spd configuration needed
+ *	@ap: Port in question
+ *
+ *	Test whether the spd limit in SControl matches
+ *	@ap->sata_spd_limit.  This function is used to determine
+ *	whether hardreset is necessary to apply SATA spd
+ *	configuration.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if SATA spd configuration is needed, 0 otherwise.
+ */
+int sata_set_spd_needed(struct ata_port *ap)
+{
+	u32 scontrol;
+
+	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
+		return 0;
+
+	return __sata_set_spd_needed(ap, &scontrol);
+}
+
+/**
+ *	sata_set_spd - set SATA spd according to spd limit
+ *	@ap: Port to set SATA spd for
+ *
+ *	Set SATA spd of @ap according to sata_spd_limit.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 if spd doesn't need to be changed, 1 if spd has been
+ *	changed.  Negative errno if SCR registers are inaccessible.
+ */
+int sata_set_spd(struct ata_port *ap)
+{
+	u32 scontrol;
+	int rc;
+
+	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+		return rc;
+
+	if (!__sata_set_spd_needed(ap, &scontrol))
+		return 0;
+
+	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+		return rc;
+
+	return 1;
 }
 
 /*
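The new spd-limit helpers above are pure mask arithmetic over the SStatus/SControl SPD field (bit 0 of the limit mask = 1.5 Gbps, bit 1 = 3.0 Gbps, matching sata_spd_string()). A standalone sketch of the lowering step in sata_down_spd_limit(), with illustrative values; this is a userspace stand-in, not code from the commit:

	#include <stdio.h>

	/* Sketch of sata_down_spd_limit()'s arithmetic; __builtin_clz
	 * stands in for the kernel's fls().
	 */
	static int down_spd_limit(unsigned int *spd_limit, unsigned int cur_spd)
	{
		unsigned int mask = *spd_limit;
		int highbit;

		if (mask <= 1)
			return -1;			/* already as low as it goes */
		highbit = 31 - __builtin_clz(mask);	/* fls(mask) - 1 */
		mask &= ~(1u << highbit);		/* drop the fastest allowed speed */

		if (cur_spd > 1)			/* also drop >= current speed */
			mask &= (1u << (cur_spd - 1)) - 1;
		if (!mask)
			return -1;

		*spd_limit = mask;
		return 0;
	}

	int main(void)
	{
		unsigned int limit = 0x3;		/* 1.5 and 3.0 Gbps allowed */

		while (down_spd_limit(&limit, 2) == 0)	/* link running at 3.0 Gbps */
			printf("new limit mask: %#x\n", limit);	/* prints 0x1, once */
		return 0;
	}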
@@ -1736,151 +2002,196 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 	return 0;
 }
 
-static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
+/**
+ *	ata_down_xfermask_limit - adjust dev xfer masks downward
+ *	@dev: Device to adjust xfer masks
+ *	@force_pio0: Force PIO0
+ *
+ *	Adjust xfer masks of @dev downward.  Note that this function
+ *	does not apply the change.  Invoking ata_set_mode() afterwards
+ *	will apply the limit.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure
+ */
+int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
+{
+	unsigned long xfer_mask;
+	int highbit;
+
+	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
+				      dev->udma_mask);
+
+	if (!xfer_mask)
+		goto fail;
+	/* don't gear down to MWDMA from UDMA, go directly to PIO */
+	if (xfer_mask & ATA_MASK_UDMA)
+		xfer_mask &= ~ATA_MASK_MWDMA;
+
+	highbit = fls(xfer_mask) - 1;
+	xfer_mask &= ~(1 << highbit);
+	if (force_pio0)
+		xfer_mask &= 1 << ATA_SHIFT_PIO;
+	if (!xfer_mask)
+		goto fail;
+
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+			    &dev->udma_mask);
+
+	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
+		       ata_mode_string(xfer_mask));
+
+	return 0;
+
+ fail:
+	return -EINVAL;
+}
+
+static int ata_dev_set_mode(struct ata_device *dev)
 {
 	unsigned int err_mask;
 	int rc;
 
+	dev->flags &= ~ATA_DFLAG_PIO;
 	if (dev->xfer_shift == ATA_SHIFT_PIO)
 		dev->flags |= ATA_DFLAG_PIO;
 
-	err_mask = ata_dev_set_xfermode(ap, dev);
+	err_mask = ata_dev_set_xfermode(dev);
 	if (err_mask) {
-		printk(KERN_ERR
-		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
-		       ap->id, err_mask);
+		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
+			       "(err_mask=0x%x)\n", err_mask);
 		return -EIO;
 	}
 
-	rc = ata_dev_revalidate(ap, dev, 0);
-	if (rc) {
-		printk(KERN_ERR
-		       "ata%u: failed to revalidate after set xfermode\n",
-		       ap->id);
+	rc = ata_dev_revalidate(dev, 0);
+	if (rc)
 		return rc;
-	}
 
 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
 		dev->xfer_shift, (int)dev->xfer_mode);
 
-	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
-	       ap->id, dev->devno,
-	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
-	return 0;
-}
-
-static int ata_host_set_pio(struct ata_port *ap)
-{
-	int i;
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-
-		if (!ata_dev_present(dev))
-			continue;
-
-		if (!dev->pio_mode) {
-			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
-			return -1;
-		}
-
-		dev->xfer_mode = dev->pio_mode;
-		dev->xfer_shift = ATA_SHIFT_PIO;
-		if (ap->ops->set_piomode)
-			ap->ops->set_piomode(ap, dev);
-	}
-
+	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
 	return 0;
 }
 
-static void ata_host_set_dma(struct ata_port *ap)
-{
-	int i;
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-
-		if (!ata_dev_present(dev) || !dev->dma_mode)
-			continue;
-
-		dev->xfer_mode = dev->dma_mode;
-		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
-		if (ap->ops->set_dmamode)
-			ap->ops->set_dmamode(ap, dev);
-	}
-}
-
 /**
  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
  *	@ap: port on which timings will be programmed
+ *	@r_failed_dev: out paramter for failed device
  *
- *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
+ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
+ *	ata_set_mode() fails, pointer to the failing device is
+ *	returned in @r_failed_dev.
  *
  *	LOCKING:
  *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
  */
-static void ata_set_mode(struct ata_port *ap)
+int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 {
-	int i, rc, used_dma = 0;
+	struct ata_device *dev;
+	int i, rc = 0, used_dma = 0, found = 0;
+
+	/* has private set_mode? */
+	if (ap->ops->set_mode) {
+		/* FIXME: make ->set_mode handle no device case and
+		 * return error code and failing device on failure.
+		 */
+		for (i = 0; i < ATA_MAX_DEVICES; i++) {
+			if (ata_dev_enabled(&ap->device[i])) {
+				ap->ops->set_mode(ap);
+				break;
+			}
+		}
+		return 0;
+	}
 
 	/* step 1: calculate xfer_mask */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
 		unsigned int pio_mask, dma_mask;
 
-		if (!ata_dev_present(dev))
-			continue;
+		dev = &ap->device[i];
 
-		ata_dev_xfermask(ap, dev);
+		if (!ata_dev_enabled(dev))
+			continue;
 
-		/* TODO: let LLDD filter dev->*_mask here */
+		ata_dev_xfermask(dev);
 
 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
 
+		found = 1;
 		if (dev->dma_mode)
 			used_dma = 1;
 	}
+	if (!found)
+		goto out;
 
 	/* step 2: always set host PIO timings */
-	rc = ata_host_set_pio(ap);
-	if (rc)
-		goto err_out;
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+		if (!ata_dev_enabled(dev))
+			continue;
+
+		if (!dev->pio_mode) {
+			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		dev->xfer_mode = dev->pio_mode;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		if (ap->ops->set_piomode)
+			ap->ops->set_piomode(ap, dev);
+	}
 
 	/* step 3: set host DMA timings */
-	ata_host_set_dma(ap);
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+
+		if (!ata_dev_enabled(dev) || !dev->dma_mode)
+			continue;
+
+		dev->xfer_mode = dev->dma_mode;
+		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
+		if (ap->ops->set_dmamode)
+			ap->ops->set_dmamode(ap, dev);
+	}
 
 	/* step 4: update devices' xfer mode */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+		dev = &ap->device[i];
 
-		if (!ata_dev_present(dev))
+		if (!ata_dev_enabled(dev))
 			continue;
 
-		if (ata_dev_set_mode(ap, dev))
-			goto err_out;
+		rc = ata_dev_set_mode(dev);
+		if (rc)
+			goto out;
 	}
 
-	/*
-	 * Record simplex status. If we selected DMA then the other
-	 * host channels are not permitted to do so.
+	/* Record simplex status. If we selected DMA then the other
+	 * host channels are not permitted to do so.
 	 */
-
 	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
 		ap->host_set->simplex_claimed = 1;
 
-	/*
-	 * Chip specific finalisation
-	 */
+	/* step5: chip specific finalisation */
 	if (ap->ops->post_set_mode)
 		ap->ops->post_set_mode(ap);
 
-	return;
-
-err_out:
-	ata_port_disable(ap);
+ out:
+	if (rc)
+		*r_failed_dev = dev;
+	return rc;
 }
 
 /**
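ata_set_mode() now reports the device that failed through @r_failed_dev, which is what lets the rewritten ata_bus_probe() earlier in this diff retry with a lowered transfer mode instead of disabling the whole port. A condensed, hypothetical caller fragment (illustration only, mirroring the "configure transfer mode" step of ata_bus_probe()):

	/* Sketch: consuming ata_set_mode()'s out-parameter */
	struct ata_device *failed_dev;
	int rc;

	rc = ata_set_mode(ap, &failed_dev);
	if (rc) {
		/* knock the failing device's xfermask down one notch,
		 * then re-run the probe/reset sequence
		 */
		ata_down_xfermask_limit(failed_dev, 0 /* force_pio0 */);
		/* ... goto retry; */
	}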
@@ -1930,8 +2241,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
 	}
 
 	if (status & ATA_BUSY)
-		printk(KERN_WARNING "ata%u is slow to respond, "
-		       "please be patient\n", ap->id);
+		ata_port_printk(ap, KERN_WARNING,
+				"port is slow to respond, please be patient\n");
 
 	timeout = timer_start + tmout;
 	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -1940,8 +2251,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
 	}
 
 	if (status & ATA_BUSY) {
-		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
-		       ap->id, tmout / HZ);
+		ata_port_printk(ap, KERN_ERR, "port failed to respond "
+				"(%lu secs)\n", tmout / HZ);
 		return 1;
 	}
 
@@ -2033,8 +2344,10 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
 	 * the bus shows 0xFF because the odd clown forgets the D7
 	 * pulldown resistor.
 	 */
-	if (ata_check_status(ap) == 0xFF)
+	if (ata_check_status(ap) == 0xFF) {
+		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
 		return AC_ERR_OTHER;
+	}
 
 	ata_bus_post_reset(ap, devmask);
 
@@ -2058,7 +2371,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
  *	Obtains host_set lock.
  *
  *	SIDE EFFECTS:
- *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ *	Sets ATA_FLAG_DISABLED if bus reset fails.
  */
 
 void ata_bus_reset(struct ata_port *ap)
@@ -2126,60 +2439,195 @@ void ata_bus_reset(struct ata_port *ap)
 	return;
 
 err_out:
-	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
+	ata_port_printk(ap, KERN_ERR, "disabling port\n");
 	ap->ops->port_disable(ap);
 
 	DPRINTK("EXIT\n");
 }
 
-static int sata_phy_resume(struct ata_port *ap)
+/**
+ *	sata_phy_debounce - debounce SATA phy status
+ *	@ap: ATA port to debounce SATA phy status for
+ *	@params: timing parameters { interval, duratinon, timeout } in msec
+ *
+ *	Make sure SStatus of @ap reaches stable state, determined by
+ *	holding the same value where DET is not 1 for @duration polled
+ *	every @interval, before @timeout.  Timeout constraints the
+ *	beginning of the stable state.  Because, after hot unplugging,
+ *	DET gets stuck at 1 on some controllers, this functions waits
+ *	until timeout then returns 0 if DET is stable at 1.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
 {
-	unsigned long timeout = jiffies + (HZ * 5);
-	u32 sstatus;
+	unsigned long interval_msec = params[0];
+	unsigned long duration = params[1] * HZ / 1000;
+	unsigned long timeout = jiffies + params[2] * HZ / 1000;
+	unsigned long last_jiffies;
+	u32 last, cur;
+	int rc;
 
-	scr_write_flush(ap, SCR_CONTROL, 0x300);
+	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
+		return rc;
+	cur &= 0xf;
 
-	/* Wait for phy to become ready, if necessary. */
-	do {
-		msleep(200);
-		sstatus = scr_read(ap, SCR_STATUS);
-		if ((sstatus & 0xf) != 1)
-			return 0;
-	} while (time_before(jiffies, timeout));
+	last = cur;
+	last_jiffies = jiffies;
 
-	return -1;
+	while (1) {
+		msleep(interval_msec);
+		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
+			return rc;
+		cur &= 0xf;
+
+		/* DET stable? */
+		if (cur == last) {
+			if (cur == 1 && time_before(jiffies, timeout))
+				continue;
+			if (time_after(jiffies, last_jiffies + duration))
+				return 0;
+			continue;
+		}
+
+		/* unstable, start over */
+		last = cur;
+		last_jiffies = jiffies;
+
+		/* check timeout */
+		if (time_after(jiffies, timeout))
+			return -EBUSY;
+	}
 }
 
 /**
- *	ata_std_probeinit - initialize probing
- *	@ap: port to be probed
+ *	sata_phy_resume - resume SATA phy
+ *	@ap: ATA port to resume SATA phy for
+ *	@params: timing parameters { interval, duratinon, timeout } in msec
+ *
+ *	Resume SATA phy of @ap and debounce it.
  *
- *	@ap is about to be probed.  Initialize it.  This function is
- *	to be used as standard callback for ata_drive_probe_reset().
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
 *
- *	NOTE!!! Do not use this function as probeinit if a low level
- *	driver implements only hardreset.  Just pass NULL as probeinit
- *	in that case.  Using this function is probably okay but doing
- *	so makes reset sequence different from the original
- *	->phy_reset implementation and Jeff nervous.  :-P
+ *	RETURNS:
+ *	0 on success, -errno on failure.
  */
-void ata_std_probeinit(struct ata_port *ap)
+int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
 {
-	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
-		sata_phy_resume(ap);
-		if (sata_dev_present(ap))
-			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	u32 scontrol;
+	int rc;
+
+	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+		return rc;
+
+	scontrol = (scontrol & 0x0f0) | 0x300;
+
+	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+		return rc;
+
+	/* Some PHYs react badly if SStatus is pounded immediately
+	 * after resuming.  Delay 200ms before debouncing.
+	 */
+	msleep(200);
+
+	return sata_phy_debounce(ap, params);
+}
+
+static void ata_wait_spinup(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	unsigned long end, secs;
+	int rc;
+
+	/* first, debounce phy if SATA */
+	if (ap->cbl == ATA_CBL_SATA) {
+		rc = sata_phy_debounce(ap, sata_deb_timing_eh);
+
+		/* if debounced successfully and offline, no need to wait */
+		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
+			return;
 	}
+
+	/* okay, let's give the drive time to spin up */
+	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
+	secs = ((end - jiffies) + HZ - 1) / HZ;
+
+	if (time_after(jiffies, end))
+		return;
+
+	if (secs > 5)
+		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
+				"(%lu secs)\n", secs);
+
+	schedule_timeout_uninterruptible(end - jiffies);
+}
+
+/**
+ *	ata_std_prereset - prepare for reset
+ *	@ap: ATA port to be reset
+ *
+ *	@ap is about to be reset.  Initialize it.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_std_prereset(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	const unsigned long *timing;
+	int rc;
+
+	/* hotplug? */
+	if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
+		if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
+			ehc->i.action |= ATA_EH_HARDRESET;
+		if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
+			ata_wait_spinup(ap);
+	}
+
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
+
+	/* if SATA, resume phy */
+	if (ap->cbl == ATA_CBL_SATA) {
+		if (ap->flags & ATA_FLAG_LOADING)
+			timing = sata_deb_timing_boot;
+		else
+			timing = sata_deb_timing_eh;
+
+		rc = sata_phy_resume(ap, timing);
+		if (rc && rc != -EOPNOTSUPP) {
+			/* phy resume failed */
+			ata_port_printk(ap, KERN_WARNING, "failed to resume "
+					"link for reset (errno=%d)\n", rc);
+			return rc;
+		}
+	}
+
+	/* Wait for !BSY if the controller can wait for the first D2H
+	 * Reg FIS and we don't know that no device is attached.
+	 */
+	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	return 0;
 }
 
 /**
  *	ata_std_softreset - reset host port via ATA SRST
  *	@ap: port to reset
- *	@verbose: fail verbosely
  *	@classes: resulting classes of attached devices
  *
- *	Reset host port using ATA SRST.  This function is to be used
- *	as standard callback for ata_drive_*_reset() functions.
+ *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
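All of the debounce machinery above is driven by the { interval, duration, timeout } tables (in msecs) added at the top of this file. A sketch of a driver prereset hook using them; ata_std_prereset() in the hunk above does essentially this, and the wrapper name my_prereset() is hypothetical:

	static int my_prereset(struct ata_port *ap)
	{
		/* boot-time probing polls fast (5ms); EH uses the more
		 * conservative 25ms/500ms/2s table
		 */
		const unsigned long *timing = (ap->flags & ATA_FLAG_LOADING) ?
			sata_deb_timing_boot : sata_deb_timing_eh;
		int rc;

		if (ap->cbl == ATA_CBL_SATA) {
			/* writes SControl DET, then debounces SStatus */
			rc = sata_phy_resume(ap, timing);
			if (rc && rc != -EOPNOTSUPP)
				return rc;
		}

		/* wait for !BSY only if a device may actually be attached */
		if (!ata_port_offline(ap))
			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

		return 0;
	}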
@@ -2187,7 +2635,7 @@ void ata_std_probeinit(struct ata_port *ap)
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
+int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
 {
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
 	unsigned int devmask = 0, err_mask;
@@ -2195,7 +2643,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2195 2643
2196 DPRINTK("ENTER\n"); 2644 DPRINTK("ENTER\n");
2197 2645
2198 if (ap->ops->scr_read && !sata_dev_present(ap)) { 2646 if (ata_port_offline(ap)) {
2199 classes[0] = ATA_DEV_NONE; 2647 classes[0] = ATA_DEV_NONE;
2200 goto out; 2648 goto out;
2201 } 2649 }
@@ -2213,11 +2661,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2213 DPRINTK("about to softreset, devmask=%x\n", devmask); 2661 DPRINTK("about to softreset, devmask=%x\n", devmask);
2214 err_mask = ata_bus_softreset(ap, devmask); 2662 err_mask = ata_bus_softreset(ap, devmask);
2215 if (err_mask) { 2663 if (err_mask) {
2216 if (verbose) 2664 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2217 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2218 ap->id, err_mask);
2219 else
2220 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2221 err_mask); 2665 err_mask);
2222 return -EIO; 2666 return -EIO;
2223 } 2667 }
@@ -2235,12 +2679,9 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2235/** 2679/**
2236 * sata_std_hardreset - reset host port via SATA phy reset 2680 * sata_std_hardreset - reset host port via SATA phy reset
2237 * @ap: port to reset 2681 * @ap: port to reset
2238 * @verbose: fail verbosely
2239 * @class: resulting class of attached device 2682 * @class: resulting class of attached device
2240 * 2683 *
2241 * SATA phy-reset host port using DET bits of SControl register. 2684 * SATA phy-reset host port using DET bits of SControl register.
2242 * This function is to be used as standard callback for
2243 * ata_drive_*_reset().
2244 * 2685 *
2245 * LOCKING: 2686 * LOCKING:
2246 * Kernel thread context (may sleep) 2687 * Kernel thread context (may sleep)
@@ -2248,35 +2689,57 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2248 * RETURNS: 2689 * RETURNS:
2249 * 0 on success, -errno otherwise. 2690 * 0 on success, -errno otherwise.
2250 */ 2691 */
2251int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class) 2692int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2252{ 2693{
2694 u32 scontrol;
2695 int rc;
2696
2253 DPRINTK("ENTER\n"); 2697 DPRINTK("ENTER\n");
2254 2698
2255 /* Issue phy wake/reset */ 2699 if (sata_set_spd_needed(ap)) {
2256 scr_write_flush(ap, SCR_CONTROL, 0x301); 2700 /* SATA spec says nothing about how to reconfigure
2701 * spd. To be on the safe side, turn off phy during
2702 * reconfiguration. This works for at least ICH7 AHCI
2703 * and Sil3124.
2704 */
2705 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2706 return rc;
2257 2707
2258 /* 2708 scontrol = (scontrol & 0x0f0) | 0x302;
2259 * Couldn't find anything in SATA I/II specs, but AHCI-1.1 2709
2710 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2711 return rc;
2712
2713 sata_set_spd(ap);
2714 }
2715
2716 /* issue phy wake/reset */
2717 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2718 return rc;
2719
2720 scontrol = (scontrol & 0x0f0) | 0x301;
2721
2722 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2723 return rc;
2724
2725 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2260 * 10.4.2 says at least 1 ms. 2726 * 10.4.2 says at least 1 ms.
2261 */ 2727 */
2262 msleep(1); 2728 msleep(1);
2263 2729
2264 /* Bring phy back */ 2730 /* bring phy back */
2265 sata_phy_resume(ap); 2731 sata_phy_resume(ap, sata_deb_timing_eh);
2266 2732
2267 /* TODO: phy layer with polling, timeouts, etc. */ 2733 /* TODO: phy layer with polling, timeouts, etc. */
2268 if (!sata_dev_present(ap)) { 2734 if (ata_port_offline(ap)) {
2269 *class = ATA_DEV_NONE; 2735 *class = ATA_DEV_NONE;
2270 DPRINTK("EXIT, link offline\n"); 2736 DPRINTK("EXIT, link offline\n");
2271 return 0; 2737 return 0;
2272 } 2738 }
2273 2739
2274 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2740 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2275 if (verbose) 2741 ata_port_printk(ap, KERN_ERR,
2276 printk(KERN_ERR "ata%u: COMRESET failed " 2742 "COMRESET failed (device not ready)\n");
2277 "(device not ready)\n", ap->id);
2278 else
2279 DPRINTK("EXIT, device not ready\n");
2280 return -EIO; 2743 return -EIO;
2281 } 2744 }
2282 2745
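
sata_std_hardreset() above edits SControl with masks like (scontrol & 0x0f0) | 0x301: the low nibble is DET, bits 7:4 are SPD (preserved by the 0x0f0 mask), and bits 11:8 are IPM, per the SATA spec's field layout. A small decoder for the three values the function family writes; the starting SPD value is made up:

#include <stdio.h>

static void show(const char *what, unsigned int scontrol)
{
	printf("%-20s SControl=0x%03x DET=%u SPD=%u IPM=%u\n", what,
	       scontrol, scontrol & 0xf, (scontrol >> 4) & 0xf,
	       (scontrol >> 8) & 0xf);
}

int main(void)
{
	unsigned int scontrol = 0x020;	/* pretend SPD limits to Gen2 */

	/* (x & 0x0f0) | 0x301: keep SPD, DET=1 starts COMRESET,
	 * IPM=3 forbids partial/slumber power states */
	show("wake/reset", (scontrol & 0x0f0) | 0x301);

	/* DET=0 ends the reset sequence (sata_phy_resume path) */
	show("resume", (scontrol & 0x0f0) | 0x300);

	/* DET=2 is what the code above writes to park the phy
	 * while reconfiguring the speed limit */
	show("off for spd change", (scontrol & 0x0f0) | 0x302);
	return 0;
}
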
@@ -2297,27 +2760,28 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2297 * the device might have been reset more than once using 2760 * the device might have been reset more than once using
2298 * different reset methods before postreset is invoked. 2761 * different reset methods before postreset is invoked.
2299 * 2762 *
2300 * This function is to be used as standard callback for
2301 * ata_drive_*_reset().
2302 *
2303 * LOCKING: 2763 * LOCKING:
2304 * Kernel thread context (may sleep) 2764 * Kernel thread context (may sleep)
2305 */ 2765 */
2306void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 2766void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2307{ 2767{
2308 DPRINTK("ENTER\n"); 2768 u32 serror;
2309 2769
2310 /* set cable type if it isn't already set */ 2770 DPRINTK("ENTER\n");
2311 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2312 ap->cbl = ATA_CBL_SATA;
2313 2771
2314 /* print link status */ 2772 /* print link status */
2315 if (ap->cbl == ATA_CBL_SATA) 2773 sata_print_link_status(ap);
2316 sata_print_link_status(ap); 2774
2775 /* clear SError */
2776 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2777 sata_scr_write(ap, SCR_ERROR, serror);
2317 2778
2318 /* re-enable interrupts */ 2779 /* re-enable interrupts */
2319 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2780 if (!ap->ops->error_handler) {
2320 ata_irq_on(ap); 2781 /* FIXME: hack. create a hook instead */
2782 if (ap->ioaddr.ctl_addr)
2783 ata_irq_on(ap);
2784 }
2321 2785
2322 /* is double-select really necessary? */ 2786 /* is double-select really necessary? */
2323 if (classes[0] != ATA_DEV_NONE) 2787 if (classes[0] != ATA_DEV_NONE)
@@ -2343,126 +2807,7 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2343} 2807}
2344 2808
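
Clearing SError in ata_std_postreset() by writing back the value just read works because the register is write-1-to-clear. A toy model of that idiom:

#include <stdio.h>

static unsigned int serror_reg = 0x00050002;	/* pretend latched errors */

static unsigned int scr_read(void)    { return serror_reg; }
static void scr_write(unsigned int v) { serror_reg &= ~v; }	/* W1C */

int main(void)
{
	unsigned int serror = scr_read();

	scr_write(serror);	/* clears exactly the bits we saw */
	printf("SError: 0x%08x -> 0x%08x\n", serror, scr_read());
	return 0;
}
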
2345/** 2809/**
2346 * ata_std_probe_reset - standard probe reset method
2347 * @ap: port to perform probe-reset
2348 * @classes: resulting classes of attached devices
2349 *
2350 * The stock off-the-shelf ->probe_reset method.
2351 *
2352 * LOCKING:
2353 * Kernel thread context (may sleep)
2354 *
2355 * RETURNS:
2356 * 0 on success, -errno otherwise.
2357 */
2358int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2359{
2360 ata_reset_fn_t hardreset;
2361
2362 hardreset = NULL;
2363 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2364 hardreset = sata_std_hardreset;
2365
2366 return ata_drive_probe_reset(ap, ata_std_probeinit,
2367 ata_std_softreset, hardreset,
2368 ata_std_postreset, classes);
2369}
2370
2371static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2372 ata_postreset_fn_t postreset,
2373 unsigned int *classes)
2374{
2375 int i, rc;
2376
2377 for (i = 0; i < ATA_MAX_DEVICES; i++)
2378 classes[i] = ATA_DEV_UNKNOWN;
2379
2380 rc = reset(ap, 0, classes);
2381 if (rc)
2382 return rc;
2383
2384 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2385 * complete and convert all ATA_DEV_UNKNOWN to
2386 * ATA_DEV_NONE.
2387 */
2388 for (i = 0; i < ATA_MAX_DEVICES; i++)
2389 if (classes[i] != ATA_DEV_UNKNOWN)
2390 break;
2391
2392 if (i < ATA_MAX_DEVICES)
2393 for (i = 0; i < ATA_MAX_DEVICES; i++)
2394 if (classes[i] == ATA_DEV_UNKNOWN)
2395 classes[i] = ATA_DEV_NONE;
2396
2397 if (postreset)
2398 postreset(ap, classes);
2399
2400 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2401}
2402
2403/**
2404 * ata_drive_probe_reset - Perform probe reset with given methods
2405 * @ap: port to reset
2406 * @probeinit: probeinit method (can be NULL)
2407 * @softreset: softreset method (can be NULL)
2408 * @hardreset: hardreset method (can be NULL)
2409 * @postreset: postreset method (can be NULL)
2410 * @classes: resulting classes of attached devices
2411 *
2412 * Reset the specified port and classify attached devices using
2413 * given methods. This function prefers softreset but tries all
2414 * possible reset sequences to reset and classify devices. This
2415 * function is intended to be used for constructing ->probe_reset
2416 * callback by low level drivers.
2417 *
2418 * Reset methods should follow the following rules.
2419 *
2420 * - Return 0 on success, -errno on failure.
2421 * - If classification is supported, fill classes[] with
2422 * recognized class codes.
2423 * - If classification is not supported, leave classes[] alone.
2424 * - If verbose is non-zero, print error message on failure;
2425 * otherwise, shut up.
2426 *
2427 * LOCKING:
2428 * Kernel thread context (may sleep)
2429 *
2430 * RETURNS:
2431 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2432 * if classification fails, and any error code from reset
2433 * methods.
2434 */
2435int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2436 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2437 ata_postreset_fn_t postreset, unsigned int *classes)
2438{
2439 int rc = -EINVAL;
2440
2441 if (probeinit)
2442 probeinit(ap);
2443
2444 if (softreset) {
2445 rc = do_probe_reset(ap, softreset, postreset, classes);
2446 if (rc == 0)
2447 return 0;
2448 }
2449
2450 if (!hardreset)
2451 return rc;
2452
2453 rc = do_probe_reset(ap, hardreset, postreset, classes);
2454 if (rc == 0 || rc != -ENODEV)
2455 return rc;
2456
2457 if (softreset)
2458 rc = do_probe_reset(ap, softreset, postreset, classes);
2459
2460 return rc;
2461}
2462
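
The removed do_probe_reset() above normalizes classification results: if at least one slot was classified, leftover ATA_DEV_UNKNOWN entries become ATA_DEV_NONE. The same logic as a standalone sketch; the enum values are arbitrary stand-ins:

#include <stdio.h>

enum dev_class { DEV_UNKNOWN, DEV_NONE, DEV_ATA, DEV_ATAPI };
#define MAX_DEVICES 2

static void normalize(enum dev_class classes[MAX_DEVICES])
{
	int i, any_known = 0;

	for (i = 0; i < MAX_DEVICES; i++)
		if (classes[i] != DEV_UNKNOWN)
			any_known = 1;

	/* classification happened: unknowns mean "nothing there" */
	if (any_known)
		for (i = 0; i < MAX_DEVICES; i++)
			if (classes[i] == DEV_UNKNOWN)
				classes[i] = DEV_NONE;
}

int main(void)
{
	enum dev_class classes[MAX_DEVICES] = { DEV_ATA, DEV_UNKNOWN };

	normalize(classes);	/* slave slot becomes DEV_NONE */
	printf("classes: %d %d\n", classes[0], classes[1]);
	return 0;
}
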
2463/**
2464 * ata_dev_same_device - Determine whether new ID matches configured device 2810 * ata_dev_same_device - Determine whether new ID matches configured device
2465 * @ap: port on which the device to compare against resides
2466 * @dev: device to compare against 2811 * @dev: device to compare against
2467 * @new_class: class of the new device 2812 * @new_class: class of the new device
2468 * @new_id: IDENTIFY page of the new device 2813 * @new_id: IDENTIFY page of the new device
@@ -2477,17 +2822,16 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2477 * RETURNS: 2822 * RETURNS:
2478 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 2823 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2479 */ 2824 */
2480static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, 2825static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2481 unsigned int new_class, const u16 *new_id) 2826 const u16 *new_id)
2482{ 2827{
2483 const u16 *old_id = dev->id; 2828 const u16 *old_id = dev->id;
2484 unsigned char model[2][41], serial[2][21]; 2829 unsigned char model[2][41], serial[2][21];
2485 u64 new_n_sectors; 2830 u64 new_n_sectors;
2486 2831
2487 if (dev->class != new_class) { 2832 if (dev->class != new_class) {
2488 printk(KERN_INFO 2833 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2489 "ata%u: dev %u class mismatch %d != %d\n", 2834 dev->class, new_class);
2490 ap->id, dev->devno, dev->class, new_class);
2491 return 0; 2835 return 0;
2492 } 2836 }
2493 2837
@@ -2498,24 +2842,22 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2498 new_n_sectors = ata_id_n_sectors(new_id); 2842 new_n_sectors = ata_id_n_sectors(new_id);
2499 2843
2500 if (strcmp(model[0], model[1])) { 2844 if (strcmp(model[0], model[1])) {
2501 printk(KERN_INFO 2845 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2502 "ata%u: dev %u model number mismatch '%s' != '%s'\n", 2846 "'%s' != '%s'\n", model[0], model[1]);
2503 ap->id, dev->devno, model[0], model[1]);
2504 return 0; 2847 return 0;
2505 } 2848 }
2506 2849
2507 if (strcmp(serial[0], serial[1])) { 2850 if (strcmp(serial[0], serial[1])) {
2508 printk(KERN_INFO 2851 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2509 "ata%u: dev %u serial number mismatch '%s' != '%s'\n", 2852 "'%s' != '%s'\n", serial[0], serial[1]);
2510 ap->id, dev->devno, serial[0], serial[1]);
2511 return 0; 2853 return 0;
2512 } 2854 }
2513 2855
2514 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { 2856 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2515 printk(KERN_INFO 2857 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2516 "ata%u: dev %u n_sectors mismatch %llu != %llu\n", 2858 "%llu != %llu\n",
2517 ap->id, dev->devno, (unsigned long long)dev->n_sectors, 2859 (unsigned long long)dev->n_sectors,
2518 (unsigned long long)new_n_sectors); 2860 (unsigned long long)new_n_sectors);
2519 return 0; 2861 return 0;
2520 } 2862 }
2521 2863
@@ -2524,7 +2866,6 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2524 2866
2525/** 2867/**
2526 * ata_dev_revalidate - Revalidate ATA device 2868 * ata_dev_revalidate - Revalidate ATA device
2527 * @ap: port on which the device to revalidate resides
2528 * @dev: device to revalidate 2869 * @dev: device to revalidate
2529 * @post_reset: is this revalidation after reset? 2870 * @post_reset: is this revalidation after reset?
2530 * 2871 *
@@ -2537,40 +2878,37 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2537 * RETURNS: 2878 * RETURNS:
2538 * 0 on success, negative errno otherwise 2879 * 0 on success, negative errno otherwise
2539 */ 2880 */
2540int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 2881int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2541 int post_reset)
2542{ 2882{
2543 unsigned int class; 2883 unsigned int class = dev->class;
2544 u16 *id; 2884 u16 *id = (void *)dev->ap->sector_buf;
2545 int rc; 2885 int rc;
2546 2886
2547 if (!ata_dev_present(dev)) 2887 if (!ata_dev_enabled(dev)) {
2548 return -ENODEV; 2888 rc = -ENODEV;
2549 2889 goto fail;
2550 class = dev->class; 2890 }
2551 id = NULL;
2552 2891
2553 /* allocate & read ID data */ 2892 /* read ID data */
2554 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); 2893 rc = ata_dev_read_id(dev, &class, post_reset, id);
2555 if (rc) 2894 if (rc)
2556 goto fail; 2895 goto fail;
2557 2896
2558 /* is the device still there? */ 2897 /* is the device still there? */
2559 if (!ata_dev_same_device(ap, dev, class, id)) { 2898 if (!ata_dev_same_device(dev, class, id)) {
2560 rc = -ENODEV; 2899 rc = -ENODEV;
2561 goto fail; 2900 goto fail;
2562 } 2901 }
2563 2902
2564 kfree(dev->id); 2903 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2565 dev->id = id;
2566 2904
2567 /* configure device according to the new ID */ 2905 /* configure device according to the new ID */
2568 return ata_dev_configure(ap, dev, 0); 2906 rc = ata_dev_configure(dev, 0);
2907 if (rc == 0)
2908 return 0;
2569 2909
2570 fail: 2910 fail:
2571 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", 2911 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2572 ap->id, dev->devno, rc);
2573 kfree(id);
2574 return rc; 2912 return rc;
2575} 2913}
2576 2914
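
ata_dev_same_device() above treats a device as unchanged only if class, model string, serial string and (for ATA disks) sector count all match the cached identity. A standalone model of that check; the struct and sample values are invented for illustration:

#include <stdio.h>
#include <string.h>

struct id_page {
	unsigned int class;
	char model[41];
	char serial[21];
	unsigned long long n_sectors;
};

static int same_device(const struct id_page *old, const struct id_page *new)
{
	if (old->class != new->class)          return 0;
	if (strcmp(old->model, new->model))    return 0;
	if (strcmp(old->serial, new->serial))  return 0;
	if (old->n_sectors != new->n_sectors)  return 0;
	return 1;	/* same identity: safe to keep configuration */
}

int main(void)
{
	struct id_page a = { 1, "EXAMPLE-DISK", "5JV123", 156301488ULL };
	struct id_page b = a;

	b.serial[0] = 'X';	/* a different drive was swapped in */
	printf("same device: %d\n", same_device(&a, &b));
	return 0;
}
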
@@ -2626,6 +2964,14 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2626 unsigned int nlen, rlen; 2964 unsigned int nlen, rlen;
2627 int i; 2965 int i;
2628 2966
2967 /* We don't support polling DMA.
2968 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
2969 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
2970 */
2971 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
2972 (dev->flags & ATA_DFLAG_CDB_INTR))
2973 return 1;
2974
2629 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2975 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2630 sizeof(model_num)); 2976 sizeof(model_num));
2631 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS, 2977 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
@@ -2646,7 +2992,6 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2646 2992
2647/** 2993/**
2648 * ata_dev_xfermask - Compute supported xfermask of the given device 2994 * ata_dev_xfermask - Compute supported xfermask of the given device
2649 * @ap: Port on which the device to compute xfermask for resides
2650 * @dev: Device to compute xfermask for 2995 * @dev: Device to compute xfermask for
2651 * 2996 *
2652 * Compute supported xfermask of @dev and store it in 2997 * Compute supported xfermask of @dev and store it in
@@ -2661,49 +3006,61 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2661 * LOCKING: 3006 * LOCKING:
2662 * None. 3007 * None.
2663 */ 3008 */
2664static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) 3009static void ata_dev_xfermask(struct ata_device *dev)
2665{ 3010{
3011 struct ata_port *ap = dev->ap;
2666 struct ata_host_set *hs = ap->host_set; 3012 struct ata_host_set *hs = ap->host_set;
2667 unsigned long xfer_mask; 3013 unsigned long xfer_mask;
2668 int i; 3014 int i;
2669 3015
2670 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 3016 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2671 ap->udma_mask); 3017 ap->mwdma_mask, ap->udma_mask);
3018
3019 /* Apply cable rule here. Don't apply it early because when
3020 * we handle hot plug the cable type can itself change.
3021 */
3022 if (ap->cbl == ATA_CBL_PATA40)
3023 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2672 3024
2673 /* FIXME: Use port-wide xfermask for now */ 3025 /* FIXME: Use port-wide xfermask for now */
2674 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3026 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2675 struct ata_device *d = &ap->device[i]; 3027 struct ata_device *d = &ap->device[i];
2676 if (!ata_dev_present(d)) 3028
3029 if (ata_dev_absent(d))
2677 continue; 3030 continue;
2678 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, 3031
2679 d->udma_mask); 3032 if (ata_dev_disabled(d)) {
3033 /* to avoid violating device selection timing */
3034 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3035 UINT_MAX, UINT_MAX);
3036 continue;
3037 }
3038
3039 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3040 d->mwdma_mask, d->udma_mask);
2680 xfer_mask &= ata_id_xfermask(d->id); 3041 xfer_mask &= ata_id_xfermask(d->id);
2681 if (ata_dma_blacklisted(d)) 3042 if (ata_dma_blacklisted(d))
2682 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3043 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2683 /* Apply cable rule here. Don't apply it early because when
2684 we handle hot plug the cable type can itself change */
2685 if (ap->cbl == ATA_CBL_PATA40)
2686 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2687 } 3044 }
2688 3045
2689 if (ata_dma_blacklisted(dev)) 3046 if (ata_dma_blacklisted(dev))
2690 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " 3047 ata_dev_printk(dev, KERN_WARNING,
2691 "disabling DMA\n", ap->id, dev->devno); 3048 "device is on DMA blacklist, disabling DMA\n");
2692 3049
2693 if (hs->flags & ATA_HOST_SIMPLEX) { 3050 if (hs->flags & ATA_HOST_SIMPLEX) {
2694 if (hs->simplex_claimed) 3051 if (hs->simplex_claimed)
2695 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3052 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2696 } 3053 }
3054
2697 if (ap->ops->mode_filter) 3055 if (ap->ops->mode_filter)
2698 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); 3056 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2699 3057
2700 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3058 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
2701 &dev->udma_mask); 3059 &dev->mwdma_mask, &dev->udma_mask);
2702} 3060}
2703 3061
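
The xfermask above is a single bitmap with separate PIO, MWDMA and UDMA fields, which is why the 40-wire cable rule can drop UDMA modes above 2 with one mask. A standalone model of the bit math; the shift values are assumptions chosen to match the 0xF8 << ATA_SHIFT_UDMA idiom, not copied from ata.h:

#include <stdio.h>

#define SHIFT_PIO    0	/* assumed field layout, illustration only */
#define SHIFT_MWDMA  5
#define SHIFT_UDMA   8

static unsigned long pack(unsigned long pio, unsigned long mwdma,
			  unsigned long udma)
{
	return (pio << SHIFT_PIO) | (mwdma << SHIFT_MWDMA) |
	       (udma << SHIFT_UDMA);
}

int main(void)
{
	/* device claims PIO0-4, MWDMA0-2, UDMA0-5 */
	unsigned long xfer_mask = pack(0x1f, 0x07, 0x3f);
	unsigned long cable_ok, pio_only;

	/* the 40-wire cable rule above: knock out UDMA modes > 2 */
	cable_ok = xfer_mask & ~(0xF8UL << SHIFT_UDMA);

	/* DMA blacklisting above: strip MWDMA and UDMA entirely */
	pio_only = cable_ok & ~((0x07UL << SHIFT_MWDMA) |
				(0x7fUL << SHIFT_UDMA));

	printf("0x%04lx -> cable-limited 0x%04lx -> pio-only 0x%04lx\n",
	       xfer_mask, cable_ok, pio_only);
	return 0;
}
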
2704/** 3062/**
2705 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 3063 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2706 * @ap: Port associated with device @dev
2707 * @dev: Device to which command will be sent 3064 * @dev: Device to which command will be sent
2708 * 3065 *
2709 * Issue SET FEATURES - XFER MODE command to device @dev 3066 * Issue SET FEATURES - XFER MODE command to device @dev
@@ -2716,8 +3073,7 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2716 * 0 on success, AC_ERR_* mask otherwise. 3073 * 0 on success, AC_ERR_* mask otherwise.
2717 */ 3074 */
2718 3075
2719static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 3076static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
2720 struct ata_device *dev)
2721{ 3077{
2722 struct ata_taskfile tf; 3078 struct ata_taskfile tf;
2723 unsigned int err_mask; 3079 unsigned int err_mask;
@@ -2725,14 +3081,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2725 /* set up set-features taskfile */ 3081 /* set up set-features taskfile */
2726 DPRINTK("set features - xfer mode\n"); 3082 DPRINTK("set features - xfer mode\n");
2727 3083
2728 ata_tf_init(ap, &tf, dev->devno); 3084 ata_tf_init(dev, &tf);
2729 tf.command = ATA_CMD_SET_FEATURES; 3085 tf.command = ATA_CMD_SET_FEATURES;
2730 tf.feature = SETFEATURES_XFER; 3086 tf.feature = SETFEATURES_XFER;
2731 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3087 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2732 tf.protocol = ATA_PROT_NODATA; 3088 tf.protocol = ATA_PROT_NODATA;
2733 tf.nsect = dev->xfer_mode; 3089 tf.nsect = dev->xfer_mode;
2734 3090
2735 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3091 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2736 3092
2737 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3093 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2738 return err_mask; 3094 return err_mask;
@@ -2740,7 +3096,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2740 3096
2741/** 3097/**
2742 * ata_dev_init_params - Issue INIT DEV PARAMS command 3098 * ata_dev_init_params - Issue INIT DEV PARAMS command
2743 * @ap: Port associated with device @dev
2744 * @dev: Device to which command will be sent 3099 * @dev: Device to which command will be sent
2745 * @heads: Number of heads (taskfile parameter) 3100 * @heads: Number of heads (taskfile parameter)
2746 * @sectors: Number of sectors (taskfile parameter) 3101 * @sectors: Number of sectors (taskfile parameter)
@@ -2751,11 +3106,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2751 * RETURNS: 3106 * RETURNS:
2752 * 0 on success, AC_ERR_* mask otherwise. 3107 * 0 on success, AC_ERR_* mask otherwise.
2753 */ 3108 */
2754 3109static unsigned int ata_dev_init_params(struct ata_device *dev,
2755static unsigned int ata_dev_init_params(struct ata_port *ap, 3110 u16 heads, u16 sectors)
2756 struct ata_device *dev,
2757 u16 heads,
2758 u16 sectors)
2759{ 3111{
2760 struct ata_taskfile tf; 3112 struct ata_taskfile tf;
2761 unsigned int err_mask; 3113 unsigned int err_mask;
@@ -2767,14 +3119,14 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
2767 /* set up init dev params taskfile */ 3119 /* set up init dev params taskfile */
2768 DPRINTK("init dev params \n"); 3120 DPRINTK("init dev params \n");
2769 3121
2770 ata_tf_init(ap, &tf, dev->devno); 3122 ata_tf_init(dev, &tf);
2771 tf.command = ATA_CMD_INIT_DEV_PARAMS; 3123 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2772 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3124 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2773 tf.protocol = ATA_PROT_NODATA; 3125 tf.protocol = ATA_PROT_NODATA;
2774 tf.nsect = sectors; 3126 tf.nsect = sectors;
2775 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 3127 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2776 3128
2777 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3129 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2778 3130
2779 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3131 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2780 return err_mask; 3132 return err_mask;
@@ -2957,6 +3309,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2957 qc->n_elem = 1; 3309 qc->n_elem = 1;
2958 qc->orig_n_elem = 1; 3310 qc->orig_n_elem = 1;
2959 qc->buf_virt = buf; 3311 qc->buf_virt = buf;
3312 qc->nbytes = buflen;
2960 3313
2961 sg = qc->__sg; 3314 sg = qc->__sg;
2962 sg_init_one(sg, buf, buflen); 3315 sg_init_one(sg, buf, buflen);
@@ -3140,134 +3493,6 @@ skip_map:
3140} 3493}
3141 3494
3142/** 3495/**
3143 * ata_poll_qc_complete - turn irq back on and finish qc
3144 * @qc: Command to complete
3145 * @err_mask: ATA status register content
3146 *
3147 * LOCKING:
3148 * None. (grabs host lock)
3149 */
3150
3151void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3152{
3153 struct ata_port *ap = qc->ap;
3154 unsigned long flags;
3155
3156 spin_lock_irqsave(&ap->host_set->lock, flags);
3157 ap->flags &= ~ATA_FLAG_NOINTR;
3158 ata_irq_on(ap);
3159 ata_qc_complete(qc);
3160 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3161}
3162
3163/**
3164 * ata_pio_poll - poll using PIO, depending on current state
3165 * @ap: the target ata_port
3166 *
3167 * LOCKING:
3168 * None. (executing in kernel thread context)
3169 *
3170 * RETURNS:
3171 * timeout value to use
3172 */
3173
3174static unsigned long ata_pio_poll(struct ata_port *ap)
3175{
3176 struct ata_queued_cmd *qc;
3177 u8 status;
3178 unsigned int poll_state = HSM_ST_UNKNOWN;
3179 unsigned int reg_state = HSM_ST_UNKNOWN;
3180
3181 qc = ata_qc_from_tag(ap, ap->active_tag);
3182 WARN_ON(qc == NULL);
3183
3184 switch (ap->hsm_task_state) {
3185 case HSM_ST:
3186 case HSM_ST_POLL:
3187 poll_state = HSM_ST_POLL;
3188 reg_state = HSM_ST;
3189 break;
3190 case HSM_ST_LAST:
3191 case HSM_ST_LAST_POLL:
3192 poll_state = HSM_ST_LAST_POLL;
3193 reg_state = HSM_ST_LAST;
3194 break;
3195 default:
3196 BUG();
3197 break;
3198 }
3199
3200 status = ata_chk_status(ap);
3201 if (status & ATA_BUSY) {
3202 if (time_after(jiffies, ap->pio_task_timeout)) {
3203 qc->err_mask |= AC_ERR_TIMEOUT;
3204 ap->hsm_task_state = HSM_ST_TMOUT;
3205 return 0;
3206 }
3207 ap->hsm_task_state = poll_state;
3208 return ATA_SHORT_PAUSE;
3209 }
3210
3211 ap->hsm_task_state = reg_state;
3212 return 0;
3213}
3214
3215/**
3216 * ata_pio_complete - check if drive is busy or idle
3217 * @ap: the target ata_port
3218 *
3219 * LOCKING:
3220 * None. (executing in kernel thread context)
3221 *
3222 * RETURNS:
3223 * Non-zero if qc completed, zero otherwise.
3224 */
3225
3226static int ata_pio_complete (struct ata_port *ap)
3227{
3228 struct ata_queued_cmd *qc;
3229 u8 drv_stat;
3230
3231 /*
3232 * This is purely heuristic. This is a fast path. Sometimes when
3233 * we enter, BSY will be cleared in a chk-status or two. If not,
3234 * the drive is probably seeking or something. Snooze for a couple
3235 * msecs, then chk-status again. If still busy, fall back to
3236 * HSM_ST_POLL state.
3237 */
3238 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3239 if (drv_stat & ATA_BUSY) {
3240 msleep(2);
3241 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3242 if (drv_stat & ATA_BUSY) {
3243 ap->hsm_task_state = HSM_ST_LAST_POLL;
3244 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3245 return 0;
3246 }
3247 }
3248
3249 qc = ata_qc_from_tag(ap, ap->active_tag);
3250 WARN_ON(qc == NULL);
3251
3252 drv_stat = ata_wait_idle(ap);
3253 if (!ata_ok(drv_stat)) {
3254 qc->err_mask |= __ac_err_mask(drv_stat);
3255 ap->hsm_task_state = HSM_ST_ERR;
3256 return 0;
3257 }
3258
3259 ap->hsm_task_state = HSM_ST_IDLE;
3260
3261 WARN_ON(qc->err_mask);
3262 ata_poll_qc_complete(qc);
3263
3264 /* another command may start at this point */
3265
3266 return 1;
3267}
3268
3269
3270/**
3271 * swap_buf_le16 - swap halves of 16-bit words in place 3496 * swap_buf_le16 - swap halves of 16-bit words in place
3272 * @buf: Buffer to swap 3497 * @buf: Buffer to swap
3273 * @buf_words: Number of 16-bit words in buffer. 3498 * @buf_words: Number of 16-bit words in buffer.
@@ -3291,7 +3516,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3291 3516
3292/** 3517/**
3293 * ata_mmio_data_xfer - Transfer data by MMIO 3518 * ata_mmio_data_xfer - Transfer data by MMIO
3294 * @ap: port to read/write 3519 * @adev: device for this I/O
3295 * @buf: data buffer 3520 * @buf: data buffer
3296 * @buflen: buffer length 3521 * @buflen: buffer length
3297 * @write_data: read/write 3522 * @write_data: read/write
@@ -3302,9 +3527,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3302 * Inherited from caller. 3527 * Inherited from caller.
3303 */ 3528 */
3304 3529
3305static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 3530void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3306 unsigned int buflen, int write_data) 3531 unsigned int buflen, int write_data)
3307{ 3532{
3533 struct ata_port *ap = adev->ap;
3308 unsigned int i; 3534 unsigned int i;
3309 unsigned int words = buflen >> 1; 3535 unsigned int words = buflen >> 1;
3310 u16 *buf16 = (u16 *) buf; 3536 u16 *buf16 = (u16 *) buf;
@@ -3336,7 +3562,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3336 3562
3337/** 3563/**
3338 * ata_pio_data_xfer - Transfer data by PIO 3564 * ata_pio_data_xfer - Transfer data by PIO
3339 * @ap: port to read/write 3565 * @adev: device to target
3340 * @buf: data buffer 3566 * @buf: data buffer
3341 * @buflen: buffer length 3567 * @buflen: buffer length
3342 * @write_data: read/write 3568 * @write_data: read/write
@@ -3347,9 +3573,10 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3347 * Inherited from caller. 3573 * Inherited from caller.
3348 */ 3574 */
3349 3575
3350static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 3576void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3351 unsigned int buflen, int write_data) 3577 unsigned int buflen, int write_data)
3352{ 3578{
3579 struct ata_port *ap = adev->ap;
3353 unsigned int words = buflen >> 1; 3580 unsigned int words = buflen >> 1;
3354 3581
3355 /* Transfer multiple of 2 bytes */ 3582 /* Transfer multiple of 2 bytes */
@@ -3374,38 +3601,29 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3374} 3601}
3375 3602
3376/** 3603/**
3377 * ata_data_xfer - Transfer data from/to the data register. 3604 * ata_pio_data_xfer_noirq - Transfer data by PIO
3378 * @ap: port to read/write 3605 * @adev: device to target
3379 * @buf: data buffer 3606 * @buf: data buffer
3380 * @buflen: buffer length 3607 * @buflen: buffer length
3381 * @do_write: read/write 3608 * @write_data: read/write
3382 * 3609 *
3383 * Transfer data from/to the device data register. 3610 * Transfer data from/to the device data register by PIO. Do the
3611 * transfer with interrupts disabled.
3384 * 3612 *
3385 * LOCKING: 3613 * LOCKING:
3386 * Inherited from caller. 3614 * Inherited from caller.
3387 */ 3615 */
3388 3616
3389static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 3617void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3390 unsigned int buflen, int do_write) 3618 unsigned int buflen, int write_data)
3391{ 3619{
3392 /* Make the crap hardware pay the costs not the good stuff */ 3620 unsigned long flags;
3393 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) { 3621 local_irq_save(flags);
3394 unsigned long flags; 3622 ata_pio_data_xfer(adev, buf, buflen, write_data);
3395 local_irq_save(flags); 3623 local_irq_restore(flags);
3396 if (ap->flags & ATA_FLAG_MMIO)
3397 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3398 else
3399 ata_pio_data_xfer(ap, buf, buflen, do_write);
3400 local_irq_restore(flags);
3401 } else {
3402 if (ap->flags & ATA_FLAG_MMIO)
3403 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3404 else
3405 ata_pio_data_xfer(ap, buf, buflen, do_write);
3406 }
3407} 3624}
3408 3625
3626
3409/** 3627/**
3410 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. 3628 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3411 * @qc: Command on going 3629 * @qc: Command on going
@@ -3435,7 +3653,24 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3435 page = nth_page(page, (offset >> PAGE_SHIFT)); 3653 page = nth_page(page, (offset >> PAGE_SHIFT));
3436 offset %= PAGE_SIZE; 3654 offset %= PAGE_SIZE;
3437 3655
3438 buf = kmap(page) + offset; 3656 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3657
3658 if (PageHighMem(page)) {
3659 unsigned long flags;
3660
3661 /* FIXME: use a bounce buffer */
3662 local_irq_save(flags);
3663 buf = kmap_atomic(page, KM_IRQ0);
3664
3665 /* do the actual data transfer */
3666 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3667
3668 kunmap_atomic(buf, KM_IRQ0);
3669 local_irq_restore(flags);
3670 } else {
3671 buf = page_address(page);
3672 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3673 }
3439 3674
3440 qc->cursect++; 3675 qc->cursect++;
3441 qc->cursg_ofs++; 3676 qc->cursg_ofs++;
@@ -3444,14 +3679,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3444 qc->cursg++; 3679 qc->cursg++;
3445 qc->cursg_ofs = 0; 3680 qc->cursg_ofs = 0;
3446 } 3681 }
3682}
3447 3683
3448 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3684/**
3685 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3686 * @qc: Command on going
3687 *
3688 * Transfer one or more sectors of ATA_SECT_SIZE bytes from/to the
3689 * ATA device for the DRQ request.
3690 *
3691 * LOCKING:
3692 * Inherited from caller.
3693 */
3694
3695static void ata_pio_sectors(struct ata_queued_cmd *qc)
3696{
3697 if (is_multi_taskfile(&qc->tf)) {
3698 /* READ/WRITE MULTIPLE */
3699 unsigned int nsect;
3449 3700
3450 /* do the actual data transfer */ 3701 WARN_ON(qc->dev->multi_count == 0);
3451 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3452 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3453 3702
3454 kunmap(page); 3703 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3704 while (nsect--)
3705 ata_pio_sector(qc);
3706 } else
3707 ata_pio_sector(qc);
3708}
3709
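
ata_pio_sectors() above caps each DRQ block at the device's MULTIPLE setting. The chunking arithmetic, standalone, with invented sector counts:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int nsect_total = 13;	/* sectors in the command */
	unsigned int cursect = 0;	/* sectors done so far */
	unsigned int multi_count = 8;	/* device's MULTIPLE setting */

	while (cursect < nsect_total) {
		/* one DRQ block: up to multi_count sectors */
		unsigned int n = min_u(nsect_total - cursect, multi_count);

		printf("DRQ block: %u sectors\n", n);	/* 8, then 5 */
		cursect += n;
	}
	return 0;
}
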
3710/**
3711 * atapi_send_cdb - Write CDB bytes to hardware
3712 * @ap: Port to which ATAPI device is attached.
3713 * @qc: Taskfile currently active
3714 *
3715 * When device has indicated its readiness to accept
3716 * a CDB, this function is called. Send the CDB.
3717 *
3718 * LOCKING:
3719 * caller.
3720 */
3721
3722static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3723{
3724 /* send SCSI cdb */
3725 DPRINTK("send cdb\n");
3726 WARN_ON(qc->dev->cdb_len < 12);
3727
3728 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3729 ata_altstatus(ap); /* flush */
3730
3731 switch (qc->tf.protocol) {
3732 case ATA_PROT_ATAPI:
3733 ap->hsm_task_state = HSM_ST;
3734 break;
3735 case ATA_PROT_ATAPI_NODATA:
3736 ap->hsm_task_state = HSM_ST_LAST;
3737 break;
3738 case ATA_PROT_ATAPI_DMA:
3739 ap->hsm_task_state = HSM_ST_LAST;
3740 /* initiate bmdma */
3741 ap->ops->bmdma_start(qc);
3742 break;
3743 }
3455} 3744}
3456 3745
3457/** 3746/**
@@ -3492,11 +3781,11 @@ next_sg:
3492 unsigned int i; 3781 unsigned int i;
3493 3782
3494 if (words) /* warning if bytes > 1 */ 3783 if (words) /* warning if bytes > 1 */
3495 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3784 ata_dev_printk(qc->dev, KERN_WARNING,
3496 ap->id, bytes); 3785 "%u bytes trailing data\n", bytes);
3497 3786
3498 for (i = 0; i < words; i++) 3787 for (i = 0; i < words; i++)
3499 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3788 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3500 3789
3501 ap->hsm_task_state = HSM_ST_LAST; 3790 ap->hsm_task_state = HSM_ST_LAST;
3502 return; 3791 return;
@@ -3517,7 +3806,24 @@ next_sg:
3517 /* don't cross page boundaries */ 3806 /* don't cross page boundaries */
3518 count = min(count, (unsigned int)PAGE_SIZE - offset); 3807 count = min(count, (unsigned int)PAGE_SIZE - offset);
3519 3808
3520 buf = kmap(page) + offset; 3809 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3810
3811 if (PageHighMem(page)) {
3812 unsigned long flags;
3813
3814 /* FIXME: use bounce buffer */
3815 local_irq_save(flags);
3816 buf = kmap_atomic(page, KM_IRQ0);
3817
3818 /* do the actual data transfer */
3819 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3820
3821 kunmap_atomic(buf, KM_IRQ0);
3822 local_irq_restore(flags);
3823 } else {
3824 buf = page_address(page);
3825 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3826 }
3521 3827
3522 bytes -= count; 3828 bytes -= count;
3523 qc->curbytes += count; 3829 qc->curbytes += count;
@@ -3528,13 +3834,6 @@ next_sg:
3528 qc->cursg_ofs = 0; 3834 qc->cursg_ofs = 0;
3529 } 3835 }
3530 3836
3531 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3532
3533 /* do the actual data transfer */
3534 ata_data_xfer(ap, buf, count, do_write);
3535
3536 kunmap(page);
3537
3538 if (bytes) 3837 if (bytes)
3539 goto next_sg; 3838 goto next_sg;
3540} 3839}
@@ -3556,10 +3855,16 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3556 unsigned int ireason, bc_lo, bc_hi, bytes; 3855 unsigned int ireason, bc_lo, bc_hi, bytes;
3557 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 3856 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3558 3857
3559 ap->ops->tf_read(ap, &qc->tf); 3858 /* Abuse qc->result_tf for temp storage of intermediate TF
3560 ireason = qc->tf.nsect; 3859 * here to save some kernel stack usage.
3561 bc_lo = qc->tf.lbam; 3860 * For normal completion, qc->result_tf is not relevant. For
3562 bc_hi = qc->tf.lbah; 3861 * error, qc->result_tf is later overwritten by ata_qc_complete().
3862 * So, the correctness of qc->result_tf is not affected.
3863 */
3864 ap->ops->tf_read(ap, &qc->result_tf);
3865 ireason = qc->result_tf.nsect;
3866 bc_lo = qc->result_tf.lbam;
3867 bc_hi = qc->result_tf.lbah;
3563 bytes = (bc_hi << 8) | bc_lo; 3868 bytes = (bc_hi << 8) | bc_lo;
3564 3869
3565 /* shall be cleared to zero, indicating xfer of data */ 3870 /* shall be cleared to zero, indicating xfer of data */
@@ -3571,307 +3876,365 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3571 if (do_write != i_write) 3876 if (do_write != i_write)
3572 goto err_out; 3877 goto err_out;
3573 3878
3879 VPRINTK("ata%u: xferring %d bytes\n", ap->id, bytes);
3880
3574 __atapi_pio_bytes(qc, bytes); 3881 __atapi_pio_bytes(qc, bytes);
3575 3882
3576 return; 3883 return;
3577 3884
3578err_out: 3885err_out:
3579 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3886 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3580 ap->id, dev->devno);
3581 qc->err_mask |= AC_ERR_HSM; 3887 qc->err_mask |= AC_ERR_HSM;
3582 ap->hsm_task_state = HSM_ST_ERR; 3888 ap->hsm_task_state = HSM_ST_ERR;
3583} 3889}
3584 3890
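
The ATAPI byte count above is assembled from two 8-bit taskfile registers (lbam low, lbah high). A quick standalone check of that assembly, with example register values:

#include <stdio.h>

int main(void)
{
	unsigned int bc_lo = 0x00, bc_hi = 0x08;	/* example values */
	unsigned int bytes = (bc_hi << 8) | bc_lo;

	printf("byte count = %u\n", bytes);		/* 2048 */
	return 0;
}
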
3585/** 3891/**
3586 * ata_pio_block - start PIO on a block 3892 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3587 * @ap: the target ata_port 3893 * @ap: the target ata_port
3894 * @qc: qc on going
3588 * 3895 *
3589 * LOCKING: 3896 * RETURNS:
3590 * None. (executing in kernel thread context) 3897 * 1 if ok in workqueue, 0 otherwise.
3591 */ 3898 */
3592 3899
3593static void ata_pio_block(struct ata_port *ap) 3900static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3594{ 3901{
3595 struct ata_queued_cmd *qc; 3902 if (qc->tf.flags & ATA_TFLAG_POLLING)
3596 u8 status; 3903 return 1;
3597 3904
3598 /* 3905 if (ap->hsm_task_state == HSM_ST_FIRST) {
3599 * This is purely heuristic. This is a fast path. 3906 if (qc->tf.protocol == ATA_PROT_PIO &&
3600 * Sometimes when we enter, BSY will be cleared in 3907 (qc->tf.flags & ATA_TFLAG_WRITE))
3601 * a chk-status or two. If not, the drive is probably seeking 3908 return 1;
3602 * or something. Snooze for a couple msecs, then 3909
3603 * chk-status again. If still busy, fall back to 3910 if (is_atapi_taskfile(&qc->tf) &&
3604 * HSM_ST_POLL state. 3911 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3605 */ 3912 return 1;
3606 status = ata_busy_wait(ap, ATA_BUSY, 5);
3607 if (status & ATA_BUSY) {
3608 msleep(2);
3609 status = ata_busy_wait(ap, ATA_BUSY, 10);
3610 if (status & ATA_BUSY) {
3611 ap->hsm_task_state = HSM_ST_POLL;
3612 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3613 return;
3614 }
3615 } 3913 }
3616 3914
3617 qc = ata_qc_from_tag(ap, ap->active_tag); 3915 return 0;
3618 WARN_ON(qc == NULL); 3916}
3619 3917
3620 /* check error */ 3918/**
3621 if (status & (ATA_ERR | ATA_DF)) { 3919 * ata_hsm_qc_complete - finish a qc running on standard HSM
3622 qc->err_mask |= AC_ERR_DEV; 3920 * @qc: Command to complete
3623 ap->hsm_task_state = HSM_ST_ERR; 3921 * @in_wq: 1 if called from workqueue, 0 otherwise
3624 return; 3922 *
3625 } 3923 * Finish @qc which is running on standard HSM.
3924 *
3925 * LOCKING:
3926 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3927 * Otherwise, none on entry and grabs host lock.
3928 */
3929static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3930{
3931 struct ata_port *ap = qc->ap;
3932 unsigned long flags;
3626 3933
3627 /* transfer data if any */ 3934 if (ap->ops->error_handler) {
3628 if (is_atapi_taskfile(&qc->tf)) { 3935 if (in_wq) {
3629 /* DRQ=0 means no more data to transfer */ 3936 spin_lock_irqsave(ap->lock, flags);
3630 if ((status & ATA_DRQ) == 0) {
3631 ap->hsm_task_state = HSM_ST_LAST;
3632 return;
3633 }
3634 3937
3635 atapi_pio_bytes(qc); 3938 /* EH might have kicked in while host_set lock
3636 } else { 3939 * is released.
3637 /* handle BSY=0, DRQ=0 as error */ 3940 */
3638 if ((status & ATA_DRQ) == 0) { 3941 qc = ata_qc_from_tag(ap, qc->tag);
3639 qc->err_mask |= AC_ERR_HSM; 3942 if (qc) {
3640 ap->hsm_task_state = HSM_ST_ERR; 3943 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3641 return; 3944 ata_irq_on(ap);
3642 } 3945 ata_qc_complete(qc);
3946 } else
3947 ata_port_freeze(ap);
3948 }
3643 3949
3644 ata_pio_sector(qc); 3950 spin_unlock_irqrestore(ap->lock, flags);
3951 } else {
3952 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3953 ata_qc_complete(qc);
3954 else
3955 ata_port_freeze(ap);
3956 }
3957 } else {
3958 if (in_wq) {
3959 spin_lock_irqsave(ap->lock, flags);
3960 ata_irq_on(ap);
3961 ata_qc_complete(qc);
3962 spin_unlock_irqrestore(ap->lock, flags);
3963 } else
3964 ata_qc_complete(qc);
3645 } 3965 }
3646 3966
3647 ata_altstatus(ap); /* flush */ 3967 ata_altstatus(ap); /* flush */
3648} 3968}
3649 3969
3650static void ata_pio_error(struct ata_port *ap) 3970/**
3971 * ata_hsm_move - move the HSM to the next state.
3972 * @ap: the target ata_port
3973 * @qc: qc on going
3974 * @status: current device status
3975 * @in_wq: 1 if called from workqueue, 0 otherwise
3976 *
3977 * RETURNS:
3978 * 1 when poll next status needed, 0 otherwise.
3979 */
3980int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3981 u8 status, int in_wq)
3651{ 3982{
3652 struct ata_queued_cmd *qc; 3983 unsigned long flags = 0;
3653 3984 int poll_next;
3654 qc = ata_qc_from_tag(ap, ap->active_tag);
3655 WARN_ON(qc == NULL);
3656 3985
3657 if (qc->tf.command != ATA_CMD_PACKET) 3986 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3658 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3659 3987
3660 /* make sure qc->err_mask is available to 3988 /* Make sure ata_qc_issue_prot() does not throw things
3661 * know what's wrong and recover 3989 * like DMA polling into the workqueue. Notice that
3990 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3662 */ 3991 */
3663 WARN_ON(qc->err_mask == 0); 3992 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3664
3665 ap->hsm_task_state = HSM_ST_IDLE;
3666
3667 ata_poll_qc_complete(qc);
3668}
3669
3670static void ata_pio_task(void *_data)
3671{
3672 struct ata_port *ap = _data;
3673 unsigned long timeout;
3674 int qc_completed;
3675 3993
3676fsm_start: 3994fsm_start:
3677 timeout = 0; 3995 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3678 qc_completed = 0; 3996 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3679 3997
3680 switch (ap->hsm_task_state) { 3998 switch (ap->hsm_task_state) {
3681 case HSM_ST_IDLE: 3999 case HSM_ST_FIRST:
3682 return; 4000 /* Send first data block or PACKET CDB */
3683 4001
3684 case HSM_ST: 4002 /* If polling, we will stay in the work queue after
3685 ata_pio_block(ap); 4003 * sending the data. Otherwise, interrupt handler
3686 break; 4004 * takes over after sending the data.
3687 4005 */
3688 case HSM_ST_LAST: 4006 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3689 qc_completed = ata_pio_complete(ap); 4007
3690 break; 4008 /* check device status */
3691 4009 if (unlikely((status & ATA_DRQ) == 0)) {
3692 case HSM_ST_POLL: 4010 /* handle BSY=0, DRQ=0 as error */
3693 case HSM_ST_LAST_POLL: 4011 if (likely(status & (ATA_ERR | ATA_DF)))
3694 timeout = ata_pio_poll(ap); 4012 /* device stops HSM for abort/error */
3695 break; 4013 qc->err_mask |= AC_ERR_DEV;
3696 4014 else
3697 case HSM_ST_TMOUT: 4015 /* HSM violation. Let EH handle this */
3698 case HSM_ST_ERR: 4016 qc->err_mask |= AC_ERR_HSM;
3699 ata_pio_error(ap);
3700 return;
3701 }
3702
3703 if (timeout)
3704 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3705 else if (!qc_completed)
3706 goto fsm_start;
3707}
3708
3709/**
3710 * atapi_packet_task - Write CDB bytes to hardware
3711 * @_data: Port to which ATAPI device is attached.
3712 *
3713 * When device has indicated its readiness to accept
3714 * a CDB, this function is called. Send the CDB.
3715 * If DMA is to be performed, exit immediately.
3716 * Otherwise, we are in polling mode, so poll
3717 * status until the operation succeeds or fails.
3718 *
3719 * LOCKING:
3720 * Kernel thread context (may sleep)
3721 */
3722
3723static void atapi_packet_task(void *_data)
3724{
3725 struct ata_port *ap = _data;
3726 struct ata_queued_cmd *qc;
3727 u8 status;
3728
3729 qc = ata_qc_from_tag(ap, ap->active_tag);
3730 WARN_ON(qc == NULL);
3731 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3732 4017
3733 /* sleep-wait for BSY to clear */ 4018 ap->hsm_task_state = HSM_ST_ERR;
3734 DPRINTK("busy wait\n"); 4019 goto fsm_start;
3735 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4020 }
3736 qc->err_mask |= AC_ERR_TIMEOUT;
3737 goto err_out;
3738 }
3739 4021
3740 /* make sure DRQ is set */ 4022 /* Device should not ask for data transfer (DRQ=1)
3741 status = ata_chk_status(ap); 4023 * when it finds something wrong.
3742 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4024 * We ignore DRQ here and stop the HSM by
3743 qc->err_mask |= AC_ERR_HSM; 4025 * changing hsm_task_state to HSM_ST_ERR and
3744 goto err_out; 4026 * let the EH abort the command or reset the device.
3745 } 4027 */
4028 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4029 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4030 ap->id, status);
4031 qc->err_mask |= AC_ERR_HSM;
4032 ap->hsm_task_state = HSM_ST_ERR;
4033 goto fsm_start;
4034 }
3746 4035
3747 /* send SCSI cdb */ 4036 /* Send the CDB (atapi) or the first data block (ata pio out).
3748 DPRINTK("send cdb\n"); 4037 * During the state transition, interrupt handler shouldn't
3749 WARN_ON(qc->dev->cdb_len < 12); 4038 * be invoked before the data transfer is complete and
4039 * hsm_task_state is changed. Hence, the following locking.
4040 */
4041 if (in_wq)
4042 spin_lock_irqsave(ap->lock, flags);
3750 4043
3751 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4044 if (qc->tf.protocol == ATA_PROT_PIO) {
3752 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4045 /* PIO data out protocol.
3753 unsigned long flags; 4046 * send first data block.
4047 */
3754 4048
3755 /* Once we're done issuing command and kicking bmdma, 4049 /* ata_pio_sectors() might change the state
3756 * irq handler takes over. To not lose irq, we need 4050 * to HSM_ST_LAST. so, the state is changed here
3757 * to clear NOINTR flag before sending cdb, but 4051 * before ata_pio_sectors().
3758 * interrupt handler shouldn't be invoked before we're 4052 */
3759 * finished. Hence, the following locking. 4053 ap->hsm_task_state = HSM_ST;
4054 ata_pio_sectors(qc);
4055 ata_altstatus(ap); /* flush */
4056 } else
4057 /* send CDB */
4058 atapi_send_cdb(ap, qc);
4059
4060 if (in_wq)
4061 spin_unlock_irqrestore(ap->lock, flags);
4062
4063 /* if polling, ata_pio_task() handles the rest.
4064 * otherwise, interrupt handler takes over from here.
3760 */ 4065 */
3761 spin_lock_irqsave(&ap->host_set->lock, flags); 4066 break;
3762 ap->flags &= ~ATA_FLAG_NOINTR;
3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3764 ata_altstatus(ap); /* flush */
3765 4067
3766 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4068 case HSM_ST:
3767 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4069 /* complete command or read/write the data register */
3768 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4070 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3769 } else { 4071 /* ATAPI PIO protocol */
3770 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 4072 if ((status & ATA_DRQ) == 0) {
3771 ata_altstatus(ap); /* flush */ 4073 /* No more data to transfer or device error.
4074 * Device error will be tagged in HSM_ST_LAST.
4075 */
4076 ap->hsm_task_state = HSM_ST_LAST;
4077 goto fsm_start;
4078 }
3772 4079
3773 /* PIO commands are handled by polling */ 4080 /* Device should not ask for data transfer (DRQ=1)
3774 ap->hsm_task_state = HSM_ST; 4081 * when it finds something wrong.
3775 ata_port_queue_task(ap, ata_pio_task, ap, 0); 4082 * We ignore DRQ here and stop the HSM by
3776 } 4083 * changing hsm_task_state to HSM_ST_ERR and
4084 * let the EH abort the command or reset the device.
4085 */
4086 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4087 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4088 ap->id, status);
4089 qc->err_mask |= AC_ERR_HSM;
4090 ap->hsm_task_state = HSM_ST_ERR;
4091 goto fsm_start;
4092 }
3777 4093
3778 return; 4094 atapi_pio_bytes(qc);
3779 4095
3780err_out: 4096 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3781 ata_poll_qc_complete(qc); 4097 /* bad ireason reported by device */
3782} 4098 goto fsm_start;
3783 4099
3784/** 4100 } else {
3785 * ata_qc_timeout - Handle timeout of queued command 4101 /* ATA PIO protocol */
3786 * @qc: Command that timed out 4102 if (unlikely((status & ATA_DRQ) == 0)) {
3787 * 4103 /* handle BSY=0, DRQ=0 as error */
3788 * Some part of the kernel (currently, only the SCSI layer) 4104 if (likely(status & (ATA_ERR | ATA_DF)))
3789 * has noticed that the active command on port @ap has not 4105 /* device stops HSM for abort/error */
3790 * completed after a specified length of time. Handle this 4106 qc->err_mask |= AC_ERR_DEV;
3791 * condition by disabling DMA (if necessary) and completing 4107 else
3792 * transactions, with error if necessary. 4108 /* HSM violation. Let EH handle this */
3793 * 4109 qc->err_mask |= AC_ERR_HSM;
3794 * This also handles the case of the "lost interrupt", where 4110
3795 * for some reason (possibly hardware bug, possibly driver bug) 4111 ap->hsm_task_state = HSM_ST_ERR;
3796 * an interrupt was not delivered to the driver, even though the 4112 goto fsm_start;
3797 * transaction completed successfully. 4113 }
3798 *
3799 * LOCKING:
3800 * Inherited from SCSI layer (none, can sleep)
3801 */
3802 4114
3803static void ata_qc_timeout(struct ata_queued_cmd *qc) 4115 /* For PIO reads, some devices may ask for
3804{ 4116 * data transfer (DRQ=1) alone with ERR=1.
3805 struct ata_port *ap = qc->ap; 4117 * We respect DRQ here and transfer one
3806 struct ata_host_set *host_set = ap->host_set; 4118 * block of junk data before changing the
3807 u8 host_stat = 0, drv_stat; 4119 * hsm_task_state to HSM_ST_ERR.
3808 unsigned long flags; 4120 *
4121 * For PIO writes, ERR=1 DRQ=1 doesn't make
4122 * sense since the data block has been
4123 * transferred to the device.
4124 */
4125 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4126 /* data might be corrupted */
4127 qc->err_mask |= AC_ERR_DEV;
4128
4129 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4130 ata_pio_sectors(qc);
4131 ata_altstatus(ap);
4132 status = ata_wait_idle(ap);
4133 }
4134
4135 if (status & (ATA_BUSY | ATA_DRQ))
4136 qc->err_mask |= AC_ERR_HSM;
4137
4138 /* ata_pio_sectors() might change the
4139 * state to HSM_ST_LAST. so, the state
4140 * is changed after ata_pio_sectors().
4141 */
4142 ap->hsm_task_state = HSM_ST_ERR;
4143 goto fsm_start;
4144 }
3809 4145
3810 DPRINTK("ENTER\n"); 4146 ata_pio_sectors(qc);
3811 4147
3812 ap->hsm_task_state = HSM_ST_IDLE; 4148 if (ap->hsm_task_state == HSM_ST_LAST &&
4149 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4150 /* all data read */
4151 ata_altstatus(ap);
4152 status = ata_wait_idle(ap);
4153 goto fsm_start;
4154 }
4155 }
3813 4156
3814 spin_lock_irqsave(&host_set->lock, flags); 4157 ata_altstatus(ap); /* flush */
4158 poll_next = 1;
4159 break;
3815 4160
3816 switch (qc->tf.protocol) { 4161 case HSM_ST_LAST:
4162 if (unlikely(!ata_ok(status))) {
4163 qc->err_mask |= __ac_err_mask(status);
4164 ap->hsm_task_state = HSM_ST_ERR;
4165 goto fsm_start;
4166 }
3817 4167
3818 case ATA_PROT_DMA: 4168 /* no more data to transfer */
3819 case ATA_PROT_ATAPI_DMA: 4169 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
3820 host_stat = ap->ops->bmdma_status(ap); 4170 ap->id, qc->dev->devno, status);
3821 4171
3822 /* before we do anything else, clear DMA-Start bit */ 4172 WARN_ON(qc->err_mask);
3823 ap->ops->bmdma_stop(qc);
3824 4173
3825 /* fall through */ 4174 ap->hsm_task_state = HSM_ST_IDLE;
3826 4175
3827 default: 4176 /* complete taskfile transaction */
3828 ata_altstatus(ap); 4177 ata_hsm_qc_complete(qc, in_wq);
3829 drv_stat = ata_chk_status(ap); 4178
4179 poll_next = 0;
4180 break;
3830 4181
3831 /* ack bmdma irq events */ 4182 case HSM_ST_ERR:
3832 ap->ops->irq_clear(ap); 4183 /* make sure qc->err_mask is available to
4184 * know what's wrong and recover
4185 */
4186 WARN_ON(qc->err_mask == 0);
3833 4187
3834 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 4188 ap->hsm_task_state = HSM_ST_IDLE;
3835 ap->id, qc->tf.command, drv_stat, host_stat);
3836 4189
3837 /* complete taskfile transaction */ 4190 /* complete taskfile transaction */
3838 qc->err_mask |= ac_err_mask(drv_stat); 4191 ata_hsm_qc_complete(qc, in_wq);
4192
4193 poll_next = 0;
3839 break; 4194 break;
4195 default:
4196 poll_next = 0;
4197 BUG();
3840 } 4198 }
3841 4199
3842 spin_unlock_irqrestore(&host_set->lock, flags); 4200 return poll_next;
3843
3844 ata_eh_qc_complete(qc);
3845
3846 DPRINTK("EXIT\n");
3847} 4201}
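
ata_hsm_move() above is a state machine driven by device status. Stripped of error handling, locking and the polling/IRQ split, the happy path reduces to this standalone sketch; states and block count are invented for illustration:

#include <stdio.h>

enum hsm { ST_FIRST, ST, ST_LAST, ST_IDLE };

int main(void)
{
	enum hsm state = ST_FIRST;
	int blocks = 3;			/* pretend DRQ fires 3 times */

	while (state != ST_IDLE) {
		switch (state) {
		case ST_FIRST:
			/* send CDB (atapi) or first data block (pio out) */
			puts("send CDB / first data block");
			state = ST;
			break;
		case ST:
			/* one data block per DRQ assertion */
			printf("transfer block (%d left)\n", blocks);
			if (--blocks == 0)
				state = ST_LAST;
			break;
		case ST_LAST:
			/* verify final status, then complete the qc */
			puts("check final status, complete qc");
			state = ST_IDLE;
			break;
		default:
			break;
		}
	}
	return 0;
}
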
3848 4202
3849/** 4203static void ata_pio_task(void *_data)
3850 * ata_eng_timeout - Handle timeout of queued command
3851 * @ap: Port on which timed-out command is active
3852 *
3853 * Some part of the kernel (currently, only the SCSI layer)
3854 * has noticed that the active command on port @ap has not
3855 * completed after a specified length of time. Handle this
3856 * condition by disabling DMA (if necessary) and completing
3857 * transactions, with error if necessary.
3858 *
3859 * This also handles the case of the "lost interrupt", where
3860 * for some reason (possibly hardware bug, possibly driver bug)
3861 * an interrupt was not delivered to the driver, even though the
3862 * transaction completed successfully.
3863 *
3864 * LOCKING:
3865 * Inherited from SCSI layer (none, can sleep)
3866 */
3867
3868void ata_eng_timeout(struct ata_port *ap)
3869{ 4204{
3870 DPRINTK("ENTER\n"); 4205 struct ata_queued_cmd *qc = _data;
4206 struct ata_port *ap = qc->ap;
4207 u8 status;
4208 int poll_next;
3871 4209
3872 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); 4210fsm_start:
4211 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3873 4212
3874 DPRINTK("EXIT\n"); 4213 /*
4214 * This is purely heuristic. This is a fast path.
4215 * Sometimes when we enter, BSY will be cleared in
4216 * a chk-status or two. If not, the drive is probably seeking
4217 * or something. Snooze for a couple msecs, then
4218 * chk-status again. If still busy, queue delayed work.
4219 */
4220 status = ata_busy_wait(ap, ATA_BUSY, 5);
4221 if (status & ATA_BUSY) {
4222 msleep(2);
4223 status = ata_busy_wait(ap, ATA_BUSY, 10);
4224 if (status & ATA_BUSY) {
4225 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4226 return;
4227 }
4228 }
4229
4230 /* move the HSM */
4231 poll_next = ata_hsm_move(ap, qc, status, 1);
4232
4233 /* another command or interrupt handler
4234 * may be running at this point.
4235 */
4236 if (poll_next)
4237 goto fsm_start;
3875} 4238}
3876 4239
3877/** 4240/**
@@ -3888,9 +4251,14 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3888 struct ata_queued_cmd *qc = NULL; 4251 struct ata_queued_cmd *qc = NULL;
3889 unsigned int i; 4252 unsigned int i;
3890 4253
3891 for (i = 0; i < ATA_MAX_QUEUE; i++) 4254 /* no command while frozen */
3892 if (!test_and_set_bit(i, &ap->qactive)) { 4255 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
3893 qc = ata_qc_from_tag(ap, i); 4256 return NULL;
4257
 4258 /* the last tag is reserved for the internal command. */
4259 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4260 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4261 qc = __ata_qc_from_tag(ap, i);
3894 break; 4262 break;
3895 } 4263 }
3896 4264
@@ -3902,16 +4270,15 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3902 4270
3903/** 4271/**
3904 * ata_qc_new_init - Request an available ATA command, and initialize it 4272 * ata_qc_new_init - Request an available ATA command, and initialize it
3905 * @ap: Port associated with device @dev
3906 * @dev: Device from which we request an available command structure 4273 * @dev: Device from which we request an available command structure
3907 * 4274 *
3908 * LOCKING: 4275 * LOCKING:
3909 * None. 4276 * None.
3910 */ 4277 */
3911 4278
3912struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 4279struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
3913 struct ata_device *dev)
3914{ 4280{
4281 struct ata_port *ap = dev->ap;
3915 struct ata_queued_cmd *qc; 4282 struct ata_queued_cmd *qc;
3916 4283
3917 qc = ata_qc_new(ap); 4284 qc = ata_qc_new(ap);
@@ -3946,36 +4313,153 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3946 qc->flags = 0; 4313 qc->flags = 0;
3947 tag = qc->tag; 4314 tag = qc->tag;
3948 if (likely(ata_tag_valid(tag))) { 4315 if (likely(ata_tag_valid(tag))) {
3949 if (tag == ap->active_tag)
3950 ap->active_tag = ATA_TAG_POISON;
3951 qc->tag = ATA_TAG_POISON; 4316 qc->tag = ATA_TAG_POISON;
3952 clear_bit(tag, &ap->qactive); 4317 clear_bit(tag, &ap->qc_allocated);
3953 } 4318 }
3954} 4319}
3955 4320
3956void __ata_qc_complete(struct ata_queued_cmd *qc) 4321void __ata_qc_complete(struct ata_queued_cmd *qc)
3957{ 4322{
4323 struct ata_port *ap = qc->ap;
4324
3958 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4325 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3959 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4326 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3960 4327
3961 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4328 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3962 ata_sg_clean(qc); 4329 ata_sg_clean(qc);
3963 4330
4331 /* command should be marked inactive atomically with qc completion */
4332 if (qc->tf.protocol == ATA_PROT_NCQ)
4333 ap->sactive &= ~(1 << qc->tag);
4334 else
4335 ap->active_tag = ATA_TAG_POISON;
4336
3964 /* atapi: mark qc as inactive to prevent the interrupt handler 4337 /* atapi: mark qc as inactive to prevent the interrupt handler
3965 * from completing the command twice later, before the error handler 4338 * from completing the command twice later, before the error handler
3966 * is called. (when rc != 0 and atapi request sense is needed) 4339 * is called. (when rc != 0 and atapi request sense is needed)
3967 */ 4340 */
3968 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4341 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4342 ap->qc_active &= ~(1 << qc->tag);
3969 4343
3970 /* call completion callback */ 4344 /* call completion callback */
3971 qc->complete_fn(qc); 4345 qc->complete_fn(qc);
3972} 4346}
3973 4347
4348/**
4349 * ata_qc_complete - Complete an active ATA command
4350 * @qc: Command to complete
4352 *
4353 * Indicate to the mid and upper layers that an ATA
4354 * command has completed, with either an ok or not-ok status.
4355 *
4356 * LOCKING:
4357 * spin_lock_irqsave(host_set lock)
4358 */
4359void ata_qc_complete(struct ata_queued_cmd *qc)
4360{
4361 struct ata_port *ap = qc->ap;
4362
4363 /* XXX: New EH and old EH use different mechanisms to
4364 * synchronize EH with regular execution path.
4365 *
4366 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4367 * Normal execution path is responsible for not accessing a
4368 * failed qc. libata core enforces the rule by returning NULL
4369 * from ata_qc_from_tag() for failed qcs.
4370 *
4371 * Old EH depends on ata_qc_complete() nullifying completion
4372 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4373 * not synchronize with interrupt handler. Only PIO task is
4374 * taken care of.
4375 */
4376 if (ap->ops->error_handler) {
4377 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4378
4379 if (unlikely(qc->err_mask))
4380 qc->flags |= ATA_QCFLAG_FAILED;
4381
4382 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4383 if (!ata_tag_internal(qc->tag)) {
4384 /* always fill result TF for failed qc */
4385 ap->ops->tf_read(ap, &qc->result_tf);
4386 ata_qc_schedule_eh(qc);
4387 return;
4388 }
4389 }
4390
4391 /* read result TF if requested */
4392 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4393 ap->ops->tf_read(ap, &qc->result_tf);
4394
4395 __ata_qc_complete(qc);
4396 } else {
4397 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4398 return;
4399
4400 /* read result TF if failed or requested */
4401 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4402 ap->ops->tf_read(ap, &qc->result_tf);
4403
4404 __ata_qc_complete(qc);
4405 }
4406}
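
How a driver gets here, as a minimal hedged sketch: my_lld_complete() and its status source are invented for illustration, while ata_qc_from_tag(), ac_err_mask() and ata_qc_complete() are the libata calls used in this file:

	/* hedged sketch -- my_lld_complete() and its callers are hypothetical */
	static void my_lld_complete(struct ata_port *ap, unsigned int tag, u8 status)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc) {
			qc->err_mask |= ac_err_mask(status);	/* fold ATA status into err_mask */
			ata_qc_complete(qc);			/* libata picks EH or normal path */
		}
	}

Under the new EH scheme, ata_qc_complete() itself decides whether the qc continues to __ata_qc_complete() or is routed to ata_qc_schedule_eh().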
4407
4408/**
4409 * ata_qc_complete_multiple - Complete multiple qcs successfully
4410 * @ap: port in question
4411 * @qc_active: new qc_active mask
4412 * @finish_qc: LLDD callback invoked before completing a qc
4413 *
 4414 * Complete in-flight commands. This function is meant to be
 4415 * called from a low-level driver's interrupt routine to complete
 4416 * requests normally. ap->qc_active and @qc_active are compared
 4417 * and commands are completed accordingly.
4418 *
4419 * LOCKING:
4420 * spin_lock_irqsave(host_set lock)
4421 *
4422 * RETURNS:
4423 * Number of completed commands on success, -errno otherwise.
4424 */
4425int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4426 void (*finish_qc)(struct ata_queued_cmd *))
4427{
4428 int nr_done = 0;
4429 u32 done_mask;
4430 int i;
4431
4432 done_mask = ap->qc_active ^ qc_active;
4433
4434 if (unlikely(done_mask & qc_active)) {
4435 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4436 "(%08x->%08x)\n", ap->qc_active, qc_active);
4437 return -EINVAL;
4438 }
4439
4440 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4441 struct ata_queued_cmd *qc;
4442
4443 if (!(done_mask & (1 << i)))
4444 continue;
4445
4446 if ((qc = ata_qc_from_tag(ap, i))) {
4447 if (finish_qc)
4448 finish_qc(qc);
4449 ata_qc_complete(qc);
4450 nr_done++;
4451 }
4452 }
4453
4454 return nr_done;
4455}
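
The mask arithmetic is easiest to see with concrete numbers (invented for illustration):

	/*
	 * ap->qc_active = 0x0b  -> tags 0, 1 and 3 in flight
	 * qc_active     = 0x08  -> controller reports only tag 3 still active
	 * done_mask     = 0x0b ^ 0x08 = 0x03 -> complete tags 0 and 1
	 *
	 * A bit set in both done_mask and qc_active would mean a tag went
	 * from inactive to active, which completion can never do; that is
	 * exactly the transition the -EINVAL check above rejects.
	 */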
4456
3974static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 4457static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3975{ 4458{
3976 struct ata_port *ap = qc->ap; 4459 struct ata_port *ap = qc->ap;
3977 4460
3978 switch (qc->tf.protocol) { 4461 switch (qc->tf.protocol) {
4462 case ATA_PROT_NCQ:
3979 case ATA_PROT_DMA: 4463 case ATA_PROT_DMA:
3980 case ATA_PROT_ATAPI_DMA: 4464 case ATA_PROT_ATAPI_DMA:
3981 return 1; 4465 return 1;
@@ -4010,8 +4494,22 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
4010{ 4494{
4011 struct ata_port *ap = qc->ap; 4495 struct ata_port *ap = qc->ap;
4012 4496
4013 qc->ap->active_tag = qc->tag; 4497 /* Make sure only one non-NCQ command is outstanding. The
4498 * check is skipped for old EH because it reuses active qc to
4499 * request ATAPI sense.
4500 */
4501 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4502
4503 if (qc->tf.protocol == ATA_PROT_NCQ) {
4504 WARN_ON(ap->sactive & (1 << qc->tag));
4505 ap->sactive |= 1 << qc->tag;
4506 } else {
4507 WARN_ON(ap->sactive);
4508 ap->active_tag = qc->tag;
4509 }
4510
4014 qc->flags |= ATA_QCFLAG_ACTIVE; 4511 qc->flags |= ATA_QCFLAG_ACTIVE;
4512 ap->qc_active |= 1 << qc->tag;
4015 4513
4016 if (ata_should_dma_map(qc)) { 4514 if (ata_should_dma_map(qc)) {
4017 if (qc->flags & ATA_QCFLAG_SG) { 4515 if (qc->flags & ATA_QCFLAG_SG) {
@@ -4061,43 +4559,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4061{ 4559{
4062 struct ata_port *ap = qc->ap; 4560 struct ata_port *ap = qc->ap;
4063 4561
 4562 /* Use polling PIO if the LLD doesn't handle
 4563 * interrupt-driven PIO and the ATAPI CDB interrupt.
4564 */
4565 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4566 switch (qc->tf.protocol) {
4567 case ATA_PROT_PIO:
4568 case ATA_PROT_ATAPI:
4569 case ATA_PROT_ATAPI_NODATA:
4570 qc->tf.flags |= ATA_TFLAG_POLLING;
4571 break;
4572 case ATA_PROT_ATAPI_DMA:
4573 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4574 /* see ata_dma_blacklisted() */
4575 BUG();
4576 break;
4577 default:
4578 break;
4579 }
4580 }
4581
4582 /* select the device */
4064 ata_dev_select(ap, qc->dev->devno, 1, 0); 4583 ata_dev_select(ap, qc->dev->devno, 1, 0);
4065 4584
4585 /* start the command */
4066 switch (qc->tf.protocol) { 4586 switch (qc->tf.protocol) {
4067 case ATA_PROT_NODATA: 4587 case ATA_PROT_NODATA:
4588 if (qc->tf.flags & ATA_TFLAG_POLLING)
4589 ata_qc_set_polling(qc);
4590
4068 ata_tf_to_host(ap, &qc->tf); 4591 ata_tf_to_host(ap, &qc->tf);
4592 ap->hsm_task_state = HSM_ST_LAST;
4593
4594 if (qc->tf.flags & ATA_TFLAG_POLLING)
4595 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4596
4069 break; 4597 break;
4070 4598
4071 case ATA_PROT_DMA: 4599 case ATA_PROT_DMA:
4600 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4601
4072 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4602 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4073 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4603 ap->ops->bmdma_setup(qc); /* set up bmdma */
4074 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4604 ap->ops->bmdma_start(qc); /* initiate bmdma */
4605 ap->hsm_task_state = HSM_ST_LAST;
4075 break; 4606 break;
4076 4607
4077 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 4608 case ATA_PROT_PIO:
4078 ata_qc_set_polling(qc); 4609 if (qc->tf.flags & ATA_TFLAG_POLLING)
4079 ata_tf_to_host(ap, &qc->tf); 4610 ata_qc_set_polling(qc);
4080 ap->hsm_task_state = HSM_ST;
4081 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4082 break;
4083 4611
4084 case ATA_PROT_ATAPI:
4085 ata_qc_set_polling(qc);
4086 ata_tf_to_host(ap, &qc->tf); 4612 ata_tf_to_host(ap, &qc->tf);
4087 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4613
4614 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4615 /* PIO data out protocol */
4616 ap->hsm_task_state = HSM_ST_FIRST;
4617 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4618
4619 /* always send first data block using
4620 * the ata_pio_task() codepath.
4621 */
4622 } else {
4623 /* PIO data in protocol */
4624 ap->hsm_task_state = HSM_ST;
4625
4626 if (qc->tf.flags & ATA_TFLAG_POLLING)
4627 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4628
4629 /* if polling, ata_pio_task() handles the rest.
4630 * otherwise, interrupt handler takes over from here.
4631 */
4632 }
4633
4088 break; 4634 break;
4089 4635
4636 case ATA_PROT_ATAPI:
4090 case ATA_PROT_ATAPI_NODATA: 4637 case ATA_PROT_ATAPI_NODATA:
4091 ap->flags |= ATA_FLAG_NOINTR; 4638 if (qc->tf.flags & ATA_TFLAG_POLLING)
4639 ata_qc_set_polling(qc);
4640
4092 ata_tf_to_host(ap, &qc->tf); 4641 ata_tf_to_host(ap, &qc->tf);
4093 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4642
4643 ap->hsm_task_state = HSM_ST_FIRST;
4644
4645 /* send cdb by polling if no cdb interrupt */
4646 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4647 (qc->tf.flags & ATA_TFLAG_POLLING))
4648 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4094 break; 4649 break;
4095 4650
4096 case ATA_PROT_ATAPI_DMA: 4651 case ATA_PROT_ATAPI_DMA:
4097 ap->flags |= ATA_FLAG_NOINTR; 4652 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4653
4098 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4654 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4099 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4655 ap->ops->bmdma_setup(qc); /* set up bmdma */
4100 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4656 ap->hsm_task_state = HSM_ST_FIRST;
4657
4658 /* send cdb by polling if no cdb interrupt */
4659 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4660 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4101 break; 4661 break;
4102 4662
4103 default: 4663 default:
@@ -4127,52 +4687,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4127inline unsigned int ata_host_intr (struct ata_port *ap, 4687inline unsigned int ata_host_intr (struct ata_port *ap,
4128 struct ata_queued_cmd *qc) 4688 struct ata_queued_cmd *qc)
4129{ 4689{
4130 u8 status, host_stat; 4690 u8 status, host_stat = 0;
4131
4132 switch (qc->tf.protocol) {
4133
4134 case ATA_PROT_DMA:
4135 case ATA_PROT_ATAPI_DMA:
4136 case ATA_PROT_ATAPI:
4137 /* check status of DMA engine */
4138 host_stat = ap->ops->bmdma_status(ap);
4139 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4140
4141 /* if it's not our irq... */
4142 if (!(host_stat & ATA_DMA_INTR))
4143 goto idle_irq;
4144 4691
4145 /* before we do anything else, clear DMA-Start bit */ 4692 VPRINTK("ata%u: protocol %d task_state %d\n",
4146 ap->ops->bmdma_stop(qc); 4693 ap->id, qc->tf.protocol, ap->hsm_task_state);
4147 4694
4148 /* fall through */ 4695 /* Check whether we are expecting interrupt in this state */
4149 4696 switch (ap->hsm_task_state) {
4150 case ATA_PROT_ATAPI_NODATA: 4697 case HSM_ST_FIRST:
4151 case ATA_PROT_NODATA: 4698 /* Some pre-ATAPI-4 devices assert INTRQ
 4152 /* check altstatus */ 4699 * in this state when ready to receive the CDB.
4153 status = ata_altstatus(ap); 4700 */
4154 if (status & ATA_BUSY)
4155 goto idle_irq;
4156 4701
 4157 /* check main status, clearing INTRQ */ 4702 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
 4158 status = ata_chk_status(ap); 4703 * The flag was turned on only for ATAPI devices.
4159 if (unlikely(status & ATA_BUSY)) 4704 * No need to check is_atapi_taskfile(&qc->tf) again.
4705 */
4706 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4160 goto idle_irq; 4707 goto idle_irq;
4161 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4162 ap->id, qc->tf.protocol, status);
4163
4164 /* ack bmdma irq events */
4165 ap->ops->irq_clear(ap);
4166
4167 /* complete taskfile transaction */
4168 qc->err_mask |= ac_err_mask(status);
4169 ata_qc_complete(qc);
4170 break; 4708 break;
4171 4709 case HSM_ST_LAST:
4710 if (qc->tf.protocol == ATA_PROT_DMA ||
4711 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4712 /* check status of DMA engine */
4713 host_stat = ap->ops->bmdma_status(ap);
4714 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4715
4716 /* if it's not our irq... */
4717 if (!(host_stat & ATA_DMA_INTR))
4718 goto idle_irq;
4719
4720 /* before we do anything else, clear DMA-Start bit */
4721 ap->ops->bmdma_stop(qc);
4722
4723 if (unlikely(host_stat & ATA_DMA_ERR)) {
 4724 /* error when transferring data to/from memory */
4725 qc->err_mask |= AC_ERR_HOST_BUS;
4726 ap->hsm_task_state = HSM_ST_ERR;
4727 }
4728 }
4729 break;
4730 case HSM_ST:
4731 break;
4172 default: 4732 default:
4173 goto idle_irq; 4733 goto idle_irq;
4174 } 4734 }
4175 4735
4736 /* check altstatus */
4737 status = ata_altstatus(ap);
4738 if (status & ATA_BUSY)
4739 goto idle_irq;
4740
4741 /* check main status, clearing INTRQ */
4742 status = ata_chk_status(ap);
4743 if (unlikely(status & ATA_BUSY))
4744 goto idle_irq;
4745
4746 /* ack bmdma irq events */
4747 ap->ops->irq_clear(ap);
4748
4749 ata_hsm_move(ap, qc, status, 0);
4176 return 1; /* irq handled */ 4750 return 1; /* irq handled */
4177 4751
4178idle_irq: 4752idle_irq:
@@ -4181,7 +4755,7 @@ idle_irq:
4181#ifdef ATA_IRQ_TRAP 4755#ifdef ATA_IRQ_TRAP
4182 if ((ap->stats.idle_irq % 1000) == 0) { 4756 if ((ap->stats.idle_irq % 1000) == 0) {
4183 ata_irq_ack(ap, 0); /* debug trap */ 4757 ata_irq_ack(ap, 0); /* debug trap */
4184 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 4758 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4185 return 1; 4759 return 1;
4186 } 4760 }
4187#endif 4761#endif
@@ -4219,11 +4793,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4219 4793
4220 ap = host_set->ports[i]; 4794 ap = host_set->ports[i];
4221 if (ap && 4795 if (ap &&
4222 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 4796 !(ap->flags & ATA_FLAG_DISABLED)) {
4223 struct ata_queued_cmd *qc; 4797 struct ata_queued_cmd *qc;
4224 4798
4225 qc = ata_qc_from_tag(ap, ap->active_tag); 4799 qc = ata_qc_from_tag(ap, ap->active_tag);
4226 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4800 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4227 (qc->flags & ATA_QCFLAG_ACTIVE)) 4801 (qc->flags & ATA_QCFLAG_ACTIVE))
4228 handled |= ata_host_intr(ap, qc); 4802 handled |= ata_host_intr(ap, qc);
4229 } 4803 }
@@ -4234,32 +4808,168 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4234 return IRQ_RETVAL(handled); 4808 return IRQ_RETVAL(handled);
4235} 4809}
4236 4810
4811/**
4812 * sata_scr_valid - test whether SCRs are accessible
4813 * @ap: ATA port to test SCR accessibility for
4814 *
4815 * Test whether SCRs are accessible for @ap.
4816 *
4817 * LOCKING:
4818 * None.
4819 *
4820 * RETURNS:
4821 * 1 if SCRs are accessible, 0 otherwise.
4822 */
4823int sata_scr_valid(struct ata_port *ap)
4824{
4825 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4826}
4827
4828/**
4829 * sata_scr_read - read SCR register of the specified port
4830 * @ap: ATA port to read SCR for
4831 * @reg: SCR to read
4832 * @val: Place to store read value
4833 *
4834 * Read SCR register @reg of @ap into *@val. This function is
4835 * guaranteed to succeed if the cable type of the port is SATA
4836 * and the port implements ->scr_read.
4837 *
4838 * LOCKING:
4839 * None.
4840 *
4841 * RETURNS:
4842 * 0 on success, negative errno on failure.
4843 */
4844int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4845{
4846 if (sata_scr_valid(ap)) {
4847 *val = ap->ops->scr_read(ap, reg);
4848 return 0;
4849 }
4850 return -EOPNOTSUPP;
4851}
4852
4853/**
4854 * sata_scr_write - write SCR register of the specified port
4855 * @ap: ATA port to write SCR for
4856 * @reg: SCR to write
4857 * @val: value to write
4858 *
4859 * Write @val to SCR register @reg of @ap. This function is
4860 * guaranteed to succeed if the cable type of the port is SATA
 4861 * and the port implements ->scr_read (the test used by sata_scr_valid()).
4862 *
4863 * LOCKING:
4864 * None.
4865 *
4866 * RETURNS:
4867 * 0 on success, negative errno on failure.
4868 */
4869int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4870{
4871 if (sata_scr_valid(ap)) {
4872 ap->ops->scr_write(ap, reg, val);
4873 return 0;
4874 }
4875 return -EOPNOTSUPP;
4876}
4877
4878/**
4879 * sata_scr_write_flush - write SCR register of the specified port and flush
4880 * @ap: ATA port to write SCR for
4881 * @reg: SCR to write
4882 * @val: value to write
4883 *
 4884 * This function is identical to sata_scr_write() except that it
 4885 * performs a flush after writing to the register.
4886 *
4887 * LOCKING:
4888 * None.
4889 *
4890 * RETURNS:
4891 * 0 on success, negative errno on failure.
4892 */
4893int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4894{
4895 if (sata_scr_valid(ap)) {
4896 ap->ops->scr_write(ap, reg, val);
4897 ap->ops->scr_read(ap, reg);
4898 return 0;
4899 }
4900 return -EOPNOTSUPP;
4901}
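
Together these accessors are enough to drive a SATA link reset by hand. A hedged sketch (my_comreset() is invented; the DET encodings and the 1 ms hold time come from the SATA spec, and the in-tree logic lives in sata_std_hardreset()):

	static int my_comreset(struct ata_port *ap)
	{
		u32 scontrol;
		int rc;

		rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
		if (rc)
			return rc;			/* not SATA or no ->scr_read */

		scontrol = (scontrol & 0x0f0) | 0x301;	/* DET = 1: issue COMRESET */
		rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		msleep(1);				/* hold COMRESET for >= 1 ms */

		scontrol = (scontrol & 0x0f0) | 0x300;	/* DET = 0: resume operation */
		return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
	}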
4902
4903/**
4904 * ata_port_online - test whether the given port is online
4905 * @ap: ATA port to test
4906 *
4907 * Test whether @ap is online. Note that this function returns 0
4908 * if online status of @ap cannot be obtained, so
4909 * ata_port_online(ap) != !ata_port_offline(ap).
4910 *
4911 * LOCKING:
4912 * None.
4913 *
4914 * RETURNS:
4915 * 1 if the port online status is available and online.
4916 */
4917int ata_port_online(struct ata_port *ap)
4918{
4919 u32 sstatus;
4920
4921 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4922 return 1;
4923 return 0;
4924}
4925
4926/**
4927 * ata_port_offline - test whether the given port is offline
4928 * @ap: ATA port to test
4929 *
4930 * Test whether @ap is offline. Note that this function returns
4931 * 0 if offline status of @ap cannot be obtained, so
4932 * ata_port_online(ap) != !ata_port_offline(ap).
4933 *
4934 * LOCKING:
4935 * None.
4936 *
4937 * RETURNS:
4938 * 1 if the port offline status is available and offline.
4939 */
4940int ata_port_offline(struct ata_port *ap)
4941{
4942 u32 sstatus;
4943
4944 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4945 return 1;
4946 return 0;
4947}
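
A worked decode (values invented): SStatus = 0x123 has DET = 0x3, i.e. device presence detected and PHY communication established, so ata_port_online() returns 1 and ata_port_offline() returns 0. On a port whose SCRs cannot be read at all (e.g. PATA), both helpers return 0; that is the asymmetry the two comments above warn about.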
4237 4948
4238/* 4949/*
 4239 * Execute a 'simple' command that consists only of the opcode 'cmd' itself, 4950 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4240 * without filling any other registers 4951 * without filling any other registers
4241 */ 4952 */
4242static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, 4953static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4243 u8 cmd)
4244{ 4954{
4245 struct ata_taskfile tf; 4955 struct ata_taskfile tf;
4246 int err; 4956 int err;
4247 4957
4248 ata_tf_init(ap, &tf, dev->devno); 4958 ata_tf_init(dev, &tf);
4249 4959
4250 tf.command = cmd; 4960 tf.command = cmd;
4251 tf.flags |= ATA_TFLAG_DEVICE; 4961 tf.flags |= ATA_TFLAG_DEVICE;
4252 tf.protocol = ATA_PROT_NODATA; 4962 tf.protocol = ATA_PROT_NODATA;
4253 4963
4254 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 4964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4255 if (err) 4965 if (err)
4256 printk(KERN_ERR "%s: ata command failed: %d\n", 4966 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4257 __FUNCTION__, err); 4967 __FUNCTION__, err);
4258 4968
4259 return err; 4969 return err;
4260} 4970}
4261 4971
4262static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) 4972static int ata_flush_cache(struct ata_device *dev)
4263{ 4973{
4264 u8 cmd; 4974 u8 cmd;
4265 4975
@@ -4271,22 +4981,21 @@ static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4271 else 4981 else
4272 cmd = ATA_CMD_FLUSH; 4982 cmd = ATA_CMD_FLUSH;
4273 4983
4274 return ata_do_simple_cmd(ap, dev, cmd); 4984 return ata_do_simple_cmd(dev, cmd);
4275} 4985}
4276 4986
4277static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) 4987static int ata_standby_drive(struct ata_device *dev)
4278{ 4988{
4279 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); 4989 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4280} 4990}
4281 4991
4282static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) 4992static int ata_start_drive(struct ata_device *dev)
4283{ 4993{
4284 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); 4994 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4285} 4995}
4286 4996
4287/** 4997/**
 4288 * ata_device_resume - wake up a previously suspended device 4998 * ata_device_resume - wake up a previously suspended device
4289 * @ap: port the device is connected to
4290 * @dev: the device to resume 4999 * @dev: the device to resume
4291 * 5000 *
4292 * Kick the drive back into action, by sending it an idle immediate 5001 * Kick the drive back into action, by sending it an idle immediate
@@ -4294,40 +5003,47 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4294 * and host. 5003 * and host.
4295 * 5004 *
4296 */ 5005 */
4297int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 5006int ata_device_resume(struct ata_device *dev)
4298{ 5007{
5008 struct ata_port *ap = dev->ap;
5009
4299 if (ap->flags & ATA_FLAG_SUSPENDED) { 5010 if (ap->flags & ATA_FLAG_SUSPENDED) {
5011 struct ata_device *failed_dev;
5012
4300 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 5013 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
4301 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 5014 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
5015
4302 ap->flags &= ~ATA_FLAG_SUSPENDED; 5016 ap->flags &= ~ATA_FLAG_SUSPENDED;
4303 ata_set_mode(ap); 5017 while (ata_set_mode(ap, &failed_dev))
5018 ata_dev_disable(failed_dev);
4304 } 5019 }
4305 if (!ata_dev_present(dev)) 5020 if (!ata_dev_enabled(dev))
4306 return 0; 5021 return 0;
4307 if (dev->class == ATA_DEV_ATA) 5022 if (dev->class == ATA_DEV_ATA)
4308 ata_start_drive(ap, dev); 5023 ata_start_drive(dev);
4309 5024
4310 return 0; 5025 return 0;
4311} 5026}
4312 5027
4313/** 5028/**
4314 * ata_device_suspend - prepare a device for suspend 5029 * ata_device_suspend - prepare a device for suspend
4315 * @ap: port the device is connected to
4316 * @dev: the device to suspend 5030 * @dev: the device to suspend
4317 * @state: target power management state 5031 * @state: target power management state
4318 * 5032 *
4319 * Flush the cache on the drive, if appropriate, then issue a 5033 * Flush the cache on the drive, if appropriate, then issue a
4320 * standbynow command. 5034 * standbynow command.
4321 */ 5035 */
4322int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) 5036int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4323{ 5037{
4324 if (!ata_dev_present(dev)) 5038 struct ata_port *ap = dev->ap;
5039
5040 if (!ata_dev_enabled(dev))
4325 return 0; 5041 return 0;
4326 if (dev->class == ATA_DEV_ATA) 5042 if (dev->class == ATA_DEV_ATA)
4327 ata_flush_cache(ap, dev); 5043 ata_flush_cache(dev);
4328 5044
4329 if (state.event != PM_EVENT_FREEZE) 5045 if (state.event != PM_EVENT_FREEZE)
4330 ata_standby_drive(ap, dev); 5046 ata_standby_drive(dev);
4331 ap->flags |= ATA_FLAG_SUSPENDED; 5047 ap->flags |= ATA_FLAG_SUSPENDED;
4332 return 0; 5048 return 0;
4333} 5049}
@@ -4415,6 +5131,38 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4415} 5131}
4416 5132
4417/** 5133/**
5134 * ata_dev_init - Initialize an ata_device structure
5135 * @dev: Device structure to initialize
5136 *
5137 * Initialize @dev in preparation for probing.
5138 *
5139 * LOCKING:
5140 * Inherited from caller.
5141 */
5142void ata_dev_init(struct ata_device *dev)
5143{
5144 struct ata_port *ap = dev->ap;
5145 unsigned long flags;
5146
5147 /* SATA spd limit is bound to the first device */
5148 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5149
5150 /* High bits of dev->flags are used to record warm plug
5151 * requests which occur asynchronously. Synchronize using
5152 * host_set lock.
5153 */
5154 spin_lock_irqsave(ap->lock, flags);
5155 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5156 spin_unlock_irqrestore(ap->lock, flags);
5157
5158 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5159 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5160 dev->pio_mask = UINT_MAX;
5161 dev->mwdma_mask = UINT_MAX;
5162 dev->udma_mask = UINT_MAX;
5163}
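
The memset() above clears only the probe-volatile tail of the structure, so identity fields survive re-probing. A self-contained sketch of the same idiom with an invented struct, where ATA_DEVICE_CLEAR_OFFSET plays the role of the offsetof() marker:

	#include <stddef.h>	/* offsetof; the kernel has its own */
	#include <string.h>

	struct my_dev {
		int devno;		/* persistent: identifies the slot */
		unsigned long flags;	/* persistent: synchronized under lock */
		/* everything below is re-established by probing */
		unsigned int n_sectors;
		char model[40];
	};

	#define MY_CLEAR_OFFSET offsetof(struct my_dev, n_sectors)

	static void my_dev_clear(struct my_dev *dev)
	{
		memset((char *)dev + MY_CLEAR_OFFSET, 0,
		       sizeof(*dev) - MY_CLEAR_OFFSET);
	}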
5164
5165/**
4418 * ata_host_init - Initialize an ata_port structure 5166 * ata_host_init - Initialize an ata_port structure
4419 * @ap: Structure to initialize 5167 * @ap: Structure to initialize
4420 * @host: associated SCSI mid-layer structure 5168 * @host: associated SCSI mid-layer structure
@@ -4428,7 +5176,6 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4428 * LOCKING: 5176 * LOCKING:
4429 * Inherited from caller. 5177 * Inherited from caller.
4430 */ 5178 */
4431
4432static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, 5179static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4433 struct ata_host_set *host_set, 5180 struct ata_host_set *host_set,
4434 const struct ata_probe_ent *ent, unsigned int port_no) 5181 const struct ata_probe_ent *ent, unsigned int port_no)
@@ -4441,7 +5188,8 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4441 host->unique_id = ata_unique_id++; 5188 host->unique_id = ata_unique_id++;
4442 host->max_cmd_len = 12; 5189 host->max_cmd_len = 12;
4443 5190
4444 ap->flags = ATA_FLAG_PORT_DISABLED; 5191 ap->lock = &host_set->lock;
5192 ap->flags = ATA_FLAG_DISABLED;
4445 ap->id = host->unique_id; 5193 ap->id = host->unique_id;
4446 ap->host = host; 5194 ap->host = host;
4447 ap->ctl = ATA_DEVCTL_OBS; 5195 ap->ctl = ATA_DEVCTL_OBS;
@@ -4455,19 +5203,35 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4455 ap->udma_mask = ent->udma_mask; 5203 ap->udma_mask = ent->udma_mask;
4456 ap->flags |= ent->host_flags; 5204 ap->flags |= ent->host_flags;
4457 ap->ops = ent->port_ops; 5205 ap->ops = ent->port_ops;
4458 ap->cbl = ATA_CBL_NONE; 5206 ap->hw_sata_spd_limit = UINT_MAX;
4459 ap->active_tag = ATA_TAG_POISON; 5207 ap->active_tag = ATA_TAG_POISON;
4460 ap->last_ctl = 0xFF; 5208 ap->last_ctl = 0xFF;
4461 5209
5210#if defined(ATA_VERBOSE_DEBUG)
5211 /* turn on all debugging levels */
5212 ap->msg_enable = 0x00FF;
5213#elif defined(ATA_DEBUG)
5214 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5215#else
5216 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5217#endif
5218
4462 INIT_WORK(&ap->port_task, NULL, NULL); 5219 INIT_WORK(&ap->port_task, NULL, NULL);
5220 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5221 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
4463 INIT_LIST_HEAD(&ap->eh_done_q); 5222 INIT_LIST_HEAD(&ap->eh_done_q);
5223 init_waitqueue_head(&ap->eh_wait_q);
5224
5225 /* set cable type */
5226 ap->cbl = ATA_CBL_NONE;
5227 if (ap->flags & ATA_FLAG_SATA)
5228 ap->cbl = ATA_CBL_SATA;
4464 5229
4465 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5230 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4466 struct ata_device *dev = &ap->device[i]; 5231 struct ata_device *dev = &ap->device[i];
5232 dev->ap = ap;
4467 dev->devno = i; 5233 dev->devno = i;
4468 dev->pio_mask = UINT_MAX; 5234 ata_dev_init(dev);
4469 dev->mwdma_mask = UINT_MAX;
4470 dev->udma_mask = UINT_MAX;
4471 } 5235 }
4472 5236
4473#ifdef ATA_IRQ_TRAP 5237#ifdef ATA_IRQ_TRAP
@@ -4503,7 +5267,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4503 5267
4504 DPRINTK("ENTER\n"); 5268 DPRINTK("ENTER\n");
4505 5269
4506 if (!ent->port_ops->probe_reset && 5270 if (!ent->port_ops->error_handler &&
4507 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { 5271 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4508 printk(KERN_ERR "ata%u: no reset mechanism available\n", 5272 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4509 port_no); 5273 port_no);
@@ -4516,7 +5280,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4516 5280
4517 host->transportt = &ata_scsi_transport_template; 5281 host->transportt = &ata_scsi_transport_template;
4518 5282
4519 ap = (struct ata_port *) &host->hostdata[0]; 5283 ap = ata_shost_to_port(host);
4520 5284
4521 ata_host_init(ap, host, host_set, ent, port_no); 5285 ata_host_init(ap, host, host_set, ent, port_no);
4522 5286
@@ -4549,12 +5313,12 @@ err_out:
4549 * RETURNS: 5313 * RETURNS:
4550 * Number of ports registered. Zero on error (no ports registered). 5314 * Number of ports registered. Zero on error (no ports registered).
4551 */ 5315 */
4552
4553int ata_device_add(const struct ata_probe_ent *ent) 5316int ata_device_add(const struct ata_probe_ent *ent)
4554{ 5317{
4555 unsigned int count = 0, i; 5318 unsigned int count = 0, i;
4556 struct device *dev = ent->dev; 5319 struct device *dev = ent->dev;
4557 struct ata_host_set *host_set; 5320 struct ata_host_set *host_set;
5321 int rc;
4558 5322
4559 DPRINTK("ENTER\n"); 5323 DPRINTK("ENTER\n");
4560 /* alloc a container for our list of ATA ports (buses) */ 5324 /* alloc a container for our list of ATA ports (buses) */
@@ -4587,18 +5351,18 @@ int ata_device_add(const struct ata_probe_ent *ent)
4587 (ap->pio_mask << ATA_SHIFT_PIO); 5351 (ap->pio_mask << ATA_SHIFT_PIO);
4588 5352
4589 /* print per-port info to dmesg */ 5353 /* print per-port info to dmesg */
4590 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " 5354 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4591 "bmdma 0x%lX irq %lu\n", 5355 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4592 ap->id, 5356 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4593 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5357 ata_mode_string(xfer_mode_mask),
4594 ata_mode_string(xfer_mode_mask), 5358 ap->ioaddr.cmd_addr,
4595 ap->ioaddr.cmd_addr, 5359 ap->ioaddr.ctl_addr,
4596 ap->ioaddr.ctl_addr, 5360 ap->ioaddr.bmdma_addr,
4597 ap->ioaddr.bmdma_addr, 5361 ent->irq);
4598 ent->irq);
4599 5362
4600 ata_chk_status(ap); 5363 ata_chk_status(ap);
4601 host_set->ops->irq_clear(ap); 5364 host_set->ops->irq_clear(ap);
5365 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
4602 count++; 5366 count++;
4603 } 5367 }
4604 5368
@@ -4606,41 +5370,72 @@ int ata_device_add(const struct ata_probe_ent *ent)
4606 goto err_free_ret; 5370 goto err_free_ret;
4607 5371
4608 /* obtain irq, that is shared between channels */ 5372 /* obtain irq, that is shared between channels */
4609 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 5373 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4610 DRV_NAME, host_set)) 5374 DRV_NAME, host_set);
5375 if (rc) {
5376 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5377 ent->irq, rc);
4611 goto err_out; 5378 goto err_out;
5379 }
4612 5380
4613 /* perform each probe synchronously */ 5381 /* perform each probe synchronously */
4614 DPRINTK("probe begin\n"); 5382 DPRINTK("probe begin\n");
4615 for (i = 0; i < count; i++) { 5383 for (i = 0; i < count; i++) {
4616 struct ata_port *ap; 5384 struct ata_port *ap;
5385 u32 scontrol;
4617 int rc; 5386 int rc;
4618 5387
4619 ap = host_set->ports[i]; 5388 ap = host_set->ports[i];
4620 5389
4621 DPRINTK("ata%u: bus probe begin\n", ap->id); 5390 /* init sata_spd_limit to the current value */
4622 rc = ata_bus_probe(ap); 5391 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
4623 DPRINTK("ata%u: bus probe end\n", ap->id); 5392 int spd = (scontrol >> 4) & 0xf;
4624 5393 ap->hw_sata_spd_limit &= (1 << spd) - 1;
4625 if (rc) {
4626 /* FIXME: do something useful here?
4627 * Current libata behavior will
4628 * tear down everything when
4629 * the module is removed
4630 * or the h/w is unplugged.
4631 */
4632 } 5394 }
5395 ap->sata_spd_limit = ap->hw_sata_spd_limit;
4633 5396
4634 rc = scsi_add_host(ap->host, dev); 5397 rc = scsi_add_host(ap->host, dev);
4635 if (rc) { 5398 if (rc) {
4636 printk(KERN_ERR "ata%u: scsi_add_host failed\n", 5399 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
4637 ap->id);
4638 /* FIXME: do something useful here */ 5400 /* FIXME: do something useful here */
4639 /* FIXME: handle unconditional calls to 5401 /* FIXME: handle unconditional calls to
4640 * scsi_scan_host and ata_host_remove, below, 5402 * scsi_scan_host and ata_host_remove, below,
4641 * at the very least 5403 * at the very least
4642 */ 5404 */
4643 } 5405 }
5406
5407 if (ap->ops->error_handler) {
5408 unsigned long flags;
5409
5410 ata_port_probe(ap);
5411
5412 /* kick EH for boot probing */
5413 spin_lock_irqsave(ap->lock, flags);
5414
5415 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5416 ap->eh_info.action |= ATA_EH_SOFTRESET;
5417
5418 ap->flags |= ATA_FLAG_LOADING;
5419 ata_port_schedule_eh(ap);
5420
5421 spin_unlock_irqrestore(ap->lock, flags);
5422
5423 /* wait for EH to finish */
5424 ata_port_wait_eh(ap);
5425 } else {
5426 DPRINTK("ata%u: bus probe begin\n", ap->id);
5427 rc = ata_bus_probe(ap);
5428 DPRINTK("ata%u: bus probe end\n", ap->id);
5429
5430 if (rc) {
5431 /* FIXME: do something useful here?
5432 * Current libata behavior will
5433 * tear down everything when
5434 * the module is removed
5435 * or the h/w is unplugged.
5436 */
5437 }
5438 }
4644 } 5439 }
4645 5440
4646 /* probes are done, now scan each port's disk(s) */ 5441 /* probes are done, now scan each port's disk(s) */
@@ -4668,6 +5463,63 @@ err_free_ret:
4668} 5463}
4669 5464
4670/** 5465/**
 5466 * ata_port_detach - Detach ATA port in preparation for device removal
5467 * @ap: ATA port to be detached
5468 *
5469 * Detach all ATA devices and the associated SCSI devices of @ap;
5470 * then, remove the associated SCSI host. @ap is guaranteed to
5471 * be quiescent on return from this function.
5472 *
5473 * LOCKING:
5474 * Kernel thread context (may sleep).
5475 */
5476void ata_port_detach(struct ata_port *ap)
5477{
5478 unsigned long flags;
5479 int i;
5480
5481 if (!ap->ops->error_handler)
5482 return;
5483
5484 /* tell EH we're leaving & flush EH */
5485 spin_lock_irqsave(ap->lock, flags);
5486 ap->flags |= ATA_FLAG_UNLOADING;
5487 spin_unlock_irqrestore(ap->lock, flags);
5488
5489 ata_port_wait_eh(ap);
5490
5491 /* EH is now guaranteed to see UNLOADING, so no new device
5492 * will be attached. Disable all existing devices.
5493 */
5494 spin_lock_irqsave(ap->lock, flags);
5495
5496 for (i = 0; i < ATA_MAX_DEVICES; i++)
5497 ata_dev_disable(&ap->device[i]);
5498
5499 spin_unlock_irqrestore(ap->lock, flags);
5500
5501 /* Final freeze & EH. All in-flight commands are aborted. EH
 5502 * will be skipped and retries will be terminated with a bad
 5503 * target error.
5504 */
5505 spin_lock_irqsave(ap->lock, flags);
5506 ata_port_freeze(ap); /* won't be thawed */
5507 spin_unlock_irqrestore(ap->lock, flags);
5508
5509 ata_port_wait_eh(ap);
5510
5511 /* Flush hotplug task. The sequence is similar to
5512 * ata_port_flush_task().
5513 */
5514 flush_workqueue(ata_aux_wq);
5515 cancel_delayed_work(&ap->hotplug_task);
5516 flush_workqueue(ata_aux_wq);
5517
5518 /* remove the associated SCSI host */
5519 scsi_remove_host(ap->host);
5520}
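
The flush/cancel/flush sequence above is the standard way to retire delayed work without racing its timer: the first flush_workqueue() drains anything already queued, cancel_delayed_work() keeps a pending timer from re-queueing, and the second flush catches work the timer managed to queue before the cancel took effect.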
5521
5522/**
4671 * ata_host_set_remove - PCI layer callback for device removal 5523 * ata_host_set_remove - PCI layer callback for device removal
4672 * @host_set: ATA host set that was removed 5524 * @host_set: ATA host set that was removed
4673 * 5525 *
@@ -4680,18 +5532,15 @@ err_free_ret:
4680 5532
4681void ata_host_set_remove(struct ata_host_set *host_set) 5533void ata_host_set_remove(struct ata_host_set *host_set)
4682{ 5534{
4683 struct ata_port *ap;
4684 unsigned int i; 5535 unsigned int i;
4685 5536
4686 for (i = 0; i < host_set->n_ports; i++) { 5537 for (i = 0; i < host_set->n_ports; i++)
4687 ap = host_set->ports[i]; 5538 ata_port_detach(host_set->ports[i]);
4688 scsi_remove_host(ap->host);
4689 }
4690 5539
4691 free_irq(host_set->irq, host_set); 5540 free_irq(host_set->irq, host_set);
4692 5541
4693 for (i = 0; i < host_set->n_ports; i++) { 5542 for (i = 0; i < host_set->n_ports; i++) {
4694 ap = host_set->ports[i]; 5543 struct ata_port *ap = host_set->ports[i];
4695 5544
4696 ata_scsi_release(ap->host); 5545 ata_scsi_release(ap->host);
4697 5546
@@ -4729,15 +5578,12 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4729 5578
4730int ata_scsi_release(struct Scsi_Host *host) 5579int ata_scsi_release(struct Scsi_Host *host)
4731{ 5580{
4732 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 5581 struct ata_port *ap = ata_shost_to_port(host);
4733 int i;
4734 5582
4735 DPRINTK("ENTER\n"); 5583 DPRINTK("ENTER\n");
4736 5584
4737 ap->ops->port_disable(ap); 5585 ap->ops->port_disable(ap);
4738 ata_host_remove(ap, 0); 5586 ata_host_remove(ap, 0);
4739 for (i = 0; i < ATA_MAX_DEVICES; i++)
4740 kfree(ap->device[i].id);
4741 5587
4742 DPRINTK("EXIT\n"); 5588 DPRINTK("EXIT\n");
4743 return 1; 5589 return 1;
@@ -4797,8 +5643,12 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4797{ 5643{
4798 struct device *dev = pci_dev_to_dev(pdev); 5644 struct device *dev = pci_dev_to_dev(pdev);
4799 struct ata_host_set *host_set = dev_get_drvdata(dev); 5645 struct ata_host_set *host_set = dev_get_drvdata(dev);
5646 struct ata_host_set *host_set2 = host_set->next;
4800 5647
4801 ata_host_set_remove(host_set); 5648 ata_host_set_remove(host_set);
5649 if (host_set2)
5650 ata_host_set_remove(host_set2);
5651
4802 pci_release_regions(pdev); 5652 pci_release_regions(pdev);
4803 pci_disable_device(pdev); 5653 pci_disable_device(pdev);
4804 dev_set_drvdata(dev, NULL); 5654 dev_set_drvdata(dev, NULL);
@@ -4863,6 +5713,12 @@ static int __init ata_init(void)
4863 if (!ata_wq) 5713 if (!ata_wq)
4864 return -ENOMEM; 5714 return -ENOMEM;
4865 5715
5716 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5717 if (!ata_aux_wq) {
5718 destroy_workqueue(ata_wq);
5719 return -ENOMEM;
5720 }
5721
4866 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 5722 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4867 return 0; 5723 return 0;
4868} 5724}
@@ -4870,6 +5726,7 @@ static int __init ata_init(void)
4870static void __exit ata_exit(void) 5726static void __exit ata_exit(void)
4871{ 5727{
4872 destroy_workqueue(ata_wq); 5728 destroy_workqueue(ata_wq);
5729 destroy_workqueue(ata_aux_wq);
4873} 5730}
4874 5731
4875module_init(ata_init); 5732module_init(ata_init);
@@ -4896,6 +5753,52 @@ int ata_ratelimit(void)
4896 return rc; 5753 return rc;
4897} 5754}
4898 5755
5756/**
5757 * ata_wait_register - wait until register value changes
5758 * @reg: IO-mapped register
5759 * @mask: Mask to apply to read register value
5760 * @val: Wait condition
5761 * @interval_msec: polling interval in milliseconds
5762 * @timeout_msec: timeout in milliseconds
5763 *
5764 * Waiting for some bits of register to change is a common
 5765 * operation for ATA controllers. This function reads the 32-bit LE
 5766 * IO-mapped register @reg and tests for the following condition.
5767 *
5768 * (*@reg & mask) != val
5769 *
5770 * If the condition is met, it returns; otherwise, the process is
5771 * repeated after @interval_msec until timeout.
5772 *
5773 * LOCKING:
5774 * Kernel thread context (may sleep)
5775 *
5776 * RETURNS:
5777 * The final register value.
5778 */
5779u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5780 unsigned long interval_msec,
5781 unsigned long timeout_msec)
5782{
5783 unsigned long timeout;
5784 u32 tmp;
5785
5786 tmp = ioread32(reg);
5787
5788 /* Calculate timeout _after_ the first read to make sure
5789 * preceding writes reach the controller before starting to
5790 * eat away the timeout.
5791 */
5792 timeout = jiffies + (timeout_msec * HZ) / 1000;
5793
5794 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5795 msleep(interval_msec);
5796 tmp = ioread32(reg);
5797 }
5798
5799 return tmp;
5800}
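
A hedged usage sketch (the register name and bit are invented): wait up to a second for a controller-private busy bit to drop, polling every 10 ms. Because the function returns as soon as (tmp & mask) != val, passing the same constant for @mask and @val waits for a 1 -> 0 transition:

	u32 tmp;

	tmp = ata_wait_register(mmio + MY_PORT_STAT,	/* hypothetical MMIO register */
				MY_STAT_BUSY,		/* mask */
				MY_STAT_BUSY,		/* val: keep polling while set */
				10, 1000);		/* 10 ms interval, 1 s timeout */
	if (tmp & MY_STAT_BUSY)
		rc = -EBUSY;				/* timed out, bit never cleared */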
5801
4899/* 5802/*
4900 * libata is essentially a library of internal helper functions for 5803 * libata is essentially a library of internal helper functions for
4901 * low-level ATA host controller drivers. As such, the API/ABI is 5804 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4903,15 +5806,20 @@ int ata_ratelimit(void)
4903 * Do not depend on ABI/API stability. 5806 * Do not depend on ABI/API stability.
4904 */ 5807 */
4905 5808
5809EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5810EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5811EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
4906EXPORT_SYMBOL_GPL(ata_std_bios_param); 5812EXPORT_SYMBOL_GPL(ata_std_bios_param);
4907EXPORT_SYMBOL_GPL(ata_std_ports); 5813EXPORT_SYMBOL_GPL(ata_std_ports);
4908EXPORT_SYMBOL_GPL(ata_device_add); 5814EXPORT_SYMBOL_GPL(ata_device_add);
5815EXPORT_SYMBOL_GPL(ata_port_detach);
4909EXPORT_SYMBOL_GPL(ata_host_set_remove); 5816EXPORT_SYMBOL_GPL(ata_host_set_remove);
4910EXPORT_SYMBOL_GPL(ata_sg_init); 5817EXPORT_SYMBOL_GPL(ata_sg_init);
4911EXPORT_SYMBOL_GPL(ata_sg_init_one); 5818EXPORT_SYMBOL_GPL(ata_sg_init_one);
4912EXPORT_SYMBOL_GPL(__ata_qc_complete); 5819EXPORT_SYMBOL_GPL(ata_hsm_move);
5820EXPORT_SYMBOL_GPL(ata_qc_complete);
5821EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
4913EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5822EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4914EXPORT_SYMBOL_GPL(ata_eng_timeout);
4915EXPORT_SYMBOL_GPL(ata_tf_load); 5823EXPORT_SYMBOL_GPL(ata_tf_load);
4916EXPORT_SYMBOL_GPL(ata_tf_read); 5824EXPORT_SYMBOL_GPL(ata_tf_read);
4917EXPORT_SYMBOL_GPL(ata_noop_dev_select); 5825EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -4925,6 +5833,9 @@ EXPORT_SYMBOL_GPL(ata_port_start);
4925EXPORT_SYMBOL_GPL(ata_port_stop); 5833EXPORT_SYMBOL_GPL(ata_port_stop);
4926EXPORT_SYMBOL_GPL(ata_host_stop); 5834EXPORT_SYMBOL_GPL(ata_host_stop);
4927EXPORT_SYMBOL_GPL(ata_interrupt); 5835EXPORT_SYMBOL_GPL(ata_interrupt);
5836EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5837EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5838EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
4928EXPORT_SYMBOL_GPL(ata_qc_prep); 5839EXPORT_SYMBOL_GPL(ata_qc_prep);
4929EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 5840EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4930EXPORT_SYMBOL_GPL(ata_bmdma_setup); 5841EXPORT_SYMBOL_GPL(ata_bmdma_setup);
@@ -4932,33 +5843,46 @@ EXPORT_SYMBOL_GPL(ata_bmdma_start);
4932EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 5843EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4933EXPORT_SYMBOL_GPL(ata_bmdma_status); 5844EXPORT_SYMBOL_GPL(ata_bmdma_status);
4934EXPORT_SYMBOL_GPL(ata_bmdma_stop); 5845EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5846EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5847EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5848EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5849EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5850EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
4935EXPORT_SYMBOL_GPL(ata_port_probe); 5851EXPORT_SYMBOL_GPL(ata_port_probe);
5852EXPORT_SYMBOL_GPL(sata_set_spd);
5853EXPORT_SYMBOL_GPL(sata_phy_debounce);
5854EXPORT_SYMBOL_GPL(sata_phy_resume);
4936EXPORT_SYMBOL_GPL(sata_phy_reset); 5855EXPORT_SYMBOL_GPL(sata_phy_reset);
4937EXPORT_SYMBOL_GPL(__sata_phy_reset); 5856EXPORT_SYMBOL_GPL(__sata_phy_reset);
4938EXPORT_SYMBOL_GPL(ata_bus_reset); 5857EXPORT_SYMBOL_GPL(ata_bus_reset);
4939EXPORT_SYMBOL_GPL(ata_std_probeinit); 5858EXPORT_SYMBOL_GPL(ata_std_prereset);
4940EXPORT_SYMBOL_GPL(ata_std_softreset); 5859EXPORT_SYMBOL_GPL(ata_std_softreset);
4941EXPORT_SYMBOL_GPL(sata_std_hardreset); 5860EXPORT_SYMBOL_GPL(sata_std_hardreset);
4942EXPORT_SYMBOL_GPL(ata_std_postreset); 5861EXPORT_SYMBOL_GPL(ata_std_postreset);
4943EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4944EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4945EXPORT_SYMBOL_GPL(ata_dev_revalidate); 5862EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4946EXPORT_SYMBOL_GPL(ata_dev_classify); 5863EXPORT_SYMBOL_GPL(ata_dev_classify);
4947EXPORT_SYMBOL_GPL(ata_dev_pair); 5864EXPORT_SYMBOL_GPL(ata_dev_pair);
4948EXPORT_SYMBOL_GPL(ata_port_disable); 5865EXPORT_SYMBOL_GPL(ata_port_disable);
4949EXPORT_SYMBOL_GPL(ata_ratelimit); 5866EXPORT_SYMBOL_GPL(ata_ratelimit);
5867EXPORT_SYMBOL_GPL(ata_wait_register);
4950EXPORT_SYMBOL_GPL(ata_busy_sleep); 5868EXPORT_SYMBOL_GPL(ata_busy_sleep);
4951EXPORT_SYMBOL_GPL(ata_port_queue_task); 5869EXPORT_SYMBOL_GPL(ata_port_queue_task);
4952EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5870EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4953EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5871EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4954EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5872EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5873EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5874EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
4955EXPORT_SYMBOL_GPL(ata_scsi_release); 5875EXPORT_SYMBOL_GPL(ata_scsi_release);
4956EXPORT_SYMBOL_GPL(ata_host_intr); 5876EXPORT_SYMBOL_GPL(ata_host_intr);
5877EXPORT_SYMBOL_GPL(sata_scr_valid);
5878EXPORT_SYMBOL_GPL(sata_scr_read);
5879EXPORT_SYMBOL_GPL(sata_scr_write);
5880EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5881EXPORT_SYMBOL_GPL(ata_port_online);
5882EXPORT_SYMBOL_GPL(ata_port_offline);
4957EXPORT_SYMBOL_GPL(ata_id_string); 5883EXPORT_SYMBOL_GPL(ata_id_string);
4958EXPORT_SYMBOL_GPL(ata_id_c_string); 5884EXPORT_SYMBOL_GPL(ata_id_c_string);
4959EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5885EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4960EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4961EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4962 5886
4963EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5887EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4964EXPORT_SYMBOL_GPL(ata_timing_compute); 5888EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -4980,3 +5904,13 @@ EXPORT_SYMBOL_GPL(ata_device_suspend);
4980EXPORT_SYMBOL_GPL(ata_device_resume); 5904EXPORT_SYMBOL_GPL(ata_device_resume);
4981EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 5905EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4982EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 5906EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5907
5908EXPORT_SYMBOL_GPL(ata_eng_timeout);
5909EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5910EXPORT_SYMBOL_GPL(ata_port_abort);
5911EXPORT_SYMBOL_GPL(ata_port_freeze);
5912EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5913EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5914EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5915EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5916EXPORT_SYMBOL_GPL(ata_do_eh);