author    Linus Torvalds <torvalds@g5.osdl.org>    2006-05-20 13:35:41 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-05-20 13:35:41 -0400
commit    6566a3f8f3281497a81815dfe2b64eb54dafe05d (patch)
tree      ff0073a0b85edef12d6248608d08c4962fec58d6 /drivers
parent    bb02aacc02c6002143a1cfc313d144a413eec8d0 (diff)
parent    e2a7f77a7b4ab298a38c8d1f624628456069bdb0 (diff)
Merge branch 'upstream-fixes' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-fixes' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  [PATCH] libata-core: fix current kernel-doc warnings
  [PATCH] sata_mv: version bump
  [PATCH] sata_mv: endian fix
  [PATCH] sata_mv: remove local copy of queue indexes
  [PATCH] sata_mv: spurious interrupt workaround
  [PATCH] sata_mv: chip initialization fixes
  [PATCH] sata_mv: deal with interrupt coalescing interrupts
  [PATCH] sata_mv: prevent unnecessary double-resets
Diffstat (limited to 'drivers')
 drivers/scsi/libata-core.c |   6 +
 drivers/scsi/sata_mv.c     | 134 +-
 2 files changed, 83 insertions(+), 57 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index bd147207f25d..823dfa78c0ba 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -864,6 +864,9 @@ static unsigned int ata_id_xfermask(const u16 *id)
 /**
  *	ata_port_queue_task - Queue port_task
  *	@ap: The ata_port to queue port_task for
+ *	@fn: workqueue function to be scheduled
+ *	@data: data value to pass to workqueue function
+ *	@delay: delay time for workqueue function
  *
  *	Schedule @fn(@data) for execution after @delay jiffies using
  *	port_task.  There is one port_task per port and it's the
@@ -2739,6 +2742,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
  *	ata_dev_init_params - Issue INIT DEV PARAMS command
  *	@ap: Port associated with device @dev
  *	@dev: Device to which command will be sent
+ *	@heads: Number of heads (taskfile parameter)
+ *	@sectors: Number of sectors (taskfile parameter)
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
@@ -4302,6 +4307,7 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
  *	ata_device_suspend - prepare a device for suspend
  *	@ap: port the device is connected to
  *	@dev: the device to suspend
+ *	@state: target power management state
  *
  *	Flush the cache on the drive, if appropriate, then issue a
  *	standbynow command.
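
The three libata-core.c hunks above silence kernel-doc warnings by giving every function parameter an @name line. For illustration, a complete block assembled from the first hunk looks like this (the closing sentence is editorial, not from the patch):

	/**
	 *	ata_port_queue_task - Queue port_task
	 *	@ap: The ata_port to queue port_task for
	 *	@fn: workqueue function to be scheduled
	 *	@data: data value to pass to workqueue function
	 *	@delay: delay time for workqueue function
	 *
	 *	Every parameter in the C prototype needs a matching @name line,
	 *	otherwise scripts/kernel-doc warns about a missing description.
	 */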
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index d5fdcb9a8842..9b8bca1ac1f0 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"0.6"
+#define DRV_VERSION	"0.7"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -50,6 +50,12 @@ enum {
 
 	MV_PCI_REG_BASE		= 0,
 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
+	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
+	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
+	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
+	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
+	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
+
 	MV_SATAHC0_REG_BASE	= 0x20000,
 	MV_FLASH_CTL		= 0x1046c,
 	MV_GPIO_PORT_CTL	= 0x104f0,
@@ -302,9 +308,6 @@ struct mv_port_priv {
 	dma_addr_t		crpb_dma;
 	struct mv_sg		*sg_tbl;
 	dma_addr_t		sg_tbl_dma;
-
-	unsigned		req_producer;	/* cp of req_in_ptr */
-	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
 	u32			pp_flags;
 };
 
@@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap)
 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
-	pp->req_producer = pp->rsp_consumer = 0;
-
 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
 	 * we'll be unable to send non-data, PIO, etc due to restricted access
 	 * to shadow regs.
@@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 	}
 }
 
-static inline unsigned mv_inc_q_index(unsigned *index)
+static inline unsigned mv_inc_q_index(unsigned index)
 {
-	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
-	return *index;
+	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
 }
 
 static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
 {
-	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
 		(last ? CRQB_CMD_LAST : 0);
+	*cmdw = cpu_to_le16(tmp);
 }
 
 /**
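
The mv_crqb_pack_cmd() change above is the endian fix from the merge: the CRQB sits in DMA memory that the controller reads as little-endian, so the 16-bit command word must be built in CPU byte order and converted exactly once at the store. A minimal sketch of the pattern:

	u16 tmp;

	tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS;	/* CPU order */
	*cmdw = cpu_to_le16(tmp);	/* no-op on LE hosts, byte-swap on BE */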
@@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 *cw;
 	struct ata_taskfile *tf;
 	u16 flags = 0;
+	unsigned in_index;
 
 	if (ATA_PROT_DMA != qc->tf.protocol)
 		return;
 
-	/* the req producer index should be the same as we remember it */
-	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->req_producer);
-
 	/* Fill in command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
-	pp->crqb[pp->req_producer].sg_addr =
+	/* get current queue index from hardware */
+	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+	pp->crqb[in_index].sg_addr =
 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
-	pp->crqb[pp->req_producer].sg_addr_hi =
+	pp->crqb[in_index].sg_addr_hi =
 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
-	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
+	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 
-	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
+	cw = &pp->crqb[in_index].ata_cmd[0];
 	tf = &qc->tf;
 
 	/* Sadly, the CRQB cannot accomodate all registers--there are
@@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	struct mv_port_priv *pp = ap->private_data;
 	struct mv_crqb_iie *crqb;
 	struct ata_taskfile *tf;
+	unsigned in_index;
 	u32 flags = 0;
 
 	if (ATA_PROT_DMA != qc->tf.protocol)
 		return;
 
-	/* the req producer index should be the same as we remember it */
-	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->req_producer);
-
 	/* Fill in Gen IIE command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
-	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
+	/* get current queue index from hardware */
+	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
 	crqb->flags = cpu_to_le32(flags);
@@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 {
 	void __iomem *port_mmio = mv_ap_base(qc->ap);
 	struct mv_port_priv *pp = qc->ap->private_data;
+	unsigned in_index;
 	u32 in_ptr;
 
 	if (ATA_PROT_DMA != qc->tf.protocol) {
@@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 		return ata_qc_issue_prot(qc);
 	}
 
 	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-	/* the req producer index should be the same as we remember it */
-	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->req_producer);
 	/* until we do queuing, the queue should be empty at this point */
-	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
-		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
-	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */
+	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
 
 	mv_start_dma(port_mmio, pp);
 
 	/* and write the request in pointer to kick the EDMA to life */
 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
-	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
+	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
 	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
 	return 0;
@@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp = ap->private_data;
+	unsigned out_index;
 	u32 out_ptr;
 	u8 ata_status;
 
 	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-	/* the response consumer index should be the same as we remember it */
-	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->rsp_consumer);
-
-	ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
+	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
+		>> CRPB_FLAG_STATUS_SHIFT;
 
 	/* increment our consumer index... */
-	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
+	out_index = mv_inc_q_index(out_index);
 
 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
-	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
-		  EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->rsp_consumer);
+	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
 	/* write out our inc'd consumer index so EDMA knows we're caught up */
 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
-	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
+	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
 	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
 	/* Return ATA status register for completed CRPB */
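
The hunks above implement "remove local copy of queue indexes": rather than mirroring the EDMA producer/consumer pointers in mv_port_priv, where a cached copy could drift out of sync with the hardware, the driver now re-derives the index from the pointer register each time it is needed. The recurring idiom, sketched standalone for the request side:

	/* read the hardware in-pointer and mask it down to a queue index */
	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* ... fill pp->crqb[in_index], then advance and write the pointer back */
	in_index = mv_inc_q_index(in_index);
	in_ptr   = (in_ptr & EDMA_REQ_Q_BASE_LO_MASK)
		 | (in_index << EDMA_REQ_Q_PTR_SHIFT);
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);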
@@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 /**
  *      mv_err_intr - Handle error interrupts on the port
  *      @ap: ATA channel to manipulate
+ *      @reset_allowed: bool: 0 == don't trigger from reset here
  *
  *      In most cases, just clear the interrupt and move on.  However,
  *      some cases require an eDMA reset, which is done right before
@@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_err_intr(struct ata_port *ap)
+static void mv_err_intr(struct ata_port *ap, int reset_allowed)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 edma_err_cause, serr = 0;
@@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap)
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
 	/* check for fatal here and recover if needed */
-	if (EDMA_ERR_FATAL & edma_err_cause) {
+	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
 		mv_stop_and_reset(ap);
-	}
 }
 
 /**
@@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 		struct ata_port *ap = host_set->ports[port];
 		struct mv_port_priv *pp = ap->private_data;
 
-		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
+		hard_port = mv_hardport_from_port(port); /* range 0..3 */
 		handled = 0;	/* ensure ata_status is set if handled++ */
 
 		/* Note that DEV_IRQ might happen spuriously during EDMA,
-		 * and should be ignored in such cases.  We could mask it,
-		 * but it's pretty rare and may not be worth the overhead.
+		 * and should be ignored in such cases.
+		 * The cause of this is still under investigation.
 		 */
 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
 			/* EDMA: check for response queue interrupt */
@@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 				ata_status = readb((void __iomem *)
 					   ap->ioaddr.status_addr);
 				handled = 1;
+				/* ignore spurious intr if drive still BUSY */
+				if (ata_status & ATA_BUSY) {
+					ata_status = 0;
+					handled = 0;
+				}
 			}
 		}
 
@@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
 		}
 		if ((PORT0_ERR << shift) & relevant) {
-			mv_err_intr(ap);
+			mv_err_intr(ap, 1);
 			err_mask |= AC_ERR_OTHER;
 			handled = 1;
 		}
@@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 	struct ata_host_set *host_set = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
 	void __iomem *mmio = host_set->mmio_base;
+	struct mv_host_priv *hpriv;
 	u32 irq_stat;
 
 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 			handled++;
 		}
 	}
+
+	hpriv = host_set->private_data;
+	if (IS_60XX(hpriv)) {
+		/* deal with the interrupt coalescing bits */
+		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
+			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
+			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
+			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
+		}
+	}
+
 	if (PCI_ERR & irq_stat) {
 		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
 			readl(mmio + PCI_IRQ_CAUSE_OFS));
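
This pairs with the new MV_IRQ_COAL_* register offsets defined earlier in the diff. On 6xxx parts the chip can raise coalescing-done bits in the main cause register; if their cause registers were never written back, the events would stay latched. The addition acknowledges all three in one go, a sketch of the ack sequence:

	if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
		writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);	/* ack low ports */
		writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);	/* ack high ports */
		writelfl(0, mmio + MV_IRQ_COAL_CAUSE);		/* ack summary */
	}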
@@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 
 	if (IS_60XX(hpriv)) {
 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-		ifctl |= (1 << 12) | (1 << 7);
+		ifctl |= (1 << 7);		/* enable gen2i speed */
+		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
 	}
 
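
This is one of the chip initialization fixes; the same change appears again in mv_init_host() below. Instead of just ORing in bits 7 and 12, the code now keeps only the low 12 bits of SATA_INTERFACE_CTL and overlays the 0x9b1000 value from the chip documentation, with bit 7 (gen2i speed) set separately. The combined sequence, as a standalone sketch:

	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
	ifctl |= (1 << 7);			/* enable gen2i speed */
	ifctl  = (ifctl & 0xfff) | 0x9b1000;	/* upper bits per chip spec */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);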
@@ -2031,11 +2046,14 @@ static void mv_eng_timeout(struct ata_port *ap)
 		ap->host_set->mmio_base, ap, qc, qc->scsicmd,
 		&qc->scsicmd->cmnd);
 
-	mv_err_intr(ap);
+	mv_err_intr(ap, 0);
 	mv_stop_and_reset(ap);
 
-	qc->err_mask |= AC_ERR_TIMEOUT;
-	ata_eh_qc_complete(qc);
+	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+	if (qc->flags & ATA_QCFLAG_ACTIVE) {
+		qc->err_mask |= AC_ERR_TIMEOUT;
+		ata_eh_qc_complete(qc);
+	}
 }
 
 /**
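
Two things here prevent unnecessary double-resets, the counterpart of the mv_err_intr() signature change earlier in this diff. First, mv_err_intr(ap, 0) passes reset_allowed == 0, so the error handler only clears interrupt state and leaves the single reset to the explicit mv_stop_and_reset() call. Second, the qc is completed only if still marked active, guarding against completing it twice. In outline:

	mv_err_intr(ap, 0);		/* clear/record errors, no reset here */
	mv_stop_and_reset(ap);		/* exactly one reset, done explicitly */

	if (qc->flags & ATA_QCFLAG_ACTIVE) {	/* avoid double completion */
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}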
@@ -2229,7 +2247,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
 			void __iomem *port_mmio = mv_port_base(mmio, port);
 
 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-			ifctl |= (1 << 12);
+			ifctl |= (1 << 7);	/* enable gen2i speed */
+			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
 		}
 
@@ -2330,6 +2349,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc) {
 		return rc;
 	}
+	pci_set_master(pdev);
 
 	rc = pci_request_regions(pdev, DRV_NAME);
 	if (rc) {
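
The one-line pci_set_master() addition matters because the controller is a bus-mastering DMA device: if firmware leaves the bus-master bit in PCI_COMMAND clear, EDMA cannot move data at all. Calling it right after pci_enable_device() succeeds is the conventional probe order:

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);		/* allow the chip to DMA */

	rc = pci_request_regions(pdev, DRV_NAME);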