aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ata/sata_mv.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-20 11:46:42 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-20 11:46:42 -0400
commitdee2383784212c67819fdda1cbd4339f11d23b99 (patch)
treec6ec906c99c220c4a3894504195fc9f9b2d0f286 /drivers/ata/sata_mv.c
parente609ccc3161ead8a685b15533d9b6958ed368358 (diff)
parent5ddf24c5ea9d715dc4f5d5d5dd1c9337d90466dc (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (29 commits) libata: implement EH fast drain libata: schedule probing after SError access failure during autopsy libata: clear HOTPLUG flag after a reset libata: reorganize ata_ehi_hotplugged() libata: improve SCSI scan failure handling libata: quickly trigger SATA SPD down after debouncing failed libata: improve SATA PHY speed down logic The SATA controller device ID is different according to ahci: implement SCR_NOTIFICATION r/w ahci: make NO_NCQ handling more consistent libata: make ->scr_read/write callbacks return error code libata: implement AC_ERR_NCQ libata: improve EH report formatting sata_sil24: separate out sil24_do_softreset() sata_sil24: separate out sil24_exec_polled_cmd() sata_sil24: replace sil24_update_tf() with sil24_read_tf() ahci: separate out ahci_do_softreset() ahci: separate out ahci_exec_polled_cmd() ahci: separate out ahci_kick_engine() ahci: use deadline instead of fixed timeout for 1st FIS for SRST ...
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--drivers/ata/sata_mv.c203
1 files changed, 119 insertions, 84 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index fb8a749423ca..8ec520885b95 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -35,8 +35,6 @@
35 35
36 6) Add port multiplier support (intermediate) 36 6) Add port multiplier support (intermediate)
37 37
38 7) Test and verify 3.0 Gbps support
39
40 8) Develop a low-power-consumption strategy, and implement it. 38 8) Develop a low-power-consumption strategy, and implement it.
41 39
42 9) [Experiment, low priority] See if ATAPI can be supported using 40 9) [Experiment, low priority] See if ATAPI can be supported using
@@ -227,26 +225,26 @@ enum {
227 225
228 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 226 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
229 EDMA_ERR_IRQ_MASK_OFS = 0xc, 227 EDMA_ERR_IRQ_MASK_OFS = 0xc,
230 EDMA_ERR_D_PAR = (1 << 0), 228 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
231 EDMA_ERR_PRD_PAR = (1 << 1), 229 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
232 EDMA_ERR_DEV = (1 << 2), 230 EDMA_ERR_DEV = (1 << 2), /* device error */
233 EDMA_ERR_DEV_DCON = (1 << 3), 231 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
234 EDMA_ERR_DEV_CON = (1 << 4), 232 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
235 EDMA_ERR_SERR = (1 << 5), 233 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
236 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ 234 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
237 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ 235 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
238 EDMA_ERR_BIST_ASYNC = (1 << 8), 236 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
239 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ 237 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
240 EDMA_ERR_CRBQ_PAR = (1 << 9), 238 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
241 EDMA_ERR_CRPB_PAR = (1 << 10), 239 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
242 EDMA_ERR_INTRL_PAR = (1 << 11), 240 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
243 EDMA_ERR_IORDY = (1 << 12), 241 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
244 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), 242 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
245 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), 243 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
246 EDMA_ERR_LNK_DATA_RX = (0xf << 17), 244 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
247 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), 245 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
248 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), 246 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
249 EDMA_ERR_TRANS_PROTO = (1 << 31), 247 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
250 EDMA_ERR_OVERRUN_5 = (1 << 5), 248 EDMA_ERR_OVERRUN_5 = (1 << 5),
251 EDMA_ERR_UNDERRUN_5 = (1 << 6), 249 EDMA_ERR_UNDERRUN_5 = (1 << 6),
252 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 250 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
@@ -255,7 +253,7 @@ enum {
255 EDMA_ERR_DEV_CON | 253 EDMA_ERR_DEV_CON |
256 EDMA_ERR_SERR | 254 EDMA_ERR_SERR |
257 EDMA_ERR_SELF_DIS | 255 EDMA_ERR_SELF_DIS |
258 EDMA_ERR_CRBQ_PAR | 256 EDMA_ERR_CRQB_PAR |
259 EDMA_ERR_CRPB_PAR | 257 EDMA_ERR_CRPB_PAR |
260 EDMA_ERR_INTRL_PAR | 258 EDMA_ERR_INTRL_PAR |
261 EDMA_ERR_IORDY | 259 EDMA_ERR_IORDY |
@@ -270,7 +268,7 @@ enum {
270 EDMA_ERR_OVERRUN_5 | 268 EDMA_ERR_OVERRUN_5 |
271 EDMA_ERR_UNDERRUN_5 | 269 EDMA_ERR_UNDERRUN_5 |
272 EDMA_ERR_SELF_DIS_5 | 270 EDMA_ERR_SELF_DIS_5 |
273 EDMA_ERR_CRBQ_PAR | 271 EDMA_ERR_CRQB_PAR |
274 EDMA_ERR_CRPB_PAR | 272 EDMA_ERR_CRPB_PAR |
275 EDMA_ERR_INTRL_PAR | 273 EDMA_ERR_INTRL_PAR |
276 EDMA_ERR_IORDY, 274 EDMA_ERR_IORDY,
@@ -286,10 +284,10 @@ enum {
286 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ 284 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
287 EDMA_RSP_Q_PTR_SHIFT = 3, 285 EDMA_RSP_Q_PTR_SHIFT = 3,
288 286
289 EDMA_CMD_OFS = 0x28, 287 EDMA_CMD_OFS = 0x28, /* EDMA command register */
290 EDMA_EN = (1 << 0), 288 EDMA_EN = (1 << 0), /* enable EDMA */
291 EDMA_DS = (1 << 1), 289 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
292 ATA_RST = (1 << 2), 290 ATA_RST = (1 << 2), /* reset trans/link/phy */
293 291
294 EDMA_IORDY_TMOUT = 0x34, 292 EDMA_IORDY_TMOUT = 0x34,
295 EDMA_ARB_CFG = 0x38, 293 EDMA_ARB_CFG = 0x38,
@@ -301,14 +299,13 @@ enum {
301 MV_HP_ERRATA_60X1B2 = (1 << 3), 299 MV_HP_ERRATA_60X1B2 = (1 << 3),
302 MV_HP_ERRATA_60X1C0 = (1 << 4), 300 MV_HP_ERRATA_60X1C0 = (1 << 4),
303 MV_HP_ERRATA_XX42A0 = (1 << 5), 301 MV_HP_ERRATA_XX42A0 = (1 << 5),
304 MV_HP_GEN_I = (1 << 6), 302 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
305 MV_HP_GEN_II = (1 << 7), 303 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
306 MV_HP_GEN_IIE = (1 << 8), 304 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
307 305
308 /* Port private flags (pp_flags) */ 306 /* Port private flags (pp_flags) */
309 MV_PP_FLAG_EDMA_EN = (1 << 0), 307 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
310 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), 308 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
311 MV_PP_FLAG_HAD_A_RESET = (1 << 2),
312}; 309};
313 310
314#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 311#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
@@ -318,8 +315,12 @@ enum {
318enum { 315enum {
319 MV_DMA_BOUNDARY = 0xffffffffU, 316 MV_DMA_BOUNDARY = 0xffffffffU,
320 317
318 /* mask of register bits containing lower 32 bits
319 * of EDMA request queue DMA address
320 */
321 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, 321 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
322 322
323 /* ditto, for response queue */
323 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, 324 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
324}; 325};
325 326
@@ -403,10 +404,10 @@ struct mv_host_priv {
403}; 404};
404 405
405static void mv_irq_clear(struct ata_port *ap); 406static void mv_irq_clear(struct ata_port *ap);
406static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 407static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
407static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 408static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
408static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 409static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
409static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 410static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
410static int mv_port_start(struct ata_port *ap); 411static int mv_port_start(struct ata_port *ap);
411static void mv_port_stop(struct ata_port *ap); 412static void mv_port_stop(struct ata_port *ap);
412static void mv_qc_prep(struct ata_queued_cmd *qc); 413static void mv_qc_prep(struct ata_queued_cmd *qc);
@@ -823,7 +824,7 @@ static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
823} 824}
824 825
825/** 826/**
826 * mv_stop_dma - Disable eDMA engine 827 * __mv_stop_dma - Disable eDMA engine
827 * @ap: ATA channel to manipulate 828 * @ap: ATA channel to manipulate
828 * 829 *
829 * Verify the local cache of the eDMA state is accurate with a 830 * Verify the local cache of the eDMA state is accurate with a
@@ -832,7 +833,7 @@ static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
832 * LOCKING: 833 * LOCKING:
833 * Inherited from caller. 834 * Inherited from caller.
834 */ 835 */
835static int mv_stop_dma(struct ata_port *ap) 836static int __mv_stop_dma(struct ata_port *ap)
836{ 837{
837 void __iomem *port_mmio = mv_ap_base(ap); 838 void __iomem *port_mmio = mv_ap_base(ap);
838 struct mv_port_priv *pp = ap->private_data; 839 struct mv_port_priv *pp = ap->private_data;
@@ -865,6 +866,18 @@ static int mv_stop_dma(struct ata_port *ap)
865 return err; 866 return err;
866} 867}
867 868
869static int mv_stop_dma(struct ata_port *ap)
870{
871 unsigned long flags;
872 int rc;
873
874 spin_lock_irqsave(&ap->host->lock, flags);
875 rc = __mv_stop_dma(ap);
876 spin_unlock_irqrestore(&ap->host->lock, flags);
877
878 return rc;
879}
880
868#ifdef ATA_DEBUG 881#ifdef ATA_DEBUG
869static void mv_dump_mem(void __iomem *start, unsigned bytes) 882static void mv_dump_mem(void __iomem *start, unsigned bytes)
870{ 883{
@@ -961,22 +974,26 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
961 return ofs; 974 return ofs;
962} 975}
963 976
964static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in) 977static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
965{ 978{
966 unsigned int ofs = mv_scr_offset(sc_reg_in); 979 unsigned int ofs = mv_scr_offset(sc_reg_in);
967 980
968 if (0xffffffffU != ofs) 981 if (ofs != 0xffffffffU) {
969 return readl(mv_ap_base(ap) + ofs); 982 *val = readl(mv_ap_base(ap) + ofs);
970 else 983 return 0;
971 return (u32) ofs; 984 } else
985 return -EINVAL;
972} 986}
973 987
974static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 988static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
975{ 989{
976 unsigned int ofs = mv_scr_offset(sc_reg_in); 990 unsigned int ofs = mv_scr_offset(sc_reg_in);
977 991
978 if (0xffffffffU != ofs) 992 if (ofs != 0xffffffffU) {
979 writelfl(val, mv_ap_base(ap) + ofs); 993 writelfl(val, mv_ap_base(ap) + ofs);
994 return 0;
995 } else
996 return -EINVAL;
980} 997}
981 998
982static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, 999static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
@@ -1029,6 +1046,7 @@ static int mv_port_start(struct ata_port *ap)
1029 void __iomem *port_mmio = mv_ap_base(ap); 1046 void __iomem *port_mmio = mv_ap_base(ap);
1030 void *mem; 1047 void *mem;
1031 dma_addr_t mem_dma; 1048 dma_addr_t mem_dma;
1049 unsigned long flags;
1032 int rc; 1050 int rc;
1033 1051
1034 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1052 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -1067,10 +1085,14 @@ static int mv_port_start(struct ata_port *ap)
1067 pp->sg_tbl = mem; 1085 pp->sg_tbl = mem;
1068 pp->sg_tbl_dma = mem_dma; 1086 pp->sg_tbl_dma = mem_dma;
1069 1087
1088 spin_lock_irqsave(&ap->host->lock, flags);
1089
1070 mv_edma_cfg(ap, hpriv, port_mmio); 1090 mv_edma_cfg(ap, hpriv, port_mmio);
1071 1091
1072 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1092 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1073 1093
1094 spin_unlock_irqrestore(&ap->host->lock, flags);
1095
1074 /* Don't turn on EDMA here...do it before DMA commands only. Else 1096 /* Don't turn on EDMA here...do it before DMA commands only. Else
1075 * we'll be unable to send non-data, PIO, etc due to restricted access 1097 * we'll be unable to send non-data, PIO, etc due to restricted access
1076 * to shadow regs. 1098 * to shadow regs.
@@ -1090,11 +1112,7 @@ static int mv_port_start(struct ata_port *ap)
1090 */ 1112 */
1091static void mv_port_stop(struct ata_port *ap) 1113static void mv_port_stop(struct ata_port *ap)
1092{ 1114{
1093 unsigned long flags;
1094
1095 spin_lock_irqsave(&ap->host->lock, flags);
1096 mv_stop_dma(ap); 1115 mv_stop_dma(ap);
1097 spin_unlock_irqrestore(&ap->host->lock, flags);
1098} 1116}
1099 1117
1100/** 1118/**
@@ -1325,7 +1343,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1325 * port. Turn off EDMA so there won't be problems accessing 1343 * port. Turn off EDMA so there won't be problems accessing
1326 * shadow block, etc registers. 1344 * shadow block, etc registers.
1327 */ 1345 */
1328 mv_stop_dma(ap); 1346 __mv_stop_dma(ap);
1329 return ata_qc_issue_prot(qc); 1347 return ata_qc_issue_prot(qc);
1330 } 1348 }
1331 1349
@@ -1393,16 +1411,16 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1393 if (edma_err_cause & EDMA_ERR_DEV) 1411 if (edma_err_cause & EDMA_ERR_DEV)
1394 err_mask |= AC_ERR_DEV; 1412 err_mask |= AC_ERR_DEV;
1395 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1413 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1396 EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR | 1414 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1397 EDMA_ERR_INTRL_PAR)) { 1415 EDMA_ERR_INTRL_PAR)) {
1398 err_mask |= AC_ERR_ATA_BUS; 1416 err_mask |= AC_ERR_ATA_BUS;
1399 action |= ATA_EH_HARDRESET; 1417 action |= ATA_EH_HARDRESET;
1400 ata_ehi_push_desc(ehi, ", parity error"); 1418 ata_ehi_push_desc(ehi, "parity error");
1401 } 1419 }
1402 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 1420 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1403 ata_ehi_hotplugged(ehi); 1421 ata_ehi_hotplugged(ehi);
1404 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 1422 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1405 ", dev disconnect" : ", dev connect"); 1423 "dev disconnect" : "dev connect");
1406 } 1424 }
1407 1425
1408 if (IS_GEN_I(hpriv)) { 1426 if (IS_GEN_I(hpriv)) {
@@ -1411,7 +1429,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1411 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 1429 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1412 struct mv_port_priv *pp = ap->private_data; 1430 struct mv_port_priv *pp = ap->private_data;
1413 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1431 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1414 ata_ehi_push_desc(ehi, ", EDMA self-disable"); 1432 ata_ehi_push_desc(ehi, "EDMA self-disable");
1415 } 1433 }
1416 } else { 1434 } else {
1417 eh_freeze_mask = EDMA_EH_FREEZE; 1435 eh_freeze_mask = EDMA_EH_FREEZE;
@@ -1419,7 +1437,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1419 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1437 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1420 struct mv_port_priv *pp = ap->private_data; 1438 struct mv_port_priv *pp = ap->private_data;
1421 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1439 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1422 ata_ehi_push_desc(ehi, ", EDMA self-disable"); 1440 ata_ehi_push_desc(ehi, "EDMA self-disable");
1423 } 1441 }
1424 1442
1425 if (edma_err_cause & EDMA_ERR_SERR) { 1443 if (edma_err_cause & EDMA_ERR_SERR) {
@@ -1489,33 +1507,30 @@ static void mv_intr_edma(struct ata_port *ap)
1489 1507
1490 while (1) { 1508 while (1) {
1491 u16 status; 1509 u16 status;
1510 unsigned int tag;
1492 1511
1493 /* get s/w response queue last-read pointer, and compare */ 1512 /* get s/w response queue last-read pointer, and compare */
1494 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK; 1513 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1495 if (in_index == out_index) 1514 if (in_index == out_index)
1496 break; 1515 break;
1497 1516
1498
1499 /* 50xx: get active ATA command */ 1517 /* 50xx: get active ATA command */
1500 if (IS_GEN_I(hpriv)) 1518 if (IS_GEN_I(hpriv))
1501 qc = ata_qc_from_tag(ap, ap->active_tag); 1519 tag = ap->active_tag;
1502 1520
1503 /* 60xx: get active ATA command via tag, to enable support 1521 /* Gen II/IIE: get active ATA command via tag, to enable
1504 * for queueing. this works transparently for queued and 1522 * support for queueing. this works transparently for
1505 * non-queued modes. 1523 * queued and non-queued modes.
1506 */ 1524 */
1507 else { 1525 else if (IS_GEN_II(hpriv))
1508 unsigned int tag; 1526 tag = (le16_to_cpu(pp->crpb[out_index].id)
1527 >> CRPB_IOID_SHIFT_6) & 0x3f;
1509 1528
1510 if (IS_GEN_II(hpriv)) 1529 else /* IS_GEN_IIE */
1511 tag = (le16_to_cpu(pp->crpb[out_index].id) 1530 tag = (le16_to_cpu(pp->crpb[out_index].id)
1512 >> CRPB_IOID_SHIFT_6) & 0x3f; 1531 >> CRPB_IOID_SHIFT_7) & 0x3f;
1513 else
1514 tag = (le16_to_cpu(pp->crpb[out_index].id)
1515 >> CRPB_IOID_SHIFT_7) & 0x3f;
1516 1532
1517 qc = ata_qc_from_tag(ap, tag); 1533 qc = ata_qc_from_tag(ap, tag);
1518 }
1519 1534
1520 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS 1535 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1521 * bits (WARNING: might not necessarily be associated 1536 * bits (WARNING: might not necessarily be associated
@@ -1535,7 +1550,7 @@ static void mv_intr_edma(struct ata_port *ap)
1535 ata_qc_complete(qc); 1550 ata_qc_complete(qc);
1536 } 1551 }
1537 1552
1538 /* advance software response queue pointer, to 1553 /* advance software response queue pointer, to
1539 * indicate (after the loop completes) to hardware 1554 * indicate (after the loop completes) to hardware
1540 * that we have consumed a response queue entry. 1555 * that we have consumed a response queue entry.
1541 */ 1556 */
@@ -1741,26 +1756,30 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1741 return ofs; 1756 return ofs;
1742} 1757}
1743 1758
1744static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) 1759static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1745{ 1760{
1746 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; 1761 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1747 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 1762 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1748 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1763 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1749 1764
1750 if (ofs != 0xffffffffU) 1765 if (ofs != 0xffffffffU) {
1751 return readl(addr + ofs); 1766 *val = readl(addr + ofs);
1752 else 1767 return 0;
1753 return (u32) ofs; 1768 } else
1769 return -EINVAL;
1754} 1770}
1755 1771
1756static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1772static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1757{ 1773{
1758 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; 1774 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1759 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 1775 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1760 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1776 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1761 1777
1762 if (ofs != 0xffffffffU) 1778 if (ofs != 0xffffffffU) {
1763 writelfl(val, addr + ofs); 1779 writelfl(val, addr + ofs);
1780 return 0;
1781 } else
1782 return -EINVAL;
1764} 1783}
1765 1784
1766static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) 1785static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
@@ -2138,9 +2157,17 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2138 2157
2139 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); 2158 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2140 2159
2141 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " 2160#ifdef DEBUG
2142 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 2161 {
2143 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 2162 u32 sstatus, serror, scontrol;
2163
2164 mv_scr_read(ap, SCR_STATUS, &sstatus);
2165 mv_scr_read(ap, SCR_ERROR, &serror);
2166 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2167 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2168 				"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2169 }
2170#endif
2144 2171
2145 /* Issue COMRESET via SControl */ 2172 /* Issue COMRESET via SControl */
2146comreset_retry: 2173comreset_retry:
@@ -2164,9 +2191,17 @@ comreset_retry:
2164 (retry-- > 0)) 2191 (retry-- > 0))
2165 goto comreset_retry; 2192 goto comreset_retry;
2166 2193
2167 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " 2194#ifdef DEBUG
2168 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 2195 {
2169 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 2196 u32 sstatus, serror, scontrol;
2197
2198 mv_scr_read(ap, SCR_STATUS, &sstatus);
2199 mv_scr_read(ap, SCR_ERROR, &serror);
2200 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2201 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2202 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2203 }
2204#endif
2170 2205
2171 if (ata_port_offline(ap)) { 2206 if (ata_port_offline(ap)) {
2172 *class = ATA_DEV_NONE; 2207 *class = ATA_DEV_NONE;
@@ -2209,7 +2244,7 @@ static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2209 struct mv_port_priv *pp = ap->private_data; 2244 struct mv_port_priv *pp = ap->private_data;
2210 struct ata_eh_context *ehc = &ap->eh_context; 2245 struct ata_eh_context *ehc = &ap->eh_context;
2211 int rc; 2246 int rc;
2212 2247
2213 rc = mv_stop_dma(ap); 2248 rc = mv_stop_dma(ap);
2214 if (rc) 2249 if (rc)
2215 ehc->i.action |= ATA_EH_HARDRESET; 2250 ehc->i.action |= ATA_EH_HARDRESET;