author     Ke Wei <kewei.mv@gmail.com>                                  2008-02-23 08:15:27 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>      2008-02-23 08:30:58 -0500
commit     8f261aaf9be5c1246013cf6a65b98586d24832a5 (patch)
tree       425ace0d5f6788fa50db334a019902050f867dec
parent     b5762948263dd5e9725a380e7a9626f99e40ae9d (diff)
[SCSI] mvsas: convert from rough draft to working driver
Convert rough draft Marvell 6440 driver to a working driver. Added support
for SAS and SATA devices, hotplug, wide port, and expanders.

Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
 drivers/scsi/mvsas.c (mode 100644 -> 100755) | 1782
 1 file changed, 1469 insertions(+), 313 deletions(-)
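One structural change worth noting before the diff: the old bitmap-based slot tag allocator (mvs_tag_set/mvs_tag_test) is replaced by a circular free-list indexed by tag_in/tag_out (see mvs_tag_init, mvs_tag_alloc and mvs_tag_clear in the hunks below). The following standalone sketch is not part of the patch — NSLOTS and the tag_* names here are illustrative stand-ins for the driver's MVS_SLOTS and mvs_tag_* helpers — but it demonstrates the same ring discipline in plain C: initialization fills the ring with every tag, allocation pops at tag_out, and completion pushes the freed tag back at tag_in, with all index arithmetic masked because the slot count is a power of two.

	/*
	 * Standalone illustration (assumption-laden, not the driver code) of the
	 * circular free-list that replaces the bitmap tag allocator in this patch.
	 * NSLOTS must be a power of two so wrap-around can be done with a mask.
	 */
	#include <stdio.h>

	#define NSLOTS 8			/* hypothetical; the driver uses MVS_SLOTS (512) */

	static unsigned int tags[NSLOTS];
	static unsigned int tag_out;		/* consumer index: next free tag to hand out */
	static unsigned int tag_in;		/* producer index: where released tags go back */

	static void tag_init(void)
	{
		unsigned int i;

		for (i = 0; i < NSLOTS; i++)
			tags[i] = i;		/* ring starts holding every tag */
		tag_out = 0;
		tag_in = NSLOTS - 1;
	}

	static int tag_alloc(unsigned int *tag)
	{
		if (tag_out == tag_in)		/* ring empty: all tags outstanding */
			return -1;
		*tag = tags[tag_out];
		tag_out = (tag_out + 1) & (NSLOTS - 1);
		return 0;
	}

	static void tag_release(unsigned int tag)
	{
		tag_in = (tag_in + 1) & (NSLOTS - 1);
		tags[tag_in] = tag;		/* put the tag back on the ring */
	}

	int main(void)
	{
		unsigned int t;

		tag_init();
		while (tag_alloc(&t) == 0)	/* drains the ring */
			printf("got tag %u\n", t);
		tag_release(3);			/* returning a tag makes room again */
		if (tag_alloc(&t) == 0)
			printf("allocation possible again (tag %u)\n", t);
		return 0;
	}

As in the driver's version, one ring entry is sacrificed to distinguish the full and empty cases, so at most NSLOTS - 1 (here) or MVS_SLOTS - 1 (in the driver) tags can be outstanding at once; the patch also introduces a much smaller MVS_QUEUE_SIZE (30), presumably to cap the advertised queue depth well below that limit.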
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 03638b9bb283..30e20e69715a 100644..100755
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -2,6 +2,7 @@
2 mvsas.c - Marvell 88SE6440 SAS/SATA support 2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3 3
4 Copyright 2007 Red Hat, Inc. 4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
5 6
6 This program is free software; you can redistribute it and/or 7 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as 8 modify it under the terms of the GNU General Public License as
@@ -37,16 +38,35 @@
37#include <scsi/libsas.h> 38#include <scsi/libsas.h>
38#include <asm/io.h> 39#include <asm/io.h>
39 40
40#define DRV_NAME "mvsas" 41#define DRV_NAME "mvsas"
41#define DRV_VERSION "0.1" 42#define DRV_VERSION "0.5"
43#define _MV_DUMP 0
44#define MVS_DISABLE_NVRAM
45#define MVS_DISABLE_MSI
42 46
43#define mr32(reg) readl(regs + MVS_##reg) 47#define mr32(reg) readl(regs + MVS_##reg)
44#define mw32(reg,val) writel((val), regs + MVS_##reg) 48#define mw32(reg,val) writel((val), regs + MVS_##reg)
45#define mw32_f(reg,val) do { \ 49#define mw32_f(reg,val) do { \
46 writel((val), regs + MVS_##reg); \ 50 writel((val), regs + MVS_##reg); \
47 readl(regs + MVS_##reg); \ 51 readl(regs + MVS_##reg); \
48 } while (0) 52 } while (0)
49 53
54#define MVS_ID_NOT_MAPPED 0xff
55#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
56
57/* offset for D2H FIS in the Received FIS List Structure */
58#define SATA_RECEIVED_D2H_FIS(reg_set) \
59 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
60#define SATA_RECEIVED_PIO_FIS(reg_set) \
61 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
62#define UNASSOC_D2H_FIS(id) \
63 ((void *) mvi->rx_fis + 0x100 * id)
64
65#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
66 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
67 (__mc) != 0 && __rest; \
68 (++__lseq), (__mc) >>= 1)
69
50/* driver compile-time configuration */ 70/* driver compile-time configuration */
51enum driver_configuration { 71enum driver_configuration {
52 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 72 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
@@ -57,10 +77,12 @@ enum driver_configuration {
57 MVS_SLOTS = 512, /* command slots */ 77 MVS_SLOTS = 512, /* command slots */
58 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ 78 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
59 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 79 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
60 MVS_ATA_CMD_SZ = 128, /* SATA command table buffer size */ 80 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
61 MVS_OAF_SZ = 64, /* Open address frame buffer size */ 81 MVS_OAF_SZ = 64, /* Open address frame buffer size */
62 82
63 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ 83 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
84
85 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
64}; 86};
65 87
66/* unchangeable hardware details */ 88/* unchangeable hardware details */
@@ -89,7 +111,7 @@ enum hw_registers {
89 MVS_GBL_CTL = 0x04, /* global control */ 111 MVS_GBL_CTL = 0x04, /* global control */
90 MVS_GBL_INT_STAT = 0x08, /* global irq status */ 112 MVS_GBL_INT_STAT = 0x08, /* global irq status */
91 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ 113 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
92 MVS_GBL_PORT_TYPE = 0x00, /* port type */ 114 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
93 115
94 MVS_CTL = 0x100, /* SAS/SATA port configuration */ 116 MVS_CTL = 0x100, /* SAS/SATA port configuration */
95 MVS_PCS = 0x104, /* SAS/SATA port control/status */ 117 MVS_PCS = 0x104, /* SAS/SATA port control/status */
@@ -102,24 +124,29 @@ enum hw_registers {
102 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ 124 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
103 MVS_TX_HI = 0x128, 125 MVS_TX_HI = 0x128,
104 126
105 MVS_RX_PROD_IDX = 0x12C, /* RX producer pointer */ 127 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
106 MVS_RX_CONS_IDX = 0x130, /* RX consumer pointer (RO) */ 128 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
107 MVS_RX_CFG = 0x134, /* RX configuration */ 129 MVS_RX_CFG = 0x134, /* RX configuration */
108 MVS_RX_LO = 0x138, /* RX (completion) ring addr */ 130 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
109 MVS_RX_HI = 0x13C, 131 MVS_RX_HI = 0x13C,
132 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
110 133
111 MVS_INT_COAL = 0x148, /* Int coalescing config */ 134 MVS_INT_COAL = 0x148, /* Int coalescing config */
112 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ 135 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
113 MVS_INT_STAT = 0x150, /* Central int status */ 136 MVS_INT_STAT = 0x150, /* Central int status */
114 MVS_INT_MASK = 0x154, /* Central int enable */ 137 MVS_INT_MASK = 0x154, /* Central int enable */
115 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ 138 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
139 MVS_INT_MASK_SRS = 0x15C,
116 140
117 /* ports 1-3 follow after this */ 141 /* ports 1-3 follow after this */
118 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ 142 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
119 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ 143 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
144 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
145 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
120 146
121 /* ports 1-3 follow after this */ 147 /* ports 1-3 follow after this */
122 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ 148 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
149 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
123 150
124 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ 151 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
125 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ 152 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
@@ -127,6 +154,14 @@ enum hw_registers {
127 /* ports 1-3 follow after this */ 154 /* ports 1-3 follow after this */
128 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ 155 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
129 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ 156 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
157 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
158 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
159
160 /* ports 1-3 follow after this */
161 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
162 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
163 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
164 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
130}; 165};
131 166
132enum hw_register_bits { 167enum hw_register_bits {
@@ -140,12 +175,35 @@ enum hw_register_bits {
140 175
141 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ 176 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
142 SATA_TARGET = (1U << 16), /* port0 SATA target enable */ 177 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
143 AUTO_DET = (1U << 8), /* port0 SAS/SATA autodetect */ 178 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
144 SAS_MODE = (1U << 0), /* port0 SAS(1), SATA(0) mode */ 179 MODE_AUTO_DET_PORT6 = (1U << 14),
145 /* SAS_MODE value may be 180 MODE_AUTO_DET_PORT5 = (1U << 13),
146 * dictated (in hw) by values 181 MODE_AUTO_DET_PORT4 = (1U << 12),
147 * of SATA_TARGET & AUTO_DET 182 MODE_AUTO_DET_PORT3 = (1U << 11),
148 */ 183 MODE_AUTO_DET_PORT2 = (1U << 10),
184 MODE_AUTO_DET_PORT1 = (1U << 9),
185 MODE_AUTO_DET_PORT0 = (1U << 8),
186 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
187 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
188 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
189 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
190 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
191 MODE_SAS_PORT6_MASK = (1U << 6),
192 MODE_SAS_PORT5_MASK = (1U << 5),
193 MODE_SAS_PORT4_MASK = (1U << 4),
194 MODE_SAS_PORT3_MASK = (1U << 3),
195 MODE_SAS_PORT2_MASK = (1U << 2),
196 MODE_SAS_PORT1_MASK = (1U << 1),
197 MODE_SAS_PORT0_MASK = (1U << 0),
198 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
199 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
200 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
201 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
202
203 /* SAS_MODE value may be
204 * dictated (in hw) by values
205 * of SATA_TARGET & AUTO_DET
206 */
149 207
150 /* MVS_TX_CFG */ 208 /* MVS_TX_CFG */
151 TX_EN = (1U << 16), /* Enable TX */ 209 TX_EN = (1U << 16), /* Enable TX */
@@ -167,12 +225,14 @@ enum hw_register_bits {
167 CINT_MEM = (1U << 26), /* int mem parity err */ 225 CINT_MEM = (1U << 26), /* int mem parity err */
168 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ 226 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
169 CINT_SRS = (1U << 3), /* SRS event */ 227 CINT_SRS = (1U << 3), /* SRS event */
170 CINT_CI_STOP = (1U << 10), /* cmd issue stopped */ 228 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
171 CINT_DONE = (1U << 0), /* cmd completion */ 229 CINT_DONE = (1U << 0), /* cmd completion */
172 230
173 /* shl for ports 1-3 */ 231 /* shl for ports 1-3 */
174 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ 232 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
175 CINT_PORT = (1U << 8), /* port0 event */ 233 CINT_PORT = (1U << 8), /* port0 event */
234 CINT_PORT_MASK_OFFSET = 8,
235 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
176 236
177 /* TX (delivery) ring bits */ 237 /* TX (delivery) ring bits */
178 TXQ_CMD_SHIFT = 29, 238 TXQ_CMD_SHIFT = 29,
@@ -239,8 +299,15 @@ enum hw_register_bits {
239 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ 299 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
240 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ 300 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
241 PHY_RST = (1U << 0), /* phy reset */ 301 PHY_RST = (1U << 0), /* phy reset */
302 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
303 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
304 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
305 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
306 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
307 PHY_READY_MASK = (1U << 20),
242 308
243 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ 309 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
310 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
244 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ 311 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
245 PHYEV_AN = (1U << 18), /* SATA async notification */ 312 PHYEV_AN = (1U << 18), /* SATA async notification */
246 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ 313 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
@@ -260,13 +327,37 @@ enum hw_register_bits {
260 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ 327 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
261 328
262 /* MVS_PCS */ 329 /* MVS_PCS */
330 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
331 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
332 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
263 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ 333 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
264 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ 334 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
265 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ 335 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
266 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ 336 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
267 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ 337 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
268 PCS_CMD_RST = (1U << 2), /* reset cmd issue */ 338 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
269 PCS_CMD_EN = (1U << 0), /* enable cmd issue */ 339 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
340
341 /* Port n Attached Device Info */
342 PORT_DEV_SSP_TRGT = (1U << 19),
343 PORT_DEV_SMP_TRGT = (1U << 18),
344 PORT_DEV_STP_TRGT = (1U << 17),
345 PORT_DEV_SSP_INIT = (1U << 11),
346 PORT_DEV_SMP_INIT = (1U << 10),
347 PORT_DEV_STP_INIT = (1U << 9),
348 PORT_PHY_ID_MASK = (0xFFU << 24),
349 PORT_DEV_TRGT_MASK = (0x7U << 17),
350 PORT_DEV_INIT_MASK = (0x7U << 9),
351 PORT_DEV_TYPE_MASK = (0x7U << 0),
352
353 /* Port n PHY Status */
354 PHY_RDY = (1U << 2),
355 PHY_DW_SYNC = (1U << 1),
356 PHY_OOB_DTCTD = (1U << 0),
357
358 /* VSR */
359 /* PHYMODE 6 (CDB) */
360 PHY_MODE6_DTL_SPEED = (1U << 27),
270}; 361};
271 362
272enum mvs_info_flags { 363enum mvs_info_flags {
@@ -329,33 +420,60 @@ enum sas_cmd_port_registers {
329 420
330/* SAS/SATA configuration port registers, aka phy registers */ 421/* SAS/SATA configuration port registers, aka phy registers */
331enum sas_sata_config_port_regs { 422enum sas_sata_config_port_regs {
332 PHYR_IDENTIFY = 0x0, /* info for IDENTIFY frame */ 423 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
333 PHYR_ADDR_LO = 0x4, /* my SAS address (low) */ 424 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
334 PHYR_ADDR_HI = 0x8, /* my SAS address (high) */ 425 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
335 PHYR_ATT_DEV_INFO = 0xC, /* attached device info */ 426 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
336 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ 427 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
337 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ 428 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
338 PHYR_SATA_CTL = 0x18, /* SATA control */ 429 PHYR_SATA_CTL = 0x18, /* SATA control */
339 PHYR_PHY_STAT = 0x1C, /* PHY status */ 430 PHYR_PHY_STAT = 0x1C, /* PHY status */
431 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
432 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
433 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
434 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
435 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
436 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
340 PHYR_WIDE_PORT = 0x38, /* wide port participating */ 437 PHYR_WIDE_PORT = 0x38, /* wide port participating */
341 PHYR_CURRENT0 = 0x80, /* current connection info 0 */ 438 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
342 PHYR_CURRENT1 = 0x84, /* current connection info 1 */ 439 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
343 PHYR_CURRENT2 = 0x88, /* current connection info 2 */ 440 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
344}; 441};
345 442
443/* SAS/SATA Vendor Specific Port Registers */
444enum sas_sata_vsp_regs {
445 VSR_PHY_STAT = 0x00, /* Phy Status */
446 VSR_PHY_MODE1 = 0x01, /* phy tx */
447 VSR_PHY_MODE2 = 0x02, /* tx scc */
448 VSR_PHY_MODE3 = 0x03, /* pll */
449 VSR_PHY_MODE4 = 0x04, /* VCO */
450 VSR_PHY_MODE5 = 0x05, /* Rx */
451 VSR_PHY_MODE6 = 0x06, /* CDR */
452 VSR_PHY_MODE7 = 0x07, /* Impedance */
453 VSR_PHY_MODE8 = 0x08, /* Voltage */
454 VSR_PHY_MODE9 = 0x09, /* Test */
455 VSR_PHY_MODE10 = 0x0A, /* Power */
456 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
457 VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */
458 VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */
459};
460
346enum pci_cfg_registers { 461enum pci_cfg_registers {
347 PCR_PHY_CTL = 0x40, 462 PCR_PHY_CTL = 0x40,
348 PCR_PHY_CTL2 = 0x90, 463 PCR_PHY_CTL2 = 0x90,
464 PCR_DEV_CTRL = 0xE8,
349}; 465};
350 466
351enum pci_cfg_register_bits { 467enum pci_cfg_register_bits {
352 PCTL_PWR_ON = (0xFU << 24), 468 PCTL_PWR_ON = (0xFU << 24),
353 PCTL_OFF = (0xFU << 12), 469 PCTL_OFF = (0xFU << 12),
470 PRD_REQ_SIZE = (0x4000),
471 PRD_REQ_MASK = (0x00007000),
354}; 472};
355 473
356enum nvram_layout_offsets { 474enum nvram_layout_offsets {
357 NVR_SIG = 0x00, /* 0xAA, 0x55 */ 475 NVR_SIG = 0x00, /* 0xAA, 0x55 */
358 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ 476 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
359}; 477};
360 478
361enum chip_flavors { 479enum chip_flavors {
@@ -364,10 +482,41 @@ enum chip_flavors {
364 chip_6480, 482 chip_6480,
365}; 483};
366 484
485enum port_type {
486 PORT_TYPE_SAS = (1L << 1),
487 PORT_TYPE_SATA = (1L << 0),
488};
489
490/* Command Table Format */
491enum ct_format {
492 /* SSP */
493 SSP_F_H = 0x00,
494 SSP_F_IU = 0x18,
495 SSP_F_MAX = 0x4D,
496 /* STP */
497 STP_CMD_FIS = 0x00,
498 STP_ATAPI_CMD = 0x40,
499 STP_F_MAX = 0x10,
500 /* SMP */
501 SMP_F_T = 0x00,
502 SMP_F_DEP = 0x01,
503 SMP_F_MAX = 0x101,
504};
505
506enum status_buffer {
507 SB_EIR_OFF = 0x00, /* Error Information Record */
508 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
509 SB_RFB_MAX = 0x400, /* RFB size*/
510};
511
512enum error_info_rec {
513 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
514};
515
367struct mvs_chip_info { 516struct mvs_chip_info {
368 unsigned int n_phy; 517 u32 n_phy;
369 unsigned int srs_sz; 518 u32 srs_sz;
370 unsigned int slot_width; 519 u32 slot_width;
371}; 520};
372 521
373struct mvs_err_info { 522struct mvs_err_info {
@@ -395,26 +544,43 @@ struct mvs_cmd_hdr {
395 544
396struct mvs_slot_info { 545struct mvs_slot_info {
397 struct sas_task *task; 546 struct sas_task *task;
398 unsigned int n_elem; 547 u32 n_elem;
548 u32 tx;
399 549
400 /* DMA buffer for storing cmd tbl, open addr frame, status buffer, 550 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
401 * and PRD table 551 * and PRD table
402 */ 552 */
403 void *buf; 553 void *buf;
404 dma_addr_t buf_dma; 554 dma_addr_t buf_dma;
555#if _MV_DUMP
556 u32 cmd_size;
557#endif
405 558
406 void *response; 559 void *response;
407}; 560};
408 561
409struct mvs_port { 562struct mvs_port {
410 struct asd_sas_port sas_port; 563 struct asd_sas_port sas_port;
564 u8 port_attached;
565 u8 taskfileset;
566 u8 wide_port_phymap;
411}; 567};
412 568
413struct mvs_phy { 569struct mvs_phy {
414 struct mvs_port *port; 570 struct mvs_port *port;
415 struct asd_sas_phy sas_phy; 571 struct asd_sas_phy sas_phy;
416 572 struct sas_identify identify;
417 u8 frame_rcvd[24 + 1024]; 573 struct scsi_device *sdev;
574 u64 dev_sas_addr;
575 u64 att_dev_sas_addr;
576 u32 att_dev_info;
577 u32 dev_info;
578 u32 phy_type;
579 u32 phy_status;
580 u32 irq_status;
581 u32 frame_rcvd_size;
582 u8 frame_rcvd[32];
583 u8 phy_attached;
418}; 584};
419 585
420struct mvs_info { 586struct mvs_info {
@@ -440,24 +606,62 @@ struct mvs_info {
440 __le32 *rx_fis; /* RX'd FIS area */ 606 __le32 *rx_fis; /* RX'd FIS area */
441 dma_addr_t rx_fis_dma; 607 dma_addr_t rx_fis_dma;
442 608
443 struct mvs_cmd_hdr *slot; /* DMA command header slots */ 609 struct mvs_cmd_hdr *slot; /* DMA command header slots */
444 dma_addr_t slot_dma; 610 dma_addr_t slot_dma;
445 611
446 const struct mvs_chip_info *chip; 612 const struct mvs_chip_info *chip;
447 613
448 /* further per-slot information */ 614 unsigned long tags[MVS_SLOTS];
449 struct mvs_slot_info slot_info[MVS_SLOTS]; 615 struct mvs_slot_info slot_info[MVS_SLOTS];
450 unsigned long tags[(MVS_SLOTS / sizeof(unsigned long)) + 1]; 616 /* further per-slot information */
451
452 struct mvs_phy phy[MVS_MAX_PHYS]; 617 struct mvs_phy phy[MVS_MAX_PHYS];
453 struct mvs_port port[MVS_MAX_PHYS]; 618 struct mvs_port port[MVS_MAX_PHYS];
619
620 u32 can_queue; /* per adapter */
621 u32 tag_out; /*Get*/
622 u32 tag_in; /*Give*/
454}; 623};
455 624
625struct mvs_queue_task {
626 struct list_head list;
627
628 void *uldd_task;
629};
630
631static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
632 void *funcdata);
633static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
634static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
635static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port);
636static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
637 u32 port, u32 val);
638static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port);
639static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val);
640static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr);
641static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port);
642static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val);
643static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr);
644static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
645static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
646static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
647static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
648
649static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
650static void mvs_detect_porttype(struct mvs_info *mvi, int i);
651static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
652static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port);
653static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port);
654static u32 mvs_is_sig_fis_received(u32 irq_status);
655
656static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
657static void mvs_scan_start(struct Scsi_Host *);
658static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);
659
456static struct scsi_transport_template *mvs_stt; 660static struct scsi_transport_template *mvs_stt;
457 661
458static const struct mvs_chip_info mvs_chips[] = { 662static const struct mvs_chip_info mvs_chips[] = {
459 [chip_6320] = { 2, 16, 9 }, 663 [chip_6320] = { 2, 16, 9 },
460 [chip_6440] = { 4, 16, 9 }, 664 [chip_6440] = { 4, 16, 9 },
461 [chip_6480] = { 8, 32, 10 }, 665 [chip_6480] = { 8, 32, 10 },
462}; 666};
463 667
@@ -468,6 +672,8 @@ static struct scsi_host_template mvs_sht = {
468 .target_alloc = sas_target_alloc, 672 .target_alloc = sas_target_alloc,
469 .slave_configure = sas_slave_configure, 673 .slave_configure = sas_slave_configure,
470 .slave_destroy = sas_slave_destroy, 674 .slave_destroy = sas_slave_destroy,
675 .scan_finished = mvs_scan_finished,
676 .scan_start = mvs_scan_start,
471 .change_queue_depth = sas_change_queue_depth, 677 .change_queue_depth = sas_change_queue_depth,
472 .change_queue_type = sas_change_queue_type, 678 .change_queue_type = sas_change_queue_type,
473 .bios_param = sas_bios_param, 679 .bios_param = sas_bios_param,
@@ -477,14 +683,154 @@ static struct scsi_host_template mvs_sht = {
477 .sg_tablesize = SG_ALL, 683 .sg_tablesize = SG_ALL,
478 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 684 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
479 .use_clustering = ENABLE_CLUSTERING, 685 .use_clustering = ENABLE_CLUSTERING,
480 .eh_device_reset_handler= sas_eh_device_reset_handler, 686 .eh_device_reset_handler = sas_eh_device_reset_handler,
481 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 687 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
482 .slave_alloc = sas_slave_alloc, 688 .slave_alloc = mvs_sas_slave_alloc,
483 .target_destroy = sas_target_destroy, 689 .target_destroy = sas_target_destroy,
484 .ioctl = sas_ioctl, 690 .ioctl = sas_ioctl,
485}; 691};
486 692
487static void mvs_int_rx(struct mvs_info *mvi, bool self_clear); 693static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
694{
695 u32 i;
696 u32 run;
697 u32 offset;
698
699 offset = 0;
700 while (size) {
701 printk("%08X : ", baseaddr + offset);
702 if (size >= 16)
703 run = 16;
704 else
705 run = size;
706 size -= run;
707 for (i = 0; i < 16; i++) {
708 if (i < run)
709 printk("%02X ", (u32)data[i]);
710 else
711 printk(" ");
712 }
713 printk(": ");
714 for (i = 0; i < run; i++)
715 printk("%c", isalnum(data[i]) ? data[i] : '.');
716 printk("\n");
717 data = &data[16];
718 offset += run;
719 }
720 printk("\n");
721}
722
723static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
724 enum sas_protocol proto)
725{
726#if _MV_DUMP
727 u32 offset;
728 struct pci_dev *pdev = mvi->pdev;
729 struct mvs_slot_info *slot = &mvi->slot_info[tag];
730
731 offset = slot->cmd_size + MVS_OAF_SZ +
732 sizeof(struct mvs_prd) * slot->n_elem;
733 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
734 tag);
735 mvs_hexdump(32, (u8 *) slot->response,
736 (u32) slot->buf_dma + offset);
737#endif
738}
739
740static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
741 enum sas_protocol proto)
742{
743#if _MV_DUMP
744 u32 sz, w_ptr, r_ptr;
745 u64 addr;
746 void __iomem *regs = mvi->regs;
747 struct pci_dev *pdev = mvi->pdev;
748 struct mvs_slot_info *slot = &mvi->slot_info[tag];
749
750 /*Delivery Queue */
751 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
752 w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
753 r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
754 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
755 dev_printk(KERN_DEBUG, &pdev->dev,
756 "Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
757 sz, w_ptr, r_ptr);
758 dev_printk(KERN_DEBUG, &pdev->dev,
759 "Delivery Queue Base Address=0x%llX (PA)"
760 "(tx_dma=0x%llX), Entry=%04d\n",
761 addr, mvi->tx_dma, w_ptr);
762 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
763 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
764 /*Command List */
765 addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
766 dev_printk(KERN_DEBUG, &pdev->dev,
767 "Command List Base Address=0x%llX (PA)"
768 "(slot_dma=0x%llX), Header=%03d\n",
769 addr, mvi->slot_dma, tag);
770 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
771 /*mvs_cmd_hdr */
772 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
773 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
774 /*1.command table area */
775 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
776 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
777 /*2.open address frame area */
778 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
779 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
780 (u32) slot->buf_dma + slot->cmd_size);
781 /*3.status buffer */
782 mvs_hba_sb_dump(mvi, tag, proto);
783 /*4.PRD table */
784 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
785 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
786 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
787 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
788#endif
789}
790
791static void mvs_hba_cq_dump(struct mvs_info *mvi)
792{
793#if _MV_DUMP
794 u64 addr;
795 void __iomem *regs = mvi->regs;
796 struct pci_dev *pdev = mvi->pdev;
797 u32 entry = mvi->rx_cons + 1;
798 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
799
800 /*Completion Queue */
801 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
802 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
803 (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
804 dev_printk(KERN_DEBUG, &pdev->dev,
805 "Completion List Base Address=0x%llX (PA), "
806 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
807 addr, entry - 1, mvi->rx[0]);
808 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
809 mvi->rx_dma + sizeof(u32) * entry);
810#endif
811}
812
813static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
814{
815 void __iomem *regs = mvi->regs;
816 u32 tmp;
817
818 tmp = mr32(GBL_CTL);
819
820 mw32(GBL_CTL, tmp | INT_EN);
821}
822
823static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
824{
825 void __iomem *regs = mvi->regs;
826 u32 tmp;
827
828 tmp = mr32(GBL_CTL);
829
830 mw32(GBL_CTL, tmp & ~INT_EN);
831}
832
833static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
488 834
489/* move to PCI layer or libata core? */ 835/* move to PCI layer or libata core? */
490static int pci_go_64(struct pci_dev *pdev) 836static int pci_go_64(struct pci_dev *pdev)
@@ -519,39 +865,38 @@ static int pci_go_64(struct pci_dev *pdev)
519 return rc; 865 return rc;
520} 866}
521 867
522static void mvs_tag_clear(struct mvs_info *mvi, unsigned int tag) 868static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
523{ 869{
524 mvi->tags[tag / sizeof(unsigned long)] &= 870 mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
525 ~(1UL << (tag % sizeof(unsigned long))); 871 mvi->tags[mvi->tag_in] = tag;
526} 872}
527 873
528static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 874static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
529{ 875{
530 mvi->tags[tag / sizeof(unsigned long)] |= 876 mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
531 (1UL << (tag % sizeof(unsigned long)));
532} 877}
533 878
534static bool mvs_tag_test(struct mvs_info *mvi, unsigned int tag) 879static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
535{ 880{
536 return mvi->tags[tag / sizeof(unsigned long)] & 881 if (mvi->tag_out != mvi->tag_in) {
537 (1UL << (tag % sizeof(unsigned long))); 882 *tag_out = mvi->tags[mvi->tag_out];
883 mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
884 return 0;
885 }
886 return -EBUSY;
538} 887}
539 888
540static int mvs_tag_alloc(struct mvs_info *mvi, unsigned int *tag_out) 889static void mvs_tag_init(struct mvs_info *mvi)
541{ 890{
542 unsigned int i; 891 int i;
543 892 for (i = 0; i < MVS_SLOTS; ++i)
544 for (i = 0; i < MVS_SLOTS; i++) 893 mvi->tags[i] = i;
545 if (!mvs_tag_test(mvi, i)) { 894 mvi->tag_out = 0;
546 mvs_tag_set(mvi, i); 895 mvi->tag_in = MVS_SLOTS - 1;
547 *tag_out = i;
548 return 0;
549 }
550
551 return -EBUSY;
552} 896}
553 897
554static int mvs_eep_read(void __iomem *regs, unsigned int addr, u32 *data) 898#ifndef MVS_DISABLE_NVRAM
899static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
555{ 900{
556 int timeout = 1000; 901 int timeout = 1000;
557 902
@@ -573,10 +918,10 @@ static int mvs_eep_read(void __iomem *regs, unsigned int addr, u32 *data)
573 return -EBUSY; 918 return -EBUSY;
574} 919}
575 920
576static int mvs_eep_read_buf(void __iomem *regs, unsigned int addr, 921static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
577 void *buf, unsigned int buflen) 922 void *buf, u32 buflen)
578{ 923{
579 unsigned int addr_end, tmp_addr, i, j; 924 u32 addr_end, tmp_addr, i, j;
580 u32 tmp = 0; 925 u32 tmp = 0;
581 int rc; 926 int rc;
582 u8 *tmp8, *buf8 = buf; 927 u8 *tmp8, *buf8 = buf;
@@ -592,7 +937,7 @@ static int mvs_eep_read_buf(void __iomem *regs, unsigned int addr,
592 if (rc) 937 if (rc)
593 return rc; 938 return rc;
594 939
595 tmp8 = (u8 *) &tmp; 940 tmp8 = (u8 *)&tmp;
596 for (i = j; i < 4; i++) 941 for (i = j; i < 4; i++)
597 *buf8++ = tmp8[i]; 942 *buf8++ = tmp8[i];
598 943
@@ -613,7 +958,7 @@ static int mvs_eep_read_buf(void __iomem *regs, unsigned int addr,
613 if (rc) 958 if (rc)
614 return rc; 959 return rc;
615 960
616 tmp8 = (u8 *) &tmp; 961 tmp8 = (u8 *)&tmp;
617 j = addr_end - tmp_addr; 962 j = addr_end - tmp_addr;
618 for (i = 0; i < j; i++) 963 for (i = 0; i < j; i++)
619 *buf8++ = tmp8[i]; 964 *buf8++ = tmp8[i];
@@ -623,13 +968,15 @@ static int mvs_eep_read_buf(void __iomem *regs, unsigned int addr,
623 968
624 return 0; 969 return 0;
625} 970}
971#endif
626 972
627static int mvs_nvram_read(struct mvs_info *mvi, unsigned int addr, 973static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
628 void *buf, unsigned int buflen) 974 void *buf, u32 buflen)
629{ 975{
976#ifndef MVS_DISABLE_NVRAM
630 void __iomem *regs = mvi->regs; 977 void __iomem *regs = mvi->regs;
631 int rc, i; 978 int rc, i;
632 unsigned int sum; 979 u32 sum;
633 u8 hdr[2], *tmp; 980 u8 hdr[2], *tmp;
634 const char *msg; 981 const char *msg;
635 982
@@ -644,16 +991,17 @@ static int mvs_nvram_read(struct mvs_info *mvi, unsigned int addr,
644 goto err_out; 991 goto err_out;
645 } 992 }
646 993
647 if (hdr[0] != 0x5A) { /* entry id */ 994 if (hdr[0] != 0x5A) {
995 /* entry id */
648 msg = "invalid nvram entry id"; 996 msg = "invalid nvram entry id";
649 rc = -ENOENT; 997 rc = -ENOENT;
650 goto err_out; 998 goto err_out;
651 } 999 }
652 1000
653 tmp = buf; 1001 tmp = buf;
654 sum = ((unsigned int)hdr[0]) + ((unsigned int)hdr[1]); 1002 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
655 for (i = 0; i < buflen; i++) 1003 for (i = 0; i < buflen; i++)
656 sum += ((unsigned int)tmp[i]); 1004 sum += ((u32)tmp[i]);
657 1005
658 if (sum) { 1006 if (sum) {
659 msg = "nvram checksum failure"; 1007 msg = "nvram checksum failure";
@@ -666,11 +1014,122 @@ static int mvs_nvram_read(struct mvs_info *mvi, unsigned int addr,
666err_out: 1014err_out:
667 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); 1015 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
668 return rc; 1016 return rc;
1017#else
1018 /* FIXME , For SAS target mode */
1019 memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8);
1020 return 0;
1021#endif
1022}
1023
1024static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1025{
1026 struct mvs_phy *phy = &mvi->phy[i];
1027
1028 if (!phy->phy_attached)
1029 return;
1030
1031 if (phy->phy_type & PORT_TYPE_SAS) {
1032 struct sas_identify_frame *id;
1033
1034 id = (struct sas_identify_frame *)phy->frame_rcvd;
1035 id->dev_type = phy->identify.device_type;
1036 id->initiator_bits = SAS_PROTOCOL_ALL;
1037 id->target_bits = phy->identify.target_port_protocols;
1038 } else if (phy->phy_type & PORT_TYPE_SATA) {
1039 /* TODO */
1040 }
1041 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1042 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1043 PORTE_BYTES_DMAED);
1044}
1045
1046static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1047{
1048 /* give the phy enabling interrupt event time to come in (1s
1049 * is empirically about all it takes) */
1050 if (time < HZ)
1051 return 0;
1052 /* Wait for discovery to finish */
1053 scsi_flush_work(shost);
1054 return 1;
1055}
1056
1057static void mvs_scan_start(struct Scsi_Host *shost)
1058{
1059 int i;
1060 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1061
1062 for (i = 0; i < mvi->chip->n_phy; ++i) {
1063 mvs_bytes_dmaed(mvi, i);
1064 }
1065}
1066
1067static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
1068{
1069 int rc;
1070
1071 rc = sas_slave_alloc(scsi_dev);
1072
1073 return rc;
669} 1074}
670 1075
671static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events) 1076static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
672{ 1077{
673 /* FIXME */ 1078 struct pci_dev *pdev = mvi->pdev;
1079 struct sas_ha_struct *sas_ha = &mvi->sas;
1080 struct mvs_phy *phy = &mvi->phy[port_no];
1081 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1082
1083 phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
1084 /*
1085 * events is port event now ,
1086 * we need check the interrupt status which belongs to per port.
1087 */
1088 dev_printk(KERN_DEBUG, &pdev->dev,
1089 "Port %d Event = %X\n",
1090 port_no, phy->irq_status);
1091
1092 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1093 if (!mvs_is_phy_ready(mvi, port_no)) {
1094 sas_phy_disconnected(sas_phy);
1095 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1096 } else
1097 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1098 }
1099 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1100 if (phy->irq_status & PHYEV_COMWAKE) {
1101 u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
1102 mvs_write_port_irq_mask(mvi, port_no,
1103 tmp | PHYEV_SIG_FIS);
1104 }
1105 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1106 phy->phy_status = mvs_is_phy_ready(mvi, port_no);
1107 if (phy->phy_status) {
1108 mvs_detect_porttype(mvi, port_no);
1109
1110 if (phy->phy_type & PORT_TYPE_SATA) {
1111 u32 tmp = mvs_read_port_irq_mask(mvi,
1112 port_no);
1113 tmp &= ~PHYEV_SIG_FIS;
1114 mvs_write_port_irq_mask(mvi,
1115 port_no, tmp);
1116 }
1117
1118 mvs_update_phyinfo(mvi, port_no, 0);
1119 sas_ha->notify_phy_event(sas_phy,
1120 PHYE_OOB_DONE);
1121 mvs_bytes_dmaed(mvi, port_no);
1122 } else {
1123 dev_printk(KERN_DEBUG, &pdev->dev,
1124 "plugin interrupt but phy is gone\n");
1125 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1126 NULL);
1127 }
1128 } else if (phy->irq_status & PHYEV_BROAD_CH)
1129 sas_ha->notify_port_event(sas_phy,
1130 PORTE_BROADCAST_RCVD);
1131 }
1132 mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
674} 1133}
675 1134
676static void mvs_int_sata(struct mvs_info *mvi) 1135static void mvs_int_sata(struct mvs_info *mvi)
@@ -679,11 +1138,12 @@ static void mvs_int_sata(struct mvs_info *mvi)
679} 1138}
680 1139
681static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task, 1140static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
682 struct mvs_slot_info *slot, unsigned int slot_idx) 1141 struct mvs_slot_info *slot, u32 slot_idx)
683{ 1142{
684 if (slot->n_elem) 1143 if (!sas_protocol_ata(task->task_proto))
685 pci_unmap_sg(mvi->pdev, task->scatter, 1144 if (slot->n_elem)
686 slot->n_elem, task->data_dir); 1145 pci_unmap_sg(mvi->pdev, task->scatter,
1146 slot->n_elem, task->data_dir);
687 1147
688 switch (task->task_proto) { 1148 switch (task->task_proto) {
689 case SAS_PROTOCOL_SMP: 1149 case SAS_PROTOCOL_SMP:
@@ -701,40 +1161,60 @@ static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
701 break; 1161 break;
702 } 1162 }
703 1163
1164 slot->task = NULL;
704 mvs_tag_clear(mvi, slot_idx); 1165 mvs_tag_clear(mvi, slot_idx);
705} 1166}
706 1167
707static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 1168static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
708 unsigned int slot_idx) 1169 u32 slot_idx)
709{ 1170{
710 /* FIXME */ 1171 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1172 u64 err_dw0 = *(u32 *) slot->response;
1173 void __iomem *regs = mvi->regs;
1174 u32 tmp;
1175
1176 if (err_dw0 & CMD_ISS_STPD)
1177 if (sas_protocol_ata(task->task_proto)) {
1178 tmp = mr32(INT_STAT_SRS);
1179 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1180 }
1181
1182 mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
711} 1183}
712 1184
713static void mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc) 1185static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
714{ 1186{
715 unsigned int slot_idx = rx_desc & RXQ_SLOT_MASK; 1187 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
716 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1188 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
717 struct sas_task *task = slot->task; 1189 struct sas_task *task = slot->task;
718 struct task_status_struct *tstat = &task->task_status; 1190 struct task_status_struct *tstat = &task->task_status;
1191 struct mvs_port *port = &mvi->port[task->dev->port->id];
719 bool aborted; 1192 bool aborted;
1193 void *to;
720 1194
721 spin_lock(&task->task_state_lock); 1195 spin_lock(&task->task_state_lock);
722 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 1196 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
723 if (!aborted) { 1197 if (!aborted) {
724 task->task_state_flags &= 1198 task->task_state_flags &=
725 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1199 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
726 task->task_state_flags |= SAS_TASK_STATE_DONE; 1200 task->task_state_flags |= SAS_TASK_STATE_DONE;
727 } 1201 }
728 spin_unlock(&task->task_state_lock); 1202 spin_unlock(&task->task_state_lock);
729 1203
730 if (aborted) 1204 if (aborted)
731 return; 1205 return -1;
732 1206
733 memset(tstat, 0, sizeof(*tstat)); 1207 memset(tstat, 0, sizeof(*tstat));
734 tstat->resp = SAS_TASK_COMPLETE; 1208 tstat->resp = SAS_TASK_COMPLETE;
735 1209
1210
1211 if (unlikely(!port->port_attached)) {
1212 tstat->stat = SAS_PHY_DOWN;
1213 goto out;
1214 }
1215
736 /* error info record present */ 1216 /* error info record present */
737 if (rx_desc & RXQ_ERR) { 1217 if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
738 tstat->stat = SAM_CHECK_COND; 1218 tstat->stat = SAM_CHECK_COND;
739 mvs_slot_err(mvi, task, slot_idx); 1219 mvs_slot_err(mvi, task, slot_idx);
740 goto out; 1220 goto out;
@@ -743,13 +1223,14 @@ static void mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
743 switch (task->task_proto) { 1223 switch (task->task_proto) {
744 case SAS_PROTOCOL_SSP: 1224 case SAS_PROTOCOL_SSP:
745 /* hw says status == 0, datapres == 0 */ 1225 /* hw says status == 0, datapres == 0 */
746 if (rx_desc & RXQ_GOOD) 1226 if (rx_desc & RXQ_GOOD) {
747 tstat->stat = SAM_GOOD; 1227 tstat->stat = SAM_GOOD;
748 1228 tstat->resp = SAS_TASK_COMPLETE;
1229 }
749 /* response frame present */ 1230 /* response frame present */
750 else if (rx_desc & RXQ_RSP) { 1231 else if (rx_desc & RXQ_RSP) {
751 struct ssp_response_iu *iu = 1232 struct ssp_response_iu *iu =
752 slot->response + sizeof(struct mvs_err_info); 1233 slot->response + sizeof(struct mvs_err_info);
753 sas_ssp_task_response(&mvi->pdev->dev, task, iu); 1234 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
754 } 1235 }
755 1236
@@ -758,20 +1239,37 @@ static void mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
758 tstat->stat = SAM_CHECK_COND; 1239 tstat->stat = SAM_CHECK_COND;
759 break; 1240 break;
760 1241
761 case SAS_PROTOCOL_SMP: 1242 case SAS_PROTOCOL_SMP: {
762 tstat->stat = SAM_GOOD; 1243 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
763 break; 1244 tstat->stat = SAM_GOOD;
1245 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1246 memcpy(to + sg_resp->offset,
1247 slot->response + sizeof(struct mvs_err_info),
1248 sg_dma_len(sg_resp));
1249 kunmap_atomic(to, KM_IRQ0);
1250 break;
1251 }
764 1252
765 case SAS_PROTOCOL_SATA: 1253 case SAS_PROTOCOL_SATA:
766 case SAS_PROTOCOL_STP: 1254 case SAS_PROTOCOL_STP:
767 if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) == RXQ_DONE) 1255 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
768 tstat->stat = SAM_GOOD; 1256 struct ata_task_resp *resp =
769 else 1257 (struct ata_task_resp *)tstat->buf;
770 tstat->stat = SAM_CHECK_COND; 1258
771 /* FIXME: read taskfile data from SATA register set 1259 if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
772 * associated with SATA target 1260 RXQ_DONE)
773 */ 1261 tstat->stat = SAM_GOOD;
774 break; 1262 else
1263 tstat->stat = SAM_CHECK_COND;
1264
1265 resp->frame_len = sizeof(struct dev_to_host_fis);
1266 memcpy(&resp->ending_fis[0],
1267 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1268 sizeof(struct dev_to_host_fis));
1269 if (resp->ending_fis[2] & ATA_ERR)
1270 mvs_hexdump(16, resp->ending_fis, 0);
1271 break;
1272 }
775 1273
776 default: 1274 default:
777 tstat->stat = SAM_CHECK_COND; 1275 tstat->stat = SAM_CHECK_COND;
@@ -781,6 +1279,7 @@ static void mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
781out: 1279out:
782 mvs_slot_free(mvi, task, slot, slot_idx); 1280 mvs_slot_free(mvi, task, slot, slot_idx);
783 task->task_done(task); 1281 task->task_done(task);
1282 return tstat->stat;
784} 1283}
785 1284
786static void mvs_int_full(struct mvs_info *mvi) 1285static void mvs_int_full(struct mvs_info *mvi)
@@ -791,6 +1290,8 @@ static void mvs_int_full(struct mvs_info *mvi)
791 1290
792 stat = mr32(INT_STAT); 1291 stat = mr32(INT_STAT);
793 1292
1293 mvs_int_rx(mvi, false);
1294
794 for (i = 0; i < MVS_MAX_PORTS; i++) { 1295 for (i = 0; i < MVS_MAX_PORTS; i++) {
795 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); 1296 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
796 if (tmp) 1297 if (tmp)
@@ -800,48 +1301,62 @@ static void mvs_int_full(struct mvs_info *mvi)
800 if (stat & CINT_SRS) 1301 if (stat & CINT_SRS)
801 mvs_int_sata(mvi); 1302 mvs_int_sata(mvi);
802 1303
803 if (stat & (CINT_CI_STOP | CINT_DONE))
804 mvs_int_rx(mvi, false);
805
806 mw32(INT_STAT, stat); 1304 mw32(INT_STAT, stat);
807} 1305}
808 1306
809static void mvs_int_rx(struct mvs_info *mvi, bool self_clear) 1307static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
810{ 1308{
1309 void __iomem *regs = mvi->regs;
811 u32 rx_prod_idx, rx_desc; 1310 u32 rx_prod_idx, rx_desc;
812 bool attn = false; 1311 bool attn = false;
1312 struct pci_dev *pdev = mvi->pdev;
813 1313
814 /* the first dword in the RX ring is special: it contains 1314 /* the first dword in the RX ring is special: it contains
815 * a mirror of the hardware's RX producer index, so that 1315 * a mirror of the hardware's RX producer index, so that
816 * we don't have to stall the CPU reading that register. 1316 * we don't have to stall the CPU reading that register.
817 * The actual RX ring is offset by one dword, due to this. 1317 * The actual RX ring is offset by one dword, due to this.
818 */ 1318 */
819 rx_prod_idx = le32_to_cpu(mvi->rx[0]) & 0xfff; 1319 rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
820 if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */ 1320 if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */
821 mvi->rx_cons = 0xfff; 1321 mvi->rx_cons = 0xfff;
822 return; 1322 return 0;
823 } 1323 }
1324
1325 /* The CMPL_Q may come late, read from register and try again
1326 * note: if coalescing is enabled,
1327 * it will need to read from register every time for sure
1328 */
1329 if (mvi->rx_cons == rx_prod_idx)
1330 return 0;
1331
824 if (mvi->rx_cons == 0xfff) 1332 if (mvi->rx_cons == 0xfff)
825 mvi->rx_cons = MVS_RX_RING_SZ - 1; 1333 mvi->rx_cons = MVS_RX_RING_SZ - 1;
826 1334
827 while (mvi->rx_cons != rx_prod_idx) { 1335 while (mvi->rx_cons != rx_prod_idx) {
1336
828 /* increment our internal RX consumer pointer */ 1337 /* increment our internal RX consumer pointer */
829 mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1); 1338 mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
830 1339
831 /* Read RX descriptor at offset+1, due to above */
832 rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]); 1340 rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
833 1341
834 if (rx_desc & RXQ_DONE) 1342 mvs_hba_cq_dump(mvi);
835 /* we had a completion, error or no */
836 mvs_slot_complete(mvi, rx_desc);
837 1343
838 if (rx_desc & RXQ_ATTN) 1344 if (unlikely(rx_desc & RXQ_DONE))
1345 mvs_slot_complete(mvi, rx_desc);
1346 if (rx_desc & RXQ_ATTN) {
839 attn = true; 1347 attn = true;
1348 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1349 rx_desc);
1350 } else if (rx_desc & RXQ_ERR) {
1351 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1352 rx_desc);
1353 }
840 } 1354 }
841 1355
842 if (attn && self_clear) 1356 if (attn && self_clear)
843 mvs_int_full(mvi); 1357 mvs_int_full(mvi);
844 1358
1359 return 0;
845} 1360}
846 1361
847static irqreturn_t mvs_interrupt(int irq, void *opaque) 1362static irqreturn_t mvs_interrupt(int irq, void *opaque)
@@ -851,6 +1366,10 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
851 u32 stat; 1366 u32 stat;
852 1367
853 stat = mr32(GBL_INT_STAT); 1368 stat = mr32(GBL_INT_STAT);
1369
1370 /* clear CMD_CMPLT ASAP */
1371 mw32_f(INT_STAT, CINT_DONE);
1372
854 if (stat == 0 || stat == 0xffffffff) 1373 if (stat == 0 || stat == 0xffffffff)
855 return IRQ_NONE; 1374 return IRQ_NONE;
856 1375
@@ -863,6 +1382,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
863 return IRQ_HANDLED; 1382 return IRQ_HANDLED;
864} 1383}
865 1384
1385#ifndef MVS_DISABLE_MSI
866static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) 1386static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
867{ 1387{
868 struct mvs_info *mvi = opaque; 1388 struct mvs_info *mvi = opaque;
@@ -875,32 +1395,46 @@ static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
875 1395
876 return IRQ_HANDLED; 1396 return IRQ_HANDLED;
877} 1397}
1398#endif
878 1399
879struct mvs_task_exec_info { 1400struct mvs_task_exec_info {
880 struct sas_task *task; 1401 struct sas_task *task;
881 struct mvs_cmd_hdr *hdr; 1402 struct mvs_cmd_hdr *hdr;
882 unsigned int tag; 1403 struct mvs_port *port;
883 int n_elem; 1404 u32 tag;
1405 int n_elem;
884}; 1406};
885 1407
886static int mvs_task_prep_smp(struct mvs_info *mvi, struct mvs_task_exec_info *tei) 1408static int mvs_task_prep_smp(struct mvs_info *mvi,
1409 struct mvs_task_exec_info *tei)
887{ 1410{
888 int elem, rc; 1411 int elem, rc, i;
1412 struct sas_task *task = tei->task;
889 struct mvs_cmd_hdr *hdr = tei->hdr; 1413 struct mvs_cmd_hdr *hdr = tei->hdr;
890 struct scatterlist *sg_req, *sg_resp; 1414 struct scatterlist *sg_req, *sg_resp;
891 unsigned int req_len, resp_len, tag = tei->tag; 1415 u32 req_len, resp_len, tag = tei->tag;
892 1416 void *buf_tmp;
1417 u8 *buf_oaf;
1418 dma_addr_t buf_tmp_dma;
1419 struct mvs_prd *buf_prd;
1420 struct scatterlist *sg;
1421 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1422 struct asd_sas_port *sas_port = task->dev->port;
1423 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1424#if _MV_DUMP
1425 u8 *buf_cmd;
1426 void *from;
1427#endif
893 /* 1428 /*
894 * DMA-map SMP request, response buffers 1429 * DMA-map SMP request, response buffers
895 */ 1430 */
896 1431 sg_req = &task->smp_task.smp_req;
897 sg_req = &tei->task->smp_task.smp_req;
898 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); 1432 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
899 if (!elem) 1433 if (!elem)
900 return -ENOMEM; 1434 return -ENOMEM;
901 req_len = sg_dma_len(sg_req); 1435 req_len = sg_dma_len(sg_req);
902 1436
903 sg_resp = &tei->task->smp_task.smp_resp; 1437 sg_resp = &task->smp_task.smp_resp;
904 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); 1438 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
905 if (!elem) { 1439 if (!elem) {
906 rc = -ENOMEM; 1440 rc = -ENOMEM;
@@ -915,21 +1449,77 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, struct mvs_task_exec_info *te
915 } 1449 }
916 1450
917 /* 1451 /*
918 * Fill in TX ring and command slot header 1452 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
919 */ 1453 */
920 1454
921 mvi->tx[tag] = cpu_to_le32( 1455 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
922 (TXQ_CMD_SMP << TXQ_CMD_SHIFT) | TXQ_MODE_I | tag); 1456 buf_tmp = slot->buf;
1457 buf_tmp_dma = slot->buf_dma;
923 1458
924 hdr->flags = 0; 1459#if _MV_DUMP
925 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 1460 buf_cmd = buf_tmp;
1461 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1462 buf_tmp += req_len;
1463 buf_tmp_dma += req_len;
1464 slot->cmd_size = req_len;
1465#else
1466 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1467#endif
1468
1469 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1470 buf_oaf = buf_tmp;
1471 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1472
1473 buf_tmp += MVS_OAF_SZ;
1474 buf_tmp_dma += MVS_OAF_SZ;
1475
1476 /* region 3: PRD table ********************************************* */
1477 buf_prd = buf_tmp;
1478 if (tei->n_elem)
1479 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1480 else
1481 hdr->prd_tbl = 0;
1482
1483 i = sizeof(struct mvs_prd) * tei->n_elem;
1484 buf_tmp += i;
1485 buf_tmp_dma += i;
1486
1487 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
1488 slot->response = buf_tmp;
1489 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1490
1491 /*
1492 * Fill in TX ring and command slot header
1493 */
1494 slot->tx = mvi->tx_prod;
1495 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1496 TXQ_MODE_I | tag |
1497 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1498
1499 hdr->flags |= flags;
1500 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
926 hdr->tags = cpu_to_le32(tag); 1501 hdr->tags = cpu_to_le32(tag);
927 hdr->data_len = 0; 1502 hdr->data_len = 0;
928 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
929 hdr->open_frame = 0;
930 hdr->status_buf = cpu_to_le64(sg_dma_address(sg_resp));
931 hdr->prd_tbl = 0;
932 1503
1504 /* generate open address frame hdr (first 12 bytes) */
1505 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1506 buf_oaf[1] = task->dev->linkrate & 0xf;
1507 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1508 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1509
1510 /* fill in PRD (scatter/gather) table, if any */
1511 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1512 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1513 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1514 buf_prd++;
1515 }
1516
1517#if _MV_DUMP
1518 /* copy cmd table */
1519 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1520 memcpy(buf_cmd, from + sg_req->offset, req_len);
1521 kunmap_atomic(from, KM_IRQ0);
1522#endif
933 return 0; 1523 return 0;
934 1524
935err_out_2: 1525err_out_2:
@@ -941,6 +1531,73 @@ err_out:
941 return rc; 1531 return rc;
942} 1532}
943 1533
1534static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1535{
1536 void __iomem *regs = mvi->regs;
1537 u32 tmp, offs;
1538 u8 *tfs = &port->taskfileset;
1539
1540 if (*tfs == MVS_ID_NOT_MAPPED)
1541 return;
1542
1543 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1544 if (*tfs < 16) {
1545 tmp = mr32(PCS);
1546 mw32(PCS, tmp & ~offs);
1547 } else {
1548 tmp = mr32(CTL);
1549 mw32(CTL, tmp & ~offs);
1550 }
1551
1552 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1553 if (tmp)
1554 mw32(INT_STAT_SRS, tmp);
1555
1556 *tfs = MVS_ID_NOT_MAPPED;
1557}
1558
1559static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1560{
1561 int i;
1562 u32 tmp, offs;
1563 void __iomem *regs = mvi->regs;
1564
1565 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1566 return 0;
1567
1568 tmp = mr32(PCS);
1569
1570 for (i = 0; i < mvi->chip->srs_sz; i++) {
1571 if (i == 16)
1572 tmp = mr32(CTL);
1573 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1574 if (!(tmp & offs)) {
1575 port->taskfileset = i;
1576
1577 if (i < 16)
1578 mw32(PCS, tmp | offs);
1579 else
1580 mw32(CTL, tmp | offs);
1581 tmp = mr32(INT_STAT_SRS) & (1U << i);
1582 if (tmp)
1583 mw32(INT_STAT_SRS, tmp);
1584 return 0;
1585 }
1586 }
1587 return MVS_ID_NOT_MAPPED;
1588}
1589
1590static u32 mvs_get_ncq_tag(struct sas_task *task)
1591{
1592 u32 tag = 0;
1593 struct ata_queued_cmd *qc = task->uldd_task;
1594
1595 if (qc)
1596 tag = qc->tag;
1597
1598 return tag;
1599}
1600
944static int mvs_task_prep_ata(struct mvs_info *mvi, 1601static int mvs_task_prep_ata(struct mvs_info *mvi,
945 struct mvs_task_exec_info *tei) 1602 struct mvs_task_exec_info *tei)
946{ 1603{
@@ -948,47 +1605,65 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
948 struct domain_device *dev = task->dev; 1605 struct domain_device *dev = task->dev;
949 struct mvs_cmd_hdr *hdr = tei->hdr; 1606 struct mvs_cmd_hdr *hdr = tei->hdr;
950 struct asd_sas_port *sas_port = dev->port; 1607 struct asd_sas_port *sas_port = dev->port;
951 unsigned int tag = tei->tag; 1608 struct mvs_slot_info *slot;
952 struct mvs_slot_info *slot = &mvi->slot_info[tag];
953 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
954 struct scatterlist *sg; 1609 struct scatterlist *sg;
955 struct mvs_prd *buf_prd; 1610 struct mvs_prd *buf_prd;
1611 struct mvs_port *port = tei->port;
1612 u32 tag = tei->tag;
1613 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
956 void *buf_tmp; 1614 void *buf_tmp;
957 u8 *buf_cmd, *buf_oaf; 1615 u8 *buf_cmd, *buf_oaf;
958 dma_addr_t buf_tmp_dma; 1616 dma_addr_t buf_tmp_dma;
959 unsigned int i, req_len, resp_len; 1617 u32 i, req_len, resp_len;
1618 const u32 max_resp_len = SB_RFB_MAX;
1619
1620 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1621 return -EBUSY;
960 1622
961 /* FIXME: fill in SATA register set */ 1623 slot = &mvi->slot_info[tag];
962 mvi->tx[tag] = cpu_to_le32(TXQ_MODE_I | tag | 1624 slot->tx = mvi->tx_prod;
963 (TXQ_CMD_STP << TXQ_CMD_SHIFT) | 1625 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
964 (sas_port->phy_mask << TXQ_PHY_SHIFT)); 1626 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1627 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1628 (port->taskfileset << TXQ_SRS_SHIFT));
965 1629
966 if (task->ata_task.use_ncq) 1630 if (task->ata_task.use_ncq)
967 flags |= MCH_FPDMA; 1631 flags |= MCH_FPDMA;
968 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) 1632 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
969 flags |= MCH_ATAPI; 1633 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1634 flags |= MCH_ATAPI;
1635 }
1636
970 /* FIXME: fill in port multiplier number */ 1637 /* FIXME: fill in port multiplier number */
971 1638
972 hdr->flags = cpu_to_le32(flags); 1639 hdr->flags = cpu_to_le32(flags);
973 hdr->tags = cpu_to_le32(tag); 1640
1641 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
1642 if (task->ata_task.use_ncq) {
1643 hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
 1644 /* fill in task file */
1645 task->ata_task.fis.sector_count = hdr->tags << 3;
1646 } else
1647 hdr->tags = cpu_to_le32(tag);
974 hdr->data_len = cpu_to_le32(task->total_xfer_len); 1648 hdr->data_len = cpu_to_le32(task->total_xfer_len);
975 1649
976 /* 1650 /*
977 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs 1651 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
978 */ 1652 */
979 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
980 1653
981 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ***************/ 1654 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
982 buf_cmd = 1655 buf_cmd = buf_tmp = slot->buf;
983 buf_tmp = slot->buf;
984 buf_tmp_dma = slot->buf_dma; 1656 buf_tmp_dma = slot->buf_dma;
985 1657
986 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); 1658 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
987 1659
988 buf_tmp += MVS_ATA_CMD_SZ; 1660 buf_tmp += MVS_ATA_CMD_SZ;
989 buf_tmp_dma += MVS_ATA_CMD_SZ; 1661 buf_tmp_dma += MVS_ATA_CMD_SZ;
1662#if _MV_DUMP
1663 slot->cmd_size = MVS_ATA_CMD_SZ;
1664#endif
990 1665
991 /* region 2: open address frame area (MVS_OAF_SZ bytes) **********/ 1666 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
992 /* used for STP. unused for SATA? */ 1667 /* used for STP. unused for SATA? */
993 buf_oaf = buf_tmp; 1668 buf_oaf = buf_tmp;
994 hdr->open_frame = cpu_to_le64(buf_tmp_dma); 1669 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
@@ -996,40 +1671,49 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
996 buf_tmp += MVS_OAF_SZ; 1671 buf_tmp += MVS_OAF_SZ;
997 buf_tmp_dma += MVS_OAF_SZ; 1672 buf_tmp_dma += MVS_OAF_SZ;
998 1673
999 /* region 3: PRD table ***********************************************/ 1674 /* region 3: PRD table ********************************************* */
1000 buf_prd = buf_tmp; 1675 buf_prd = buf_tmp;
1001 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); 1676 if (tei->n_elem)
1677 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1678 else
1679 hdr->prd_tbl = 0;
1002 1680
1003 i = sizeof(struct mvs_prd) * tei->n_elem; 1681 i = sizeof(struct mvs_prd) * tei->n_elem;
1004 buf_tmp += i; 1682 buf_tmp += i;
1005 buf_tmp_dma += i; 1683 buf_tmp_dma += i;
1006 1684
1007 /* region 4: status buffer (larger the PRD, smaller this buf) ********/ 1685 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
1008 /* FIXME: probably unused, for SATA. kept here just in case 1686 /* FIXME: probably unused, for SATA. kept here just in case
1009 * we get a STP/SATA error information record 1687 * we get a STP/SATA error information record
1010 */ 1688 */
1011 slot->response = buf_tmp; 1689 slot->response = buf_tmp;
1012 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 1690 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1013 1691
1014 req_len = sizeof(struct ssp_frame_hdr) + 28; 1692 req_len = sizeof(struct host_to_dev_fis);
1015 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - 1693 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1016 sizeof(struct mvs_err_info) - i; 1694 sizeof(struct mvs_err_info) - i;
1017 1695
1018 /* request, response lengths */ 1696 /* request, response lengths */
1697 resp_len = min(resp_len, max_resp_len);
1019 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 1698 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1020 1699
1700 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1021 /* fill in command FIS and ATAPI CDB */ 1701 /* fill in command FIS and ATAPI CDB */
1022 memcpy(buf_cmd, &task->ata_task.fis, 1702 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1023 sizeof(struct host_to_dev_fis)); 1703 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1024 memcpy(buf_cmd + 0x40, task->ata_task.atapi_packet, 16); 1704 memcpy(buf_cmd + STP_ATAPI_CMD,
1705 task->ata_task.atapi_packet, 16);
1706
1707 /* generate open address frame hdr (first 12 bytes) */
1708 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1709 buf_oaf[1] = task->dev->linkrate & 0xf;
1710 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1711 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1025 1712
1026 /* fill in PRD (scatter/gather) table, if any */ 1713 /* fill in PRD (scatter/gather) table, if any */
1027 sg = task->scatter; 1714 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1028 for (i = 0; i < tei->n_elem; i++) {
1029 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 1715 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1030 buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 1716 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1031
1032 sg++;
1033 buf_prd++; 1717 buf_prd++;
1034 } 1718 }
1035 1719
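The PRD table here is filled by walking the DMA-mapped scatterlist with for_each_sg() and emitting one {bus address, length} descriptor per segment. The fragment below only models that packing with a plain array in place of a scatterlist; the field layout of the descriptor is a guess for illustration (the driver shows a 64-bit address and a 32-bit length, anything further is assumed), and all names are invented.

#include <stdint.h>
#include <stddef.h>

/* Assumed PRD shape: bus address plus byte count (stored little-endian by the driver). */
struct prd_model {
	uint64_t addr;
	uint32_t len;
};

/* Stand-in for one DMA-mapped scatterlist segment. */
struct sg_seg {
	uint64_t dma_addr;
	uint32_t dma_len;
};

/* Emit n_elem descriptors, mirroring the for_each_sg() loop in the prep paths. */
static void fill_prd(struct prd_model *prd, const struct sg_seg *sg, size_t n_elem)
{
	size_t i;

	for (i = 0; i < n_elem; i++) {
		prd[i].addr = sg[i].dma_addr;
		prd[i].len  = sg[i].dma_len;
	}
}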
@@ -1040,23 +1724,25 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
1040 struct mvs_task_exec_info *tei) 1724 struct mvs_task_exec_info *tei)
1041{ 1725{
1042 struct sas_task *task = tei->task; 1726 struct sas_task *task = tei->task;
1043 struct asd_sas_port *sas_port = task->dev->port;
1044 struct mvs_cmd_hdr *hdr = tei->hdr; 1727 struct mvs_cmd_hdr *hdr = tei->hdr;
1728 struct mvs_port *port = tei->port;
1045 struct mvs_slot_info *slot; 1729 struct mvs_slot_info *slot;
1046 struct scatterlist *sg; 1730 struct scatterlist *sg;
1047 unsigned int resp_len, req_len, i, tag = tei->tag;
1048 struct mvs_prd *buf_prd; 1731 struct mvs_prd *buf_prd;
1049 struct ssp_frame_hdr *ssp_hdr; 1732 struct ssp_frame_hdr *ssp_hdr;
1050 void *buf_tmp; 1733 void *buf_tmp;
1051 u8 *buf_cmd, *buf_oaf, fburst = 0; 1734 u8 *buf_cmd, *buf_oaf, fburst = 0;
1052 dma_addr_t buf_tmp_dma; 1735 dma_addr_t buf_tmp_dma;
1053 u32 flags; 1736 u32 flags;
1737 u32 resp_len, req_len, i, tag = tei->tag;
1738 const u32 max_resp_len = SB_RFB_MAX;
1054 1739
1055 slot = &mvi->slot_info[tag]; 1740 slot = &mvi->slot_info[tag];
1056 1741
1057 mvi->tx[tag] = cpu_to_le32(TXQ_MODE_I | tag | 1742 slot->tx = mvi->tx_prod;
1058 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | 1743 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1059 (sas_port->phy_mask << TXQ_PHY_SHIFT)); 1744 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1745 (port->wide_port_phymap << TXQ_PHY_SHIFT));
1060 1746
1061 flags = MCH_RETRY; 1747 flags = MCH_RETRY;
1062 if (task->ssp_task.enable_first_burst) { 1748 if (task->ssp_task.enable_first_burst) {
@@ -1064,8 +1750,8 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
1064 fburst = (1 << 7); 1750 fburst = (1 << 7);
1065 } 1751 }
1066 hdr->flags = cpu_to_le32(flags | 1752 hdr->flags = cpu_to_le32(flags |
1067 (tei->n_elem << MCH_PRD_LEN_SHIFT) | 1753 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1068 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); 1754 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1069 1755
1070 hdr->tags = cpu_to_le32(tag); 1756 hdr->tags = cpu_to_le32(tag);
1071 hdr->data_len = cpu_to_le32(task->total_xfer_len); 1757 hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -1073,40 +1759,46 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
1073 /* 1759 /*
1074 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs 1760 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1075 */ 1761 */
1076 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
1077 1762
1078 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***************/ 1763 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1079 buf_cmd = 1764 buf_cmd = buf_tmp = slot->buf;
1080 buf_tmp = slot->buf;
1081 buf_tmp_dma = slot->buf_dma; 1765 buf_tmp_dma = slot->buf_dma;
1082 1766
1083 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); 1767 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1084 1768
1085 buf_tmp += MVS_SSP_CMD_SZ; 1769 buf_tmp += MVS_SSP_CMD_SZ;
1086 buf_tmp_dma += MVS_SSP_CMD_SZ; 1770 buf_tmp_dma += MVS_SSP_CMD_SZ;
1771#if _MV_DUMP
1772 slot->cmd_size = MVS_SSP_CMD_SZ;
1773#endif
1087 1774
1088 /* region 2: open address frame area (MVS_OAF_SZ bytes) **********/ 1775 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1089 buf_oaf = buf_tmp; 1776 buf_oaf = buf_tmp;
1090 hdr->open_frame = cpu_to_le64(buf_tmp_dma); 1777 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1091 1778
1092 buf_tmp += MVS_OAF_SZ; 1779 buf_tmp += MVS_OAF_SZ;
1093 buf_tmp_dma += MVS_OAF_SZ; 1780 buf_tmp_dma += MVS_OAF_SZ;
1094 1781
1095 /* region 3: PRD table ***********************************************/ 1782 /* region 3: PRD table ********************************************* */
1096 buf_prd = buf_tmp; 1783 buf_prd = buf_tmp;
1097 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); 1784 if (tei->n_elem)
1785 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1786 else
1787 hdr->prd_tbl = 0;
1098 1788
1099 i = sizeof(struct mvs_prd) * tei->n_elem; 1789 i = sizeof(struct mvs_prd) * tei->n_elem;
1100 buf_tmp += i; 1790 buf_tmp += i;
1101 buf_tmp_dma += i; 1791 buf_tmp_dma += i;
1102 1792
1103 /* region 4: status buffer (larger the PRD, smaller this buf) ********/ 1793 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
1104 slot->response = buf_tmp; 1794 slot->response = buf_tmp;
1105 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 1795 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1106 1796
1107 req_len = sizeof(struct ssp_frame_hdr) + 28;
1108 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - 1797 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
1109 sizeof(struct mvs_err_info) - i; 1798 sizeof(struct mvs_err_info) - i;
1799 resp_len = min(resp_len, max_resp_len);
1800
1801 req_len = sizeof(struct ssp_frame_hdr) + 28;
1110 1802
1111 /* request, response lengths */ 1803 /* request, response lengths */
1112 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 1804 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
@@ -1114,12 +1806,11 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
1114 /* generate open address frame hdr (first 12 bytes) */ 1806 /* generate open address frame hdr (first 12 bytes) */
1115 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ 1807 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
1116 buf_oaf[1] = task->dev->linkrate & 0xf; 1808 buf_oaf[1] = task->dev->linkrate & 0xf;
1117 buf_oaf[2] = tag >> 8; 1809 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1118 buf_oaf[3] = tag;
1119 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 1810 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1120 1811
1121 /* fill in SSP frame header */ 1812 /* fill in SSP frame header (Command Table.SSP frame header) */
1122 ssp_hdr = (struct ssp_frame_hdr *) buf_cmd; 1813 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
1123 ssp_hdr->frame_type = SSP_COMMAND; 1814 ssp_hdr->frame_type = SSP_COMMAND;
1124 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, 1815 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
1125 HASHED_SAS_ADDR_SIZE); 1816 HASHED_SAS_ADDR_SIZE);
@@ -1130,18 +1821,14 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
1130 /* fill in command frame IU */ 1821 /* fill in command frame IU */
1131 buf_cmd += sizeof(*ssp_hdr); 1822 buf_cmd += sizeof(*ssp_hdr);
1132 memcpy(buf_cmd, &task->ssp_task.LUN, 8); 1823 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1133 buf_cmd[9] = fburst | 1824 buf_cmd[9] = fburst | task->ssp_task.task_attr |
1134 task->ssp_task.task_attr | 1825 (task->ssp_task.task_prio << 3);
1135 (task->ssp_task.task_prio << 3);
1136 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); 1826 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
1137 1827
1138 /* fill in PRD (scatter/gather) table, if any */ 1828 /* fill in PRD (scatter/gather) table, if any */
1139 sg = task->scatter; 1829 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1140 for (i = 0; i < tei->n_elem; i++) {
1141 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 1830 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1142 buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 1831 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1143
1144 sg++;
1145 buf_prd++; 1832 buf_prd++;
1146 } 1833 }
1147 1834
@@ -1150,77 +1837,157 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
1150 1837
1151static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) 1838static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
1152{ 1839{
1153 struct mvs_info *mvi = task->dev->port->ha->lldd_ha; 1840 struct domain_device *dev = task->dev;
1154 unsigned int tag = 0xdeadbeef, rc, n_elem = 0; 1841 struct mvs_info *mvi = dev->port->ha->lldd_ha;
1842 struct pci_dev *pdev = mvi->pdev;
1155 void __iomem *regs = mvi->regs; 1843 void __iomem *regs = mvi->regs;
1156 unsigned long flags;
1157 struct mvs_task_exec_info tei; 1844 struct mvs_task_exec_info tei;
1845 struct sas_task *t = task;
1846 u32 tag = 0xdeadbeef, rc, n_elem = 0;
1847 unsigned long flags;
1848 u32 n = num, pass = 0;
1158 1849
1159 /* FIXME: STP/SATA support not complete yet */ 1850 spin_lock_irqsave(&mvi->lock, flags);
1160 if (task->task_proto == SAS_PROTOCOL_SATA || task->task_proto == SAS_PROTOCOL_STP)
1161 return -SAS_DEV_NO_RESPONSE;
1162 1851
1163 if (task->num_scatter) { 1852 do {
1164 n_elem = pci_map_sg(mvi->pdev, task->scatter, 1853 tei.port = &mvi->port[dev->port->id];
1165 task->num_scatter, task->data_dir);
1166 if (!n_elem)
1167 return -ENOMEM;
1168 }
1169 1854
1170 spin_lock_irqsave(&mvi->lock, flags); 1855 if (!tei.port->port_attached) {
1856 struct task_status_struct *ts = &t->task_status;
1857 ts->stat = SAS_PHY_DOWN;
1858 t->task_done(t);
1859 rc = 0;
1860 goto exec_exit;
1861 }
1862 if (!sas_protocol_ata(t->task_proto)) {
1863 if (t->num_scatter) {
1864 n_elem = pci_map_sg(mvi->pdev, t->scatter,
1865 t->num_scatter,
1866 t->data_dir);
1867 if (!n_elem) {
1868 rc = -ENOMEM;
1869 goto err_out;
1870 }
1871 }
1872 } else {
1873 n_elem = t->num_scatter;
1874 }
1171 1875
1172 rc = mvs_tag_alloc(mvi, &tag); 1876 rc = mvs_tag_alloc(mvi, &tag);
1173 if (rc) 1877 if (rc)
1174 goto err_out; 1878 goto err_out;
1175 1879
1176 mvi->slot_info[tag].task = task; 1880 mvi->slot_info[tag].task = t;
1177 mvi->slot_info[tag].n_elem = n_elem; 1881 mvi->slot_info[tag].n_elem = n_elem;
1178 tei.task = task; 1882 memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
1179 tei.hdr = &mvi->slot[tag]; 1883 tei.task = t;
1180 tei.tag = tag; 1884 tei.hdr = &mvi->slot[tag];
1181 tei.n_elem = n_elem; 1885 tei.tag = tag;
1886 tei.n_elem = n_elem;
1887
1888 switch (t->task_proto) {
1889 case SAS_PROTOCOL_SMP:
1890 rc = mvs_task_prep_smp(mvi, &tei);
1891 break;
1892 case SAS_PROTOCOL_SSP:
1893 rc = mvs_task_prep_ssp(mvi, &tei);
1894 break;
1895 case SAS_PROTOCOL_SATA:
1896 case SAS_PROTOCOL_STP:
1897 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1898 rc = mvs_task_prep_ata(mvi, &tei);
1899 break;
1900 default:
1901 dev_printk(KERN_ERR, &pdev->dev,
1902 "unknown sas_task proto: 0x%x\n",
1903 t->task_proto);
1904 rc = -EINVAL;
1905 break;
1906 }
1182 1907
1183 switch (task->task_proto) { 1908 if (rc)
1184 case SAS_PROTOCOL_SMP: 1909 goto err_out_tag;
1185 rc = mvs_task_prep_smp(mvi, &tei);
1186 break;
1187 case SAS_PROTOCOL_SSP:
1188 rc = mvs_task_prep_ssp(mvi, &tei);
1189 break;
1190 case SAS_PROTOCOL_SATA:
1191 case SAS_PROTOCOL_STP:
1192 rc = mvs_task_prep_ata(mvi, &tei);
1193 break;
1194 default:
1195 rc = -EINVAL;
1196 break;
1197 }
1198 1910
1199 if (rc) 1911 /* TODO: select normal or high priority */
1200 goto err_out_tag;
1201 1912
1202 /* TODO: select normal or high priority */ 1913 spin_lock(&t->task_state_lock);
1914 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
1915 spin_unlock(&t->task_state_lock);
1203 1916
1204 mw32(RX_PROD_IDX, mvi->tx_prod); 1917 if (n == 1) {
1918 spin_unlock_irqrestore(&mvi->lock, flags);
1919 mw32(TX_PROD_IDX, mvi->tx_prod);
1920 }
1921 mvs_hba_memory_dump(mvi, tag, t->task_proto);
1205 1922
1206 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_TX_RING_SZ - 1); 1923 ++pass;
1924 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1207 1925
1208 spin_lock(&task->task_state_lock); 1926 if (n == 1)
1209 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1927 break;
1210 spin_unlock(&task->task_state_lock); 1928
1929 t = list_entry(t->list.next, struct sas_task, list);
1930 } while (--n);
1211 1931
1212 spin_unlock_irqrestore(&mvi->lock, flags);
1213 return 0; 1932 return 0;
1214 1933
1215err_out_tag: 1934err_out_tag:
1216 mvs_tag_clear(mvi, tag); 1935 mvs_tag_free(mvi, tag);
1217err_out: 1936err_out:
1218 if (n_elem) 1937 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
1219 pci_unmap_sg(mvi->pdev, task->scatter, n_elem, task->data_dir); 1938 if (!sas_protocol_ata(t->task_proto))
1939 if (n_elem)
1940 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
1941 t->data_dir);
1942exec_exit:
1943 if (pass)
1944 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1220 spin_unlock_irqrestore(&mvi->lock, flags); 1945 spin_unlock_irqrestore(&mvi->lock, flags);
1221 return rc; 1946 return rc;
1222} 1947}
1223 1948
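mvs_task_exec now prepares up to num linked tasks under a single mvi->lock hold and rings the TX producer doorbell only once, instead of once per command. The sketch below models that prepare-many, kick-once pattern with an ordinary ring buffer; it uses none of the libsas types, RING_SZ is an arbitrary stand-in for the chip slot count, and the exact index value the hardware expects in TX_PROD_IDX (last written versus next free) is glossed over.

#include <stdint.h>
#include <stdio.h>

#define RING_SZ 8		/* must be a power of two, like the chip slot count */

struct tx_ring {
	uint32_t slot[RING_SZ];
	unsigned int prod;	/* software producer index              */
	unsigned int hw_prod;	/* last value "written" to the doorbell */
};

/* Queue a batch of commands; write the doorbell only after the last one. */
static void queue_batch(struct tx_ring *r, const uint32_t *cmd, unsigned int n)
{
	unsigned int pass = 0;

	while (n--) {
		r->slot[r->prod] = cmd[pass++];
		r->prod = (r->prod + 1) & (RING_SZ - 1);
	}
	if (pass)
		r->hw_prod = r->prod;	/* single doorbell write, like mw32(TX_PROD_IDX, ...) */
}

int main(void)
{
	struct tx_ring r = { { 0 } };
	uint32_t cmds[3] = { 0xa, 0xb, 0xc };

	queue_batch(&r, cmds, 3);
	printf("prod=%u hw_prod=%u\n", r.prod, r.hw_prod);	/* prod=3 hw_prod=3 */
	return 0;
}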
1949static int mvs_task_abort(struct sas_task *task)
1950{
1951 int rc = 1;
1952 unsigned long flags;
1953 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1954 struct pci_dev *pdev = mvi->pdev;
1955
1956 spin_lock_irqsave(&task->task_state_lock, flags);
1957 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1958 rc = TMF_RESP_FUNC_COMPLETE;
1959 goto out_done;
1960 }
1961 spin_unlock_irqrestore(&task->task_state_lock, flags);
1962
1963 /*FIXME*/
1964 rc = TMF_RESP_FUNC_COMPLETE;
1965
1966 switch (task->task_proto) {
1967 case SAS_PROTOCOL_SMP:
1968 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
1969 break;
1970 case SAS_PROTOCOL_SSP:
1971 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
1972 break;
1973 case SAS_PROTOCOL_SATA:
1974 case SAS_PROTOCOL_STP:
1975 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
1976 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
 1977 "Dump D2H FIS:\n");
1978 mvs_hexdump(sizeof(struct host_to_dev_fis),
1979 (void *)&task->ata_task.fis, 0);
 1980 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd:\n");
1981 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
1982 break;
1983 }
1984 default:
1985 break;
1986 }
1987out_done:
1988 return rc;
1989}
1990
1224static void mvs_free(struct mvs_info *mvi) 1991static void mvs_free(struct mvs_info *mvi)
1225{ 1992{
1226 int i; 1993 int i;
@@ -1238,7 +2005,7 @@ static void mvs_free(struct mvs_info *mvi)
1238 2005
1239 if (mvi->tx) 2006 if (mvi->tx)
1240 dma_free_coherent(&mvi->pdev->dev, 2007 dma_free_coherent(&mvi->pdev->dev,
1241 sizeof(*mvi->tx) * MVS_TX_RING_SZ, 2008 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
1242 mvi->tx, mvi->tx_dma); 2009 mvi->tx, mvi->tx_dma);
1243 if (mvi->rx_fis) 2010 if (mvi->rx_fis)
1244 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, 2011 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
@@ -1249,10 +2016,12 @@ static void mvs_free(struct mvs_info *mvi)
1249 mvi->rx, mvi->rx_dma); 2016 mvi->rx, mvi->rx_dma);
1250 if (mvi->slot) 2017 if (mvi->slot)
1251 dma_free_coherent(&mvi->pdev->dev, 2018 dma_free_coherent(&mvi->pdev->dev,
1252 sizeof(*mvi->slot) * MVS_RX_RING_SZ, 2019 sizeof(*mvi->slot) * MVS_SLOTS,
1253 mvi->slot, mvi->slot_dma); 2020 mvi->slot, mvi->slot_dma);
2021#ifdef MVS_ENABLE_PERI
1254 if (mvi->peri_regs) 2022 if (mvi->peri_regs)
1255 iounmap(mvi->peri_regs); 2023 iounmap(mvi->peri_regs);
2024#endif
1256 if (mvi->regs) 2025 if (mvi->regs)
1257 iounmap(mvi->regs); 2026 iounmap(mvi->regs);
1258 if (mvi->shost) 2027 if (mvi->shost)
@@ -1267,42 +2036,39 @@ static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
1267 void *funcdata) 2036 void *funcdata)
1268{ 2037{
1269 struct mvs_info *mvi = sas_phy->ha->lldd_ha; 2038 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
1270 void __iomem *reg;
1271 int rc = 0, phy_id = sas_phy->id; 2039 int rc = 0, phy_id = sas_phy->id;
1272 u32 tmp; 2040 u32 tmp;
1273 2041
1274 reg = mvi->regs + MVS_P0_SER_CTLSTAT + (phy_id * 4); 2042 tmp = mvs_read_phy_ctl(mvi, phy_id);
1275 2043
1276 switch (func) { 2044 switch (func) {
1277 case PHY_FUNC_SET_LINK_RATE: { 2045 case PHY_FUNC_SET_LINK_RATE:{
1278 struct sas_phy_linkrates *rates = funcdata; 2046 struct sas_phy_linkrates *rates = funcdata;
1279 u32 lrmin = 0, lrmax = 0; 2047 u32 lrmin = 0, lrmax = 0;
1280 2048
1281 lrmin = (rates->minimum_linkrate << 8); 2049 lrmin = (rates->minimum_linkrate << 8);
1282 lrmax = (rates->maximum_linkrate << 12); 2050 lrmax = (rates->maximum_linkrate << 12);
1283 2051
1284 tmp = readl(reg); 2052 if (lrmin) {
1285 if (lrmin) { 2053 tmp &= ~(0xf << 8);
1286 tmp &= ~(0xf << 8); 2054 tmp |= lrmin;
1287 tmp |= lrmin; 2055 }
1288 } 2056 if (lrmax) {
1289 if (lrmax) { 2057 tmp &= ~(0xf << 12);
1290 tmp &= ~(0xf << 12); 2058 tmp |= lrmax;
1291 tmp |= lrmax; 2059 }
2060 mvs_write_phy_ctl(mvi, phy_id, tmp);
2061 break;
1292 } 2062 }
1293 writel(tmp, reg);
1294 break;
1295 }
1296 2063
1297 case PHY_FUNC_HARD_RESET: 2064 case PHY_FUNC_HARD_RESET:
1298 tmp = readl(reg);
1299 if (tmp & PHY_RST_HARD) 2065 if (tmp & PHY_RST_HARD)
1300 break; 2066 break;
1301 writel(tmp | PHY_RST_HARD, reg); 2067 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
1302 break; 2068 break;
1303 2069
1304 case PHY_FUNC_LINK_RESET: 2070 case PHY_FUNC_LINK_RESET:
1305 writel(readl(reg) | PHY_RST, reg); 2071 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
1306 break; 2072 break;
1307 2073
1308 case PHY_FUNC_DISABLE: 2074 case PHY_FUNC_DISABLE:
@@ -1335,11 +2101,11 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
1335 sas_phy->lldd_phy = phy; 2101 sas_phy->lldd_phy = phy;
1336} 2102}
1337 2103
1338static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev, 2104static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
1339 const struct pci_device_id *ent) 2105 const struct pci_device_id *ent)
1340{ 2106{
1341 struct mvs_info *mvi; 2107 struct mvs_info *mvi;
1342 unsigned long res_start, res_len; 2108 unsigned long res_start, res_len, res_flag;
1343 struct asd_sas_phy **arr_phy; 2109 struct asd_sas_phy **arr_phy;
1344 struct asd_sas_port **arr_port; 2110 struct asd_sas_port **arr_port;
1345 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; 2111 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
@@ -1381,9 +2147,10 @@ static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev,
1381 2147
1382 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; 2148 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
1383 mvi->shost->transportt = mvs_stt; 2149 mvi->shost->transportt = mvs_stt;
1384 mvi->shost->max_id = ~0; 2150 mvi->shost->max_id = 21;
1385 mvi->shost->max_lun = ~0; 2151 mvi->shost->max_lun = ~0;
1386 mvi->shost->max_cmd_len = ~0; 2152 mvi->shost->max_channel = 0;
2153 mvi->shost->max_cmd_len = 16;
1387 2154
1388 mvi->sas.sas_ha_name = DRV_NAME; 2155 mvi->sas.sas_ha_name = DRV_NAME;
1389 mvi->sas.dev = &pdev->dev; 2156 mvi->sas.dev = &pdev->dev;
@@ -1392,32 +2159,40 @@ static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev,
1392 mvi->sas.sas_phy = arr_phy; 2159 mvi->sas.sas_phy = arr_phy;
1393 mvi->sas.sas_port = arr_port; 2160 mvi->sas.sas_port = arr_port;
1394 mvi->sas.num_phys = chip->n_phy; 2161 mvi->sas.num_phys = chip->n_phy;
1395 mvi->sas.lldd_max_execute_num = MVS_TX_RING_SZ - 1;/* FIXME: correct? */ 2162 mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
1396 mvi->sas.lldd_queue_size = MVS_TX_RING_SZ - 1; /* FIXME: correct? */ 2163 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2164 mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
1397 mvi->sas.lldd_ha = mvi; 2165 mvi->sas.lldd_ha = mvi;
1398 mvi->sas.core.shost = mvi->shost; 2166 mvi->sas.core.shost = mvi->shost;
1399 2167
1400 mvs_tag_set(mvi, MVS_TX_RING_SZ - 1); 2168 mvs_tag_init(mvi);
1401 2169
1402 /* 2170 /*
1403 * ioremap main and peripheral registers 2171 * ioremap main and peripheral registers
1404 */ 2172 */
1405 2173
2174#ifdef MVS_ENABLE_PERI
1406 res_start = pci_resource_start(pdev, 2); 2175 res_start = pci_resource_start(pdev, 2);
1407 res_len = pci_resource_len(pdev, 2); 2176 res_len = pci_resource_len(pdev, 2);
1408 if (!res_start || !res_len) 2177 if (!res_start || !res_len)
1409 goto err_out; 2178 goto err_out;
1410 2179
1411 mvi->peri_regs = ioremap_nocache(res_start, res_len); 2180 mvi->peri_regs = ioremap_nocache(res_start, res_len);
1412 if (!mvi->regs) 2181 if (!mvi->peri_regs)
1413 goto err_out; 2182 goto err_out;
2183#endif
1414 2184
1415 res_start = pci_resource_start(pdev, 4); 2185 res_start = pci_resource_start(pdev, 4);
1416 res_len = pci_resource_len(pdev, 4); 2186 res_len = pci_resource_len(pdev, 4);
1417 if (!res_start || !res_len) 2187 if (!res_start || !res_len)
1418 goto err_out; 2188 goto err_out;
1419 2189
1420 mvi->regs = ioremap_nocache(res_start, res_len); 2190 res_flag = pci_resource_flags(pdev, 4);
2191 if (res_flag & IORESOURCE_CACHEABLE)
2192 mvi->regs = ioremap(res_start, res_len);
2193 else
2194 mvi->regs = ioremap_nocache(res_start, res_len);
2195
1421 if (!mvi->regs) 2196 if (!mvi->regs)
1422 goto err_out; 2197 goto err_out;
1423 2198
@@ -1426,14 +2201,14 @@ static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev,
1426 */ 2201 */
1427 2202
1428 mvi->tx = dma_alloc_coherent(&pdev->dev, 2203 mvi->tx = dma_alloc_coherent(&pdev->dev,
1429 sizeof(*mvi->tx) * MVS_TX_RING_SZ, 2204 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
1430 &mvi->tx_dma, GFP_KERNEL); 2205 &mvi->tx_dma, GFP_KERNEL);
1431 if (!mvi->tx) 2206 if (!mvi->tx)
1432 goto err_out; 2207 goto err_out;
1433 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_TX_RING_SZ); 2208 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
1434 2209
1435 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, 2210 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
1436 &mvi->rx_fis_dma, GFP_KERNEL); 2211 &mvi->rx_fis_dma, GFP_KERNEL);
1437 if (!mvi->rx_fis) 2212 if (!mvi->rx_fis)
1438 goto err_out; 2213 goto err_out;
1439 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); 2214 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
@@ -1459,7 +2234,7 @@ static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev,
1459 struct mvs_slot_info *slot = &mvi->slot_info[i]; 2234 struct mvs_slot_info *slot = &mvi->slot_info[i];
1460 2235
1461 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, 2236 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
1462 &slot->buf_dma, GFP_KERNEL); 2237 &slot->buf_dma, GFP_KERNEL);
1463 if (!slot->buf) 2238 if (!slot->buf)
1464 goto err_out; 2239 goto err_out;
1465 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 2240 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
@@ -1468,7 +2243,6 @@ static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev,
1468 /* finally, read NVRAM to get our SAS address */ 2243 /* finally, read NVRAM to get our SAS address */
1469 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) 2244 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
1470 goto err_out; 2245 goto err_out;
1471
1472 return mvi; 2246 return mvi;
1473 2247
1474err_out: 2248err_out:
@@ -1488,26 +2262,89 @@ static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
1488 mw32(CMD_DATA, val); 2262 mw32(CMD_DATA, val);
1489} 2263}
1490 2264
1491#if 0 2265static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
1492static u32 mvs_phy_read(struct mvs_info *mvi, unsigned int phy_id, u32 addr)
1493{ 2266{
1494 void __iomem *regs = mvi->regs; 2267 void __iomem *regs = mvi->regs;
1495 void __iomem *phy_regs = regs + MVS_P0_CFG_ADDR + (phy_id * 8); 2268 return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
1496 2269 mr32(P4_SER_CTLSTAT + (port - 4) * 4);
1497 writel(addr, phy_regs);
1498 return readl(phy_regs + 4);
1499} 2270}
1500#endif
1501 2271
1502static void mvs_phy_write(struct mvs_info *mvi, unsigned int phy_id, 2272static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
1503 u32 addr, u32 val)
1504{ 2273{
1505 void __iomem *regs = mvi->regs; 2274 void __iomem *regs = mvi->regs;
1506 void __iomem *phy_regs = regs + MVS_P0_CFG_ADDR + (phy_id * 8); 2275 if (port < 4)
2276 mw32(P0_SER_CTLSTAT + port * 4, val);
2277 else
2278 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2279}
2280
2281static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2282{
2283 void __iomem *regs = mvi->regs + off;
2284 void __iomem *regs2 = mvi->regs + off2;
 2285 return (port < 4) ? readl(regs + port * 8) :
 2286 readl(regs2 + (port - 4) * 8);
2287}
2288
2289static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2290 u32 port, u32 val)
2291{
2292 void __iomem *regs = mvi->regs + off;
2293 void __iomem *regs2 = mvi->regs + off2;
2294 if (port < 4)
2295 writel(val, regs + port * 8);
2296 else
2297 writel(val, regs2 + (port - 4) * 8);
2298}
2299
2300static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2301{
2302 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2303}
2304
2305static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2306{
2307 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2308}
2309
2310static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2311{
2312 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2313}
2314
2315static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2316{
2317 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2318}
2319
2320static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2321{
2322 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2323}
2324
2325static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2326{
2327 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2328}
2329
2330static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2331{
2332 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2333}
2334
2335static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2336{
2337 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2338}
2339
2340static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2341{
2342 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2343}
1507 2344
1508 writel(addr, phy_regs); 2345static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
1509 writel(val, phy_regs + 4); 2346{
1510 readl(phy_regs); /* flush */ 2347 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
1511} 2348}
1512 2349
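These helpers hide two details of the register map: phys 0-3 and 4-7 live in separate banks (the P0_* versus P4_* offsets, stepped 4 or 8 bytes per port), and the per-phy configuration and vendor-specific registers are reached indirectly by writing a register number to an *_ADDR port and then moving data through the matching *_DATA port. Below is a self-contained model of that address/data handshake; the toy register file, offsets and values are invented, only the access pattern follows the driver.

#include <stdint.h>
#include <stdio.h>

/* Toy per-port register pair standing in for CFG_ADDR/CFG_DATA. */
struct toy_port {
	uint32_t cfg_addr;
	uint32_t cfg_space[64];		/* pretend indirect register space */
};

static void cfg_write_addr(struct toy_port *p, uint32_t addr)
{
	p->cfg_addr = addr;
}

static void cfg_write_data(struct toy_port *p, uint32_t val)
{
	p->cfg_space[p->cfg_addr & 63] = val;
}

static uint32_t cfg_read_data(const struct toy_port *p)
{
	return p->cfg_space[p->cfg_addr & 63];
}

int main(void)
{
	struct toy_port port = { 0 };

	/* select an indirect register, write it, read it back */
	cfg_write_addr(&port, 0x08);
	cfg_write_data(&port, 0x12345678);
	printf("readback: 0x%08x\n", cfg_read_data(&port));
	return 0;
}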
1513static void __devinit mvs_phy_hacks(struct mvs_info *mvi) 2350static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
@@ -1547,6 +2384,260 @@ static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
1547 tmp &= 0x1fffffff; 2384 tmp &= 0x1fffffff;
1548 tmp |= (2U << 29); /* 8 ms retry */ 2385 tmp |= (2U << 29); /* 8 ms retry */
1549 mvs_cw32(regs, CMD_PHY_TIMER, tmp); 2386 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2387
2388 /* TEST - for phy decoding error, adjust voltage levels */
2389 mw32(P0_VSR_ADDR + 0, 0x8);
2390 mw32(P0_VSR_DATA + 0, 0x2F0);
2391
2392 mw32(P0_VSR_ADDR + 8, 0x8);
2393 mw32(P0_VSR_DATA + 8, 0x2F0);
2394
2395 mw32(P0_VSR_ADDR + 16, 0x8);
2396 mw32(P0_VSR_DATA + 16, 0x2F0);
2397
2398 mw32(P0_VSR_ADDR + 24, 0x8);
2399 mw32(P0_VSR_DATA + 24, 0x2F0);
2400
2401}
2402
2403static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2404{
2405 void __iomem *regs = mvi->regs;
2406 u32 tmp;
2407
2408 tmp = mr32(PCS);
2409 if (mvi->chip->n_phy <= 4)
2410 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2411 else
2412 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2413 mw32(PCS, tmp);
2414}
2415
2416static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2417{
2418 void __iomem *regs = mvi->regs;
2419 u32 reg;
2420 struct mvs_phy *phy = &mvi->phy[i];
2421
2422 /* TODO check & save device type */
2423 reg = mr32(GBL_PORT_TYPE);
2424
2425 if (reg & MODE_SAS_SATA & (1 << i))
2426 phy->phy_type |= PORT_TYPE_SAS;
2427 else
2428 phy->phy_type |= PORT_TYPE_SATA;
2429}
2430
2431static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2432{
2433 u32 *s = (u32 *) buf;
2434
2435 if (!s)
2436 return NULL;
2437
2438 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2439 s[3] = mvs_read_port_cfg_data(mvi, i);
2440
2441 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2442 s[2] = mvs_read_port_cfg_data(mvi, i);
2443
2444 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2445 s[1] = mvs_read_port_cfg_data(mvi, i);
2446
2447 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2448 s[0] = mvs_read_port_cfg_data(mvi, i);
2449
2450 return (void *)s;
2451}
2452
2453static u32 mvs_is_sig_fis_received(u32 irq_status)
2454{
2455 return irq_status & PHYEV_SIG_FIS;
2456}
2457
2458static void mvs_update_wideport(struct mvs_info *mvi, int i)
2459{
2460 struct mvs_phy *phy = &mvi->phy[i];
2461 struct mvs_port *port = phy->port;
2462 int j, no;
2463
2464 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2465 if (no & 1) {
2466 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2467 mvs_write_port_cfg_data(mvi, no,
2468 port->wide_port_phymap);
2469 } else {
2470 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2471 mvs_write_port_cfg_data(mvi, no, 0);
2472 }
2473}
2474
2475static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2476{
2477 u32 tmp;
2478 struct mvs_phy *phy = &mvi->phy[i];
2479 struct mvs_port *port;
2480
2481 tmp = mvs_read_phy_ctl(mvi, i);
2482
2483 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2484 if (!phy->port)
2485 phy->phy_attached = 1;
2486 return tmp;
2487 }
2488
2489 port = phy->port;
2490 if (port) {
2491 if (phy->phy_type & PORT_TYPE_SAS) {
2492 port->wide_port_phymap &= ~(1U << i);
2493 if (!port->wide_port_phymap)
2494 port->port_attached = 0;
2495 mvs_update_wideport(mvi, i);
2496 } else if (phy->phy_type & PORT_TYPE_SATA)
2497 port->port_attached = 0;
2498 mvs_free_reg_set(mvi, phy->port);
2499 phy->port = NULL;
2500 phy->phy_attached = 0;
2501 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2502 }
2503 return 0;
2504}
2505
2506static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2507 int get_st)
2508{
2509 struct mvs_phy *phy = &mvi->phy[i];
2510 struct pci_dev *pdev = mvi->pdev;
2511 u32 tmp, j;
2512 u64 tmp64;
2513
2514 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2515 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2516
2517 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2518 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2519
2520 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2521 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2522
2523 if (get_st) {
2524 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2525 phy->phy_status = mvs_is_phy_ready(mvi, i);
2526 }
2527
2528 if (phy->phy_status) {
2529 u32 phy_st;
2530 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2531
2532 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2533 phy_st = mvs_read_port_cfg_data(mvi, i);
2534
2535 sas_phy->linkrate =
2536 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2537 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2538
2539 /* Updated attached_sas_addr */
2540 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2541 phy->att_dev_sas_addr =
2542 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2543
2544 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2545 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2546
2547 dev_printk(KERN_DEBUG, &pdev->dev,
 2548 "phy[%d] Get Attached Address 0x%llX,"
 2549 " SAS Address 0x%llX\n",
 2550 i, phy->att_dev_sas_addr, phy->dev_sas_addr);
 2551 dev_printk(KERN_DEBUG, &pdev->dev,
 2552 "Rate = %x, type = %d\n",
2553 sas_phy->linkrate, phy->phy_type);
2554
2555#if 1
2556 /*
2557 * If the device is capable of supporting a wide port
2558 * on its phys, it may configure the phys as a wide port.
2559 */
2560 if (phy->phy_type & PORT_TYPE_SAS)
2561 for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
2562 if ((mvi->phy[j].phy_attached) &&
2563 (mvi->phy[j].phy_type & PORT_TYPE_SAS))
2564 if (phy->att_dev_sas_addr ==
2565 mvi->phy[j].att_dev_sas_addr - 1) {
2566 phy->att_dev_sas_addr =
2567 mvi->phy[j].att_dev_sas_addr;
2568 break;
2569 }
2570 }
2571
2572#endif
2573
2574 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2575 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2576
2577 if (phy->phy_type & PORT_TYPE_SAS) {
2578 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2579 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2580 phy->identify.device_type =
2581 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2582
2583 if (phy->identify.device_type == SAS_END_DEV)
2584 phy->identify.target_port_protocols =
2585 SAS_PROTOCOL_SSP;
2586 else if (phy->identify.device_type != NO_DEVICE)
2587 phy->identify.target_port_protocols =
2588 SAS_PROTOCOL_SMP;
2589 if (phy_st & PHY_OOB_DTCTD)
2590 sas_phy->oob_mode = SAS_OOB_MODE;
2591 phy->frame_rcvd_size =
2592 sizeof(struct sas_identify_frame);
2593 } else if (phy->phy_type & PORT_TYPE_SATA) {
2594 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2595 if (mvs_is_sig_fis_received(phy->irq_status)) {
2596 if (phy_st & PHY_OOB_DTCTD)
2597 sas_phy->oob_mode = SATA_OOB_MODE;
2598 phy->frame_rcvd_size =
2599 sizeof(struct dev_to_host_fis);
2600 mvs_get_d2h_reg(mvi, i,
2601 (void *)sas_phy->frame_rcvd);
2602 } else {
2603 dev_printk(KERN_DEBUG, &pdev->dev,
2604 "No sig fis\n");
2605 }
2606 }
2607 /* workaround for HW phy decoding error on 1.5g disk drive */
2608 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2609 tmp = mvs_read_port_vsr_data(mvi, i);
2610 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2611 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2612 SAS_LINK_RATE_1_5_GBPS)
2613 tmp &= ~PHY_MODE6_DTL_SPEED;
2614 else
2615 tmp |= PHY_MODE6_DTL_SPEED;
2616 mvs_write_port_vsr_data(mvi, i, tmp);
2617
2618 }
2619 if (get_st)
2620 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2621}
2622
2623static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2624{
2625 struct sas_ha_struct *sas_ha = sas_phy->ha;
2626 struct mvs_info *mvi = sas_ha->lldd_ha;
2627 struct asd_sas_port *sas_port = sas_phy->port;
2628 struct mvs_phy *phy = sas_phy->lldd_phy;
2629 struct mvs_port *port = &mvi->port[sas_port->id];
2630 unsigned long flags;
2631
2632 spin_lock_irqsave(&mvi->lock, flags);
2633 port->port_attached = 1;
2634 phy->port = port;
2635 port->taskfileset = MVS_ID_NOT_MAPPED;
2636 if (phy->phy_type & PORT_TYPE_SAS) {
2637 port->wide_port_phymap = sas_port->phy_mask;
2638 mvs_update_wideport(mvi, sas_phy->id);
2639 }
2640 spin_unlock_irqrestore(&mvi->lock, flags);
1550} 2641}
1551 2642
1552static int __devinit mvs_hw_init(struct mvs_info *mvi) 2643static int __devinit mvs_hw_init(struct mvs_info *mvi)
@@ -1559,6 +2650,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1559 mw32(GBL_CTL, 0); 2650 mw32(GBL_CTL, 0);
1560 tmp = mr32(GBL_CTL); 2651 tmp = mr32(GBL_CTL);
1561 2652
2653 /* Reset Controller */
1562 if (!(tmp & HBA_RST)) { 2654 if (!(tmp & HBA_RST)) {
1563 if (mvi->flags & MVF_PHY_PWR_FIX) { 2655 if (mvi->flags & MVF_PHY_PWR_FIX) {
1564 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); 2656 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
@@ -1576,7 +2668,6 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1576 mw32_f(GBL_CTL, HBA_RST); 2668 mw32_f(GBL_CTL, HBA_RST);
1577 } 2669 }
1578 2670
1579
1580 /* wait for reset to finish; timeout is just a guess */ 2671 /* wait for reset to finish; timeout is just a guess */
1581 i = 1000; 2672 i = 1000;
1582 while (i-- > 0) { 2673 while (i-- > 0) {
@@ -1590,6 +2681,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1590 return -EBUSY; 2681 return -EBUSY;
1591 } 2682 }
1592 2683
2684 /* Init Chip */
1593 /* make sure RST is set; HBA_RST /should/ have done that for us */ 2685 /* make sure RST is set; HBA_RST /should/ have done that for us */
1594 cctl = mr32(CTL); 2686 cctl = mr32(CTL);
1595 if (cctl & CCTL_RST) 2687 if (cctl & CCTL_RST)
@@ -1597,6 +2689,12 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1597 else 2689 else
1598 mw32_f(CTL, cctl | CCTL_RST); 2690 mw32_f(CTL, cctl | CCTL_RST);
1599 2691
2692 /* write to device control _AND_ device status register? - A.C. */
2693 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2694 tmp &= ~PRD_REQ_MASK;
2695 tmp |= PRD_REQ_SIZE;
2696 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2697
1600 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); 2698 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1601 tmp |= PCTL_PWR_ON; 2699 tmp |= PCTL_PWR_ON;
1602 tmp &= ~PCTL_OFF; 2700 tmp &= ~PCTL_OFF;
@@ -1609,6 +2707,9 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1609 2707
1610 mw32_f(CTL, cctl); 2708 mw32_f(CTL, cctl);
1611 2709
2710 /* reset control */
2711 mw32(PCS, 0); /*MVS_PCS */
2712
1612 mvs_phy_hacks(mvi); 2713 mvs_phy_hacks(mvi);
1613 2714
1614 mw32(CMD_LIST_LO, mvi->slot_dma); 2715 mw32(CMD_LIST_LO, mvi->slot_dma);
@@ -1617,7 +2718,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1617 mw32(RX_FIS_LO, mvi->rx_fis_dma); 2718 mw32(RX_FIS_LO, mvi->rx_fis_dma);
1618 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); 2719 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
1619 2720
1620 mw32(TX_CFG, MVS_TX_RING_SZ); 2721 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
1621 mw32(TX_LO, mvi->tx_dma); 2722 mw32(TX_LO, mvi->tx_dma);
1622 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); 2723 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
1623 2724
@@ -1625,44 +2726,88 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
1625 mw32(RX_LO, mvi->rx_dma); 2726 mw32(RX_LO, mvi->rx_dma);
1626 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); 2727 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
1627 2728
2729 /* enable auto port detection */
2730 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2731 msleep(100);
1628 /* init and reset phys */ 2732 /* init and reset phys */
1629 for (i = 0; i < mvi->chip->n_phy; i++) { 2733 for (i = 0; i < mvi->chip->n_phy; i++) {
1630 /* FIXME: is this the correct dword order? */ 2734 /* FIXME: is this the correct dword order? */
1631 u32 lo = *((u32 *) &mvi->sas_addr[0]); 2735 u32 lo = *((u32 *)&mvi->sas_addr[0]);
1632 u32 hi = *((u32 *) &mvi->sas_addr[4]); 2736 u32 hi = *((u32 *)&mvi->sas_addr[4]);
2737
2738 mvs_detect_porttype(mvi, i);
1633 2739
1634 /* set phy local SAS address */ 2740 /* set phy local SAS address */
1635 mvs_phy_write(mvi, i, PHYR_ADDR_LO, lo); 2741 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
1636 mvs_phy_write(mvi, i, PHYR_ADDR_HI, hi); 2742 mvs_write_port_cfg_data(mvi, i, lo);
2743 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2744 mvs_write_port_cfg_data(mvi, i, hi);
1637 2745
1638 /* reset phy */ 2746 /* reset phy */
1639 tmp = readl(regs + MVS_P0_SER_CTLSTAT + (i * 4)); 2747 tmp = mvs_read_phy_ctl(mvi, i);
1640 tmp |= PHY_RST; 2748 tmp |= PHY_RST;
1641 writel(tmp, regs + MVS_P0_SER_CTLSTAT + (i * 4)); 2749 mvs_write_phy_ctl(mvi, i, tmp);
1642 } 2750 }
1643 2751
1644 msleep(100); 2752 msleep(100);
1645 2753
1646 for (i = 0; i < mvi->chip->n_phy; i++) { 2754 for (i = 0; i < mvi->chip->n_phy; i++) {
2755 /* clear phy int status */
2756 tmp = mvs_read_port_irq_stat(mvi, i);
2757 tmp &= ~PHYEV_SIG_FIS;
2758 mvs_write_port_irq_stat(mvi, i, tmp);
2759
1647 /* set phy int mask */ 2760 /* set phy int mask */
1648 writel(PHYEV_BROAD_CH | PHYEV_RDY_CH, 2761 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
1649 regs + MVS_P0_INT_MASK + (i * 8)); 2762 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2763 mvs_write_port_irq_mask(mvi, i, tmp);
1650 2764
1651 /* clear phy int status */ 2765 msleep(100);
1652 tmp = readl(regs + MVS_P0_INT_STAT + (i * 8)); 2766 mvs_update_phyinfo(mvi, i, 1);
1653 writel(tmp, regs + MVS_P0_INT_STAT + (i * 8)); 2767 mvs_enable_xmt(mvi, i);
1654 } 2768 }
1655 2769
1656 /* FIXME: update wide port bitmaps */ 2770 /* FIXME: update wide port bitmaps */
1657 2771
2772 /* little endian for open address and command table, etc. */
2773 /* A.C.
 2774 * it seems that (from the spec) turning on big-endian won't
 2775 * do us any good on big-endian machines; needs further confirmation
2776 */
2777 cctl = mr32(CTL);
2778 cctl |= CCTL_ENDIAN_CMD;
2779 cctl |= CCTL_ENDIAN_DATA;
2780 cctl &= ~CCTL_ENDIAN_OPEN;
2781 cctl |= CCTL_ENDIAN_RSP;
2782 mw32_f(CTL, cctl);
2783
2784 /* reset CMD queue */
2785 tmp = mr32(PCS);
2786 tmp |= PCS_CMD_RST;
2787 mw32(PCS, tmp);
 2788 /* interrupt coalescing may cause a missed HW interrupt in some cases,
 2789 * and the max count is 0x1ff while our max slot count is 0x200,
 2790 * so leave the coalescing count at 0.
 2791 */
2792 tmp = 0;
2793 mw32(INT_COAL, tmp);
2794
2795 tmp = 0x100;
2796 mw32(INT_COAL_TMOUT, tmp);
2797
1658 /* ladies and gentlemen, start your engines */ 2798 /* ladies and gentlemen, start your engines */
1659 mw32(TX_CFG, MVS_TX_RING_SZ | TX_EN); 2799 mw32(TX_CFG, 0);
2800 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
1660 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); 2801 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
1661 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN | 2802 /* enable CMD/CMPL_Q/RESP mode */
1662 ((mvi->flags & MVF_MSI) ? PCS_SELF_CLEAR : 0)); 2803 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
1663 2804
1664 /* re-enable interrupts globally */ 2805 /* re-enable interrupts globally */
1665 mw32(GBL_CTL, INT_EN); 2806 mvs_hba_interrupt_enable(mvi);
2807
2808 /* enable completion queue interrupt */
2809 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
2810 mw32(INT_MASK, tmp);
1666 2811
1667 return 0; 2812 return 0;
1668} 2813}
@@ -1680,7 +2825,7 @@ static void __devinit mvs_print_info(struct mvs_info *mvi)
1680} 2825}
1681 2826
1682static int __devinit mvs_pci_init(struct pci_dev *pdev, 2827static int __devinit mvs_pci_init(struct pci_dev *pdev,
1683 const struct pci_device_id *ent) 2828 const struct pci_device_id *ent)
1684{ 2829{
1685 int rc; 2830 int rc;
1686 struct mvs_info *mvi; 2831 struct mvs_info *mvi;
@@ -1710,10 +2855,16 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
1710 if (rc) 2855 if (rc)
1711 goto err_out_mvi; 2856 goto err_out_mvi;
1712 2857
2858#ifndef MVS_DISABLE_MSI
1713 if (!pci_enable_msi(pdev)) { 2859 if (!pci_enable_msi(pdev)) {
2860 u32 tmp;
2861 void __iomem *regs = mvi->regs;
1714 mvi->flags |= MVF_MSI; 2862 mvi->flags |= MVF_MSI;
1715 irq_handler = mvs_msi_interrupt; 2863 irq_handler = mvs_msi_interrupt;
2864 tmp = mr32(PCS);
2865 mw32(PCS, tmp | PCS_SELF_CLEAR);
1716 } 2866 }
2867#endif
1717 2868
1718 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); 2869 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
1719 if (rc) 2870 if (rc)
@@ -1732,6 +2883,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
1732 mvs_print_info(mvi); 2883 mvs_print_info(mvi);
1733 2884
1734 scsi_scan_host(mvi->shost); 2885 scsi_scan_host(mvi->shost);
2886
1735 return 0; 2887 return 0;
1736 2888
1737err_out_shost: 2889err_out_shost:
@@ -1756,21 +2908,26 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
1756 2908
1757 pci_set_drvdata(pdev, NULL); 2909 pci_set_drvdata(pdev, NULL);
1758 2910
1759 sas_unregister_ha(&mvi->sas); 2911 if (mvi) {
1760 sas_remove_host(mvi->shost); 2912 sas_unregister_ha(&mvi->sas);
1761 scsi_remove_host(mvi->shost); 2913 mvs_hba_interrupt_disable(mvi);
1762 2914 sas_remove_host(mvi->shost);
1763 free_irq(pdev->irq, mvi); 2915 scsi_remove_host(mvi->shost);
1764 if (mvi->flags & MVF_MSI) 2916
1765 pci_disable_msi(pdev); 2917 free_irq(pdev->irq, mvi);
1766 mvs_free(mvi); 2918 if (mvi->flags & MVF_MSI)
1767 pci_release_regions(pdev); 2919 pci_disable_msi(pdev);
2920 mvs_free(mvi);
2921 pci_release_regions(pdev);
2922 }
1768 pci_disable_device(pdev); 2923 pci_disable_device(pdev);
1769} 2924}
1770 2925
1771static struct sas_domain_function_template mvs_transport_ops = { 2926static struct sas_domain_function_template mvs_transport_ops = {
1772 .lldd_execute_task = mvs_task_exec, 2927 .lldd_execute_task = mvs_task_exec,
1773 .lldd_control_phy = mvs_phy_control, 2928 .lldd_control_phy = mvs_phy_control,
2929 .lldd_abort_task = mvs_task_abort,
2930 .lldd_port_formed = mvs_port_formed
1774}; 2931};
1775 2932
1776static struct pci_device_id __devinitdata mvs_pci_table[] = { 2933static struct pci_device_id __devinitdata mvs_pci_table[] = {
@@ -1822,4 +2979,3 @@ MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
1822MODULE_VERSION(DRV_VERSION); 2979MODULE_VERSION(DRV_VERSION);
1823MODULE_LICENSE("GPL"); 2980MODULE_LICENSE("GPL");
1824MODULE_DEVICE_TABLE(pci, mvs_pci_table); 2981MODULE_DEVICE_TABLE(pci, mvs_pci_table);
1825