path: root/drivers/scsi/mvsas.c
author  Jeff Garzik <jeff@garzik.org>  2007-10-25 20:58:22 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2008-02-23 08:29:31 -0500
commit  b5762948263dd5e9725a380e7a9626f99e40ae9d (patch)
tree  24eefba14b6394f53e258f13f964aa2b9e992913 /drivers/scsi/mvsas.c
parent  63e4563b9cf77875286312758f61a20f912afbbb (diff)
[SCSI] mvsas: Add Marvell 6440 SAS/SATA driver
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/mvsas.c')
-rw-r--r--  drivers/scsi/mvsas.c  1825
1 file changed, 1825 insertions, 0 deletions
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
new file mode 100644
index 000000000000..03638b9bb283
--- /dev/null
+++ b/drivers/scsi/mvsas.c
@@ -0,0 +1,1825 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2,
9 or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty
13 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 See the GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public
17 License along with this program; see the file COPYING. If not,
18 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
19 MA 02139, USA.
20
21 ---------------------------------------------------------------
22
23 Random notes:
24 * The hardware supports controlling the endianness of data
25 structures; this permits elimination of all the le32_to_cpu()
26 and cpu_to_le32() conversions.
27
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/interrupt.h>
34#include <linux/spinlock.h>
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37#include <scsi/libsas.h>
38#include <asm/io.h>
39
40#define DRV_NAME "mvsas"
41#define DRV_VERSION "0.1"
42
43#define mr32(reg) readl(regs + MVS_##reg)
44#define mw32(reg,val) writel((val), regs + MVS_##reg)
45#define mw32_f(reg,val) do { \
46 writel((val), regs + MVS_##reg); \
47 readl(regs + MVS_##reg); \
48 } while (0)
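/* Usage sketch (editorial, not part of the original patch): mw32_f() is
 * the flushing variant -- the readl() after the writel() forces the
 * posted PCI write out to the chip before we proceed, e.g.
 *
 *	mw32(INT_STAT, stat);		posted write, may linger
 *	mw32_f(GBL_CTL, HBA_RST);	write, then read back to flush
 *
 * both of which mirror calls made later in this file.
 */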
49
50/* driver compile-time configuration */
51enum driver_configuration {
52 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
53 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
54 /* software requires power-of-2
55 ring size */
56
57 MVS_SLOTS = 512, /* command slots */
58 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
59 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
60 MVS_ATA_CMD_SZ = 128, /* SATA command table buffer size */
61 MVS_OAF_SZ = 64, /* Open address frame buffer size */
62
63 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
64};
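/* Sketch (not part of the original patch): the power-of-2 requirement
 * above exists because ring indices are advanced with
 * "idx = (idx + 1) & (RING_SZ - 1)" in mvs_task_exec() and mvs_int_rx().
 * A minimal compile-time guard could be
 *
 *	BUILD_BUG_ON(MVS_TX_RING_SZ & (MVS_TX_RING_SZ - 1));
 *	BUILD_BUG_ON(MVS_RX_RING_SZ & (MVS_RX_RING_SZ - 1));
 *
 * placed in an init path such as mvs_init().
 */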
65
66/* unchangeable hardware details */
67enum hardware_details {
68 MVS_MAX_PHYS = 8, /* max. possible phys */
69 MVS_MAX_PORTS = 8, /* max. possible ports */
70 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
71};
72
73/* peripheral registers (BAR2) */
74enum peripheral_registers {
75 SPI_CTL = 0x10, /* EEPROM control */
76 SPI_CMD = 0x14, /* EEPROM command */
77 SPI_DATA = 0x18, /* EEPROM data */
78};
79
80enum peripheral_register_bits {
81 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
82 TWSI_RD = (1U << 4), /* EEPROM read access */
83
84 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
85};
86
87/* enhanced mode registers (BAR4) */
88enum hw_registers {
89 MVS_GBL_CTL = 0x04, /* global control */
90 MVS_GBL_INT_STAT = 0x08, /* global irq status */
91 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
92 MVS_GBL_PORT_TYPE = 0x00, /* port type */
93
94 MVS_CTL = 0x100, /* SAS/SATA port configuration */
95 MVS_PCS = 0x104, /* SAS/SATA port control/status */
96 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
97 MVS_CMD_LIST_HI = 0x10C,
98 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
99 MVS_RX_FIS_HI = 0x114,
100
101 MVS_TX_CFG = 0x120, /* TX configuration */
102 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
103 MVS_TX_HI = 0x128,
104
105 MVS_TX_PROD_IDX = 0x12C, /* TX producer (delivery) index */
106 MVS_TX_CONS_IDX = 0x130, /* TX consumer index (RO) */
107 MVS_RX_CFG = 0x134, /* RX configuration */
108 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
109 MVS_RX_HI = 0x13C,
110
111 MVS_INT_COAL = 0x148, /* Int coalescing config */
112 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
113 MVS_INT_STAT = 0x150, /* Central int status */
114 MVS_INT_MASK = 0x154, /* Central int enable */
115 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
116
117 /* ports 1-3 follow after this */
118 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
119 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
120
121 /* ports 1-3 follow after this */
122 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
123
124 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
125 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
126
127 /* ports 1-3 follow after this */
128 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
129 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
130};
131
132enum hw_register_bits {
133 /* MVS_GBL_CTL */
134 INT_EN = (1U << 1), /* Global int enable */
135 HBA_RST = (1U << 0), /* HBA reset */
136
137 /* MVS_GBL_INT_STAT */
138 INT_XOR = (1U << 4), /* XOR engine event */
139 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
140
141 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
142 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
143 AUTO_DET = (1U << 8), /* port0 SAS/SATA autodetect */
144 SAS_MODE = (1U << 0), /* port0 SAS(1), SATA(0) mode */
145 /* SAS_MODE value may be
146 * dictated (in hw) by values
147 * of SATA_TARGET & AUTO_DET
148 */
149
150 /* MVS_TX_CFG */
151 TX_EN = (1U << 16), /* Enable TX */
152 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
153
154 /* MVS_RX_CFG */
155 RX_EN = (1U << 16), /* Enable RX */
156 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
157
158 /* MVS_INT_COAL */
159 COAL_EN = (1U << 16), /* Enable int coalescing */
160
161 /* MVS_INT_STAT, MVS_INT_MASK */
162 CINT_I2C = (1U << 31), /* I2C event */
163 CINT_SW0 = (1U << 30), /* software event 0 */
164 CINT_SW1 = (1U << 29), /* software event 1 */
165 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
166 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
167 CINT_MEM = (1U << 26), /* int mem parity err */
168 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
169 CINT_SRS = (1U << 3), /* SRS event */
170 CINT_CI_STOP = (1U << 10), /* cmd issue stopped */
171 CINT_DONE = (1U << 0), /* cmd completion */
172
173 /* shl for ports 1-3 */
174 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
175 CINT_PORT = (1U << 8), /* port0 event */
176
177 /* TX (delivery) ring bits */
178 TXQ_CMD_SHIFT = 29,
179 TXQ_CMD_SSP = 1, /* SSP protocol */
180 TXQ_CMD_SMP = 2, /* SMP protocol */
181 TXQ_CMD_STP = 3, /* STP/SATA protocol */
182 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
183 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
184 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
185 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
186 TXQ_SRS_SHIFT = 20, /* SATA register set */
187 TXQ_SRS_MASK = 0x7f,
188 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
189 TXQ_PHY_MASK = 0xff,
190 TXQ_SLOT_MASK = 0xfff, /* slot number */
191
192 /* RX (completion) ring bits */
193 RXQ_GOOD = (1U << 23), /* Response good */
194 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
195 RXQ_CMD_RX = (1U << 20), /* target cmd received */
196 RXQ_ATTN = (1U << 19), /* attention */
197 RXQ_RSP = (1U << 18), /* response frame xfer'd */
198 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
199 RXQ_DONE = (1U << 16), /* cmd complete */
200 RXQ_SLOT_MASK = 0xfff, /* slot number */
201
202 /* mvs_cmd_hdr bits */
203 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
204 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
205
206 /* SSP initiator only */
207 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
208
209 /* SSP initiator or target */
210 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
211
212 /* SSP target only */
213 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
214 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
215 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
216 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
217
218 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
219 MCH_FBURST = (1U << 11), /* first burst (SSP) */
220 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
221 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
222 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
223 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
224 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
225 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
226 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
227 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
228
229 CCTL_RST = (1U << 5), /* port logic reset */
230
231 /* 0(LSB first), 1(MSB first) */
232 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
233 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
234 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
235 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
236
237 /* MVS_Px_SER_CTLSTAT (per-phy control) */
238 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
239 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
240 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
241 PHY_RST = (1U << 0), /* phy reset */
242
243 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
244 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
245 PHYEV_AN = (1U << 18), /* SATA async notification */
246 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
247 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
248 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
249 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
250 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
251 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
252 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
253 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
254 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
255 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
256 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
257 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
258 PHYEV_ID_DONE = (1U << 2), /* identify done */
259 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
260 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
261
262 /* MVS_PCS */
263 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
264 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
265 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
266 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
267 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
268 PCS_CMD_RST = (1U << 2), /* reset cmd issue */
269 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
270};
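/* Illustration (editorial, mirrors the prep routines later in this file):
 * a TX delivery-queue entry is a single little-endian dword built from
 * the TXQ_* fields above, roughly
 *
 *	u32 dw = (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | TXQ_MODE_I |
 *		 (phy_mask << TXQ_PHY_SHIFT) | tag;
 *	mvi->tx[tag] = cpu_to_le32(dw);
 *
 * where the tag lands in the TXQ_SLOT_MASK field; SMP entries omit the
 * phy mask.  See mvs_task_prep_ssp()/mvs_task_prep_ata()/mvs_task_prep_smp().
 */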
271
272enum mvs_info_flags {
273 MVF_MSI = (1U << 0), /* MSI is enabled */
274 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
275};
276
277enum sas_cmd_port_registers {
278 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
279 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
280 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
281 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
282 CMD_OOB_SPACE = 0x110, /* OOB space control register */
283 CMD_OOB_BURST = 0x114, /* OOB burst control register */
284 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
285 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
286 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
287 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
288 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
289 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
290 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
291 CMD_ID_TEST = 0x134, /* ID test register */
292 CMD_PL_TIMER = 0x138, /* PL timer register */
293 CMD_WD_TIMER = 0x13c, /* WD timer register */
294 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
295 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
296 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
297 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
298 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
299 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
300 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
301 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
302 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
303 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
304 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
305 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
306 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
307 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
308 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
309 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
310 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
311 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
312 CMD_RESET_COUNT = 0x188, /* Reset Count */
313 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
314 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
315 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
316 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
317 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
318 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
319 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
320 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
321 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
322 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
323 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
324 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
325 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
326 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
327 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
328};
329
330/* SAS/SATA configuration port registers, aka phy registers */
331enum sas_sata_config_port_regs {
332 PHYR_IDENTIFY = 0x0, /* info for IDENTIFY frame */
333 PHYR_ADDR_LO = 0x4, /* my SAS address (low) */
334 PHYR_ADDR_HI = 0x8, /* my SAS address (high) */
335 PHYR_ATT_DEV_INFO = 0xC, /* attached device info */
336 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
337 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
338 PHYR_SATA_CTL = 0x18, /* SATA control */
339 PHYR_PHY_STAT = 0x1C, /* PHY status */
340 PHYR_WIDE_PORT = 0x38, /* wide port participating */
341 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
342 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
343 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
344};
345
346enum pci_cfg_registers {
347 PCR_PHY_CTL = 0x40,
348 PCR_PHY_CTL2 = 0x90,
349};
350
351enum pci_cfg_register_bits {
352 PCTL_PWR_ON = (0xFU << 24),
353 PCTL_OFF = (0xFU << 12),
354};
355
356enum nvram_layout_offsets {
357 NVR_SIG = 0x00, /* 0xAA, 0x55 */
358 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
359};
360
361enum chip_flavors {
362 chip_6320,
363 chip_6440,
364 chip_6480,
365};
366
367struct mvs_chip_info {
368 unsigned int n_phy;
369 unsigned int srs_sz;
370 unsigned int slot_width;
371};
372
373struct mvs_err_info {
374 __le32 flags;
375 __le32 flags2;
376};
377
378struct mvs_prd {
379 __le64 addr; /* 64-bit buffer address */
380 __le32 reserved;
381 __le32 len; /* 16-bit length */
382};
383
384struct mvs_cmd_hdr {
385 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
386 __le32 lens; /* cmd, max resp frame len */
387 __le32 tags; /* targ port xfer tag; tag */
388 __le32 data_len; /* data xfer len */
389 __le64 cmd_tbl; /* command table address */
390 __le64 open_frame; /* open addr frame address */
391 __le64 status_buf; /* status buffer address */
392 __le64 prd_tbl; /* PRD tbl address */
393 __le32 reserved[4];
394};
395
396struct mvs_slot_info {
397 struct sas_task *task;
398 unsigned int n_elem;
399
400 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
401 * and PRD table
402 */
403 void *buf;
404 dma_addr_t buf_dma;
405
406 void *response;
407};
408
409struct mvs_port {
410 struct asd_sas_port sas_port;
411};
412
413struct mvs_phy {
414 struct mvs_port *port;
415 struct asd_sas_phy sas_phy;
416
417 u8 frame_rcvd[24 + 1024];
418};
419
420struct mvs_info {
421 unsigned long flags;
422
423 spinlock_t lock; /* host-wide lock */
424 struct pci_dev *pdev; /* our device */
425 void __iomem *regs; /* enhanced mode registers */
426 void __iomem *peri_regs; /* peripheral registers */
427
428 u8 sas_addr[SAS_ADDR_SIZE];
429 struct sas_ha_struct sas; /* SCSI/SAS glue */
430 struct Scsi_Host *shost;
431
432 __le32 *tx; /* TX (delivery) DMA ring */
433 dma_addr_t tx_dma;
434 u32 tx_prod; /* cached next-producer idx */
435
436 __le32 *rx; /* RX (completion) DMA ring */
437 dma_addr_t rx_dma;
438 u32 rx_cons; /* RX consumer idx */
439
440 __le32 *rx_fis; /* RX'd FIS area */
441 dma_addr_t rx_fis_dma;
442
443 struct mvs_cmd_hdr *slot; /* DMA command header slots */
444 dma_addr_t slot_dma;
445
446 const struct mvs_chip_info *chip;
447
448 /* further per-slot information */
449 struct mvs_slot_info slot_info[MVS_SLOTS];
450 unsigned long tags[(MVS_SLOTS / sizeof(unsigned long)) + 1];
451
452 struct mvs_phy phy[MVS_MAX_PHYS];
453 struct mvs_port port[MVS_MAX_PHYS];
454};
455
456static struct scsi_transport_template *mvs_stt;
457
458static const struct mvs_chip_info mvs_chips[] = {
459 [chip_6320] = { 2, 16, 9 },
460 [chip_6440] = { 4, 16, 9 },
461 [chip_6480] = { 8, 32, 10 },
462};
463
464static struct scsi_host_template mvs_sht = {
465 .module = THIS_MODULE,
466 .name = DRV_NAME,
467 .queuecommand = sas_queuecommand,
468 .target_alloc = sas_target_alloc,
469 .slave_configure = sas_slave_configure,
470 .slave_destroy = sas_slave_destroy,
471 .change_queue_depth = sas_change_queue_depth,
472 .change_queue_type = sas_change_queue_type,
473 .bios_param = sas_bios_param,
474 .can_queue = 1,
475 .cmd_per_lun = 1,
476 .this_id = -1,
477 .sg_tablesize = SG_ALL,
478 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
479 .use_clustering = ENABLE_CLUSTERING,
480 .eh_device_reset_handler= sas_eh_device_reset_handler,
481 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
482 .slave_alloc = sas_slave_alloc,
483 .target_destroy = sas_target_destroy,
484 .ioctl = sas_ioctl,
485};
486
487static void mvs_int_rx(struct mvs_info *mvi, bool self_clear);
488
489/* move to PCI layer or libata core? */
490static int pci_go_64(struct pci_dev *pdev)
491{
492 int rc;
493
494 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
495 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
496 if (rc) {
497 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
498 if (rc) {
499 dev_printk(KERN_ERR, &pdev->dev,
500 "64-bit DMA enable failed\n");
501 return rc;
502 }
503 }
504 } else {
505 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
506 if (rc) {
507 dev_printk(KERN_ERR, &pdev->dev,
508 "32-bit DMA enable failed\n");
509 return rc;
510 }
511 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
512 if (rc) {
513 dev_printk(KERN_ERR, &pdev->dev,
514 "32-bit consistent DMA enable failed\n");
515 return rc;
516 }
517 }
518
519 return rc;
520}
521
522static void mvs_tag_clear(struct mvs_info *mvi, unsigned int tag)
523{
524 mvi->tags[tag / sizeof(unsigned long)] &=
525 ~(1UL << (tag % sizeof(unsigned long)));
526}
527
528static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
529{
530 mvi->tags[tag / sizeof(unsigned long)] |=
531 (1UL << (tag % sizeof(unsigned long)));
532}
533
534static bool mvs_tag_test(struct mvs_info *mvi, unsigned int tag)
535{
536 return mvi->tags[tag / sizeof(unsigned long)] &
537 (1UL << (tag % sizeof(unsigned long)));
538}
539
540static int mvs_tag_alloc(struct mvs_info *mvi, unsigned int *tag_out)
541{
542 unsigned int i;
543
544 for (i = 0; i < MVS_SLOTS; i++)
545 if (!mvs_tag_test(mvi, i)) {
546 mvs_tag_set(mvi, i);
547 *tag_out = i;
548 return 0;
549 }
550
551 return -EBUSY;
552}
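/* Tag lifecycle sketch (editorial, not new driver logic):
 *
 *	unsigned int tag;
 *	if (mvs_tag_alloc(mvi, &tag))	all MVS_SLOTS tags are busy
 *		return -EBUSY;
 *	... build command slot 'tag', hand it to the hardware ...
 *	mvs_tag_clear(mvi, tag);	on completion or on error
 *
 * In this file, mvs_task_exec() allocates the tag, mvs_slot_free() (via
 * mvs_slot_complete()) releases it on completion, and the error path of
 * mvs_task_exec() releases it if submission fails.
 */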
553
554static int mvs_eep_read(void __iomem *regs, unsigned int addr, u32 *data)
555{
556 int timeout = 1000;
557
558 if (addr & ~SPI_ADDR_MASK)
559 return -EINVAL;
560
561 writel(addr, regs + SPI_CMD);
562 writel(TWSI_RD, regs + SPI_CTL);
563
564 while (timeout-- > 0) {
565 if (readl(regs + SPI_CTL) & TWSI_RDY) {
566 *data = readl(regs + SPI_DATA);
567 return 0;
568 }
569
570 udelay(10);
571 }
572
573 return -EBUSY;
574}
575
576static int mvs_eep_read_buf(void __iomem *regs, unsigned int addr,
577 void *buf, unsigned int buflen)
578{
579 unsigned int addr_end, tmp_addr, i, j;
580 u32 tmp = 0;
581 int rc;
582 u8 *tmp8, *buf8 = buf;
583
584 addr_end = addr + buflen;
585 tmp_addr = ALIGN(addr, 4);
586 if (addr > 0xff)
587 return -EINVAL;
588
589 j = addr & 0x3;
590 if (j) {
591 rc = mvs_eep_read(regs, tmp_addr, &tmp);
592 if (rc)
593 return rc;
594
595 tmp8 = (u8 *) &tmp;
596 for (i = j; i < 4; i++)
597 *buf8++ = tmp8[i];
598
599 tmp_addr += 4;
600 }
601
602 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
603 rc = mvs_eep_read(regs, tmp_addr, &tmp);
604 if (rc)
605 return rc;
606
607 memcpy(buf8, &tmp, 4);
608 buf8 += 4;
609 }
610
611 if (tmp_addr < addr_end) {
612 rc = mvs_eep_read(regs, tmp_addr, &tmp);
613 if (rc)
614 return rc;
615
616 tmp8 = (u8 *) &tmp;
617 j = addr_end - tmp_addr;
618 for (i = 0; i < j; i++)
619 *buf8++ = tmp8[i];
620
621 tmp_addr += 4;
622 }
623
624 return 0;
625}
626
627static int mvs_nvram_read(struct mvs_info *mvi, unsigned int addr,
628 void *buf, unsigned int buflen)
629{
630 void __iomem *regs = mvi->regs;
631 int rc, i;
632 unsigned int sum;
633 u8 hdr[2], *tmp;
634 const char *msg;
635
636 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
637 if (rc) {
638 msg = "nvram hdr read failed";
639 goto err_out;
640 }
641 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
642 if (rc) {
643 msg = "nvram read failed";
644 goto err_out;
645 }
646
647 if (hdr[0] != 0x5A) { /* entry id */
648 msg = "invalid nvram entry id";
649 rc = -ENOENT;
650 goto err_out;
651 }
652
653 tmp = buf;
654 sum = ((unsigned int)hdr[0]) + ((unsigned int)hdr[1]);
655 for (i = 0; i < buflen; i++)
656 sum += ((unsigned int)tmp[i]);
657
658 if (sum) {
659 msg = "nvram checksum failure";
660 rc = -EILSEQ;
661 goto err_out;
662 }
663
664 return 0;
665
666err_out:
667 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg);
668 return rc;
669}
670
671static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
672{
673 /* FIXME */
674}
675
676static void mvs_int_sata(struct mvs_info *mvi)
677{
678 /* FIXME */
679}
680
681static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
682 struct mvs_slot_info *slot, unsigned int slot_idx)
683{
684 if (slot->n_elem)
685 pci_unmap_sg(mvi->pdev, task->scatter,
686 slot->n_elem, task->data_dir);
687
688 switch (task->task_proto) {
689 case SAS_PROTOCOL_SMP:
690 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
691 PCI_DMA_FROMDEVICE);
692 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
693 PCI_DMA_TODEVICE);
694 break;
695
696 case SAS_PROTOCOL_SATA:
697 case SAS_PROTOCOL_STP:
698 case SAS_PROTOCOL_SSP:
699 default:
700 /* do nothing */
701 break;
702 }
703
704 mvs_tag_clear(mvi, slot_idx);
705}
706
707static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
708 unsigned int slot_idx)
709{
710 /* FIXME */
711}
712
713static void mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
714{
715 unsigned int slot_idx = rx_desc & RXQ_SLOT_MASK;
716 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
717 struct sas_task *task = slot->task;
718 struct task_status_struct *tstat = &task->task_status;
719 bool aborted;
720
721 spin_lock(&task->task_state_lock);
722 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
723 if (!aborted) {
724 task->task_state_flags &=
725 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
726 task->task_state_flags |= SAS_TASK_STATE_DONE;
727 }
728 spin_unlock(&task->task_state_lock);
729
730 if (aborted)
731 return;
732
733 memset(tstat, 0, sizeof(*tstat));
734 tstat->resp = SAS_TASK_COMPLETE;
735
736 /* error info record present */
737 if (rx_desc & RXQ_ERR) {
738 tstat->stat = SAM_CHECK_COND;
739 mvs_slot_err(mvi, task, slot_idx);
740 goto out;
741 }
742
743 switch (task->task_proto) {
744 case SAS_PROTOCOL_SSP:
745 /* hw says status == 0, datapres == 0 */
746 if (rx_desc & RXQ_GOOD)
747 tstat->stat = SAM_GOOD;
748
749 /* response frame present */
750 else if (rx_desc & RXQ_RSP) {
751 struct ssp_response_iu *iu =
752 slot->response + sizeof(struct mvs_err_info);
753 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
754 }
755
756 /* should never happen? */
757 else
758 tstat->stat = SAM_CHECK_COND;
759 break;
760
761 case SAS_PROTOCOL_SMP:
762 tstat->stat = SAM_GOOD;
763 break;
764
765 case SAS_PROTOCOL_SATA:
766 case SAS_PROTOCOL_STP:
767 if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) == RXQ_DONE)
768 tstat->stat = SAM_GOOD;
769 else
770 tstat->stat = SAM_CHECK_COND;
771 /* FIXME: read taskfile data from SATA register set
772 * associated with SATA target
773 */
774 break;
775
776 default:
777 tstat->stat = SAM_CHECK_COND;
778 break;
779 }
780
781out:
782 mvs_slot_free(mvi, task, slot, slot_idx);
783 task->task_done(task);
784}
785
786static void mvs_int_full(struct mvs_info *mvi)
787{
788 void __iomem *regs = mvi->regs;
789 u32 tmp, stat;
790 int i;
791
792 stat = mr32(INT_STAT);
793
794 for (i = 0; i < MVS_MAX_PORTS; i++) {
795 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
796 if (tmp)
797 mvs_int_port(mvi, i, tmp);
798 }
799
800 if (stat & CINT_SRS)
801 mvs_int_sata(mvi);
802
803 if (stat & (CINT_CI_STOP | CINT_DONE))
804 mvs_int_rx(mvi, false);
805
806 mw32(INT_STAT, stat);
807}
808
809static void mvs_int_rx(struct mvs_info *mvi, bool self_clear)
810{
811 u32 rx_prod_idx, rx_desc;
812 bool attn = false;
813
814 /* the first dword in the RX ring is special: it contains
815 * a mirror of the hardware's RX producer index, so that
816 * we don't have to stall the CPU reading that register.
817 * The actual RX ring is offset by one dword, due to this.
818 */
819 rx_prod_idx = le32_to_cpu(mvi->rx[0]) & 0xfff;
820 if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */
821 mvi->rx_cons = 0xfff;
822 return;
823 }
824 if (mvi->rx_cons == 0xfff)
825 mvi->rx_cons = MVS_RX_RING_SZ - 1;
826
827 while (mvi->rx_cons != rx_prod_idx) {
828 /* increment our internal RX consumer pointer */
829 mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
830
831 /* Read RX descriptor at offset+1, due to above */
832 rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
833
834 if (rx_desc & RXQ_DONE)
835 /* we had a completion, error or no */
836 mvs_slot_complete(mvi, rx_desc);
837
838 if (rx_desc & RXQ_ATTN)
839 attn = true;
840 }
841
842 if (attn && self_clear)
843 mvs_int_full(mvi);
844
845}
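/* Restating the ring layout handled above (editorial note, no new logic):
 *
 *	mvi->rx[0]	mirror of the hardware RX producer index
 *	mvi->rx[1 + i]	completion descriptor for ring slot i
 *
 * so the consumer loop advances with
 * "rx_cons = (rx_cons + 1) & (MVS_RX_RING_SZ - 1)" and always reads
 * mvi->rx[rx_cons + 1]; the sentinel 0xfff means the hardware has not
 * written the ring yet.
 */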
846
847static irqreturn_t mvs_interrupt(int irq, void *opaque)
848{
849 struct mvs_info *mvi = opaque;
850 void __iomem *regs = mvi->regs;
851 u32 stat;
852
853 stat = mr32(GBL_INT_STAT);
854 if (stat == 0 || stat == 0xffffffff)
855 return IRQ_NONE;
856
857 spin_lock(&mvi->lock);
858
859 mvs_int_full(mvi);
860
861 spin_unlock(&mvi->lock);
862
863 return IRQ_HANDLED;
864}
865
866static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
867{
868 struct mvs_info *mvi = opaque;
869
870 spin_lock(&mvi->lock);
871
872 mvs_int_rx(mvi, true);
873
874 spin_unlock(&mvi->lock);
875
876 return IRQ_HANDLED;
877}
878
879struct mvs_task_exec_info {
880 struct sas_task *task;
881 struct mvs_cmd_hdr *hdr;
882 unsigned int tag;
883 int n_elem;
884};
885
886static int mvs_task_prep_smp(struct mvs_info *mvi, struct mvs_task_exec_info *tei)
887{
888 int elem, rc;
889 struct mvs_cmd_hdr *hdr = tei->hdr;
890 struct scatterlist *sg_req, *sg_resp;
891 unsigned int req_len, resp_len, tag = tei->tag;
892
893 /*
894 * DMA-map SMP request, response buffers
895 */
896
897 sg_req = &tei->task->smp_task.smp_req;
898 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
899 if (!elem)
900 return -ENOMEM;
901 req_len = sg_dma_len(sg_req);
902
903 sg_resp = &tei->task->smp_task.smp_resp;
904 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
905 if (!elem) {
906 rc = -ENOMEM;
907 goto err_out;
908 }
909 resp_len = sg_dma_len(sg_resp);
910
911 /* must be in dwords */
912 if ((req_len & 0x3) || (resp_len & 0x3)) {
913 rc = -EINVAL;
914 goto err_out_2;
915 }
916
917 /*
918 * Fill in TX ring and command slot header
919 */
920
921 mvi->tx[tag] = cpu_to_le32(
922 (TXQ_CMD_SMP << TXQ_CMD_SHIFT) | TXQ_MODE_I | tag);
923
924 hdr->flags = 0;
925 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
926 hdr->tags = cpu_to_le32(tag);
927 hdr->data_len = 0;
928 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
929 hdr->open_frame = 0;
930 hdr->status_buf = cpu_to_le64(sg_dma_address(sg_resp));
931 hdr->prd_tbl = 0;
932
933 return 0;
934
935err_out_2:
936 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
937 PCI_DMA_FROMDEVICE);
938err_out:
939 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
940 PCI_DMA_TODEVICE);
941 return rc;
942}
943
944static int mvs_task_prep_ata(struct mvs_info *mvi,
945 struct mvs_task_exec_info *tei)
946{
947 struct sas_task *task = tei->task;
948 struct domain_device *dev = task->dev;
949 struct mvs_cmd_hdr *hdr = tei->hdr;
950 struct asd_sas_port *sas_port = dev->port;
951 unsigned int tag = tei->tag;
952 struct mvs_slot_info *slot = &mvi->slot_info[tag];
953 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
954 struct scatterlist *sg;
955 struct mvs_prd *buf_prd;
956 void *buf_tmp;
957 u8 *buf_cmd, *buf_oaf;
958 dma_addr_t buf_tmp_dma;
959 unsigned int i, req_len, resp_len;
960
961 /* FIXME: fill in SATA register set */
962 mvi->tx[tag] = cpu_to_le32(TXQ_MODE_I | tag |
963 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
964 (sas_port->phy_mask << TXQ_PHY_SHIFT));
965
966 if (task->ata_task.use_ncq)
967 flags |= MCH_FPDMA;
968 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
969 flags |= MCH_ATAPI;
970 /* FIXME: fill in port multiplier number */
971
972 hdr->flags = cpu_to_le32(flags);
973 hdr->tags = cpu_to_le32(tag);
974 hdr->data_len = cpu_to_le32(task->total_xfer_len);
975
976 /*
977 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
978 */
979 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
980
981 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ***************/
982 buf_cmd =
983 buf_tmp = slot->buf;
984 buf_tmp_dma = slot->buf_dma;
985
986 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
987
988 buf_tmp += MVS_ATA_CMD_SZ;
989 buf_tmp_dma += MVS_ATA_CMD_SZ;
990
991 /* region 2: open address frame area (MVS_OAF_SZ bytes) **********/
992 /* used for STP. unused for SATA? */
993 buf_oaf = buf_tmp;
994 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
995
996 buf_tmp += MVS_OAF_SZ;
997 buf_tmp_dma += MVS_OAF_SZ;
998
999 /* region 3: PRD table ***********************************************/
1000 buf_prd = buf_tmp;
1001 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1002
1003 i = sizeof(struct mvs_prd) * tei->n_elem;
1004 buf_tmp += i;
1005 buf_tmp_dma += i;
1006
1007 /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
1008 /* FIXME: probably unused, for SATA. kept here just in case
1009 * we get a STP/SATA error information record
1010 */
1011 slot->response = buf_tmp;
1012 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1013
1014 req_len = sizeof(struct ssp_frame_hdr) + 28;
1015 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1016 sizeof(struct mvs_err_info) - i;
1017
1018 /* request, response lengths */
1019 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1020
1021 /* fill in command FIS and ATAPI CDB */
1022 memcpy(buf_cmd, &task->ata_task.fis,
1023 sizeof(struct host_to_dev_fis));
1024 memcpy(buf_cmd + 0x40, task->ata_task.atapi_packet, 16);
1025
1026 /* fill in PRD (scatter/gather) table, if any */
1027 sg = task->scatter;
1028 for (i = 0; i < tei->n_elem; i++) {
1029 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1030 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1031
1032 sg++;
1033 buf_prd++;
1034 }
1035
1036 return 0;
1037}
1038
1039static int mvs_task_prep_ssp(struct mvs_info *mvi,
1040 struct mvs_task_exec_info *tei)
1041{
1042 struct sas_task *task = tei->task;
1043 struct asd_sas_port *sas_port = task->dev->port;
1044 struct mvs_cmd_hdr *hdr = tei->hdr;
1045 struct mvs_slot_info *slot;
1046 struct scatterlist *sg;
1047 unsigned int resp_len, req_len, i, tag = tei->tag;
1048 struct mvs_prd *buf_prd;
1049 struct ssp_frame_hdr *ssp_hdr;
1050 void *buf_tmp;
1051 u8 *buf_cmd, *buf_oaf, fburst = 0;
1052 dma_addr_t buf_tmp_dma;
1053 u32 flags;
1054
1055 slot = &mvi->slot_info[tag];
1056
1057 mvi->tx[tag] = cpu_to_le32(TXQ_MODE_I | tag |
1058 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1059 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1060
1061 flags = MCH_RETRY;
1062 if (task->ssp_task.enable_first_burst) {
1063 flags |= MCH_FBURST;
1064 fburst = (1 << 7);
1065 }
1066 hdr->flags = cpu_to_le32(flags |
1067 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1068 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1069
1070 hdr->tags = cpu_to_le32(tag);
1071 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1072
1073 /*
1074 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1075 */
1076 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
1077
1078 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***************/
1079 buf_cmd =
1080 buf_tmp = slot->buf;
1081 buf_tmp_dma = slot->buf_dma;
1082
1083 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1084
1085 buf_tmp += MVS_SSP_CMD_SZ;
1086 buf_tmp_dma += MVS_SSP_CMD_SZ;
1087
1088 /* region 2: open address frame area (MVS_OAF_SZ bytes) **********/
1089 buf_oaf = buf_tmp;
1090 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1091
1092 buf_tmp += MVS_OAF_SZ;
1093 buf_tmp_dma += MVS_OAF_SZ;
1094
1095 /* region 3: PRD table ***********************************************/
1096 buf_prd = buf_tmp;
1097 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1098
1099 i = sizeof(struct mvs_prd) * tei->n_elem;
1100 buf_tmp += i;
1101 buf_tmp_dma += i;
1102
1103 /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
1104 slot->response = buf_tmp;
1105 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1106
1107 req_len = sizeof(struct ssp_frame_hdr) + 28;
1108 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
1109 sizeof(struct mvs_err_info) - i;
1110
1111 /* request, response lengths */
1112 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1113
1114 /* generate open address frame hdr (first 12 bytes) */
1115 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
1116 buf_oaf[1] = task->dev->linkrate & 0xf;
1117 buf_oaf[2] = tag >> 8;
1118 buf_oaf[3] = tag;
1119 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1120
1121 /* fill in SSP frame header */
1122 ssp_hdr = (struct ssp_frame_hdr *) buf_cmd;
1123 ssp_hdr->frame_type = SSP_COMMAND;
1124 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
1125 HASHED_SAS_ADDR_SIZE);
1126 memcpy(ssp_hdr->hashed_src_addr,
1127 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
1128 ssp_hdr->tag = cpu_to_be16(tag);
1129
1130 /* fill in command frame IU */
1131 buf_cmd += sizeof(*ssp_hdr);
1132 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1133 buf_cmd[9] = fburst |
1134 task->ssp_task.task_attr |
1135 (task->ssp_task.task_prio << 3);
1136 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
1137
1138 /* fill in PRD (scatter/gather) table, if any */
1139 sg = task->scatter;
1140 for (i = 0; i < tei->n_elem; i++) {
1141 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1142 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1143
1144 sg++;
1145 buf_prd++;
1146 }
1147
1148 return 0;
1149}
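/* Layout of the per-slot MVS_SLOT_BUF_SZ DMA buffer built by the two prep
 * routines above (editorial sketch; offsets shown for the SSP case, the
 * ATA case uses MVS_ATA_CMD_SZ for the first region):
 *
 *	+0				command table (MVS_SSP_CMD_SZ)
 *	+MVS_SSP_CMD_SZ			open address frame (MVS_OAF_SZ)
 *	+MVS_SSP_CMD_SZ+MVS_OAF_SZ	PRD table (n_elem * sizeof(struct mvs_prd))
 *	remainder			status buffer (slot->response)
 */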
1150
1151static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
1152{
1153 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1154 unsigned int tag = 0xdeadbeef, rc, n_elem = 0;
1155 void __iomem *regs = mvi->regs;
1156 unsigned long flags;
1157 struct mvs_task_exec_info tei;
1158
1159 /* FIXME: STP/SATA support not complete yet */
1160 if (task->task_proto == SAS_PROTOCOL_SATA || task->task_proto == SAS_PROTOCOL_STP)
1161 return -SAS_DEV_NO_RESPONSE;
1162
1163 if (task->num_scatter) {
1164 n_elem = pci_map_sg(mvi->pdev, task->scatter,
1165 task->num_scatter, task->data_dir);
1166 if (!n_elem)
1167 return -ENOMEM;
1168 }
1169
1170 spin_lock_irqsave(&mvi->lock, flags);
1171
1172 rc = mvs_tag_alloc(mvi, &tag);
1173 if (rc)
1174 goto err_out;
1175
1176 mvi->slot_info[tag].task = task;
1177 mvi->slot_info[tag].n_elem = n_elem;
1178 tei.task = task;
1179 tei.hdr = &mvi->slot[tag];
1180 tei.tag = tag;
1181 tei.n_elem = n_elem;
1182
1183 switch (task->task_proto) {
1184 case SAS_PROTOCOL_SMP:
1185 rc = mvs_task_prep_smp(mvi, &tei);
1186 break;
1187 case SAS_PROTOCOL_SSP:
1188 rc = mvs_task_prep_ssp(mvi, &tei);
1189 break;
1190 case SAS_PROTOCOL_SATA:
1191 case SAS_PROTOCOL_STP:
1192 rc = mvs_task_prep_ata(mvi, &tei);
1193 break;
1194 default:
1195 rc = -EINVAL;
1196 break;
1197 }
1198
1199 if (rc)
1200 goto err_out_tag;
1201
1202 /* TODO: select normal or high priority */
1203
1204 mw32(TX_PROD_IDX, mvi->tx_prod);
1205
1206 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_TX_RING_SZ - 1);
1207
1208 spin_lock(&task->task_state_lock);
1209 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1210 spin_unlock(&task->task_state_lock);
1211
1212 spin_unlock_irqrestore(&mvi->lock, flags);
1213 return 0;
1214
1215err_out_tag:
1216 mvs_tag_clear(mvi, tag);
1217err_out:
1218 if (n_elem)
1219 pci_unmap_sg(mvi->pdev, task->scatter, n_elem, task->data_dir);
1220 spin_unlock_irqrestore(&mvi->lock, flags);
1221 return rc;
1222}
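/* Editorial note: mvs_task_exec() is wired up as ->lldd_execute_task in
 * mvs_transport_ops near the end of this file, so libsas invokes it for
 * each sas_task.  The flow above is: DMA-map the scatterlist, allocate a
 * slot tag, build the command (SMP/SSP/ATA prep), then write the delivery
 * (TX) ring producer index to kick the hardware.
 */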
1223
1224static void mvs_free(struct mvs_info *mvi)
1225{
1226 int i;
1227
1228 if (!mvi)
1229 return;
1230
1231 for (i = 0; i < MVS_SLOTS; i++) {
1232 struct mvs_slot_info *slot = &mvi->slot_info[i];
1233
1234 if (slot->buf)
1235 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
1236 slot->buf, slot->buf_dma);
1237 }
1238
1239 if (mvi->tx)
1240 dma_free_coherent(&mvi->pdev->dev,
1241 sizeof(*mvi->tx) * MVS_TX_RING_SZ,
1242 mvi->tx, mvi->tx_dma);
1243 if (mvi->rx_fis)
1244 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
1245 mvi->rx_fis, mvi->rx_fis_dma);
1246 if (mvi->rx)
1247 dma_free_coherent(&mvi->pdev->dev,
1248 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
1249 mvi->rx, mvi->rx_dma);
1250 if (mvi->slot)
1251 dma_free_coherent(&mvi->pdev->dev,
1252 sizeof(*mvi->slot) * MVS_RX_RING_SZ,
1253 mvi->slot, mvi->slot_dma);
1254 if (mvi->peri_regs)
1255 iounmap(mvi->peri_regs);
1256 if (mvi->regs)
1257 iounmap(mvi->regs);
1258 if (mvi->shost)
1259 scsi_host_put(mvi->shost);
1260 kfree(mvi->sas.sas_port);
1261 kfree(mvi->sas.sas_phy);
1262 kfree(mvi);
1263}
1264
1265/* FIXME: locking? */
1266static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
1267 void *funcdata)
1268{
1269 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
1270 void __iomem *reg;
1271 int rc = 0, phy_id = sas_phy->id;
1272 u32 tmp;
1273
1274 reg = mvi->regs + MVS_P0_SER_CTLSTAT + (phy_id * 4);
1275
1276 switch (func) {
1277 case PHY_FUNC_SET_LINK_RATE: {
1278 struct sas_phy_linkrates *rates = funcdata;
1279 u32 lrmin = 0, lrmax = 0;
1280
1281 lrmin = (rates->minimum_linkrate << 8);
1282 lrmax = (rates->maximum_linkrate << 12);
1283
1284 tmp = readl(reg);
1285 if (lrmin) {
1286 tmp &= ~(0xf << 8);
1287 tmp |= lrmin;
1288 }
1289 if (lrmax) {
1290 tmp &= ~(0xf << 12);
1291 tmp |= lrmax;
1292 }
1293 writel(tmp, reg);
1294 break;
1295 }
1296
1297 case PHY_FUNC_HARD_RESET:
1298 tmp = readl(reg);
1299 if (tmp & PHY_RST_HARD)
1300 break;
1301 writel(tmp | PHY_RST_HARD, reg);
1302 break;
1303
1304 case PHY_FUNC_LINK_RESET:
1305 writel(readl(reg) | PHY_RST, reg);
1306 break;
1307
1308 case PHY_FUNC_DISABLE:
1309 case PHY_FUNC_RELEASE_SPINUP_HOLD:
1310 default:
1311 rc = -EOPNOTSUPP;
1312 }
1313
1314 return rc;
1315}
1316
1317static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
1318{
1319 struct mvs_phy *phy = &mvi->phy[phy_id];
1320 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1321
1322 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
1323 sas_phy->class = SAS;
1324 sas_phy->iproto = SAS_PROTOCOL_ALL;
1325 sas_phy->tproto = 0;
1326 sas_phy->type = PHY_TYPE_PHYSICAL;
1327 sas_phy->role = PHY_ROLE_INITIATOR;
1328 sas_phy->oob_mode = OOB_NOT_CONNECTED;
1329 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
1330
1331 sas_phy->id = phy_id;
1332 sas_phy->sas_addr = &mvi->sas_addr[0];
1333 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
1334 sas_phy->ha = &mvi->sas;
1335 sas_phy->lldd_phy = phy;
1336}
1337
1338static struct mvs_info * __devinit mvs_alloc(struct pci_dev *pdev,
1339 const struct pci_device_id *ent)
1340{
1341 struct mvs_info *mvi;
1342 unsigned long res_start, res_len;
1343 struct asd_sas_phy **arr_phy;
1344 struct asd_sas_port **arr_port;
1345 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
1346 int i;
1347
1348 /*
1349 * alloc and init our per-HBA mvs_info struct
1350 */
1351
1352 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
1353 if (!mvi)
1354 return NULL;
1355
1356 spin_lock_init(&mvi->lock);
1357 mvi->pdev = pdev;
1358 mvi->chip = chip;
1359
1360 if (pdev->device == 0x6440 && pdev->revision == 0)
1361 mvi->flags |= MVF_PHY_PWR_FIX;
1362
1363 /*
1364 * alloc and init SCSI, SAS glue
1365 */
1366
1367 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
1368 if (!mvi->shost)
1369 goto err_out;
1370
1371 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
1372 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
1373 if (!arr_phy || !arr_port)
1374 goto err_out;
1375
1376 for (i = 0; i < MVS_MAX_PHYS; i++) {
1377 mvs_phy_init(mvi, i);
1378 arr_phy[i] = &mvi->phy[i].sas_phy;
1379 arr_port[i] = &mvi->port[i].sas_port;
1380 }
1381
1382 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
1383 mvi->shost->transportt = mvs_stt;
1384 mvi->shost->max_id = ~0;
1385 mvi->shost->max_lun = ~0;
1386 mvi->shost->max_cmd_len = ~0;
1387
1388 mvi->sas.sas_ha_name = DRV_NAME;
1389 mvi->sas.dev = &pdev->dev;
1390 mvi->sas.lldd_module = THIS_MODULE;
1391 mvi->sas.sas_addr = &mvi->sas_addr[0];
1392 mvi->sas.sas_phy = arr_phy;
1393 mvi->sas.sas_port = arr_port;
1394 mvi->sas.num_phys = chip->n_phy;
1395 mvi->sas.lldd_max_execute_num = MVS_TX_RING_SZ - 1;/* FIXME: correct? */
1396 mvi->sas.lldd_queue_size = MVS_TX_RING_SZ - 1; /* FIXME: correct? */
1397 mvi->sas.lldd_ha = mvi;
1398 mvi->sas.core.shost = mvi->shost;
1399
1400 mvs_tag_set(mvi, MVS_TX_RING_SZ - 1);
1401
1402 /*
1403 * ioremap main and peripheral registers
1404 */
1405
1406 res_start = pci_resource_start(pdev, 2);
1407 res_len = pci_resource_len(pdev, 2);
1408 if (!res_start || !res_len)
1409 goto err_out;
1410
1411 mvi->peri_regs = ioremap_nocache(res_start, res_len);
1412 if (!mvi->peri_regs)
1413 goto err_out;
1414
1415 res_start = pci_resource_start(pdev, 4);
1416 res_len = pci_resource_len(pdev, 4);
1417 if (!res_start || !res_len)
1418 goto err_out;
1419
1420 mvi->regs = ioremap_nocache(res_start, res_len);
1421 if (!mvi->regs)
1422 goto err_out;
1423
1424 /*
1425 * alloc and init our DMA areas
1426 */
1427
1428 mvi->tx = dma_alloc_coherent(&pdev->dev,
1429 sizeof(*mvi->tx) * MVS_TX_RING_SZ,
1430 &mvi->tx_dma, GFP_KERNEL);
1431 if (!mvi->tx)
1432 goto err_out;
1433 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_TX_RING_SZ);
1434
1435 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
1436 &mvi->rx_fis_dma, GFP_KERNEL);
1437 if (!mvi->rx_fis)
1438 goto err_out;
1439 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
1440
1441 mvi->rx = dma_alloc_coherent(&pdev->dev,
1442 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
1443 &mvi->rx_dma, GFP_KERNEL);
1444 if (!mvi->rx)
1445 goto err_out;
1446 memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
1447
1448 mvi->rx[0] = cpu_to_le32(0xfff);
1449 mvi->rx_cons = 0xfff;
1450
1451 mvi->slot = dma_alloc_coherent(&pdev->dev,
1452 sizeof(*mvi->slot) * MVS_SLOTS,
1453 &mvi->slot_dma, GFP_KERNEL);
1454 if (!mvi->slot)
1455 goto err_out;
1456 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
1457
1458 for (i = 0; i < MVS_SLOTS; i++) {
1459 struct mvs_slot_info *slot = &mvi->slot_info[i];
1460
1461 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
1462 &slot->buf_dma, GFP_KERNEL);
1463 if (!slot->buf)
1464 goto err_out;
1465 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
1466 }
1467
1468 /* finally, read NVRAM to get our SAS address */
1469 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
1470 goto err_out;
1471
1472 return mvi;
1473
1474err_out:
1475 mvs_free(mvi);
1476 return NULL;
1477}
1478
1479static u32 mvs_cr32(void __iomem *regs, u32 addr)
1480{
1481 mw32(CMD_ADDR, addr);
1482 return mr32(CMD_DATA);
1483}
1484
1485static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
1486{
1487 mw32(CMD_ADDR, addr);
1488 mw32(CMD_DATA, val);
1489}
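/* Editorial note: mvs_cr32()/mvs_cw32() implement an indirect register
 * window -- the offset (one of the sas_cmd_port_registers values) goes
 * into MVS_CMD_ADDR and the payload moves through MVS_CMD_DATA, e.g.
 *
 *	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);
 *
 * as mvs_phy_hacks() does below.
 */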
1490
1491#if 0
1492static u32 mvs_phy_read(struct mvs_info *mvi, unsigned int phy_id, u32 addr)
1493{
1494 void __iomem *regs = mvi->regs;
1495 void __iomem *phy_regs = regs + MVS_P0_CFG_ADDR + (phy_id * 8);
1496
1497 writel(addr, phy_regs);
1498 return readl(phy_regs + 4);
1499}
1500#endif
1501
1502static void mvs_phy_write(struct mvs_info *mvi, unsigned int phy_id,
1503 u32 addr, u32 val)
1504{
1505 void __iomem *regs = mvi->regs;
1506 void __iomem *phy_regs = regs + MVS_P0_CFG_ADDR + (phy_id * 8);
1507
1508 writel(addr, phy_regs);
1509 writel(val, phy_regs + 4);
1510 readl(phy_regs); /* flush */
1511}
1512
1513static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
1514{
1515 void __iomem *regs = mvi->regs;
1516 u32 tmp;
1517
1518 /* workaround for SATA R-ERR, to ignore phy glitch */
1519 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
1520 tmp &= ~(1 << 9);
1521 tmp |= (1 << 10);
1522 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
1523
1524 /* enable retry 127 times */
1525 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
1526
1527 /* extend open frame timeout to max */
1528 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
1529 tmp &= ~0xffff;
1530 tmp |= 0x3fff;
1531 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
1532
1533 /* workaround for WDTIMEOUT, set to 550 ms */
1534 mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);
1535
1536 /* not to halt for different port op during wideport link change */
1537 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
1538
1539 /* workaround for Seagate disk not-found OOB sequence, recv
1540 * COMINIT before sending out COMWAKE */
1541 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
1542 tmp &= 0x0000ffff;
1543 tmp |= 0x00fa0000;
1544 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
1545
1546 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
1547 tmp &= 0x1fffffff;
1548 tmp |= (2U << 29); /* 8 ms retry */
1549 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
1550}
1551
1552static int __devinit mvs_hw_init(struct mvs_info *mvi)
1553{
1554 void __iomem *regs = mvi->regs;
1555 int i;
1556 u32 tmp, cctl;
1557
1558 /* make sure interrupts are masked immediately (paranoia) */
1559 mw32(GBL_CTL, 0);
1560 tmp = mr32(GBL_CTL);
1561
1562 if (!(tmp & HBA_RST)) {
1563 if (mvi->flags & MVF_PHY_PWR_FIX) {
1564 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1565 tmp &= ~PCTL_PWR_ON;
1566 tmp |= PCTL_OFF;
1567 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
1568
1569 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
1570 tmp &= ~PCTL_PWR_ON;
1571 tmp |= PCTL_OFF;
1572 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
1573 }
1574
1575 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
1576 mw32_f(GBL_CTL, HBA_RST);
1577 }
1578
1579
1580 /* wait for reset to finish; timeout is just a guess */
1581 i = 1000;
1582 while (i-- > 0) {
1583 msleep(10);
1584
1585 if (!(mr32(GBL_CTL) & HBA_RST))
1586 break;
1587 }
1588 if (mr32(GBL_CTL) & HBA_RST) {
1589 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
1590 return -EBUSY;
1591 }
1592
1593 /* make sure RST is set; HBA_RST /should/ have done that for us */
1594 cctl = mr32(CTL);
1595 if (cctl & CCTL_RST)
1596 cctl &= ~CCTL_RST;
1597 else
1598 mw32_f(CTL, cctl | CCTL_RST);
1599
1600 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1601 tmp |= PCTL_PWR_ON;
1602 tmp &= ~PCTL_OFF;
1603 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
1604
1605 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
1606 tmp |= PCTL_PWR_ON;
1607 tmp &= ~PCTL_OFF;
1608 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
1609
1610 mw32_f(CTL, cctl);
1611
1612 mvs_phy_hacks(mvi);
1613
1614 mw32(CMD_LIST_LO, mvi->slot_dma);
1615 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
1616
1617 mw32(RX_FIS_LO, mvi->rx_fis_dma);
1618 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
1619
1620 mw32(TX_CFG, MVS_TX_RING_SZ);
1621 mw32(TX_LO, mvi->tx_dma);
1622 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
1623
1624 mw32(RX_CFG, MVS_RX_RING_SZ);
1625 mw32(RX_LO, mvi->rx_dma);
1626 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
1627
1628 /* init and reset phys */
1629 for (i = 0; i < mvi->chip->n_phy; i++) {
1630 /* FIXME: is this the correct dword order? */
1631 u32 lo = *((u32 *) &mvi->sas_addr[0]);
1632 u32 hi = *((u32 *) &mvi->sas_addr[4]);
1633
1634 /* set phy local SAS address */
1635 mvs_phy_write(mvi, i, PHYR_ADDR_LO, lo);
1636 mvs_phy_write(mvi, i, PHYR_ADDR_HI, hi);
1637
1638 /* reset phy */
1639 tmp = readl(regs + MVS_P0_SER_CTLSTAT + (i * 4));
1640 tmp |= PHY_RST;
1641 writel(tmp, regs + MVS_P0_SER_CTLSTAT + (i * 4));
1642 }
1643
1644 msleep(100);
1645
1646 for (i = 0; i < mvi->chip->n_phy; i++) {
1647 /* set phy int mask */
1648 writel(PHYEV_BROAD_CH | PHYEV_RDY_CH,
1649 regs + MVS_P0_INT_MASK + (i * 8));
1650
1651 /* clear phy int status */
1652 tmp = readl(regs + MVS_P0_INT_STAT + (i * 8));
1653 writel(tmp, regs + MVS_P0_INT_STAT + (i * 8));
1654 }
1655
1656 /* FIXME: update wide port bitmaps */
1657
1658 /* ladies and gentlemen, start your engines */
1659 mw32(TX_CFG, MVS_TX_RING_SZ | TX_EN);
1660 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
1661 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN |
1662 ((mvi->flags & MVF_MSI) ? PCS_SELF_CLEAR : 0));
1663
1664 /* re-enable interrupts globally */
1665 mw32(GBL_CTL, INT_EN);
1666
1667 return 0;
1668}
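/* Editorial note on the "(addr >> 16) >> 16" expressions above: they
 * extract the high 32 bits of a dma_addr_t while remaining well-defined
 * (and warning-free) when dma_addr_t is only 32 bits wide, where a plain
 * ">> 32" would be a shift by the full width of the type.
 */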
1669
1670static void __devinit mvs_print_info(struct mvs_info *mvi)
1671{
1672 struct pci_dev *pdev = mvi->pdev;
1673 static int printed_version;
1674
1675 if (!printed_version++)
1676 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
1677
1678 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
1679 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
1680}
1681
1682static int __devinit mvs_pci_init(struct pci_dev *pdev,
1683 const struct pci_device_id *ent)
1684{
1685 int rc;
1686 struct mvs_info *mvi;
1687 irq_handler_t irq_handler = mvs_interrupt;
1688
1689 rc = pci_enable_device(pdev);
1690 if (rc)
1691 return rc;
1692
1693 pci_set_master(pdev);
1694
1695 rc = pci_request_regions(pdev, DRV_NAME);
1696 if (rc)
1697 goto err_out_disable;
1698
1699 rc = pci_go_64(pdev);
1700 if (rc)
1701 goto err_out_regions;
1702
1703 mvi = mvs_alloc(pdev, ent);
1704 if (!mvi) {
1705 rc = -ENOMEM;
1706 goto err_out_regions;
1707 }
1708
1709 rc = mvs_hw_init(mvi);
1710 if (rc)
1711 goto err_out_mvi;
1712
1713 if (!pci_enable_msi(pdev)) {
1714 mvi->flags |= MVF_MSI;
1715 irq_handler = mvs_msi_interrupt;
1716 }
1717
1718 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
1719 if (rc)
1720 goto err_out_msi;
1721
1722 rc = scsi_add_host(mvi->shost, &pdev->dev);
1723 if (rc)
1724 goto err_out_irq;
1725
1726 rc = sas_register_ha(&mvi->sas);
1727 if (rc)
1728 goto err_out_shost;
1729
1730 pci_set_drvdata(pdev, mvi);
1731
1732 mvs_print_info(mvi);
1733
1734 scsi_scan_host(mvi->shost);
1735 return 0;
1736
1737err_out_shost:
1738 scsi_remove_host(mvi->shost);
1739err_out_irq:
1740 free_irq(pdev->irq, mvi);
1741err_out_msi:
1742 if (mvi->flags & MVF_MSI)
1743 pci_disable_msi(pdev);
1744err_out_mvi:
1745 mvs_free(mvi);
1746err_out_regions:
1747 pci_release_regions(pdev);
1748err_out_disable:
1749 pci_disable_device(pdev);
1750 return rc;
1751}
1752
1753static void __devexit mvs_pci_remove(struct pci_dev *pdev)
1754{
1755 struct mvs_info *mvi = pci_get_drvdata(pdev);
1756
1757 pci_set_drvdata(pdev, NULL);
1758
1759 sas_unregister_ha(&mvi->sas);
1760 sas_remove_host(mvi->shost);
1761 scsi_remove_host(mvi->shost);
1762
1763 free_irq(pdev->irq, mvi);
1764 if (mvi->flags & MVF_MSI)
1765 pci_disable_msi(pdev);
1766 mvs_free(mvi);
1767 pci_release_regions(pdev);
1768 pci_disable_device(pdev);
1769}
1770
1771static struct sas_domain_function_template mvs_transport_ops = {
1772 .lldd_execute_task = mvs_task_exec,
1773 .lldd_control_phy = mvs_phy_control,
1774};
1775
1776static struct pci_device_id __devinitdata mvs_pci_table[] = {
1777 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
1778 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
1779 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
1780 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
1781
1782 { } /* terminate list */
1783};
1784
1785static struct pci_driver mvs_pci_driver = {
1786 .name = DRV_NAME,
1787 .id_table = mvs_pci_table,
1788 .probe = mvs_pci_init,
1789 .remove = __devexit_p(mvs_pci_remove),
1790};
1791
1792static int __init mvs_init(void)
1793{
1794 int rc;
1795
1796 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
1797 if (!mvs_stt)
1798 return -ENOMEM;
1799
1800 rc = pci_register_driver(&mvs_pci_driver);
1801 if (rc)
1802 goto err_out;
1803
1804 return 0;
1805
1806err_out:
1807 sas_release_transport(mvs_stt);
1808 return rc;
1809}
1810
1811static void __exit mvs_exit(void)
1812{
1813 pci_unregister_driver(&mvs_pci_driver);
1814 sas_release_transport(mvs_stt);
1815}
1816
1817module_init(mvs_init);
1818module_exit(mvs_exit);
1819
1820MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
1821MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
1822MODULE_VERSION(DRV_VERSION);
1823MODULE_LICENSE("GPL");
1824MODULE_DEVICE_TABLE(pci, mvs_pci_table);
1825