author     Andy Yan <ayan@marvell.com>    2009-05-08 17:46:40 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2009-05-20 18:21:12 -0400
commit     20b09c2992fefbe78f8cede7b404fb143a413c52 (patch)
tree       c7e2368e4dd3f38b66db95fa4982ef009e2df00a
parent     dd4969a892ea522ecf9d7d826ba1531ce044d46f (diff)
[SCSI] mvsas: add support for 94xx; layout change; bug fixes
This version contains the following main changes:
 - Switch to a new layout to support more types of ASIC (see the dispatch-table sketch below).
 - SSP TMF is supported and the related error handling is enhanced.
 - Support the flash feature with a 2*HZ delay when a PHY change occurs.
 - Support the Marvell 94xx series ASIC for 6G SAS/SATA, which has two 88SE64xx chips but a different register description.
 - Support SPI flash for HBA-related configuration info.
 - Other enhancements from the kernel side, such as increasing the PHY type.
[jejb: fold back in DMA_BIT_MASK changes]
Signed-off-by: Ying Chu <jasonchu@marvell.com>
Signed-off-by: Andy Yan <ayan@marvell.com>
Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
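
Note on the "new layout" mentioned above: further down in this diff each ASIC family fills in a table of function pointers (see the mvs_64xx_dispatch and mvs_94xx_dispatch initializers at the end of mv_64xx.c and mv_94xx.c), and the shared code reaches the hardware only through that table, which is what lets the 94xx be added alongside the 64xx without branching on chip type throughout the core. Below is a minimal, illustrative sketch of the pattern in plain C; all names in it are hypothetical, since the real struct mvs_dispatch is declared in mv_sas.h, which is not part of the hunks shown here.

/*
 * Illustration only -- NOT the driver's real interface.  A cut-down
 * dispatch table in the style this commit introduces; the actual
 * struct mvs_dispatch lives in drivers/scsi/mvsas/mv_sas.h, so every
 * name below is made up for the example.
 */
#include <stdio.h>

struct chip_ops {
	const char *name;
	int  (*chip_init)(void *hba);        /* role of mvs_64xx_init / mvs_94xx_init */
	void (*interrupt_enable)(void *hba); /* role of mvs_*_interrupt_enable */
};

static int demo_init(void *hba)
{
	(void)hba;
	puts("chip-specific init");
	return 0;
}

static void demo_interrupt_enable(void *hba)
{
	(void)hba;
	puts("chip-specific interrupt enable");
}

static const struct chip_ops demo_64xx_ops = {
	.name             = "mv64xx",
	.chip_init        = demo_init,
	.interrupt_enable = demo_interrupt_enable,
};

/* Common code never checks the chip family; it only calls through the table. */
static int bring_up(const struct chip_ops *ops, void *hba)
{
	int rc = ops->chip_init(hba);

	if (rc)
		return rc;
	ops->interrupt_enable(hba);
	return 0;
}

int main(void)
{
	return bring_up(&demo_64xx_ops, NULL);
}

The real tables carry many more hooks (register accessors, PRD construction, PHY reset, SPI flash access, and so on), as the initializers in the diff below show.
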
-rw-r--r--   drivers/scsi/mvsas/Kconfig       23
-rw-r--r--   drivers/scsi/mvsas/Makefile       9
-rw-r--r--   drivers/scsi/mvsas/mv_64xx.c    785
-rw-r--r--   drivers/scsi/mvsas/mv_64xx.h     75
-rw-r--r--   drivers/scsi/mvsas/mv_94xx.c    672
-rw-r--r--   drivers/scsi/mvsas/mv_94xx.h    222
-rw-r--r--   drivers/scsi/mvsas/mv_chips.h   212
-rw-r--r--   drivers/scsi/mvsas/mv_defs.h    197
-rw-r--r--   drivers/scsi/mvsas/mv_init.c    629
-rw-r--r--   drivers/scsi/mvsas/mv_sas.c    2117
-rw-r--r--   drivers/scsi/mvsas/mv_sas.h     329
11 files changed, 3893 insertions, 1377 deletions
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index f83f368e6902..6de7af27e507 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -1,35 +1,42 @@
1# 1#
2# Kernel configuration file for 88SE64XX SAS/SATA driver. 2# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# 6#
7# This file is licensed under GPLv2. 7# This file is licensed under GPLv2.
8# 8#
9# This file is part of the 88SE64XX driver. 9# This file is part of the 88SE64XX/88SE94XX driver.
10# 10#
11# The 88SE64XX driver is free software; you can redistribute 11# The 88SE64XX/88SE94XX driver is free software; you can redistribute
12# it and/or modify it under the terms of the GNU General Public License 12# it and/or modify it under the terms of the GNU General Public License
13# as published by the Free Software Foundation; version 2 of the 13# as published by the Free Software Foundation; version 2 of the
14# License. 14# License.
15# 15#
16# The 88SE64XX driver is distributed in the hope that it will be 16# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of 17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details. 19# General Public License for more details.
20# 20#
21# You should have received a copy of the GNU General Public License 21# You should have received a copy of the GNU General Public License
22# along with 88SE64XX Driver; if not, write to the Free Software 22# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24# 24#
25# 25#
26 26
27config SCSI_MVSAS 27config SCSI_MVSAS
28 tristate "Marvell 88SE64XX SAS/SATA support" 28 tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
29 depends on PCI 29 depends on PCI
30 select SCSI_SAS_LIBSAS 30 select SCSI_SAS_LIBSAS
31 select FW_LOADER 31 select FW_LOADER
32 help 32 help
33 This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX 33 This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
34 chip based host adapters. 34 PCI-E 88SE94XX chip based host adapters.
35 35
36config SCSI_MVSAS_DEBUG
37 bool "Compile in debug mode"
38 default y
39 depends on SCSI_MVSAS
40 help
41 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
42 the driver prints some messages to the console.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
index a1ca681e1a57..52ac4264677d 100644
--- a/drivers/scsi/mvsas/Makefile
+++ b/drivers/scsi/mvsas/Makefile
@@ -1,5 +1,5 @@
1# 1#
2# Makefile for Marvell 88SE64xx SAS/SATA driver. 2# Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver.
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
@@ -21,7 +21,12 @@
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA 22# USA
23 23
24ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DMV_DEBUG
26endif
27
24obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 28obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
25mvsas-y += mv_init.o \ 29mvsas-y += mv_init.o \
26 mv_sas.o \ 30 mv_sas.o \
27 mv_64xx.o 31 mv_64xx.o \
32 mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 697806c856af..10a5077b6aed 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -1,184 +1,793 @@
1/* 1/*
2 mv_64xx.c - Marvell 88SE6440 SAS/SATA support 2 * Marvell 88SE64xx hardware specific
3 3 *
4 Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 6 *
7 This program is free software; you can redistribute it and/or 7 * This file is licensed under GPLv2.
8 modify it under the terms of the GNU General Public License as 8 *
9 published by the Free Software Foundation; either version 2, 9 * This program is free software; you can redistribute it and/or
10 or (at your option) any later version. 10 * modify it under the terms of the GNU General Public License as
11 11 * published by the Free Software Foundation; version 2 of the
12 This program is distributed in the hope that it will be useful, 12 * License.
13 but WITHOUT ANY WARRANTY; without even the implied warranty 13 *
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 14 * This program is distributed in the hope that it will be useful,
15 See the GNU General Public License for more details. 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 You should have received a copy of the GNU General Public 17 * General Public License for more details.
18 License along with this program; see the file COPYING. If not, 18 *
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge, 19 * You should have received a copy of the GNU General Public License
20 MA 02139, USA. 20 * along with this program; if not, write to the Free Software
21 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 */ 22 * USA
23*/
23 24
24#include "mv_sas.h" 25#include "mv_sas.h"
25#include "mv_64xx.h" 26#include "mv_64xx.h"
26#include "mv_chips.h" 27#include "mv_chips.h"
27 28
28void mvs_detect_porttype(struct mvs_info *mvi, int i) 29static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
29{ 30{
30 void __iomem *regs = mvi->regs; 31 void __iomem *regs = mvi->regs;
31 u32 reg; 32 u32 reg;
32 struct mvs_phy *phy = &mvi->phy[i]; 33 struct mvs_phy *phy = &mvi->phy[i];
33 34
34 /* TODO check & save device type */ 35 /* TODO check & save device type */
35 reg = mr32(GBL_PORT_TYPE); 36 reg = mr32(MVS_GBL_PORT_TYPE);
36 37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
37 if (reg & MODE_SAS_SATA & (1 << i)) 38 if (reg & MODE_SAS_SATA & (1 << i))
38 phy->phy_type |= PORT_TYPE_SAS; 39 phy->phy_type |= PORT_TYPE_SAS;
39 else 40 else
40 phy->phy_type |= PORT_TYPE_SATA; 41 phy->phy_type |= PORT_TYPE_SATA;
41} 42}
42 43
43void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) 44static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
44{ 45{
45 void __iomem *regs = mvi->regs; 46 void __iomem *regs = mvi->regs;
46 u32 tmp; 47 u32 tmp;
47 48
48 tmp = mr32(PCS); 49 tmp = mr32(MVS_PCS);
49 if (mvi->chip->n_phy <= 4) 50 if (mvi->chip->n_phy <= 4)
50 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); 51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
52 else
53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
54 mw32(MVS_PCS, tmp);
55}
56
57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
58{
59 void __iomem *regs = mvi->regs;
60
61 mvs_phy_hacks(mvi);
62
63 if (!(mvi->flags & MVF_FLAG_SOC)) {
64 /* TEST - for phy decoding error, adjust voltage levels */
65 mw32(MVS_P0_VSR_ADDR + 0, 0x8);
66 mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
67
68 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
69 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
70
71 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
72 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
73
74 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
75 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
76 } else {
77 int i;
78 /* disable auto port detection */
79 mw32(MVS_GBL_PORT_TYPE, 0);
80 for (i = 0; i < mvi->chip->n_phy; i++) {
81 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
82 mvs_write_port_vsr_data(mvi, i, 0x90000000);
83 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
84 mvs_write_port_vsr_data(mvi, i, 0x50f2);
85 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
86 mvs_write_port_vsr_data(mvi, i, 0x0e);
87 }
88 }
89}
90
91static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
92{
93 void __iomem *regs = mvi->regs;
94 u32 reg, tmp;
95
96 if (!(mvi->flags & MVF_FLAG_SOC)) {
97 if (phy_id < 4)
98 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
99 else
100 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
101
102 } else
103 reg = mr32(MVS_PHY_CTL);
104
105 tmp = reg;
106 if (phy_id < 4)
107 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
51 else 108 else
52 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); 109 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
53 mw32(PCS, tmp); 110
111 if (!(mvi->flags & MVF_FLAG_SOC)) {
112 if (phy_id < 4) {
113 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
114 mdelay(10);
115 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
116 } else {
117 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
118 mdelay(10);
119 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
120 }
121 } else {
122 mw32(MVS_PHY_CTL, tmp);
123 mdelay(10);
124 mw32(MVS_PHY_CTL, reg);
125 }
126}
127
128static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
129{
130 u32 tmp;
131 tmp = mvs_read_port_irq_stat(mvi, phy_id);
132 tmp &= ~PHYEV_RDY_CH;
133 mvs_write_port_irq_stat(mvi, phy_id, tmp);
134 tmp = mvs_read_phy_ctl(mvi, phy_id);
135 if (hard)
136 tmp |= PHY_RST_HARD;
137 else
138 tmp |= PHY_RST;
139 mvs_write_phy_ctl(mvi, phy_id, tmp);
140 if (hard) {
141 do {
142 tmp = mvs_read_phy_ctl(mvi, phy_id);
143 } while (tmp & PHY_RST_HARD);
144 }
145}
146
147static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
148{
149 void __iomem *regs = mvi->regs;
150 u32 tmp;
151 int i;
152
153 /* make sure interrupts are masked immediately (paranoia) */
154 mw32(MVS_GBL_CTL, 0);
155 tmp = mr32(MVS_GBL_CTL);
156
157 /* Reset Controller */
158 if (!(tmp & HBA_RST)) {
159 if (mvi->flags & MVF_PHY_PWR_FIX) {
160 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
161 tmp &= ~PCTL_PWR_OFF;
162 tmp |= PCTL_PHY_DSBL;
163 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
164
165 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
166 tmp &= ~PCTL_PWR_OFF;
167 tmp |= PCTL_PHY_DSBL;
168 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
169 }
170 }
171
172 /* make sure interrupts are masked immediately (paranoia) */
173 mw32(MVS_GBL_CTL, 0);
174 tmp = mr32(MVS_GBL_CTL);
175
176 /* Reset Controller */
177 if (!(tmp & HBA_RST)) {
178 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
179 mw32_f(MVS_GBL_CTL, HBA_RST);
180 }
181
182 /* wait for reset to finish; timeout is just a guess */
183 i = 1000;
184 while (i-- > 0) {
185 msleep(10);
186
187 if (!(mr32(MVS_GBL_CTL) & HBA_RST))
188 break;
189 }
190 if (mr32(MVS_GBL_CTL) & HBA_RST) {
191 dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
192 return -EBUSY;
193 }
194 return 0;
54} 195}
55 196
56void __devinit mvs_phy_hacks(struct mvs_info *mvi) 197static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
57{ 198{
58 void __iomem *regs = mvi->regs; 199 void __iomem *regs = mvi->regs;
59 u32 tmp; 200 u32 tmp;
201 if (!(mvi->flags & MVF_FLAG_SOC)) {
202 u32 offs;
203 if (phy_id < 4)
204 offs = PCR_PHY_CTL;
205 else {
206 offs = PCR_PHY_CTL2;
207 phy_id -= 4;
208 }
209 pci_read_config_dword(mvi->pdev, offs, &tmp);
210 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
211 pci_write_config_dword(mvi->pdev, offs, tmp);
212 } else {
213 tmp = mr32(MVS_PHY_CTL);
214 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
215 mw32(MVS_PHY_CTL, tmp);
216 }
217}
218
219static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
220{
221 void __iomem *regs = mvi->regs;
222 u32 tmp;
223 if (!(mvi->flags & MVF_FLAG_SOC)) {
224 u32 offs;
225 if (phy_id < 4)
226 offs = PCR_PHY_CTL;
227 else {
228 offs = PCR_PHY_CTL2;
229 phy_id -= 4;
230 }
231 pci_read_config_dword(mvi->pdev, offs, &tmp);
232 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
233 pci_write_config_dword(mvi->pdev, offs, tmp);
234 } else {
235 tmp = mr32(MVS_PHY_CTL);
236 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
237 mw32(MVS_PHY_CTL, tmp);
238 }
239}
60 240
61 /* workaround for SATA R-ERR, to ignore phy glitch */ 241static int __devinit mvs_64xx_init(struct mvs_info *mvi)
62 tmp = mvs_cr32(regs, CMD_PHY_TIMER); 242{
63 tmp &= ~(1 << 9); 243 void __iomem *regs = mvi->regs;
64 tmp |= (1 << 10); 244 int i;
65 mvs_cw32(regs, CMD_PHY_TIMER, tmp); 245 u32 tmp, cctl;
246
247 if (mvi->pdev && mvi->pdev->revision == 0)
248 mvi->flags |= MVF_PHY_PWR_FIX;
249 if (!(mvi->flags & MVF_FLAG_SOC)) {
250 mvs_show_pcie_usage(mvi);
251 tmp = mvs_64xx_chip_reset(mvi);
252 if (tmp)
253 return tmp;
254 } else {
255 tmp = mr32(MVS_PHY_CTL);
256 tmp &= ~PCTL_PWR_OFF;
257 tmp |= PCTL_PHY_DSBL;
258 mw32(MVS_PHY_CTL, tmp);
259 }
66 260
67 /* enable retry 127 times */ 261 /* Init Chip */
68 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); 262 /* make sure RST is set; HBA_RST /should/ have done that for us */
263 cctl = mr32(MVS_CTL) & 0xFFFF;
264 if (cctl & CCTL_RST)
265 cctl &= ~CCTL_RST;
266 else
267 mw32_f(MVS_CTL, cctl | CCTL_RST);
268
269 if (!(mvi->flags & MVF_FLAG_SOC)) {
270 /* write to device control _AND_ device status register */
271 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
272 tmp &= ~PRD_REQ_MASK;
273 tmp |= PRD_REQ_SIZE;
274 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
275
276 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
277 tmp &= ~PCTL_PWR_OFF;
278 tmp &= ~PCTL_PHY_DSBL;
279 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
280
281 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
282 tmp &= PCTL_PWR_OFF;
283 tmp &= ~PCTL_PHY_DSBL;
284 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
285 } else {
286 tmp = mr32(MVS_PHY_CTL);
287 tmp &= ~PCTL_PWR_OFF;
288 tmp |= PCTL_COM_ON;
289 tmp &= ~PCTL_PHY_DSBL;
290 tmp |= PCTL_LINK_RST;
291 mw32(MVS_PHY_CTL, tmp);
292 msleep(100);
293 tmp &= ~PCTL_LINK_RST;
294 mw32(MVS_PHY_CTL, tmp);
295 msleep(100);
296 }
69 297
70 /* extend open frame timeout to max */ 298 /* reset control */
71 tmp = mvs_cr32(regs, CMD_SAS_CTL0); 299 mw32(MVS_PCS, 0); /* MVS_PCS */
72 tmp &= ~0xffff; 300 /* init phys */
73 tmp |= 0x3fff; 301 mvs_64xx_phy_hacks(mvi);
74 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
75 302
76 /* workaround for WDTIMEOUT , set to 550 ms */ 303 /* enable auto port detection */
77 mvs_cw32(regs, CMD_WD_TIMER, 0x86470); 304 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
78 305
79 /* not to halt for different port op during wideport link change */ 306 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
80 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); 307 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
81 308
82 /* workaround for Seagate disk not-found OOB sequence, recv 309 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
83 * COMINIT before sending out COMWAKE */ 310 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
84 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
85 tmp &= 0x0000ffff;
86 tmp |= 0x00fa0000;
87 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
88 311
89 tmp = mvs_cr32(regs, CMD_PHY_TIMER); 312 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
90 tmp &= 0x1fffffff; 313 mw32(MVS_TX_LO, mvi->tx_dma);
91 tmp |= (2U << 29); /* 8 ms retry */ 314 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
92 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
93 315
94 /* TEST - for phy decoding error, adjust voltage levels */ 316 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
95 mw32(P0_VSR_ADDR + 0, 0x8); 317 mw32(MVS_RX_LO, mvi->rx_dma);
96 mw32(P0_VSR_DATA + 0, 0x2F0); 318 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
97 319
98 mw32(P0_VSR_ADDR + 8, 0x8); 320 for (i = 0; i < mvi->chip->n_phy; i++) {
99 mw32(P0_VSR_DATA + 8, 0x2F0); 321 /* set phy local SAS address */
322 /* should set little endian SAS address to 64xx chip */
323 mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
324 cpu_to_be64(mvi->phy[i].dev_sas_addr));
100 325
101 mw32(P0_VSR_ADDR + 16, 0x8); 326 mvs_64xx_enable_xmt(mvi, i);
102 mw32(P0_VSR_DATA + 16, 0x2F0);
103 327
104 mw32(P0_VSR_ADDR + 24, 0x8); 328 mvs_64xx_phy_reset(mvi, i, 1);
105 mw32(P0_VSR_DATA + 24, 0x2F0); 329 msleep(500);
330 mvs_64xx_detect_porttype(mvi, i);
331 }
332 if (mvi->flags & MVF_FLAG_SOC) {
333 /* set select registers */
334 writel(0x0E008000, regs + 0x000);
335 writel(0x59000008, regs + 0x004);
336 writel(0x20, regs + 0x008);
337 writel(0x20, regs + 0x00c);
338 writel(0x20, regs + 0x010);
339 writel(0x20, regs + 0x014);
340 writel(0x20, regs + 0x018);
341 writel(0x20, regs + 0x01c);
342 }
343 for (i = 0; i < mvi->chip->n_phy; i++) {
344 /* clear phy int status */
345 tmp = mvs_read_port_irq_stat(mvi, i);
346 tmp &= ~PHYEV_SIG_FIS;
347 mvs_write_port_irq_stat(mvi, i, tmp);
348
349 /* set phy int mask */
350 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
351 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
352 PHYEV_DEC_ERR;
353 mvs_write_port_irq_mask(mvi, i, tmp);
354
355 msleep(100);
356 mvs_update_phyinfo(mvi, i, 1);
357 }
106 358
359 /* FIXME: update wide port bitmaps */
360
361 /* little endian for open address and command table, etc. */
362 /*
363 * it seems that ( from the spec ) turning on big-endian won't
364 * do us any good on big-endian machines, need further confirmation
365 */
366 cctl = mr32(MVS_CTL);
367 cctl |= CCTL_ENDIAN_CMD;
368 cctl |= CCTL_ENDIAN_DATA;
369 cctl &= ~CCTL_ENDIAN_OPEN;
370 cctl |= CCTL_ENDIAN_RSP;
371 mw32_f(MVS_CTL, cctl);
372
373 /* reset CMD queue */
374 tmp = mr32(MVS_PCS);
375 tmp |= PCS_CMD_RST;
376 mw32(MVS_PCS, tmp);
377 /* interrupt coalescing may cause missing HW interrput in some case,
378 * and the max count is 0x1ff, while our max slot is 0x200,
379 * it will make count 0.
380 */
381 tmp = 0;
382 mw32(MVS_INT_COAL, tmp);
383
384 tmp = 0x100;
385 mw32(MVS_INT_COAL_TMOUT, tmp);
386
387 /* ladies and gentlemen, start your engines */
388 mw32(MVS_TX_CFG, 0);
389 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
390 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
391 /* enable CMD/CMPL_Q/RESP mode */
392 mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
393 PCS_CMD_EN | PCS_CMD_STOP_ERR);
394
395 /* enable completion queue interrupt */
396 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
397 CINT_DMA_PCIE);
398
399 mw32(MVS_INT_MASK, tmp);
400
401 /* Enable SRS interrupt */
402 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
403
404 return 0;
107} 405}
108 406
109void mvs_hba_interrupt_enable(struct mvs_info *mvi) 407static int mvs_64xx_ioremap(struct mvs_info *mvi)
408{
409 if (!mvs_ioremap(mvi, 4, 2))
410 return 0;
411 return -1;
412}
413
414static void mvs_64xx_iounmap(struct mvs_info *mvi)
415{
416 mvs_iounmap(mvi->regs);
417 mvs_iounmap(mvi->regs_ex);
418}
419
420static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
110{ 421{
111 void __iomem *regs = mvi->regs; 422 void __iomem *regs = mvi->regs;
112 u32 tmp; 423 u32 tmp;
113 424
114 tmp = mr32(GBL_CTL); 425 tmp = mr32(MVS_GBL_CTL);
426 mw32(MVS_GBL_CTL, tmp | INT_EN);
427}
115 428
116 mw32(GBL_CTL, tmp | INT_EN); 429static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
430{
431 void __iomem *regs = mvi->regs;
432 u32 tmp;
433
434 tmp = mr32(MVS_GBL_CTL);
435 mw32(MVS_GBL_CTL, tmp & ~INT_EN);
117} 436}
118 437
119void mvs_hba_interrupt_disable(struct mvs_info *mvi) 438static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
120{ 439{
121 void __iomem *regs = mvi->regs; 440 void __iomem *regs = mvi->regs;
441 u32 stat;
442
443 if (!(mvi->flags & MVF_FLAG_SOC)) {
444 stat = mr32(MVS_GBL_INT_STAT);
445
446 if (stat == 0 || stat == 0xffffffff)
447 return 0;
448 } else
449 stat = 1;
450 return stat;
451}
452
453static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
454{
455 void __iomem *regs = mvi->regs;
456
457 /* clear CMD_CMPLT ASAP */
458 mw32_f(MVS_INT_STAT, CINT_DONE);
459#ifndef MVS_USE_TASKLET
460 spin_lock(&mvi->lock);
461#endif
462 mvs_int_full(mvi);
463#ifndef MVS_USE_TASKLET
464 spin_unlock(&mvi->lock);
465#endif
466 return IRQ_HANDLED;
467}
468
469static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
470{
122 u32 tmp; 471 u32 tmp;
472 mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
473 mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
474 do {
475 tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
476 } while (tmp & 1 << (slot_idx % 32));
477 do {
478 tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
479 } while (tmp & 1 << (slot_idx % 32));
480}
123 481
124 tmp = mr32(GBL_CTL); 482static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
483 u32 tfs)
484{
485 void __iomem *regs = mvi->regs;
486 u32 tmp;
125 487
126 mw32(GBL_CTL, tmp & ~INT_EN); 488 if (type == PORT_TYPE_SATA) {
489 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
490 mw32(MVS_INT_STAT_SRS_0, tmp);
491 }
492 mw32(MVS_INT_STAT, CINT_CI_STOP);
493 tmp = mr32(MVS_PCS) | 0xFF00;
494 mw32(MVS_PCS, tmp);
127} 495}
128 496
129void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) 497static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
130{ 498{
131 void __iomem *regs = mvi->regs; 499 void __iomem *regs = mvi->regs;
132 u32 tmp, offs; 500 u32 tmp, offs;
133 u8 *tfs = &port->taskfileset;
134 501
135 if (*tfs == MVS_ID_NOT_MAPPED) 502 if (*tfs == MVS_ID_NOT_MAPPED)
136 return; 503 return;
137 504
138 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); 505 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
139 if (*tfs < 16) { 506 if (*tfs < 16) {
140 tmp = mr32(PCS); 507 tmp = mr32(MVS_PCS);
141 mw32(PCS, tmp & ~offs); 508 mw32(MVS_PCS, tmp & ~offs);
142 } else { 509 } else {
143 tmp = mr32(CTL); 510 tmp = mr32(MVS_CTL);
144 mw32(CTL, tmp & ~offs); 511 mw32(MVS_CTL, tmp & ~offs);
145 } 512 }
146 513
147 tmp = mr32(INT_STAT_SRS) & (1U << *tfs); 514 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
148 if (tmp) 515 if (tmp)
149 mw32(INT_STAT_SRS, tmp); 516 mw32(MVS_INT_STAT_SRS_0, tmp);
150 517
151 *tfs = MVS_ID_NOT_MAPPED; 518 *tfs = MVS_ID_NOT_MAPPED;
519 return;
152} 520}
153 521
154u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) 522static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
155{ 523{
156 int i; 524 int i;
157 u32 tmp, offs; 525 u32 tmp, offs;
158 void __iomem *regs = mvi->regs; 526 void __iomem *regs = mvi->regs;
159 527
160 if (port->taskfileset != MVS_ID_NOT_MAPPED) 528 if (*tfs != MVS_ID_NOT_MAPPED)
161 return 0; 529 return 0;
162 530
163 tmp = mr32(PCS); 531 tmp = mr32(MVS_PCS);
164 532
165 for (i = 0; i < mvi->chip->srs_sz; i++) { 533 for (i = 0; i < mvi->chip->srs_sz; i++) {
166 if (i == 16) 534 if (i == 16)
167 tmp = mr32(CTL); 535 tmp = mr32(MVS_CTL);
168 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); 536 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
169 if (!(tmp & offs)) { 537 if (!(tmp & offs)) {
170 port->taskfileset = i; 538 *tfs = i;
171 539
172 if (i < 16) 540 if (i < 16)
173 mw32(PCS, tmp | offs); 541 mw32(MVS_PCS, tmp | offs);
174 else 542 else
175 mw32(CTL, tmp | offs); 543 mw32(MVS_CTL, tmp | offs);
176 tmp = mr32(INT_STAT_SRS) & (1U << i); 544 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
177 if (tmp) 545 if (tmp)
178 mw32(INT_STAT_SRS, tmp); 546 mw32(MVS_INT_STAT_SRS_0, tmp);
179 return 0; 547 return 0;
180 } 548 }
181 } 549 }
182 return MVS_ID_NOT_MAPPED; 550 return MVS_ID_NOT_MAPPED;
183} 551}
184 552
553void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
554{
555 int i;
556 struct scatterlist *sg;
557 struct mvs_prd *buf_prd = prd;
558 for_each_sg(scatter, sg, nr, i) {
559 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
560 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
561 buf_prd++;
562 }
563}
564
565static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
566{
567 u32 phy_st;
568 mvs_write_port_cfg_addr(mvi, i,
569 PHYR_PHY_STAT);
570 phy_st = mvs_read_port_cfg_data(mvi, i);
571 if (phy_st & PHY_OOB_DTCTD)
572 return 1;
573 return 0;
574}
575
576static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
577 struct sas_identify_frame *id)
578
579{
580 struct mvs_phy *phy = &mvi->phy[i];
581 struct asd_sas_phy *sas_phy = &phy->sas_phy;
582
583 sas_phy->linkrate =
584 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
585 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
586
587 phy->minimum_linkrate =
588 (phy->phy_status &
589 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
590 phy->maximum_linkrate =
591 (phy->phy_status &
592 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
593
594 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
595 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
596
597 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
598 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
599
600 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
601 phy->att_dev_sas_addr =
602 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
603 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
604 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
605 phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
606}
607
608static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
609{
610 u32 tmp;
611 struct mvs_phy *phy = &mvi->phy[i];
612 /* workaround for HW phy decoding error on 1.5g disk drive */
613 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
614 tmp = mvs_read_port_vsr_data(mvi, i);
615 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
616 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
617 SAS_LINK_RATE_1_5_GBPS)
618 tmp &= ~PHY_MODE6_LATECLK;
619 else
620 tmp |= PHY_MODE6_LATECLK;
621 mvs_write_port_vsr_data(mvi, i, tmp);
622}
623
624void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
625 struct sas_phy_linkrates *rates)
626{
627 u32 lrmin = 0, lrmax = 0;
628 u32 tmp;
629
630 tmp = mvs_read_phy_ctl(mvi, phy_id);
631 lrmin = (rates->minimum_linkrate << 8);
632 lrmax = (rates->maximum_linkrate << 12);
633
634 if (lrmin) {
635 tmp &= ~(0xf << 8);
636 tmp |= lrmin;
637 }
638 if (lrmax) {
639 tmp &= ~(0xf << 12);
640 tmp |= lrmax;
641 }
642 mvs_write_phy_ctl(mvi, phy_id, tmp);
643 mvs_64xx_phy_reset(mvi, phy_id, 1);
644}
645
646static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
647{
648 u32 tmp;
649 void __iomem *regs = mvi->regs;
650 tmp = mr32(MVS_PCS);
651 mw32(MVS_PCS, tmp & 0xFFFF);
652 mw32(MVS_PCS, tmp);
653 tmp = mr32(MVS_CTL);
654 mw32(MVS_CTL, tmp & 0xFFFF);
655 mw32(MVS_CTL, tmp);
656}
657
658
659u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
660{
661 void __iomem *regs = mvi->regs_ex;
662 return ior32(SPI_DATA_REG_64XX);
663}
664
665void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
666{
667 void __iomem *regs = mvi->regs_ex;
668 iow32(SPI_DATA_REG_64XX, data);
669}
670
671
672int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
673 u32 *dwCmd,
674 u8 cmd,
675 u8 read,
676 u8 length,
677 u32 addr
678 )
679{
680 u32 dwTmp;
681
682 dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
683 if (read)
684 dwTmp |= 1U<<23;
685
686 if (addr != MV_MAX_U32) {
687 dwTmp |= 1U<<22;
688 dwTmp |= (addr & 0x0003FFFF);
689 }
690
691 *dwCmd = dwTmp;
692 return 0;
693}
694
695
696int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
697{
698 void __iomem *regs = mvi->regs_ex;
699 int retry;
700
701 for (retry = 0; retry < 1; retry++) {
702 iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
703 iow32(SPI_CMD_REG_64XX, cmd);
704 iow32(SPI_CTRL_REG_64XX,
705 SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
706 }
707
708 return 0;
709}
710
711int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
712{
713 void __iomem *regs = mvi->regs_ex;
714 u32 i, dwTmp;
715
716 for (i = 0; i < timeout; i++) {
717 dwTmp = ior32(SPI_CTRL_REG_64XX);
718 if (!(dwTmp & SPI_CTRL_SPISTART))
719 return 0;
720 msleep(10);
721 }
722
723 return -1;
724}
725
726#ifndef DISABLE_HOTPLUG_DMA_FIX
727void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
728{
729 int i;
730 struct mvs_prd *buf_prd = prd;
731 buf_prd += from;
732 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
733 buf_prd->addr = cpu_to_le64(buf_dma);
734 buf_prd->len = cpu_to_le32(buf_len);
735 ++buf_prd;
736 }
737}
738#endif
739
740const struct mvs_dispatch mvs_64xx_dispatch = {
741 "mv64xx",
742 mvs_64xx_init,
743 NULL,
744 mvs_64xx_ioremap,
745 mvs_64xx_iounmap,
746 mvs_64xx_isr,
747 mvs_64xx_isr_status,
748 mvs_64xx_interrupt_enable,
749 mvs_64xx_interrupt_disable,
750 mvs_read_phy_ctl,
751 mvs_write_phy_ctl,
752 mvs_read_port_cfg_data,
753 mvs_write_port_cfg_data,
754 mvs_write_port_cfg_addr,
755 mvs_read_port_vsr_data,
756 mvs_write_port_vsr_data,
757 mvs_write_port_vsr_addr,
758 mvs_read_port_irq_stat,
759 mvs_write_port_irq_stat,
760 mvs_read_port_irq_mask,
761 mvs_write_port_irq_mask,
762 mvs_get_sas_addr,
763 mvs_64xx_command_active,
764 mvs_64xx_issue_stop,
765 mvs_start_delivery,
766 mvs_rx_update,
767 mvs_int_full,
768 mvs_64xx_assign_reg_set,
769 mvs_64xx_free_reg_set,
770 mvs_get_prd_size,
771 mvs_get_prd_count,
772 mvs_64xx_make_prd,
773 mvs_64xx_detect_porttype,
774 mvs_64xx_oob_done,
775 mvs_64xx_fix_phy_info,
776 mvs_64xx_phy_work_around,
777 mvs_64xx_phy_set_link_rate,
778 mvs_hw_max_link_rate,
779 mvs_64xx_phy_disable,
780 mvs_64xx_phy_enable,
781 mvs_64xx_phy_reset,
782 mvs_64xx_stp_reset,
783 mvs_64xx_clear_active_cmds,
784 mvs_64xx_spi_read_data,
785 mvs_64xx_spi_write_data,
786 mvs_64xx_spi_buildcmd,
787 mvs_64xx_spi_issuecmd,
788 mvs_64xx_spi_waitdataready,
789#ifndef DISABLE_HOTPLUG_DMA_FIX
790 mvs_64xx_fix_dma,
791#endif
792};
793
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
index c9f399ebc926..42e947d9795e 100644
--- a/drivers/scsi/mvsas/mv_64xx.h
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -1,11 +1,43 @@
1/*
2 * Marvell 88SE64xx hardware specific head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
1#ifndef _MVS64XX_REG_H_ 25#ifndef _MVS64XX_REG_H_
2#define _MVS64XX_REG_H_ 26#define _MVS64XX_REG_H_
3 27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
31
4/* enhanced mode registers (BAR4) */ 32/* enhanced mode registers (BAR4) */
5enum hw_registers { 33enum hw_registers {
6 MVS_GBL_CTL = 0x04, /* global control */ 34 MVS_GBL_CTL = 0x04, /* global control */
7 MVS_GBL_INT_STAT = 0x08, /* global irq status */ 35 MVS_GBL_INT_STAT = 0x08, /* global irq status */
8 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ 36 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
37
38 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
39 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
40
9 MVS_GBL_PORT_TYPE = 0xa0, /* port type */ 41 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
10 42
11 MVS_CTL = 0x100, /* SAS/SATA port configuration */ 43 MVS_CTL = 0x100, /* SAS/SATA port configuration */
@@ -30,17 +62,19 @@ enum hw_registers {
30 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ 62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
31 MVS_INT_STAT = 0x150, /* Central int status */ 63 MVS_INT_STAT = 0x150, /* Central int status */
32 MVS_INT_MASK = 0x154, /* Central int enable */ 64 MVS_INT_MASK = 0x154, /* Central int enable */
33 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ 65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
34 MVS_INT_MASK_SRS = 0x15C, 66 MVS_INT_MASK_SRS_0 = 0x15C,
35 67
36 /* ports 1-3 follow after this */ 68 /* ports 1-3 follow after this */
37 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ 69 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
38 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ 70 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
39 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ 71 /* ports 5-7 follow after this */
40 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ 72 MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
73 MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
41 74
42 /* ports 1-3 follow after this */ 75 /* ports 1-3 follow after this */
43 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ 76 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
77 /* ports 5-7 follow after this */
44 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ 78 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
45 79
46 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ 80 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
@@ -49,20 +83,23 @@ enum hw_registers {
49 /* ports 1-3 follow after this */ 83 /* ports 1-3 follow after this */
50 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ 84 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
51 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ 85 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
52 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ 86 /* ports 5-7 follow after this */
53 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ 87 MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
88 MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
54 89
55 /* ports 1-3 follow after this */ 90 /* ports 1-3 follow after this */
56 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ 91 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
57 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ 92 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
58 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ 93 /* ports 5-7 follow after this */
59 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ 94 MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
95 MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
60}; 96};
61 97
62enum pci_cfg_registers { 98enum pci_cfg_registers {
63 PCR_PHY_CTL = 0x40, 99 PCR_PHY_CTL = 0x40,
64 PCR_PHY_CTL2 = 0x90, 100 PCR_PHY_CTL2 = 0x90,
65 PCR_DEV_CTRL = 0xE8, 101 PCR_DEV_CTRL = 0xE8,
102 PCR_LINK_STAT = 0xF2,
66}; 103};
67 104
68/* SAS/SATA Vendor Specific Port Registers */ 105/* SAS/SATA Vendor Specific Port Registers */
@@ -83,10 +120,32 @@ enum sas_sata_vsp_regs {
83 VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ 120 VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */
84}; 121};
85 122
123enum chip_register_bits {
124 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
125 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
126 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
127 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
128 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
129};
130
131#define MAX_SG_ENTRY 64
132
86struct mvs_prd { 133struct mvs_prd {
87 __le64 addr; /* 64-bit buffer address */ 134 __le64 addr; /* 64-bit buffer address */
88 __le32 reserved; 135 __le32 reserved;
89 __le32 len; /* 16-bit length */ 136 __le32 len; /* 16-bit length */
90}; 137};
91 138
139#define SPI_CTRL_REG 0xc0
140#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
141#define SPI_CTRL_SPIRDY (1U<<22)
142#define SPI_CTRL_SPISTART (1U<<20)
143
144#define SPI_CMD_REG 0xc4
145#define SPI_DATA_REG 0xc8
146
147#define SPI_CTRL_REG_64XX 0x10
148#define SPI_CMD_REG_64XX 0x14
149#define SPI_DATA_REG_64XX 0x18
150
92#endif 151#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 000000000000..0940fae19d20
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
1/*
2 * Marvell 88SE94xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_94xx.h"
27#include "mv_chips.h"
28
29static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 u32 reg;
32 struct mvs_phy *phy = &mvi->phy[i];
33 u32 phy_status;
34
35 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
36 reg = mvs_read_port_vsr_data(mvi, i);
37 phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 switch (phy_status) {
40 case 0x10:
41 phy->phy_type |= PORT_TYPE_SAS;
42 break;
43 case 0x1d:
44 default:
45 phy->phy_type |= PORT_TYPE_SATA;
46 break;
47 }
48}
49
50static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
51{
52 void __iomem *regs = mvi->regs;
53 u32 tmp;
54
55 tmp = mr32(MVS_PCS);
56 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
57 mw32(MVS_PCS, tmp);
58}
59
60static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
61{
62 u32 tmp;
63
64 tmp = mvs_read_port_irq_stat(mvi, phy_id);
65 tmp &= ~PHYEV_RDY_CH;
66 mvs_write_port_irq_stat(mvi, phy_id, tmp);
67 if (hard) {
68 tmp = mvs_read_phy_ctl(mvi, phy_id);
69 tmp |= PHY_RST_HARD;
70 mvs_write_phy_ctl(mvi, phy_id, tmp);
71 do {
72 tmp = mvs_read_phy_ctl(mvi, phy_id);
73 } while (tmp & PHY_RST_HARD);
74 } else {
75 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
76 tmp = mvs_read_port_vsr_data(mvi, phy_id);
77 tmp |= PHY_RST;
78 mvs_write_port_vsr_data(mvi, phy_id, tmp);
79 }
80}
81
82static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
83{
84 u32 tmp;
85 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
86 tmp = mvs_read_port_vsr_data(mvi, phy_id);
87 mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
88}
89
90static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
91{
92 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
93 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
94 mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
95 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
96 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
97 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
98}
99
100static int __devinit mvs_94xx_init(struct mvs_info *mvi)
101{
102 void __iomem *regs = mvi->regs;
103 int i;
104 u32 tmp, cctl;
105
106 mvs_show_pcie_usage(mvi);
107 if (mvi->flags & MVF_FLAG_SOC) {
108 tmp = mr32(MVS_PHY_CTL);
109 tmp &= ~PCTL_PWR_OFF;
110 tmp |= PCTL_PHY_DSBL;
111 mw32(MVS_PHY_CTL, tmp);
112 }
113
114 /* Init Chip */
115 /* make sure RST is set; HBA_RST /should/ have done that for us */
116 cctl = mr32(MVS_CTL) & 0xFFFF;
117 if (cctl & CCTL_RST)
118 cctl &= ~CCTL_RST;
119 else
120 mw32_f(MVS_CTL, cctl | CCTL_RST);
121
122 if (mvi->flags & MVF_FLAG_SOC) {
123 tmp = mr32(MVS_PHY_CTL);
124 tmp &= ~PCTL_PWR_OFF;
125 tmp |= PCTL_COM_ON;
126 tmp &= ~PCTL_PHY_DSBL;
127 tmp |= PCTL_LINK_RST;
128 mw32(MVS_PHY_CTL, tmp);
129 msleep(100);
130 tmp &= ~PCTL_LINK_RST;
131 mw32(MVS_PHY_CTL, tmp);
132 msleep(100);
133 }
134
135 /* reset control */
136 mw32(MVS_PCS, 0); /* MVS_PCS */
137 mw32(MVS_STP_REG_SET_0, 0);
138 mw32(MVS_STP_REG_SET_1, 0);
139
140 /* init phys */
141 mvs_phy_hacks(mvi);
142
143 /* disable Multiplexing, enable phy implemented */
144 mw32(MVS_PORTS_IMP, 0xFF);
145
146
147 mw32(MVS_PA_VSR_ADDR, 0x00000104);
148 mw32(MVS_PA_VSR_PORT, 0x00018080);
149 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
150 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
151
152 /* set LED blink when IO*/
153 mw32(MVS_PA_VSR_ADDR, 0x00000030);
154 tmp = mr32(MVS_PA_VSR_PORT);
155 tmp &= 0xFFFF00FF;
156 tmp |= 0x00003300;
157 mw32(MVS_PA_VSR_PORT, tmp);
158
159 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
160 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
161
162 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
163 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
164
165 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
166 mw32(MVS_TX_LO, mvi->tx_dma);
167 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
168
169 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
170 mw32(MVS_RX_LO, mvi->rx_dma);
171 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
172
173 for (i = 0; i < mvi->chip->n_phy; i++) {
174 mvs_94xx_phy_disable(mvi, i);
175 /* set phy local SAS address */
176 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
177 (mvi->phy[i].dev_sas_addr));
178
179 mvs_94xx_enable_xmt(mvi, i);
180 mvs_94xx_phy_enable(mvi, i);
181
182 mvs_94xx_phy_reset(mvi, i, 1);
183 msleep(500);
184 mvs_94xx_detect_porttype(mvi, i);
185 }
186
187 if (mvi->flags & MVF_FLAG_SOC) {
188 /* set select registers */
189 writel(0x0E008000, regs + 0x000);
190 writel(0x59000008, regs + 0x004);
191 writel(0x20, regs + 0x008);
192 writel(0x20, regs + 0x00c);
193 writel(0x20, regs + 0x010);
194 writel(0x20, regs + 0x014);
195 writel(0x20, regs + 0x018);
196 writel(0x20, regs + 0x01c);
197 }
198 for (i = 0; i < mvi->chip->n_phy; i++) {
199 /* clear phy int status */
200 tmp = mvs_read_port_irq_stat(mvi, i);
201 tmp &= ~PHYEV_SIG_FIS;
202 mvs_write_port_irq_stat(mvi, i, tmp);
203
204 /* set phy int mask */
205 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
206 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
207 mvs_write_port_irq_mask(mvi, i, tmp);
208
209 msleep(100);
210 mvs_update_phyinfo(mvi, i, 1);
211 }
212
213 /* FIXME: update wide port bitmaps */
214
215 /* little endian for open address and command table, etc. */
216 /*
217 * it seems that ( from the spec ) turning on big-endian won't
218 * do us any good on big-endian machines, need further confirmation
219 */
220 cctl = mr32(MVS_CTL);
221 cctl |= CCTL_ENDIAN_CMD;
222 cctl |= CCTL_ENDIAN_DATA;
223 cctl &= ~CCTL_ENDIAN_OPEN;
224 cctl |= CCTL_ENDIAN_RSP;
225 mw32_f(MVS_CTL, cctl);
226
227 /* reset CMD queue */
228 tmp = mr32(MVS_PCS);
229 tmp |= PCS_CMD_RST;
230 mw32(MVS_PCS, tmp);
231 /* interrupt coalescing may cause missing HW interrput in some case,
232 * and the max count is 0x1ff, while our max slot is 0x200,
233 * it will make count 0.
234 */
235 tmp = 0;
236 mw32(MVS_INT_COAL, tmp);
237
238 tmp = 0x100;
239 mw32(MVS_INT_COAL_TMOUT, tmp);
240
241 /* ladies and gentlemen, start your engines */
242 mw32(MVS_TX_CFG, 0);
243 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
244 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
245 /* enable CMD/CMPL_Q/RESP mode */
246 mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
247 PCS_CMD_EN | PCS_CMD_STOP_ERR);
248
249 /* enable completion queue interrupt */
250 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
251 CINT_DMA_PCIE);
252 tmp |= CINT_PHY_MASK;
253 mw32(MVS_INT_MASK, tmp);
254
255 /* Enable SRS interrupt */
256 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
257
258 return 0;
259}
260
261static int mvs_94xx_ioremap(struct mvs_info *mvi)
262{
263 if (!mvs_ioremap(mvi, 2, -1)) {
264 mvi->regs_ex = mvi->regs + 0x10200;
265 mvi->regs += 0x20000;
266 if (mvi->id == 1)
267 mvi->regs += 0x4000;
268 return 0;
269 }
270 return -1;
271}
272
273static void mvs_94xx_iounmap(struct mvs_info *mvi)
274{
275 if (mvi->regs) {
276 mvi->regs -= 0x20000;
277 if (mvi->id == 1)
278 mvi->regs -= 0x4000;
279 mvs_iounmap(mvi->regs);
280 }
281}
282
283static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
284{
285 void __iomem *regs = mvi->regs_ex;
286 u32 tmp;
287
288 tmp = mr32(MVS_GBL_CTL);
289 tmp |= (IRQ_SAS_A | IRQ_SAS_B);
290 mw32(MVS_GBL_INT_STAT, tmp);
291 writel(tmp, regs + 0x0C);
292 writel(tmp, regs + 0x10);
293 writel(tmp, regs + 0x14);
294 writel(tmp, regs + 0x18);
295 mw32(MVS_GBL_CTL, tmp);
296}
297
298static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
299{
300 void __iomem *regs = mvi->regs_ex;
301 u32 tmp;
302
303 tmp = mr32(MVS_GBL_CTL);
304
305 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
306 mw32(MVS_GBL_INT_STAT, tmp);
307 writel(tmp, regs + 0x0C);
308 writel(tmp, regs + 0x10);
309 writel(tmp, regs + 0x14);
310 writel(tmp, regs + 0x18);
311 mw32(MVS_GBL_CTL, tmp);
312}
313
314static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
315{
316 void __iomem *regs = mvi->regs_ex;
317 u32 stat = 0;
318 if (!(mvi->flags & MVF_FLAG_SOC)) {
319 stat = mr32(MVS_GBL_INT_STAT);
320
321 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
322 return 0;
323 }
324 return stat;
325}
326
327static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
328{
329 void __iomem *regs = mvi->regs;
330
331 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
332 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
333 mw32_f(MVS_INT_STAT, CINT_DONE);
334 #ifndef MVS_USE_TASKLET
335 spin_lock(&mvi->lock);
336 #endif
337 mvs_int_full(mvi);
338 #ifndef MVS_USE_TASKLET
339 spin_unlock(&mvi->lock);
340 #endif
341 }
342 return IRQ_HANDLED;
343}
344
345static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
346{
347 u32 tmp;
348 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
349 do {
350 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
351 } while (tmp & 1 << (slot_idx % 32));
352}
353
354static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
355 u32 tfs)
356{
357 void __iomem *regs = mvi->regs;
358 u32 tmp;
359
360 if (type == PORT_TYPE_SATA) {
361 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
362 mw32(MVS_INT_STAT_SRS_0, tmp);
363 }
364 mw32(MVS_INT_STAT, CINT_CI_STOP);
365 tmp = mr32(MVS_PCS) | 0xFF00;
366 mw32(MVS_PCS, tmp);
367}
368
369static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
370{
371 void __iomem *regs = mvi->regs;
372 u32 tmp;
373 u8 reg_set = *tfs;
374
375 if (*tfs == MVS_ID_NOT_MAPPED)
376 return;
377
378 mvi->sata_reg_set &= ~bit(reg_set);
379 if (reg_set < 32) {
380 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
381 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
382 if (tmp)
383 mw32(MVS_INT_STAT_SRS_0, tmp);
384 } else {
385 w_reg_set_enable(reg_set, mvi->sata_reg_set);
386 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
387 if (tmp)
388 mw32(MVS_INT_STAT_SRS_1, tmp);
389 }
390
391 *tfs = MVS_ID_NOT_MAPPED;
392
393 return;
394}
395
396static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
397{
398 int i;
399 void __iomem *regs = mvi->regs;
400
401 if (*tfs != MVS_ID_NOT_MAPPED)
402 return 0;
403
404 i = mv_ffc64(mvi->sata_reg_set);
405 if (i > 32) {
406 mvi->sata_reg_set |= bit(i);
407 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
408 *tfs = i;
409 return 0;
410 } else if (i >= 0) {
411 mvi->sata_reg_set |= bit(i);
412 w_reg_set_enable(i, (u32)mvi->sata_reg_set);
413 *tfs = i;
414 return 0;
415 }
416 return MVS_ID_NOT_MAPPED;
417}
418
419static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
420{
421 int i;
422 struct scatterlist *sg;
423 struct mvs_prd *buf_prd = prd;
424 for_each_sg(scatter, sg, nr, i) {
425 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
426 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
427 buf_prd++;
428 }
429}
430
431static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
432{
433 u32 phy_st;
434 phy_st = mvs_read_phy_ctl(mvi, i);
435 if (phy_st & PHY_READY_MASK) /* phy ready */
436 return 1;
437 return 0;
438}
439
440static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
441 struct sas_identify_frame *id)
442{
443 int i;
444 u32 id_frame[7];
445
446 for (i = 0; i < 7; i++) {
447 mvs_write_port_cfg_addr(mvi, port_id,
448 CONFIG_ID_FRAME0 + i * 4);
449 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
450 }
451 memcpy(id, id_frame, 28);
452}
453
454static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
455 struct sas_identify_frame *id)
456{
457 int i;
458 u32 id_frame[7];
459
460 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
461 for (i = 0; i < 7; i++) {
462 mvs_write_port_cfg_addr(mvi, port_id,
463 CONFIG_ATT_ID_FRAME0 + i * 4);
464 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
465 mv_dprintk("94xx phy %d atta frame %d %x.\n",
466 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
467 }
468 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
469 memcpy(id, id_frame, 28);
470}
471
472static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
473{
474 u32 att_dev_info = 0;
475
476 att_dev_info |= id->dev_type;
477 if (id->stp_iport)
478 att_dev_info |= PORT_DEV_STP_INIT;
479 if (id->smp_iport)
480 att_dev_info |= PORT_DEV_SMP_INIT;
481 if (id->ssp_iport)
482 att_dev_info |= PORT_DEV_SSP_INIT;
483 if (id->stp_tport)
484 att_dev_info |= PORT_DEV_STP_TRGT;
485 if (id->smp_tport)
486 att_dev_info |= PORT_DEV_SMP_TRGT;
487 if (id->ssp_tport)
488 att_dev_info |= PORT_DEV_SSP_TRGT;
489
490 att_dev_info |= (u32)id->phy_id<<24;
491 return att_dev_info;
492}
493
494static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
495{
496 return mvs_94xx_make_dev_info(id);
497}
498
499static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
500 struct sas_identify_frame *id)
501{
502 struct mvs_phy *phy = &mvi->phy[i];
503 struct asd_sas_phy *sas_phy = &phy->sas_phy;
504 mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
505 sas_phy->linkrate =
506 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
507 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
508 sas_phy->linkrate += 0x8;
509 mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
510 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
511 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
512 mvs_94xx_get_dev_identify_frame(mvi, i, id);
513 phy->dev_info = mvs_94xx_make_dev_info(id);
514
515 if (phy->phy_type & PORT_TYPE_SAS) {
516 mvs_94xx_get_att_identify_frame(mvi, i, id);
517 phy->att_dev_info = mvs_94xx_make_att_info(id);
518 phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
519 } else {
520 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
521 }
522
523}
524
525void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
526 struct sas_phy_linkrates *rates)
527{
528 /* TODO */
529}
530
531static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
532{
533 u32 tmp;
534 void __iomem *regs = mvi->regs;
535 tmp = mr32(MVS_STP_REG_SET_0);
536 mw32(MVS_STP_REG_SET_0, 0);
537 mw32(MVS_STP_REG_SET_0, tmp);
538 tmp = mr32(MVS_STP_REG_SET_1);
539 mw32(MVS_STP_REG_SET_1, 0);
540 mw32(MVS_STP_REG_SET_1, tmp);
541}
542
543
544u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
545{
546 void __iomem *regs = mvi->regs_ex - 0x10200;
547 return mr32(SPI_RD_DATA_REG_94XX);
548}
549
550void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
551{
552 void __iomem *regs = mvi->regs_ex - 0x10200;
553 mw32(SPI_RD_DATA_REG_94XX, data);
554}
555
556
557int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
558 u32 *dwCmd,
559 u8 cmd,
560 u8 read,
561 u8 length,
562 u32 addr
563 )
564{
565 void __iomem *regs = mvi->regs_ex - 0x10200;
566 u32 dwTmp;
567
568 dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
569 if (read)
570 dwTmp |= SPI_CTRL_READ_94XX;
571
572 if (addr != MV_MAX_U32) {
573 mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
574 dwTmp |= SPI_ADDR_VLD_94XX;
575 }
576
577 *dwCmd = dwTmp;
578 return 0;
579}
580
581
582int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
583{
584 void __iomem *regs = mvi->regs_ex - 0x10200;
585 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
586
587 return 0;
588}
589
590int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
591{
592 void __iomem *regs = mvi->regs_ex - 0x10200;
593 u32 i, dwTmp;
594
595 for (i = 0; i < timeout; i++) {
596 dwTmp = mr32(SPI_CTRL_REG_94XX);
597 if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
598 return 0;
599 msleep(10);
600 }
601
602 return -1;
603}
604
605#ifndef DISABLE_HOTPLUG_DMA_FIX
606void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
607{
608 int i;
609 struct mvs_prd *buf_prd = prd;
610 buf_prd += from;
611 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
612 buf_prd->addr = cpu_to_le64(buf_dma);
613 buf_prd->im_len.len = cpu_to_le32(buf_len);
614 ++buf_prd;
615 }
616}
617#endif
618
619const struct mvs_dispatch mvs_94xx_dispatch = {
620 "mv94xx",
621 mvs_94xx_init,
622 NULL,
623 mvs_94xx_ioremap,
624 mvs_94xx_iounmap,
625 mvs_94xx_isr,
626 mvs_94xx_isr_status,
627 mvs_94xx_interrupt_enable,
628 mvs_94xx_interrupt_disable,
629 mvs_read_phy_ctl,
630 mvs_write_phy_ctl,
631 mvs_read_port_cfg_data,
632 mvs_write_port_cfg_data,
633 mvs_write_port_cfg_addr,
634 mvs_read_port_vsr_data,
635 mvs_write_port_vsr_data,
636 mvs_write_port_vsr_addr,
637 mvs_read_port_irq_stat,
638 mvs_write_port_irq_stat,
639 mvs_read_port_irq_mask,
640 mvs_write_port_irq_mask,
641 mvs_get_sas_addr,
642 mvs_94xx_command_active,
643 mvs_94xx_issue_stop,
644 mvs_start_delivery,
645 mvs_rx_update,
646 mvs_int_full,
647 mvs_94xx_assign_reg_set,
648 mvs_94xx_free_reg_set,
649 mvs_get_prd_size,
650 mvs_get_prd_count,
651 mvs_94xx_make_prd,
652 mvs_94xx_detect_porttype,
653 mvs_94xx_oob_done,
654 mvs_94xx_fix_phy_info,
655 NULL,
656 mvs_94xx_phy_set_link_rate,
657 mvs_hw_max_link_rate,
658 mvs_94xx_phy_disable,
659 mvs_94xx_phy_enable,
660 mvs_94xx_phy_reset,
661 NULL,
662 mvs_94xx_clear_active_cmds,
663 mvs_94xx_spi_read_data,
664 mvs_94xx_spi_write_data,
665 mvs_94xx_spi_buildcmd,
666 mvs_94xx_spi_issuecmd,
667 mvs_94xx_spi_waitdataready,
668#ifndef DISABLE_HOTPLUG_DMA_FIX
669 mvs_94xx_fix_dma,
670#endif
671};
672
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 000000000000..23ed9b164669
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
1/*
2 * Marvell 88SE94xx hardware specific head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS94XX_REG_H_
26#define _MVS94XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
31
32enum hw_registers {
33 MVS_GBL_CTL = 0x04, /* global control */
34 MVS_GBL_INT_STAT = 0x00, /* global irq status */
35 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
36
37 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
38 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
39
40 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
41
42 MVS_CTL = 0x100, /* SAS/SATA port configuration */
43 MVS_PCS = 0x104, /* SAS/SATA port control/status */
44 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
45 MVS_CMD_LIST_HI = 0x10C,
46 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
47 MVS_RX_FIS_HI = 0x114,
48 MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
49 MVS_STP_REG_SET_1 = 0x11C,
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67 MVS_INT_STAT_SRS_1 = 0x160,
68 MVS_INT_MASK_SRS_1 = 0x164,
69 MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
70 MVS_NON_NCQ_ERR_1 = 0x16C,
71 MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
72 MVS_CMD_DATA = 0x174, /* Command register port (data) */
73 MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
77 MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
78 /* ports 5-7 follow after this */
79 MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
80 MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
81
82 /* ports 1-3 follow after this */
83 MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
84 /* ports 5-7 follow after this */
85 MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
86
87 /* ports 1-3 follow after this */
88 MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
89 MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
90 /* ports 5-7 follow after this */
91 MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
92 MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
93
94 /* phys 1-3 follow after this */
95 MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
96 MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
97 /* phys 1-3 follow after this */
98 /* multiplexing */
99 MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
100 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
101 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
102 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
103};
104
105enum pci_cfg_registers {
106 PCR_PHY_CTL = 0x40,
107 PCR_PHY_CTL2 = 0x90,
108 PCR_DEV_CTRL = 0x78,
109 PCR_LINK_STAT = 0x82,
110};
111
112/* SAS/SATA Vendor Specific Port Registers */
113enum sas_sata_vsp_regs {
114 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
115 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
116 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
117 VSR_PHY_MODE3 = 0x03 * 4, /* pll */
118 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
119 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
120 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
121 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
122 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
123 VSR_PHY_MODE9 = 0x09 * 4, /* Test */
124 VSR_PHY_MODE10 = 0x0A * 4, /* Power */
125 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
126 VSR_PHY_VS0 = 0x0C * 4, /* Vendor Specific 0 */
127 VSR_PHY_VS1 = 0x0D * 4, /* Vendor Specific 1 */
128};
129
130enum chip_register_bits {
131 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
132 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
135 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
136};
137
138enum pci_interrupt_cause {
139 /* MAIN_IRQ_CAUSE (R10200) Bits*/
140 IRQ_COM_IN_I2O_IOP0 = (1 << 0),
141 IRQ_COM_IN_I2O_IOP1 = (1 << 1),
142 IRQ_COM_IN_I2O_IOP2 = (1 << 2),
143 IRQ_COM_IN_I2O_IOP3 = (1 << 3),
144 IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
145 IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
146 IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
147 IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
148 IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
149 IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
150 IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
151 IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
152 IRQ_PCIF_DRBL0 = (1 << 12),
153 IRQ_PCIF_DRBL1 = (1 << 13),
154 IRQ_PCIF_DRBL2 = (1 << 14),
155 IRQ_PCIF_DRBL3 = (1 << 15),
156 IRQ_XOR_A = (1 << 16),
157 IRQ_XOR_B = (1 << 17),
158 IRQ_SAS_A = (1 << 18),
159 IRQ_SAS_B = (1 << 19),
160 IRQ_CPU_CNTRL = (1 << 20),
161 IRQ_GPIO = (1 << 21),
162 IRQ_UART = (1 << 22),
163 IRQ_SPI = (1 << 23),
164 IRQ_I2C = (1 << 24),
165 IRQ_SGPIO = (1 << 25),
166 IRQ_COM_ERR = (1 << 29),
167 IRQ_I2O_ERR = (1 << 30),
168 IRQ_PCIE_ERR = (1 << 31),
169};
170
171#define MAX_SG_ENTRY 255
172
173struct mvs_prd_imt {
174 __le32 len:22;
175 u8 _r_a:2;
176 u8 misc_ctl:4;
177 u8 inter_sel:4;
178};
179
180struct mvs_prd {
181 /* 64-bit buffer address */
182 __le64 addr;
183 /* 22-bit length */
184 struct mvs_prd_imt im_len;
185} __attribute__ ((packed));
186
187#define SPI_CTRL_REG_94XX 0xc800
188#define SPI_ADDR_REG_94XX 0xc804
189#define SPI_WR_DATA_REG_94XX 0xc808
190#define SPI_RD_DATA_REG_94XX 0xc80c
191#define SPI_CTRL_READ_94XX (1U << 2)
192#define SPI_ADDR_VLD_94XX (1U << 1)
193#define SPI_CTRL_SpiStart_94XX (1U << 0)
194
195#define mv_ffc(x) ffz(x)
196
197static inline int
198mv_ffc64(u64 v)
199{
200 int i;
201 i = mv_ffc((u32)v);
202 if (i >= 0)
203 return i;
204 i = mv_ffc((u32)(v>>32));
205
206 if (i != 0)
207 return 32 + i;
208
209 return -1;
210}
211
212#define r_reg_set_enable(i) \
213 (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
214 mr32(MVS_STP_REG_SET_0))
215
216#define w_reg_set_enable(i, tmp) \
217 (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
218 mw32(MVS_STP_REG_SET_0, tmp))
219
220extern const struct mvs_dispatch mvs_94xx_dispatch;
221#endif
222
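
mv_ffc64() above is used when a SATA/STP device needs a free register set: the two 32-bit MVS_STP_REG_SET_{0,1} words form one 64-bit allocation bitmap and the helper is meant to return the index of the first clear bit (in the kernel build it leans on the architecture's ffz()). The standalone sketch below shows the intended semantics with a portable zero-bit scan; it is an illustration of the idea, not a copy of the in-kernel helper.

/* Standalone illustration of "find first clear bit" over a 64-bit mask,
 * as used to pick a free SATA register set.  Returns -1 when none is free. */
#include <stdint.h>
#include <stdio.h>

static int first_free_bit64(uint64_t v)
{
	for (int i = 0; i < 64; i++)
		if (!(v & ((uint64_t)1 << i)))
			return i;
	return -1;
}

int main(void)
{
	uint64_t reg_set = 0x00000000ffffffffULL;	/* sets 0..31 busy */

	printf("first free register set: %d\n", first_free_bit64(reg_set)); /* 32 */

	reg_set |= (uint64_t)1 << 32;			/* claim it */
	printf("next free register set:  %d\n", first_free_bit64(reg_set)); /* 33 */
	return 0;
}
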
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index cf74b7a3f643..a67e1c4172f9 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -1,46 +1,81 @@
1/*
2 * Marvell 88SE64xx/88SE94xx register IO interface
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
1#ifndef _MV_CHIPS_H_ 26#ifndef _MV_CHIPS_H_
2#define _MV_CHIPS_H_ 27#define _MV_CHIPS_H_
3 28
4#define mr32(reg) readl(regs + MVS_##reg) 29#define mr32(reg) readl(regs + reg)
5#define mw32(reg,val) writel((val), regs + MVS_##reg) 30#define mw32(reg, val) writel((val), regs + reg)
6#define mw32_f(reg,val) do { \ 31#define mw32_f(reg, val) do { \
7 writel((val), regs + MVS_##reg); \ 32 mw32(reg, val); \
8 readl(regs + MVS_##reg); \ 33 mr32(reg); \
9 } while (0) 34 } while (0)
10 35
11static inline u32 mvs_cr32(void __iomem *regs, u32 addr) 36#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
37#define ior32(reg) inl((unsigned long)(regs + reg))
38#define iow16(reg, val) outw(val, (unsigned long)(regs + reg))
39#define ior16(reg) inw((unsigned long)(regs + reg))
40#define iow8(reg, val) outb(val, (unsigned long)(regs + reg))
41#define ior8(reg) inb((unsigned long)(regs + reg))
42
43static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
12{ 44{
13 mw32(CMD_ADDR, addr); 45 void __iomem *regs = mvi->regs;
14 return mr32(CMD_DATA); 46 mw32(MVS_CMD_ADDR, addr);
47 return mr32(MVS_CMD_DATA);
15} 48}
16 49
17static inline void mvs_cw32(void __iomem *regs, u32 addr, u32 val) 50static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
18{ 51{
19 mw32(CMD_ADDR, addr); 52 void __iomem *regs = mvi->regs;
20 mw32(CMD_DATA, val); 53 mw32(MVS_CMD_ADDR, addr);
54 mw32(MVS_CMD_DATA, val);
21} 55}
22 56
23static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) 57static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
24{ 58{
25 void __iomem *regs = mvi->regs; 59 void __iomem *regs = mvi->regs;
26 return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): 60 return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
27 mr32(P4_SER_CTLSTAT + (port - 4) * 4); 61 mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
28} 62}
29 63
30static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) 64static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
31{ 65{
32 void __iomem *regs = mvi->regs; 66 void __iomem *regs = mvi->regs;
33 if (port < 4) 67 if (port < 4)
34 mw32(P0_SER_CTLSTAT + port * 4, val); 68 mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
35 else 69 else
36 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); 70 mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
37} 71}
38 72
39static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) 73static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
74 u32 off2, u32 port)
40{ 75{
41 void __iomem *regs = mvi->regs + off; 76 void __iomem *regs = mvi->regs + off;
42 void __iomem *regs2 = mvi->regs + off2; 77 void __iomem *regs2 = mvi->regs + off2;
43 return (port < 4)?readl(regs + port * 8): 78 return (port < 4) ? readl(regs + port * 8) :
44 readl(regs2 + (port - 4) * 8); 79 readl(regs2 + (port - 4) * 8);
45} 80}
46 81
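
The hunk above drops the MVS_ token-pasting from mr32()/mw32() and switches mvs_cr32()/mvs_cw32() to take the mvs_info, but the access pattern is unchanged: write the target offset into the MVS_CMD_ADDR window, then read or write MVS_CMD_DATA. The sketch below models that address/data indirection against an in-memory "register file"; the array and helper names are made up for the example, and 0x118 mirrors the CMD_PHY_TIMER offset defined in mv_defs.h.

/* Sketch of an address/data register window (illustrative, userspace). */
#include <stdint.h>
#include <stdio.h>

#define DEMO_REG_SPACE 256

static uint32_t cmd_regs[DEMO_REG_SPACE];	/* stands in for the command register space */
static uint32_t cmd_addr;			/* latched MVS_CMD_ADDR value */

static void demo_cw32(uint32_t addr, uint32_t val)
{
	cmd_addr = addr;				/* mw32(MVS_CMD_ADDR, addr) */
	cmd_regs[cmd_addr % DEMO_REG_SPACE] = val;	/* mw32(MVS_CMD_DATA, val) */
}

static uint32_t demo_cr32(uint32_t addr)
{
	cmd_addr = addr;				/* mw32(MVS_CMD_ADDR, addr) */
	return cmd_regs[cmd_addr % DEMO_REG_SPACE];	/* mr32(MVS_CMD_DATA) */
}

int main(void)
{
	/* read-modify-write, the same shape as the PHY timer tweak in mvs_phy_hacks() below */
	demo_cw32(0x118, 0x0000ffff);
	uint32_t tmp = demo_cr32(0x118);
	tmp &= ~(1u << 9);
	tmp |= (1u << 10);
	demo_cw32(0x118, tmp);

	printf("reg 0x118 = 0x%08x\n", demo_cr32(0x118));
	return 0;
}
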
@@ -61,16 +96,19 @@ static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
61 MVS_P4_CFG_DATA, port); 96 MVS_P4_CFG_DATA, port);
62} 97}
63 98
64static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) 99static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
100 u32 port, u32 val)
65{ 101{
66 mvs_write_port(mvi, MVS_P0_CFG_DATA, 102 mvs_write_port(mvi, MVS_P0_CFG_DATA,
67 MVS_P4_CFG_DATA, port, val); 103 MVS_P4_CFG_DATA, port, val);
68} 104}
69 105
70static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) 106static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
107 u32 port, u32 addr)
71{ 108{
72 mvs_write_port(mvi, MVS_P0_CFG_ADDR, 109 mvs_write_port(mvi, MVS_P0_CFG_ADDR,
73 MVS_P4_CFG_ADDR, port, addr); 110 MVS_P4_CFG_ADDR, port, addr);
111 mdelay(10);
74} 112}
75 113
76static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) 114static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
@@ -79,16 +117,19 @@ static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
79 MVS_P4_VSR_DATA, port); 117 MVS_P4_VSR_DATA, port);
80} 118}
81 119
82static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) 120static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
121 u32 port, u32 val)
83{ 122{
84 mvs_write_port(mvi, MVS_P0_VSR_DATA, 123 mvs_write_port(mvi, MVS_P0_VSR_DATA,
85 MVS_P4_VSR_DATA, port, val); 124 MVS_P4_VSR_DATA, port, val);
86} 125}
87 126
88static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) 127static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
128 u32 port, u32 addr)
89{ 129{
90 mvs_write_port(mvi, MVS_P0_VSR_ADDR, 130 mvs_write_port(mvi, MVS_P0_VSR_ADDR,
91 MVS_P4_VSR_ADDR, port, addr); 131 MVS_P4_VSR_ADDR, port, addr);
132 mdelay(10);
92} 133}
93 134
94static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) 135static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
@@ -97,7 +138,8 @@ static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
97 MVS_P4_INT_STAT, port); 138 MVS_P4_INT_STAT, port);
98} 139}
99 140
100static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) 141static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
142 u32 port, u32 val)
101{ 143{
102 mvs_write_port(mvi, MVS_P0_INT_STAT, 144 mvs_write_port(mvi, MVS_P0_INT_STAT,
103 MVS_P4_INT_STAT, port, val); 145 MVS_P4_INT_STAT, port, val);
@@ -107,12 +149,132 @@ static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
107{ 149{
108 return mvs_read_port(mvi, MVS_P0_INT_MASK, 150 return mvs_read_port(mvi, MVS_P0_INT_MASK,
109 MVS_P4_INT_MASK, port); 151 MVS_P4_INT_MASK, port);
152
110} 153}
111 154
112static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) 155static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
156 u32 port, u32 val)
113{ 157{
114 mvs_write_port(mvi, MVS_P0_INT_MASK, 158 mvs_write_port(mvi, MVS_P0_INT_MASK,
115 MVS_P4_INT_MASK, port, val); 159 MVS_P4_INT_MASK, port, val);
116} 160}
117 161
118#endif 162static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
163{
164 u32 tmp;
165
166 /* workaround for SATA R-ERR, to ignore phy glitch */
167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
168 tmp &= ~(1 << 9);
169 tmp |= (1 << 10);
170 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
171
172 /* enable retry 127 times */
173 mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
174
175 /* extend open frame timeout to max */
176 tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
177 tmp &= ~0xffff;
178 tmp |= 0x3fff;
179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
180
181 /* workaround for WDTIMEOUT, set to 550 ms */
182 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
183
184 /* not to halt for different port op during wideport link change */
185 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
186
187 /* workaround for Seagate disk not-found OOB sequence, recv
188 * COMINIT before sending out COMWAKE */
189 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
190 tmp &= 0x0000ffff;
191 tmp |= 0x00fa0000;
192 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
193
194 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
195 tmp &= 0x1fffffff;
196 tmp |= (2U << 29); /* 8 ms retry */
197 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
198}
199
200static inline void mvs_int_sata(struct mvs_info *mvi)
201{
202 u32 tmp;
203 void __iomem *regs = mvi->regs;
204 tmp = mr32(MVS_INT_STAT_SRS_0);
205 if (tmp)
206 mw32(MVS_INT_STAT_SRS_0, tmp);
207 MVS_CHIP_DISP->clear_active_cmds(mvi);
208}
209
210static inline void mvs_int_full(struct mvs_info *mvi)
211{
212 void __iomem *regs = mvi->regs;
213 u32 tmp, stat;
214 int i;
215
216 stat = mr32(MVS_INT_STAT);
217 mvs_int_rx(mvi, false);
218
219 for (i = 0; i < mvi->chip->n_phy; i++) {
220 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
221 if (tmp)
222 mvs_int_port(mvi, i, tmp);
223 }
224
225 if (stat & CINT_SRS)
226 mvs_int_sata(mvi);
227
228 mw32(MVS_INT_STAT, stat);
229}
230
231static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
232{
233 void __iomem *regs = mvi->regs;
234 mw32(MVS_TX_PROD_IDX, tx);
235}
236
237static inline u32 mvs_rx_update(struct mvs_info *mvi)
238{
239 void __iomem *regs = mvi->regs;
240 return mr32(MVS_RX_CONS_IDX);
241}
242
243static inline u32 mvs_get_prd_size(void)
244{
245 return sizeof(struct mvs_prd);
246}
247
248static inline u32 mvs_get_prd_count(void)
249{
250 return MAX_SG_ENTRY;
251}
252
253static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
254{
255 u16 link_stat, link_spd;
256 const char *spd[] = {
257 "Unknown",
258 "2.5",
259 "5.0",
260 };
261 if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
262 return;
263
264 pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
265 link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
266 if (link_spd >= 3)
267 link_spd = 0;
268 dev_printk(KERN_INFO, mvi->dev,
269 "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
270 (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
271 spd[link_spd]);
272}
273
274static inline u32 mvs_hw_max_link_rate(void)
275{
276 return MAX_LINK_RATE;
277}
278
279#endif /* _MV_CHIPS_H_ */
280
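
mvs_show_pcie_usage() above reads the PCR_LINK_STAT config word and splits it into negotiated width and speed with the PLS_* masks added to mv_defs.h (PLS_LINK_SPD in bits 3:0, PLS_NEG_LINK_WD in bits 9:4). Below is a standalone sketch of that decode; the sample link_stat value is assumed rather than read from hardware.

/* Decode a PCIe link status word into width and generation string
 * (illustrative; the value below is an assumed example). */
#include <stdint.h>
#include <stdio.h>

#define PLS_LINK_SPD		(0x0FU << 0)
#define PLS_LINK_SPD_OFFS	0
#define PLS_NEG_LINK_WD		(0x3FU << 4)
#define PLS_NEG_LINK_WD_OFFS	4

int main(void)
{
	static const char *spd[] = { "Unknown", "2.5", "5.0" };
	uint16_t link_stat = 0x0081;	/* assumed sample: x8 at 2.5 Gbps */

	unsigned width = (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS;
	unsigned gen   = (link_stat & PLS_LINK_SPD)    >> PLS_LINK_SPD_OFFS;

	if (gen >= 3)
		gen = 0;
	printf("PCI-E x%u, Bandwidth Usage: %s Gbps\n", width, spd[gen]);
	return 0;
}
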
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index d8e96a3e5a21..f8cb9defb961 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -1,53 +1,66 @@
1/* 1/*
2 mv_defs.h - Marvell 88SE6440 SAS/SATA support 2 * Marvell 88SE64xx/88SE94xx constants header file
3 3 *
4 Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 6 *
7 This program is free software; you can redistribute it and/or 7 * This file is licensed under GPLv2.
8 modify it under the terms of the GNU General Public License as 8 *
9 published by the Free Software Foundation; either version 2, 9 * This program is free software; you can redistribute it and/or
10 or (at your option) any later version. 10 * modify it under the terms of the GNU General Public License as
11 11 * published by the Free Software Foundation; version 2 of the
12 This program is distributed in the hope that it will be useful, 12 * License.
13 but WITHOUT ANY WARRANTY; without even the implied warranty 13 *
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 14 * This program is distributed in the hope that it will be useful,
15 See the GNU General Public License for more details. 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 You should have received a copy of the GNU General Public 17 * General Public License for more details.
18 License along with this program; see the file COPYING. If not, 18 *
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge, 19 * You should have received a copy of the GNU General Public License
20 MA 02139, USA. 20 * along with this program; if not, write to the Free Software
21 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 */ 22 * USA
23*/
23 24
24#ifndef _MV_DEFS_H_ 25#ifndef _MV_DEFS_H_
25#define _MV_DEFS_H_ 26#define _MV_DEFS_H_
26 27
28
29enum chip_flavors {
30 chip_6320,
31 chip_6440,
32 chip_6485,
33 chip_9480,
34 chip_9180,
35};
36
27/* driver compile-time configuration */ 37/* driver compile-time configuration */
28enum driver_configuration { 38enum driver_configuration {
39 MVS_SLOTS = 512, /* command slots */
29 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 40 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
30 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ 41 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
31 /* software requires power-of-2 42 /* software requires power-of-2
32 ring size */ 43 ring size */
44 MVS_SOC_SLOTS = 64,
45 MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
46 MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
33 47
34 MVS_SLOTS = 512, /* command slots */
35 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ 48 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
36 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 49 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
37 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ 50 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
38 MVS_OAF_SZ = 64, /* Open address frame buffer size */ 51 MVS_OAF_SZ = 64, /* Open address frame buffer size */
39 52 MVS_QUEUE_SIZE = 32, /* Support Queue depth */
40 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ 53 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
41 54 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
42 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
43 MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
44}; 55};
45 56
46/* unchangeable hardware details */ 57/* unchangeable hardware details */
47enum hardware_details { 58enum hardware_details {
48 MVS_MAX_PHYS = 8, /* max. possible phys */ 59 MVS_MAX_PHYS = 8, /* max. possible phys */
49 MVS_MAX_PORTS = 8, /* max. possible ports */ 60 MVS_MAX_PORTS = 8, /* max. possible ports */
50 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), 61 MVS_SOC_PHYS = 4, /* soc phys */
62 MVS_SOC_PORTS = 4, /* soc ports */
63 MVS_MAX_DEVICES = 1024, /* max supported device */
51}; 64};
52 65
53/* peripheral registers (BAR2) */ 66/* peripheral registers (BAR2) */
@@ -133,6 +146,8 @@ enum hw_register_bits {
133 CINT_PORT = (1U << 8), /* port0 event */ 146 CINT_PORT = (1U << 8), /* port0 event */
134 CINT_PORT_MASK_OFFSET = 8, 147 CINT_PORT_MASK_OFFSET = 8,
135 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), 148 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
149 CINT_PHY_MASK_OFFSET = 4,
150 CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
136 151
137 /* TX (delivery) ring bits */ 152 /* TX (delivery) ring bits */
138 TXQ_CMD_SHIFT = 29, 153 TXQ_CMD_SHIFT = 29,
@@ -142,7 +157,11 @@ enum hw_register_bits {
142 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ 157 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
143 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ 158 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
144 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ 159 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
160 TXQ_MODE_TARGET = 0,
161 TXQ_MODE_INITIATOR = 1,
145 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ 162 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
163 TXQ_PRI_NORMAL = 0,
164 TXQ_PRI_HIGH = 1,
146 TXQ_SRS_SHIFT = 20, /* SATA register set */ 165 TXQ_SRS_SHIFT = 20, /* SATA register set */
147 TXQ_SRS_MASK = 0x7f, 166 TXQ_SRS_MASK = 0x7f,
148 TXQ_PHY_SHIFT = 12, /* PHY bitmap */ 167 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
@@ -175,6 +194,8 @@ enum hw_register_bits {
175 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ 194 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
176 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ 195 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
177 196
197 MCH_SSP_MODE_PASSTHRU = 1,
198 MCH_SSP_MODE_NORMAL = 0,
178 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ 199 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
179 MCH_FBURST = (1U << 11), /* first burst (SSP) */ 200 MCH_FBURST = (1U << 11), /* first burst (SSP) */
180 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ 201 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
@@ -199,15 +220,12 @@ enum hw_register_bits {
199 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ 220 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
200 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ 221 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
201 PHY_RST = (1U << 0), /* phy reset */ 222 PHY_RST = (1U << 0), /* phy reset */
202 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
203 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
204 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
205 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
206 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
207 PHY_READY_MASK = (1U << 20), 223 PHY_READY_MASK = (1U << 20),
208 224
209 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ 225 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
210 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ 226 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
227 PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */
228 PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
211 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ 229 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
212 PHYEV_AN = (1U << 18), /* SATA async notification */ 230 PHYEV_AN = (1U << 18), /* SATA async notification */
213 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ 231 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
@@ -229,9 +247,10 @@ enum hw_register_bits {
229 /* MVS_PCS */ 247 /* MVS_PCS */
230 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ 248 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
231 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ 249 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
232 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ 250 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
233 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ 251 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
234 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ 252 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
253 PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
235 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ 254 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
236 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ 255 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
237 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ 256 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
@@ -246,6 +265,8 @@ enum hw_register_bits {
246 PORT_DEV_SMP_INIT = (1U << 10), 265 PORT_DEV_SMP_INIT = (1U << 10),
247 PORT_DEV_STP_INIT = (1U << 9), 266 PORT_DEV_STP_INIT = (1U << 9),
248 PORT_PHY_ID_MASK = (0xFFU << 24), 267 PORT_PHY_ID_MASK = (0xFFU << 24),
268 PORT_SSP_TRGT_MASK = (0x1U << 19),
269 PORT_SSP_INIT_MASK = (0x1U << 11),
249 PORT_DEV_TRGT_MASK = (0x7U << 17), 270 PORT_DEV_TRGT_MASK = (0x7U << 17),
250 PORT_DEV_INIT_MASK = (0x7U << 9), 271 PORT_DEV_INIT_MASK = (0x7U << 9),
251 PORT_DEV_TYPE_MASK = (0x7U << 0), 272 PORT_DEV_TYPE_MASK = (0x7U << 0),
@@ -283,21 +304,30 @@ enum sas_sata_config_port_regs {
283 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ 304 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
284 PHYR_SATA_CTL = 0x18, /* SATA control */ 305 PHYR_SATA_CTL = 0x18, /* SATA control */
285 PHYR_PHY_STAT = 0x1C, /* PHY status */ 306 PHYR_PHY_STAT = 0x1C, /* PHY status */
286 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ 307 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
287 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ 308 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
288 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ 309 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
289 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ 310 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
290 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ 311 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
291 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ 312 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
292 PHYR_WIDE_PORT = 0x38, /* wide port participating */ 313 PHYR_WIDE_PORT = 0x38, /* wide port participating */
293 PHYR_CURRENT0 = 0x80, /* current connection info 0 */ 314 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
294 PHYR_CURRENT1 = 0x84, /* current connection info 1 */ 315 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
295 PHYR_CURRENT2 = 0x88, /* current connection info 2 */ 316 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
296}; 317 CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
297 318 CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
298enum mvs_info_flags { 319 CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
299 MVF_MSI = (1U << 0), /* MSI is enabled */ 320 CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
300 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 321 CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
322 CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
323 CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
324 CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
325 CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
326 CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
327 CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
328 CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
329 CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
330 CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
301}; 331};
302 332
303enum sas_cmd_port_registers { 333enum sas_cmd_port_registers {
@@ -305,11 +335,11 @@ enum sas_cmd_port_registers {
305 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ 335 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
306 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ 336 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
307 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ 337 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
308 CMD_OOB_SPACE = 0x110, /* OOB space control register */ 338 CMD_OOB_SPACE = 0x110, /* OOB space control register */
309 CMD_OOB_BURST = 0x114, /* OOB burst control register */ 339 CMD_OOB_BURST = 0x114, /* OOB burst control register */
310 CMD_PHY_TIMER = 0x118, /* PHY timer control register */ 340 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
311 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ 341 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
312 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ 342 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
313 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ 343 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
314 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ 344 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
315 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ 345 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
@@ -318,9 +348,9 @@ enum sas_cmd_port_registers {
318 CMD_PL_TIMER = 0x138, /* PL timer register */ 348 CMD_PL_TIMER = 0x138, /* PL timer register */
319 CMD_WD_TIMER = 0x13c, /* WD timer register */ 349 CMD_WD_TIMER = 0x13c, /* WD timer register */
320 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ 350 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
321 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ 351 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
322 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ 352 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
323 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ 353 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
324 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ 354 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
325 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ 355 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
326 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ 356 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
@@ -353,27 +383,25 @@ enum sas_cmd_port_registers {
353 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ 383 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
354}; 384};
355 385
356enum pci_cfg_register_bits { 386enum mvs_info_flags {
357 PCTL_PWR_ON = (0xFU << 24), 387 MVF_MSI = (1U << 0), /* MSI is enabled */
358 PCTL_OFF = (0xFU << 12), 388 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
359 PRD_REQ_SIZE = (0x4000), 389 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
360 PRD_REQ_MASK = (0x00007000),
361}; 390};
362 391
363enum nvram_layout_offsets { 392enum mvs_event_flags {
364 NVR_SIG = 0x00, /* 0xAA, 0x55 */ 393 PHY_PLUG_EVENT = (3U),
365 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ 394 PHY_PLUG_IN = (1U << 0), /* phy plug in */
395 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
366}; 396};
367 397
368enum chip_flavors { 398enum mvs_port_type {
369 chip_6320, 399 PORT_TGT_MASK = (1U << 5),
370 chip_6440, 400 PORT_INIT_PORT = (1U << 4),
371 chip_6480, 401 PORT_TGT_PORT = (1U << 3),
372}; 402 PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
373 403 PORT_TYPE_SAS = (1U << 1),
374enum port_type { 404 PORT_TYPE_SATA = (1U << 0),
375 PORT_TYPE_SAS = (1L << 1),
376 PORT_TYPE_SATA = (1L << 0),
377}; 405};
378 406
379/* Command Table Format */ 407/* Command Table Format */
@@ -438,4 +466,37 @@ enum error_info_rec_2 {
438 USR_BLK_NM = (1U << 0), /* User Block Number */ 466 USR_BLK_NM = (1U << 0), /* User Block Number */
439}; 467};
440 468
469enum pci_cfg_register_bits {
470 PCTL_PWR_OFF = (0xFU << 24),
471 PCTL_COM_ON = (0xFU << 20),
472 PCTL_LINK_RST = (0xFU << 16),
473 PCTL_LINK_OFFS = (16),
474 PCTL_PHY_DSBL = (0xFU << 12),
475 PCTL_PHY_DSBL_OFFS = (12),
476 PRD_REQ_SIZE = (0x4000),
477 PRD_REQ_MASK = (0x00007000),
478 PLS_NEG_LINK_WD = (0x3FU << 4),
479 PLS_NEG_LINK_WD_OFFS = 4,
480 PLS_LINK_SPD = (0x0FU << 0),
481 PLS_LINK_SPD_OFFS = 0,
482};
483
484enum open_frame_protocol {
485 PROTOCOL_SMP = 0x0,
486 PROTOCOL_SSP = 0x1,
487 PROTOCOL_STP = 0x2,
488};
489
490/* define for response frame datapres field */
491enum datapres_field {
492 NO_DATA = 0,
493 RESPONSE_DATA = 1,
494 SENSE_DATA = 2,
495};
496
497/* define task management IU */
498struct mvs_tmf_task{
499 u8 tmf;
500 u16 tag_of_task_to_be_managed;
501};
441#endif 502#endif
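
The TXQ_* bits above describe how a delivery-queue entry is packed: a 3-bit command in bits 31:29, the initiator/target mode bit at 28, priority at 27, the SATA register set in bits 26:20 and a PHY bitmap starting at bit 12. The sketch below packs one entry from those published shifts and masks; the helper, the tag-in-the-low-bits placement and the example values are illustrative, not lifted from mv_sas.c.

/* Pack a TX (delivery) ring entry from the TXQ_* bit definitions
 * (illustrative helper; the real driver builds these words in mv_sas.c). */
#include <stdint.h>
#include <stdio.h>

#define TXQ_CMD_SHIFT		29
#define TXQ_CMD_SLOT_RESET	7		/* reset command slot */
#define TXQ_MODE_I		(1U << 28)	/* 0 = target, 1 = initiator */
#define TXQ_PRIO_HI		(1U << 27)
#define TXQ_SRS_SHIFT		20		/* SATA register set */
#define TXQ_SRS_MASK		0x7f
#define TXQ_PHY_SHIFT		12		/* PHY bitmap */

static uint32_t pack_txq_entry(uint32_t cmd, int initiator, int high_prio,
			       uint32_t reg_set, uint32_t phy_map, uint32_t tag)
{
	uint32_t w = (cmd << TXQ_CMD_SHIFT) |
		     ((reg_set & TXQ_SRS_MASK) << TXQ_SRS_SHIFT) |
		     (phy_map << TXQ_PHY_SHIFT) |
		     tag;				/* slot tag assumed in the low bits */
	if (initiator)
		w |= TXQ_MODE_I;
	if (high_prio)
		w |= TXQ_PRIO_HI;
	return w;
}

int main(void)
{
	/* slot-reset for slot tag 5, initiator mode, register set 3, phy 0 */
	uint32_t entry = pack_txq_entry(TXQ_CMD_SLOT_RESET, 1, 0, 3, 0x1, 5);

	printf("TX entry: 0x%08x\n", entry);
	return 0;
}
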
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 258a1a923290..8646a19f999d 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -1,38 +1,41 @@
1/* 1/*
2 mv_init.c - Marvell 88SE6440 SAS/SATA init support 2 * Marvell 88SE64xx/88SE94xx pci init
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
3 24
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 */
23 25
24#include "mv_sas.h" 26#include "mv_sas.h"
25#include "mv_64xx.h"
26#include "mv_chips.h"
27 27
28static struct scsi_transport_template *mvs_stt; 28static struct scsi_transport_template *mvs_stt;
29
30static const struct mvs_chip_info mvs_chips[] = { 29static const struct mvs_chip_info mvs_chips[] = {
31 [chip_6320] = { 2, 16, 9 }, 30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6440] = { 4, 16, 9 }, 31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
33 [chip_6480] = { 8, 32, 10 }, 32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34}; 35};
35 36
37#define SOC_SAS_NUM 2
38
36static struct scsi_host_template mvs_sht = { 39static struct scsi_host_template mvs_sht = {
37 .module = THIS_MODULE, 40 .module = THIS_MODULE,
38 .name = DRV_NAME, 41 .name = DRV_NAME,
@@ -53,17 +56,29 @@ static struct scsi_host_template mvs_sht = {
53 .use_clustering = ENABLE_CLUSTERING, 56 .use_clustering = ENABLE_CLUSTERING,
54 .eh_device_reset_handler = sas_eh_device_reset_handler, 57 .eh_device_reset_handler = sas_eh_device_reset_handler,
55 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 58 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
56 .slave_alloc = sas_slave_alloc, 59 .slave_alloc = mvs_slave_alloc,
57 .target_destroy = sas_target_destroy, 60 .target_destroy = sas_target_destroy,
58 .ioctl = sas_ioctl, 61 .ioctl = sas_ioctl,
59}; 62};
60 63
61static struct sas_domain_function_template mvs_transport_ops = { 64static struct sas_domain_function_template mvs_transport_ops = {
62 .lldd_execute_task = mvs_task_exec, 65 .lldd_dev_found = mvs_dev_found,
66 .lldd_dev_gone = mvs_dev_gone,
67
68 .lldd_execute_task = mvs_queue_command,
63 .lldd_control_phy = mvs_phy_control, 69 .lldd_control_phy = mvs_phy_control,
64 .lldd_abort_task = mvs_task_abort, 70
65 .lldd_port_formed = mvs_port_formed, 71 .lldd_abort_task = mvs_abort_task,
72 .lldd_abort_task_set = mvs_abort_task_set,
73 .lldd_clear_aca = mvs_clear_aca,
74 .lldd_clear_task_set = mvs_clear_task_set,
66 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, 75 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
76 .lldd_lu_reset = mvs_lu_reset,
77 .lldd_query_task = mvs_query_task,
78
79 .lldd_port_formed = mvs_port_formed,
80 .lldd_port_deformed = mvs_port_deformed,
81
67}; 82};
68 83
69static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) 84static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
@@ -71,6 +86,8 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
71 struct mvs_phy *phy = &mvi->phy[phy_id]; 86 struct mvs_phy *phy = &mvi->phy[phy_id];
72 struct asd_sas_phy *sas_phy = &phy->sas_phy; 87 struct asd_sas_phy *sas_phy = &phy->sas_phy;
73 88
89 phy->mvi = mvi;
90 init_timer(&phy->timer);
74 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 91 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
75 sas_phy->class = SAS; 92 sas_phy->class = SAS;
76 sas_phy->iproto = SAS_PROTOCOL_ALL; 93 sas_phy->iproto = SAS_PROTOCOL_ALL;
@@ -83,248 +100,283 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
83 sas_phy->id = phy_id; 100 sas_phy->id = phy_id;
84 sas_phy->sas_addr = &mvi->sas_addr[0]; 101 sas_phy->sas_addr = &mvi->sas_addr[0];
85 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 102 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
86 sas_phy->ha = &mvi->sas; 103 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
87 sas_phy->lldd_phy = phy; 104 sas_phy->lldd_phy = phy;
88} 105}
89 106
90static void mvs_free(struct mvs_info *mvi) 107static void mvs_free(struct mvs_info *mvi)
91{ 108{
92 int i; 109 int i;
110 struct mvs_wq *mwq;
111 int slot_nr;
93 112
94 if (!mvi) 113 if (!mvi)
95 return; 114 return;
96 115
97 for (i = 0; i < MVS_SLOTS; i++) { 116 if (mvi->flags & MVF_FLAG_SOC)
98 struct mvs_slot_info *slot = &mvi->slot_info[i]; 117 slot_nr = MVS_SOC_SLOTS;
118 else
119 slot_nr = MVS_SLOTS;
99 120
121 for (i = 0; i < mvi->tags_num; i++) {
122 struct mvs_slot_info *slot = &mvi->slot_info[i];
100 if (slot->buf) 123 if (slot->buf)
101 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, 124 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
102 slot->buf, slot->buf_dma); 125 slot->buf, slot->buf_dma);
103 } 126 }
104 127
105 if (mvi->tx) 128 if (mvi->tx)
106 dma_free_coherent(&mvi->pdev->dev, 129 dma_free_coherent(mvi->dev,
107 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 130 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
108 mvi->tx, mvi->tx_dma); 131 mvi->tx, mvi->tx_dma);
109 if (mvi->rx_fis) 132 if (mvi->rx_fis)
110 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, 133 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
111 mvi->rx_fis, mvi->rx_fis_dma); 134 mvi->rx_fis, mvi->rx_fis_dma);
112 if (mvi->rx) 135 if (mvi->rx)
113 dma_free_coherent(&mvi->pdev->dev, 136 dma_free_coherent(mvi->dev,
114 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 137 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
115 mvi->rx, mvi->rx_dma); 138 mvi->rx, mvi->rx_dma);
116 if (mvi->slot) 139 if (mvi->slot)
117 dma_free_coherent(&mvi->pdev->dev, 140 dma_free_coherent(mvi->dev,
118 sizeof(*mvi->slot) * MVS_SLOTS, 141 sizeof(*mvi->slot) * slot_nr,
119 mvi->slot, mvi->slot_dma); 142 mvi->slot, mvi->slot_dma);
120#ifdef MVS_ENABLE_PERI 143#ifndef DISABLE_HOTPLUG_DMA_FIX
121 if (mvi->peri_regs) 144 if (mvi->bulk_buffer)
122 iounmap(mvi->peri_regs); 145 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
146 mvi->bulk_buffer, mvi->bulk_buffer_dma);
123#endif 147#endif
124 if (mvi->regs) 148
125 iounmap(mvi->regs); 149 MVS_CHIP_DISP->chip_iounmap(mvi);
126 if (mvi->shost) 150 if (mvi->shost)
127 scsi_host_put(mvi->shost); 151 scsi_host_put(mvi->shost);
128 kfree(mvi->sas.sas_port); 152 list_for_each_entry(mwq, &mvi->wq_list, entry)
129 kfree(mvi->sas.sas_phy); 153 cancel_delayed_work(&mwq->work_q);
130 kfree(mvi); 154 kfree(mvi);
131} 155}
132 156
133#ifdef MVS_USE_TASKLET 157#ifdef MVS_USE_TASKLET
134static void mvs_tasklet(unsigned long data) 158struct tasklet_struct mv_tasklet;
159static void mvs_tasklet(unsigned long opaque)
135{ 160{
136 struct mvs_info *mvi = (struct mvs_info *) data;
137 unsigned long flags; 161 unsigned long flags;
162 u32 stat;
163 u16 core_nr, i = 0;
138 164
139 spin_lock_irqsave(&mvi->lock, flags); 165 struct mvs_info *mvi;
166 struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
167
168 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
169 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
170
171 if (unlikely(!mvi))
172 BUG_ON(1);
173
174 for (i = 0; i < core_nr; i++) {
175 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
176 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
177 if (stat)
178 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
179 }
140 180
141#ifdef MVS_DISABLE_MSI
142 mvs_int_full(mvi);
143#else
144 mvs_int_rx(mvi, true);
145#endif
146 spin_unlock_irqrestore(&mvi->lock, flags);
147} 181}
148#endif 182#endif
149 183
150static irqreturn_t mvs_interrupt(int irq, void *opaque) 184static irqreturn_t mvs_interrupt(int irq, void *opaque)
151{ 185{
152 struct mvs_info *mvi = opaque; 186 u32 core_nr, i = 0;
153 void __iomem *regs = mvi->regs;
154 u32 stat; 187 u32 stat;
188 struct mvs_info *mvi;
189 struct sas_ha_struct *sha = opaque;
155 190
156 stat = mr32(GBL_INT_STAT); 191 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
192 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
157 193
158 if (stat == 0 || stat == 0xffffffff) 194 if (unlikely(!mvi))
159 return IRQ_NONE; 195 return IRQ_NONE;
160 196
161 /* clear CMD_CMPLT ASAP */ 197 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
162 mw32_f(INT_STAT, CINT_DONE); 198 if (!stat)
163 199 return IRQ_NONE;
164#ifndef MVS_USE_TASKLET
165 spin_lock(&mvi->lock);
166
167 mvs_int_full(mvi);
168 200
169 spin_unlock(&mvi->lock); 201#ifdef MVS_USE_TASKLET
202 tasklet_schedule(&mv_tasklet);
170#else 203#else
171 tasklet_schedule(&mvi->tasklet); 204 for (i = 0; i < core_nr; i++) {
205 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
206 MVS_CHIP_DISP->isr(mvi, irq, stat);
207 }
172#endif 208#endif
173 return IRQ_HANDLED; 209 return IRQ_HANDLED;
174} 210}
175 211
176static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, 212static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
177 const struct pci_device_id *ent)
178{ 213{
179 struct mvs_info *mvi; 214 int i, slot_nr;
180 unsigned long res_start, res_len, res_flag;
181 struct asd_sas_phy **arr_phy;
182 struct asd_sas_port **arr_port;
183 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
184 int i;
185 215
186 /* 216 if (mvi->flags & MVF_FLAG_SOC)
187 * alloc and init our per-HBA mvs_info struct 217 slot_nr = MVS_SOC_SLOTS;
188 */ 218 else
189 219 slot_nr = MVS_SLOTS;
190 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
191 if (!mvi)
192 return NULL;
193 220
194 spin_lock_init(&mvi->lock); 221 spin_lock_init(&mvi->lock);
195#ifdef MVS_USE_TASKLET 222 for (i = 0; i < mvi->chip->n_phy; i++) {
196 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
197#endif
198 mvi->pdev = pdev;
199 mvi->chip = chip;
200
201 if (pdev->device == 0x6440 && pdev->revision == 0)
202 mvi->flags |= MVF_PHY_PWR_FIX;
203
204 /*
205 * alloc and init SCSI, SAS glue
206 */
207
208 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
209 if (!mvi->shost)
210 goto err_out;
211
212 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
213 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
214 if (!arr_phy || !arr_port)
215 goto err_out;
216
217 for (i = 0; i < MVS_MAX_PHYS; i++) {
218 mvs_phy_init(mvi, i); 223 mvs_phy_init(mvi, i);
219 arr_phy[i] = &mvi->phy[i].sas_phy;
220 arr_port[i] = &mvi->port[i].sas_port;
221 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
222 mvi->port[i].wide_port_phymap = 0; 224 mvi->port[i].wide_port_phymap = 0;
223 mvi->port[i].port_attached = 0; 225 mvi->port[i].port_attached = 0;
224 INIT_LIST_HEAD(&mvi->port[i].list); 226 INIT_LIST_HEAD(&mvi->port[i].list);
225 } 227 }
226 228 for (i = 0; i < MVS_MAX_DEVICES; i++) {
227 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; 229 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
228 mvi->shost->transportt = mvs_stt; 230 mvi->devices[i].dev_type = NO_DEVICE;
229 mvi->shost->max_id = 21; 231 mvi->devices[i].device_id = i;
230 mvi->shost->max_lun = ~0; 232 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
231 mvi->shost->max_channel = 0; 233 }
232 mvi->shost->max_cmd_len = 16;
233
234 mvi->sas.sas_ha_name = DRV_NAME;
235 mvi->sas.dev = &pdev->dev;
236 mvi->sas.lldd_module = THIS_MODULE;
237 mvi->sas.sas_addr = &mvi->sas_addr[0];
238 mvi->sas.sas_phy = arr_phy;
239 mvi->sas.sas_port = arr_port;
240 mvi->sas.num_phys = chip->n_phy;
241 mvi->sas.lldd_max_execute_num = 1;
242 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
243 mvi->shost->can_queue = MVS_CAN_QUEUE;
244 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
245 mvi->sas.lldd_ha = mvi;
246 mvi->sas.core.shost = mvi->shost;
247
248 mvs_tag_init(mvi);
249
250 /*
251 * ioremap main and peripheral registers
252 */
253
254#ifdef MVS_ENABLE_PERI
255 res_start = pci_resource_start(pdev, 2);
256 res_len = pci_resource_len(pdev, 2);
257 if (!res_start || !res_len)
258 goto err_out;
259
260 mvi->peri_regs = ioremap_nocache(res_start, res_len);
261 if (!mvi->peri_regs)
262 goto err_out;
263#endif
264
265 res_start = pci_resource_start(pdev, 4);
266 res_len = pci_resource_len(pdev, 4);
267 if (!res_start || !res_len)
268 goto err_out;
269
270 res_flag = pci_resource_flags(pdev, 4);
271 if (res_flag & IORESOURCE_CACHEABLE)
272 mvi->regs = ioremap(res_start, res_len);
273 else
274 mvi->regs = ioremap_nocache(res_start, res_len);
275
276 if (!mvi->regs)
277 goto err_out;
278 234
279 /* 235 /*
280 * alloc and init our DMA areas 236 * alloc and init our DMA areas
281 */ 237 */
282 238 mvi->tx = dma_alloc_coherent(mvi->dev,
283 mvi->tx = dma_alloc_coherent(&pdev->dev,
284 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 239 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
285 &mvi->tx_dma, GFP_KERNEL); 240 &mvi->tx_dma, GFP_KERNEL);
286 if (!mvi->tx) 241 if (!mvi->tx)
287 goto err_out; 242 goto err_out;
288 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); 243 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
289 244 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
290 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
291 &mvi->rx_fis_dma, GFP_KERNEL); 245 &mvi->rx_fis_dma, GFP_KERNEL);
292 if (!mvi->rx_fis) 246 if (!mvi->rx_fis)
293 goto err_out; 247 goto err_out;
294 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); 248 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
295 249
296 mvi->rx = dma_alloc_coherent(&pdev->dev, 250 mvi->rx = dma_alloc_coherent(mvi->dev,
297 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), 251 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
298 &mvi->rx_dma, GFP_KERNEL); 252 &mvi->rx_dma, GFP_KERNEL);
299 if (!mvi->rx) 253 if (!mvi->rx)
300 goto err_out; 254 goto err_out;
301 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); 255 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
302
303 mvi->rx[0] = cpu_to_le32(0xfff); 256 mvi->rx[0] = cpu_to_le32(0xfff);
304 mvi->rx_cons = 0xfff; 257 mvi->rx_cons = 0xfff;
305 258
306 mvi->slot = dma_alloc_coherent(&pdev->dev, 259 mvi->slot = dma_alloc_coherent(mvi->dev,
307 sizeof(*mvi->slot) * MVS_SLOTS, 260 sizeof(*mvi->slot) * slot_nr,
308 &mvi->slot_dma, GFP_KERNEL); 261 &mvi->slot_dma, GFP_KERNEL);
309 if (!mvi->slot) 262 if (!mvi->slot)
310 goto err_out; 263 goto err_out;
311 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); 264 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
312 265
313 for (i = 0; i < MVS_SLOTS; i++) { 266#ifndef DISABLE_HOTPLUG_DMA_FIX
267 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
268 TRASH_BUCKET_SIZE,
269 &mvi->bulk_buffer_dma, GFP_KERNEL);
270 if (!mvi->bulk_buffer)
271 goto err_out;
272#endif
273 for (i = 0; i < slot_nr; i++) {
314 struct mvs_slot_info *slot = &mvi->slot_info[i]; 274 struct mvs_slot_info *slot = &mvi->slot_info[i];
315 275
316 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, 276 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
317 &slot->buf_dma, GFP_KERNEL); 277 &slot->buf_dma, GFP_KERNEL);
318 if (!slot->buf) 278 if (!slot->buf) {
279 printk(KERN_DEBUG "failed to allocate slot->buf.\n");
319 goto err_out; 280 goto err_out;
281 }
320 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 282 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
283 ++mvi->tags_num;
321 } 284 }
285 /* Initialize tags */
286 mvs_tag_init(mvi);
287 return 0;
288err_out:
289 return 1;
290}
291
322 292
323 /* finally, read NVRAM to get our SAS address */ 293int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
324 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) 294{
295 unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
296 struct pci_dev *pdev = mvi->pdev;
297 if (bar_ex != -1) {
298 /*
299 * ioremap main and peripheral registers
300 */
301 res_start = pci_resource_start(pdev, bar_ex);
302 res_len = pci_resource_len(pdev, bar_ex);
303 if (!res_start || !res_len)
304 goto err_out;
305
306 res_flag_ex = pci_resource_flags(pdev, bar_ex);
307 if (res_flag_ex & IORESOURCE_MEM) {
308 if (res_flag_ex & IORESOURCE_CACHEABLE)
309 mvi->regs_ex = ioremap(res_start, res_len);
310 else
311 mvi->regs_ex = ioremap_nocache(res_start,
312 res_len);
313 } else
314 mvi->regs_ex = (void *)res_start;
315 if (!mvi->regs_ex)
316 goto err_out;
317 }
318
319 res_start = pci_resource_start(pdev, bar);
320 res_len = pci_resource_len(pdev, bar);
321 if (!res_start || !res_len)
322 goto err_out;
323
324 res_flag = pci_resource_flags(pdev, bar);
325 if (res_flag & IORESOURCE_CACHEABLE)
326 mvi->regs = ioremap(res_start, res_len);
327 else
328 mvi->regs = ioremap_nocache(res_start, res_len);
329
330 if (!mvi->regs) {
331 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
332 iounmap(mvi->regs_ex);
333 mvi->regs_ex = NULL;
325 goto err_out; 334 goto err_out;
326 return mvi; 335 }
336
337 return 0;
338err_out:
339 return -1;
340}
341
342void mvs_iounmap(void __iomem *regs)
343{
344 iounmap(regs);
345}
346
347static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
348 const struct pci_device_id *ent,
349 struct Scsi_Host *shost, unsigned int id)
350{
351 struct mvs_info *mvi;
352 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
353
354 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
355 GFP_KERNEL);
356 if (!mvi)
357 return NULL;
327 358
359 mvi->pdev = pdev;
360 mvi->dev = &pdev->dev;
361 mvi->chip_id = ent->driver_data;
362 mvi->chip = &mvs_chips[mvi->chip_id];
363 INIT_LIST_HEAD(&mvi->wq_list);
364 mvi->irq = pdev->irq;
365
366 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
367 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
368
369 mvi->id = id;
370 mvi->sas = sha;
371 mvi->shost = shost;
372#ifdef MVS_USE_TASKLET
373 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
374#endif
375
376 if (MVS_CHIP_DISP->chip_ioremap(mvi))
377 goto err_out;
378 if (!mvs_alloc(mvi, shost))
379 return mvi;
328err_out: 380err_out:
329 mvs_free(mvi); 381 mvs_free(mvi);
330 return NULL; 382 return NULL;
@@ -363,16 +415,111 @@ static int pci_go_64(struct pci_dev *pdev)
363 return rc; 415 return rc;
364} 416}
365 417
418static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
419 const struct mvs_chip_info *chip_info)
420{
421 int phy_nr, port_nr; unsigned short core_nr;
422 struct asd_sas_phy **arr_phy;
423 struct asd_sas_port **arr_port;
424 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
425
426 core_nr = chip_info->n_host;
427 phy_nr = core_nr * chip_info->n_phy;
428 port_nr = phy_nr;
429
430 memset(sha, 0x00, sizeof(struct sas_ha_struct));
431 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
432 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
433 if (!arr_phy || !arr_port)
434 goto exit_free;
435
436 sha->sas_phy = arr_phy;
437 sha->sas_port = arr_port;
438
439 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
440 if (!sha->lldd_ha)
441 goto exit_free;
442
443 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
444
445 shost->transportt = mvs_stt;
446 shost->max_id = 128;
447 shost->max_lun = ~0;
448 shost->max_channel = 1;
449 shost->max_cmd_len = 16;
450
451 return 0;
452exit_free:
453 kfree(arr_phy);
454 kfree(arr_port);
455 return -1;
456
457}
458
459static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
460 const struct mvs_chip_info *chip_info)
461{
462 int can_queue, i = 0, j = 0;
463 struct mvs_info *mvi = NULL;
464 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
465 unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
466
467 for (j = 0; j < nr_core; j++) {
468 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
469 for (i = 0; i < chip_info->n_phy; i++) {
470 sha->sas_phy[j * chip_info->n_phy + i] =
471 &mvi->phy[i].sas_phy;
472 sha->sas_port[j * chip_info->n_phy + i] =
473 &mvi->port[i].sas_port;
474 }
475 }
476
477 sha->sas_ha_name = DRV_NAME;
478 sha->dev = mvi->dev;
479 sha->lldd_module = THIS_MODULE;
480 sha->sas_addr = &mvi->sas_addr[0];
481
482 sha->num_phys = nr_core * chip_info->n_phy;
483
484 sha->lldd_max_execute_num = 1;
485
486 if (mvi->flags & MVF_FLAG_SOC)
487 can_queue = MVS_SOC_CAN_QUEUE;
488 else
489 can_queue = MVS_CAN_QUEUE;
490
491 sha->lldd_queue_size = can_queue;
492 shost->can_queue = can_queue;
493 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
494 sha->core.shost = mvi->shost;
495}
496
497static void mvs_init_sas_add(struct mvs_info *mvi)
498{
499 u8 i;
500 for (i = 0; i < mvi->chip->n_phy; i++) {
501 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
502 mvi->phy[i].dev_sas_addr =
503 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
504 }
505
506 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
507}
508
366static int __devinit mvs_pci_init(struct pci_dev *pdev, 509static int __devinit mvs_pci_init(struct pci_dev *pdev,
367 const struct pci_device_id *ent) 510 const struct pci_device_id *ent)
368{ 511{
369 int rc; 512 unsigned int rc, nhost = 0;
370 struct mvs_info *mvi; 513 struct mvs_info *mvi;
371 irq_handler_t irq_handler = mvs_interrupt; 514 irq_handler_t irq_handler = mvs_interrupt;
515 struct Scsi_Host *shost = NULL;
516 const struct mvs_chip_info *chip;
372 517
518 dev_printk(KERN_INFO, &pdev->dev,
519 "mvsas: driver version %s\n", DRV_VERSION);
373 rc = pci_enable_device(pdev); 520 rc = pci_enable_device(pdev);
374 if (rc) 521 if (rc)
375 return rc; 522 goto err_out_enable;
376 523
377 pci_set_master(pdev); 524 pci_set_master(pdev);
378 525
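
mvs_init_sas_add() above seeds each phy with the fixed WWN 0x5005043011ab0000 and stores it through cpu_to_be64(), so the byte array copied into mvi->sas_addr is in wire (big-endian) order; the HA-level address is then simply phy 0's address. The fragment below shows the same conversion in userspace with htobe64() standing in for cpu_to_be64(); the 8-byte buffer layout is the only assumption.

/* Userspace sketch of storing a SAS WWN in wire (big-endian) byte order. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t wwn = 0x5005043011ab0000ULL;
	uint64_t be  = htobe64(wwn);		/* cpu_to_be64() equivalent */
	uint8_t sas_addr[8];

	memcpy(sas_addr, &be, sizeof(be));	/* as copied into mvi->sas_addr */

	for (int i = 0; i < 8; i++)
		printf("%02x%s", sas_addr[i], i == 7 ? "\n" : ":");
	return 0;
}
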
@@ -384,84 +531,110 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
384 if (rc) 531 if (rc)
385 goto err_out_regions; 532 goto err_out_regions;
386 533
387 mvi = mvs_alloc(pdev, ent); 534 shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
388 if (!mvi) { 535 if (!shost) {
389 rc = -ENOMEM; 536 rc = -ENOMEM;
390 goto err_out_regions; 537 goto err_out_regions;
391 } 538 }
392 539
393 rc = mvs_hw_init(mvi); 540 chip = &mvs_chips[ent->driver_data];
394 if (rc) 541 SHOST_TO_SAS_HA(shost) =
395 goto err_out_mvi; 542 kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
396 543 if (!SHOST_TO_SAS_HA(shost)) {
397#ifndef MVS_DISABLE_MSI 544 kfree(shost);
398 if (!pci_enable_msi(pdev)) { 545 rc = -ENOMEM;
399 u32 tmp; 546 goto err_out_regions;
400 void __iomem *regs = mvi->regs;
401 mvi->flags |= MVF_MSI;
402 irq_handler = mvs_msi_interrupt;
403 tmp = mr32(PCS);
404 mw32(PCS, tmp | PCS_SELF_CLEAR);
405 } 547 }
406#endif
407 548
408 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); 549 rc = mvs_prep_sas_ha_init(shost, chip);
409 if (rc) 550 if (rc) {
410 goto err_out_msi; 551 kfree(shost);
552 rc = -ENOMEM;
553 goto err_out_regions;
554 }
411 555
412 rc = scsi_add_host(mvi->shost, &pdev->dev); 556 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
413 if (rc)
414 goto err_out_irq;
415 557
416 rc = sas_register_ha(&mvi->sas); 558 do {
559 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
560 if (!mvi) {
561 rc = -ENOMEM;
562 goto err_out_regions;
563 }
564
565 mvs_init_sas_add(mvi);
566
567 mvi->instance = nhost;
568 rc = MVS_CHIP_DISP->chip_init(mvi);
569 if (rc) {
570 mvs_free(mvi);
571 goto err_out_regions;
572 }
573 nhost++;
574 } while (nhost < chip->n_host);
575
576 mvs_post_sas_ha_init(shost, chip);
577
578 rc = scsi_add_host(shost, &pdev->dev);
417 if (rc) 579 if (rc)
418 goto err_out_shost; 580 goto err_out_shost;
419 581
420 pci_set_drvdata(pdev, mvi); 582 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
421 583 if (rc)
422 mvs_print_info(mvi); 584 goto err_out_shost;
585 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
586 DRV_NAME, SHOST_TO_SAS_HA(shost));
587 if (rc)
588 goto err_not_sas;
423 589
424 mvs_hba_interrupt_enable(mvi); 590 MVS_CHIP_DISP->interrupt_enable(mvi);
425 591
426 scsi_scan_host(mvi->shost); 592 scsi_scan_host(mvi->shost);
427 593
428 return 0; 594 return 0;
429 595
596err_not_sas:
597 sas_unregister_ha(SHOST_TO_SAS_HA(shost));
430err_out_shost: 598err_out_shost:
431 scsi_remove_host(mvi->shost); 599 scsi_remove_host(mvi->shost);
432err_out_irq:
433 free_irq(pdev->irq, mvi);
434err_out_msi:
435 if (mvi->flags |= MVF_MSI)
436 pci_disable_msi(pdev);
437err_out_mvi:
438 mvs_free(mvi);
439err_out_regions: 600err_out_regions:
440 pci_release_regions(pdev); 601 pci_release_regions(pdev);
441err_out_disable: 602err_out_disable:
442 pci_disable_device(pdev); 603 pci_disable_device(pdev);
604err_out_enable:
443 return rc; 605 return rc;
444} 606}
445 607
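One detail of the reworked probe worth calling out: request_irq() is now given the shared sas_ha_struct as its dev_id cookie, and the remove path below frees the line with that same pointer (free_irq(mvi->irq, sha)), not with a per-core mvs_info. A reduced sketch of the pairing, assuming the driver's usual headers are in scope and that all cores behind the PCI function share the one interrupt line:

/* The dev_id passed to request_irq() must be the exact pointer
 * later handed to free_irq(); here that cookie is the shared sas_ha.
 */
static int example_setup_irq(struct pci_dev *pdev, struct Scsi_Host *shost,
			     irq_handler_t handler)
{
	return request_irq(pdev->irq, handler, IRQF_SHARED, DRV_NAME,
			   SHOST_TO_SAS_HA(shost));
}

static void example_teardown_irq(struct pci_dev *pdev, struct Scsi_Host *shost)
{
	free_irq(pdev->irq, SHOST_TO_SAS_HA(shost));
}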
446static void __devexit mvs_pci_remove(struct pci_dev *pdev) 608static void __devexit mvs_pci_remove(struct pci_dev *pdev)
447{ 609{
448 struct mvs_info *mvi = pci_get_drvdata(pdev); 610 unsigned short core_nr, i = 0;
611 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
612 struct mvs_info *mvi = NULL;
449 613
450 pci_set_drvdata(pdev, NULL); 614 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
615 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
451 616
452 if (mvi) { 617#ifdef MVS_USE_TASKLET
453 sas_unregister_ha(&mvi->sas); 618 tasklet_kill(&mv_tasklet);
454 mvs_hba_interrupt_disable(mvi); 619#endif
455 sas_remove_host(mvi->shost);
456 scsi_remove_host(mvi->shost);
457 620
458 free_irq(pdev->irq, mvi); 621 pci_set_drvdata(pdev, NULL);
459 if (mvi->flags & MVF_MSI) 622 sas_unregister_ha(sha);
460 pci_disable_msi(pdev); 623 sas_remove_host(mvi->shost);
624 scsi_remove_host(mvi->shost);
625
626 MVS_CHIP_DISP->interrupt_disable(mvi);
627 free_irq(mvi->irq, sha);
628 for (i = 0; i < core_nr; i++) {
629 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
461 mvs_free(mvi); 630 mvs_free(mvi);
462 pci_release_regions(pdev);
463 } 631 }
632 kfree(sha->sas_phy);
633 kfree(sha->sas_port);
634 kfree(sha);
635 pci_release_regions(pdev);
464 pci_disable_device(pdev); 636 pci_disable_device(pdev);
637 return;
465} 638}
466 639
467static struct pci_device_id __devinitdata mvs_pci_table[] = { 640static struct pci_device_id __devinitdata mvs_pci_table[] = {
@@ -474,10 +647,12 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
474 .subdevice = 0x6480, 647 .subdevice = 0x6480,
475 .class = 0, 648 .class = 0,
476 .class_mask = 0, 649 .class_mask = 0,
477 .driver_data = chip_6480, 650 .driver_data = chip_6485,
478 }, 651 },
479 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, 652 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
480 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, 653 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
654 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
655 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
481 656
482 { } /* terminate list */ 657 { } /* terminate list */
483}; 658};
@@ -489,15 +664,17 @@ static struct pci_driver mvs_pci_driver = {
489 .remove = __devexit_p(mvs_pci_remove), 664 .remove = __devexit_p(mvs_pci_remove),
490}; 665};
491 666
667/* task handler */
668struct task_struct *mvs_th;
492static int __init mvs_init(void) 669static int __init mvs_init(void)
493{ 670{
494 int rc; 671 int rc;
495
496 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); 672 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
497 if (!mvs_stt) 673 if (!mvs_stt)
498 return -ENOMEM; 674 return -ENOMEM;
499 675
500 rc = pci_register_driver(&mvs_pci_driver); 676 rc = pci_register_driver(&mvs_pci_driver);
677
501 if (rc) 678 if (rc)
502 goto err_out; 679 goto err_out;
503 680
@@ -521,4 +698,6 @@ MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
521MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); 698MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
522MODULE_VERSION(DRV_VERSION); 699MODULE_VERSION(DRV_VERSION);
523MODULE_LICENSE("GPL"); 700MODULE_LICENSE("GPL");
701#ifdef CONFIG_PCI
524MODULE_DEVICE_TABLE(pci, mvs_pci_table); 702MODULE_DEVICE_TABLE(pci, mvs_pci_table);
703#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6a583c19c6e5..d79ac179eaff 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1,97 +1,65 @@
1/* 1/*
2 mv_sas.c - Marvell 88SE6440 SAS/SATA support 2 * Marvell 88SE64xx/88SE94xx main function
3 3 *
4 Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 6 *
7 This program is free software; you can redistribute it and/or 7 * This file is licensed under GPLv2.
8 modify it under the terms of the GNU General Public License as 8 *
9 published by the Free Software Foundation; either version 2, 9 * This program is free software; you can redistribute it and/or
10 or (at your option) any later version. 10 * modify it under the terms of the GNU General Public License as
11 11 * published by the Free Software Foundation; version 2 of the
12 This program is distributed in the hope that it will be useful, 12 * License.
13 but WITHOUT ANY WARRANTY; without even the implied warranty 13 *
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 14 * This program is distributed in the hope that it will be useful,
15 See the GNU General Public License for more details. 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 You should have received a copy of the GNU General Public 17 * General Public License for more details.
18 License along with this program; see the file COPYING. If not, 18 *
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge, 19 * You should have received a copy of the GNU General Public License
20 MA 02139, USA. 20 * along with this program; if not, write to the Free Software
21 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 --------------------------------------------------------------- 22 * USA
23 23*/
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30 24
31#include "mv_sas.h" 25#include "mv_sas.h"
32#include "mv_64xx.h"
33#include "mv_chips.h"
34
35/* offset for D2H FIS in the Received FIS List Structure */
36#define SATA_RECEIVED_D2H_FIS(reg_set) \
37 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
38#define SATA_RECEIVED_PIO_FIS(reg_set) \
39 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
40#define UNASSOC_D2H_FIS(id) \
41 ((void *) mvi->rx_fis + 0x100 * id)
42
43struct mvs_task_exec_info {
44 struct sas_task *task;
45 struct mvs_cmd_hdr *hdr;
46 struct mvs_port *port;
47 u32 tag;
48 int n_elem;
49};
50
51static void mvs_release_task(struct mvs_info *mvi, int phy_no);
52static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
53static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
54 int get_st);
55static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
56static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
57 u32 slot_idx);
58 26
59static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) 27static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
60{ 28{
61 if (task->lldd_task) { 29 if (task->lldd_task) {
62 struct mvs_slot_info *slot; 30 struct mvs_slot_info *slot;
63 slot = (struct mvs_slot_info *) task->lldd_task; 31 slot = (struct mvs_slot_info *) task->lldd_task;
64 *tag = slot - mvi->slot_info; 32 *tag = slot->slot_tag;
65 return 1; 33 return 1;
66 } 34 }
67 return 0; 35 return 0;
68} 36}
69 37
70static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 38void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
71{ 39{
72 void *bitmap = (void *) &mvi->tags; 40 void *bitmap = (void *) &mvi->tags;
73 clear_bit(tag, bitmap); 41 clear_bit(tag, bitmap);
74} 42}
75 43
76static void mvs_tag_free(struct mvs_info *mvi, u32 tag) 44void mvs_tag_free(struct mvs_info *mvi, u32 tag)
77{ 45{
78 mvs_tag_clear(mvi, tag); 46 mvs_tag_clear(mvi, tag);
79} 47}
80 48
81static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 49void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
82{ 50{
83 void *bitmap = (void *) &mvi->tags; 51 void *bitmap = (void *) &mvi->tags;
84 set_bit(tag, bitmap); 52 set_bit(tag, bitmap);
85} 53}
86 54
87static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 55inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
88{ 56{
89 unsigned int index, tag; 57 unsigned int index, tag;
90 void *bitmap = (void *) &mvi->tags; 58 void *bitmap = (void *) &mvi->tags;
91 59
92 index = find_first_zero_bit(bitmap, MVS_SLOTS); 60 index = find_first_zero_bit(bitmap, mvi->tags_num);
93 tag = index; 61 tag = index;
94 if (tag >= MVS_SLOTS) 62 if (tag >= mvi->tags_num)
95 return -SAS_QUEUE_FULL; 63 return -SAS_QUEUE_FULL;
96 mvs_tag_set(mvi, tag); 64 mvs_tag_set(mvi, tag);
97 *tag_out = tag; 65 *tag_out = tag;
@@ -101,11 +69,11 @@ static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
101void mvs_tag_init(struct mvs_info *mvi) 69void mvs_tag_init(struct mvs_info *mvi)
102{ 70{
103 int i; 71 int i;
104 for (i = 0; i < MVS_SLOTS; ++i) 72 for (i = 0; i < mvi->tags_num; ++i)
105 mvs_tag_clear(mvi, i); 73 mvs_tag_clear(mvi, i);
106} 74}
107 75
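The helpers above implement the slot-tag allocator as a plain bitmap whose depth is now the per-chip mvi->tags_num instead of the fixed MVS_SLOTS. A minimal usage sketch (illustrative function, error handling trimmed, mv_sas.h assumed in scope):

/* Allocate a delivery-queue tag, build a command in the matching
 * slot, then return the tag to the bitmap.
 */
static int example_with_tag(struct mvs_info *mvi)
{
	u32 tag;
	int rc = mvs_tag_alloc(mvi, &tag);

	if (rc)
		return rc;	/* -SAS_QUEUE_FULL: every tag is busy */

	/* ... fill mvi->slot_info[tag] and queue the command ... */

	mvs_tag_free(mvi, tag);
	return 0;
}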
108static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) 76void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
109{ 77{
110 u32 i; 78 u32 i;
111 u32 run; 79 u32 run;
@@ -113,7 +81,7 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
113 81
114 offset = 0; 82 offset = 0;
115 while (size) { 83 while (size) {
116 printk("%08X : ", baseaddr + offset); 84 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
117 if (size >= 16) 85 if (size >= 16)
118 run = 16; 86 run = 16;
119 else 87 else
@@ -121,31 +89,31 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
121 size -= run; 89 size -= run;
122 for (i = 0; i < 16; i++) { 90 for (i = 0; i < 16; i++) {
123 if (i < run) 91 if (i < run)
124 printk("%02X ", (u32)data[i]); 92 printk(KERN_DEBUG"%02X ", (u32)data[i]);
125 else 93 else
126 printk(" "); 94 printk(KERN_DEBUG" ");
127 } 95 }
128 printk(": "); 96 printk(KERN_DEBUG": ");
129 for (i = 0; i < run; i++) 97 for (i = 0; i < run; i++)
130 printk("%c", isalnum(data[i]) ? data[i] : '.'); 98 printk(KERN_DEBUG"%c",
131 printk("\n"); 99 isalnum(data[i]) ? data[i] : '.');
100 printk(KERN_DEBUG"\n");
132 data = &data[16]; 101 data = &data[16];
133 offset += run; 102 offset += run;
134 } 103 }
135 printk("\n"); 104 printk(KERN_DEBUG"\n");
136} 105}
137 106
138#if _MV_DUMP 107#if (_MV_DUMP > 1)
139static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, 108static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
140 enum sas_protocol proto) 109 enum sas_protocol proto)
141{ 110{
142 u32 offset; 111 u32 offset;
143 struct pci_dev *pdev = mvi->pdev;
144 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 112 struct mvs_slot_info *slot = &mvi->slot_info[tag];
145 113
146 offset = slot->cmd_size + MVS_OAF_SZ + 114 offset = slot->cmd_size + MVS_OAF_SZ +
147 sizeof(struct mvs_prd) * slot->n_elem; 115 MVS_CHIP_DISP->prd_size() * slot->n_elem;
148 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", 116 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
149 tag); 117 tag);
150 mvs_hexdump(32, (u8 *) slot->response, 118 mvs_hexdump(32, (u8 *) slot->response,
151 (u32) slot->buf_dma + offset); 119 (u32) slot->buf_dma + offset);
@@ -155,47 +123,45 @@ static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
155static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, 123static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
156 enum sas_protocol proto) 124 enum sas_protocol proto)
157{ 125{
158#if _MV_DUMP 126#if (_MV_DUMP > 1)
159 u32 sz, w_ptr; 127 u32 sz, w_ptr;
160 u64 addr; 128 u64 addr;
161 void __iomem *regs = mvi->regs;
162 struct pci_dev *pdev = mvi->pdev;
163 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 129 struct mvs_slot_info *slot = &mvi->slot_info[tag];
164 130
165 /*Delivery Queue */ 131 /*Delivery Queue */
166 sz = mr32(TX_CFG) & TX_RING_SZ_MASK; 132 sz = MVS_CHIP_SLOT_SZ;
167 w_ptr = slot->tx; 133 w_ptr = slot->tx;
168 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); 134 addr = mvi->tx_dma;
169 dev_printk(KERN_DEBUG, &pdev->dev, 135 dev_printk(KERN_DEBUG, mvi->dev,
170 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); 136 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
171 dev_printk(KERN_DEBUG, &pdev->dev, 137 dev_printk(KERN_DEBUG, mvi->dev,
172 "Delivery Queue Base Address=0x%llX (PA)" 138 "Delivery Queue Base Address=0x%llX (PA)"
173 "(tx_dma=0x%llX), Entry=%04d\n", 139 "(tx_dma=0x%llX), Entry=%04d\n",
174 addr, mvi->tx_dma, w_ptr); 140 addr, (unsigned long long)mvi->tx_dma, w_ptr);
175 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), 141 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
176 (u32) mvi->tx_dma + sizeof(u32) * w_ptr); 142 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
177 /*Command List */ 143 /*Command List */
178 addr = mvi->slot_dma; 144 addr = mvi->slot_dma;
179 dev_printk(KERN_DEBUG, &pdev->dev, 145 dev_printk(KERN_DEBUG, mvi->dev,
180 "Command List Base Address=0x%llX (PA)" 146 "Command List Base Address=0x%llX (PA)"
181 "(slot_dma=0x%llX), Header=%03d\n", 147 "(slot_dma=0x%llX), Header=%03d\n",
182 addr, slot->buf_dma, tag); 148 addr, (unsigned long long)slot->buf_dma, tag);
183 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); 149 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
184 /*mvs_cmd_hdr */ 150 /*mvs_cmd_hdr */
185 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), 151 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
186 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); 152 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
187 /*1.command table area */ 153 /*1.command table area */
188 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); 154 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
189 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); 155 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
190 /*2.open address frame area */ 156 /*2.open address frame area */
191 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); 157 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
192 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, 158 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
193 (u32) slot->buf_dma + slot->cmd_size); 159 (u32) slot->buf_dma + slot->cmd_size);
194 /*3.status buffer */ 160 /*3.status buffer */
195 mvs_hba_sb_dump(mvi, tag, proto); 161 mvs_hba_sb_dump(mvi, tag, proto);
196 /*4.PRD table */ 162 /*4.PRD table */
197 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); 163 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
198 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, 164 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
199 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, 165 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
200 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); 166 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
201#endif 167#endif
@@ -206,15 +172,14 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi)
206#if (_MV_DUMP > 2) 172#if (_MV_DUMP > 2)
207 u64 addr; 173 u64 addr;
208 void __iomem *regs = mvi->regs; 174 void __iomem *regs = mvi->regs;
209 struct pci_dev *pdev = mvi->pdev;
210 u32 entry = mvi->rx_cons + 1; 175 u32 entry = mvi->rx_cons + 1;
211 u32 rx_desc = le32_to_cpu(mvi->rx[entry]); 176 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
212 177
213 /*Completion Queue */ 178 /*Completion Queue */
214 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); 179 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
215 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", 180 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
216 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); 181 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
217 dev_printk(KERN_DEBUG, &pdev->dev, 182 dev_printk(KERN_DEBUG, mvi->dev,
218 "Completion List Base Address=0x%llX (PA), " 183 "Completion List Base Address=0x%llX (PA), "
219 "CQ_Entry=%04d, CQ_WP=0x%08X\n", 184 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
220 addr, entry - 1, mvi->rx[0]); 185 addr, entry - 1, mvi->rx[0]);
@@ -223,62 +188,174 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi)
223#endif 188#endif
224} 189}
225 190
226/* FIXME: locking? */ 191void mvs_get_sas_addr(void *buf, u32 buflen)
227int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata)
228{ 192{
229 struct mvs_info *mvi = sas_phy->ha->lldd_ha; 193 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
230 int rc = 0, phy_id = sas_phy->id; 194}
231 u32 tmp;
232 195
233 tmp = mvs_read_phy_ctl(mvi, phy_id); 196struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
197{
198 unsigned long i = 0, j = 0, hi = 0;
199 struct sas_ha_struct *sha = dev->port->ha;
200 struct mvs_info *mvi = NULL;
201 struct asd_sas_phy *phy;
202
203 while (sha->sas_port[i]) {
204 if (sha->sas_port[i] == dev->port) {
205 phy = container_of(sha->sas_port[i]->phy_list.next,
206 struct asd_sas_phy, port_phy_el);
207 j = 0;
208 while (sha->sas_phy[j]) {
209 if (sha->sas_phy[j] == phy)
210 break;
211 j++;
212 }
213 break;
214 }
215 i++;
216 }
217 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
218 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
234 219
235 switch (func) { 220 return mvi;
236 case PHY_FUNC_SET_LINK_RATE:{
237 struct sas_phy_linkrates *rates = funcdata;
238 u32 lrmin = 0, lrmax = 0;
239 221
240 lrmin = (rates->minimum_linkrate << 8); 222}
241 lrmax = (rates->maximum_linkrate << 12);
242 223
243 if (lrmin) { 224/* FIXME */
244 tmp &= ~(0xf << 8); 225int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
245 tmp |= lrmin; 226{
246 } 227 unsigned long i = 0, j = 0, n = 0, num = 0;
247 if (lrmax) { 228 struct mvs_info *mvi = mvs_find_dev_mvi(dev);
248 tmp &= ~(0xf << 12); 229 struct sas_ha_struct *sha = dev->port->ha;
249 tmp |= lrmax; 230
231 while (sha->sas_port[i]) {
232 if (sha->sas_port[i] == dev->port) {
233 struct asd_sas_phy *phy;
234 list_for_each_entry(phy,
235 &sha->sas_port[i]->phy_list, port_phy_el) {
236 j = 0;
237 while (sha->sas_phy[j]) {
238 if (sha->sas_phy[j] == phy)
239 break;
240 j++;
241 }
242 phyno[n] = (j >= mvi->chip->n_phy) ?
243 (j - mvi->chip->n_phy) : j;
244 num++;
245 n++;
250 } 246 }
251 mvs_write_phy_ctl(mvi, phy_id, tmp);
252 break; 247 break;
253 } 248 }
249 i++;
250 }
251 return num;
252}
253
254static inline void mvs_free_reg_set(struct mvs_info *mvi,
255 struct mvs_device *dev)
256{
257 if (!dev) {
258 mv_printk("device has been freed.\n");
259 return;
260 }
261 if (dev->runing_req != 0)
262 return;
263 if (dev->taskfileset == MVS_ID_NOT_MAPPED)
264 return;
265 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
266}
267
268static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
269 struct mvs_device *dev)
270{
271 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
272 return 0;
273 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
274}
275
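mvs_assign_reg_set()/mvs_free_reg_set() route the per-device hardware register set through the chip dispatch table: the set is handed out lazily on first use and, judging by the runing_req test, only given back once the device has no outstanding requests. A usage-shaped sketch (illustrative; MVS_ID_NOT_MAPPED is taken, as in mvs_task_prep_ata() below, to mean no set could be assigned):

static int example_claim_reg_set(struct mvs_info *mvi,
				 struct mvs_device *mvi_dev)
{
	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED)
		return -EBUSY;	/* all register sets in use, retry later */

	/* ... mvi_dev->taskfileset is now valid for TXQ_SRS_SHIFT ... */

	mvs_free_reg_set(mvi, mvi_dev);	/* no-op while runing_req != 0 */
	return 0;
}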
276void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
277{
278 u32 no;
279 for_each_phy(phy_mask, phy_mask, no) {
280 if (!(phy_mask & 1))
281 continue;
282 MVS_CHIP_DISP->phy_reset(mvi, no, hard);
283 }
284}
285
286/* FIXME: locking? */
287int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
288 void *funcdata)
289{
290 int rc = 0, phy_id = sas_phy->id;
291 u32 tmp, i = 0, hi;
292 struct sas_ha_struct *sha = sas_phy->ha;
293 struct mvs_info *mvi = NULL;
294
295 while (sha->sas_phy[i]) {
296 if (sha->sas_phy[i] == sas_phy)
297 break;
298 i++;
299 }
300 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
301 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
302
303 switch (func) {
304 case PHY_FUNC_SET_LINK_RATE:
305 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
306 break;
254 307
255 case PHY_FUNC_HARD_RESET: 308 case PHY_FUNC_HARD_RESET:
309 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
256 if (tmp & PHY_RST_HARD) 310 if (tmp & PHY_RST_HARD)
257 break; 311 break;
258 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); 312 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
259 break; 313 break;
260 314
261 case PHY_FUNC_LINK_RESET: 315 case PHY_FUNC_LINK_RESET:
262 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); 316 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
317 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
263 break; 318 break;
264 319
265 case PHY_FUNC_DISABLE: 320 case PHY_FUNC_DISABLE:
321 MVS_CHIP_DISP->phy_disable(mvi, phy_id);
322 break;
266 case PHY_FUNC_RELEASE_SPINUP_HOLD: 323 case PHY_FUNC_RELEASE_SPINUP_HOLD:
267 default: 324 default:
268 rc = -EOPNOTSUPP; 325 rc = -EOPNOTSUPP;
269 } 326 }
270 327 msleep(200);
271 return rc; 328 return rc;
272} 329}
273 330
331void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
332 u32 off_lo, u32 off_hi, u64 sas_addr)
333{
334 u32 lo = (u32)sas_addr;
335 u32 hi = (u32)(sas_addr>>32);
336
337 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
338 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
339 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
340 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
341}
342
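mvs_set_sas_addr() writes a 64-bit address as two 32-bit halves through the indirect port-configuration registers; the offsets are parameters, presumably because the 64xx and 94xx register maps place them differently. A hypothetical call site (PHYR_ADDR_LO/PHYR_ADDR_HI are the offsets the removed 64xx code read the phy address from; the call is purely illustrative):

static void example_program_wwn(struct mvs_info *mvi, int phy_id, u64 wwn)
{
	mvs_set_sas_addr(mvi, phy_id, PHYR_ADDR_LO, PHYR_ADDR_HI, wwn);
}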
274static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) 343static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
275{ 344{
276 struct mvs_phy *phy = &mvi->phy[i]; 345 struct mvs_phy *phy = &mvi->phy[i];
277 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; 346 struct asd_sas_phy *sas_phy = &phy->sas_phy;
278 347 struct sas_ha_struct *sas_ha;
279 if (!phy->phy_attached) 348 if (!phy->phy_attached)
280 return; 349 return;
281 350
351 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
352 && phy->phy_type & PORT_TYPE_SAS) {
353 return;
354 }
355
356 sas_ha = mvi->sas;
357 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
358
282 if (sas_phy->phy) { 359 if (sas_phy->phy) {
283 struct sas_phy *sphy = sas_phy->phy; 360 struct sas_phy *sphy = sas_phy->phy;
284 361
@@ -286,7 +363,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
286 sphy->minimum_linkrate = phy->minimum_linkrate; 363 sphy->minimum_linkrate = phy->minimum_linkrate;
287 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 364 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
288 sphy->maximum_linkrate = phy->maximum_linkrate; 365 sphy->maximum_linkrate = phy->maximum_linkrate;
289 sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; 366 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
290 } 367 }
291 368
292 if (phy->phy_type & PORT_TYPE_SAS) { 369 if (phy->phy_type & PORT_TYPE_SAS) {
@@ -297,13 +374,31 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
297 id->initiator_bits = SAS_PROTOCOL_ALL; 374 id->initiator_bits = SAS_PROTOCOL_ALL;
298 id->target_bits = phy->identify.target_port_protocols; 375 id->target_bits = phy->identify.target_port_protocols;
299 } else if (phy->phy_type & PORT_TYPE_SATA) { 376 } else if (phy->phy_type & PORT_TYPE_SATA) {
300 /* TODO */ 377 /*Nothing*/
301 } 378 }
302 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; 379 mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
303 mvi->sas.notify_port_event(mvi->sas.sas_phy[i], 380
381 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
382
383 mvi->sas->notify_port_event(sas_phy,
304 PORTE_BYTES_DMAED); 384 PORTE_BYTES_DMAED);
305} 385}
306 386
387int mvs_slave_alloc(struct scsi_device *scsi_dev)
388{
389 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
390 if (dev_is_sata(dev)) {
391 /* We don't need to rescan targets
392 * if a REPORT_LUNS request fails
393 */
394 if (scsi_dev->lun > 0)
395 return -ENXIO;
396 scsi_dev->tagged_supported = 1;
397 }
398
399 return sas_slave_alloc(scsi_dev);
400}
401
307int mvs_slave_configure(struct scsi_device *sdev) 402int mvs_slave_configure(struct scsi_device *sdev)
308{ 403{
309 struct domain_device *dev = sdev_to_domain_dev(sdev); 404 struct domain_device *dev = sdev_to_domain_dev(sdev);
@@ -311,25 +406,31 @@ int mvs_slave_configure(struct scsi_device *sdev)
311 406
312 if (ret) 407 if (ret)
313 return ret; 408 return ret;
314
315 if (dev_is_sata(dev)) { 409 if (dev_is_sata(dev)) {
316 /* struct ata_port *ap = dev->sata_dev.ap; */ 410 /* may set PIO mode */
317 /* struct ata_device *adev = ap->link.device; */ 411 #if MV_DISABLE_NCQ
318 412 struct ata_port *ap = dev->sata_dev.ap;
319 /* clamp at no NCQ for the time being */ 413 struct ata_device *adev = ap->link.device;
320 /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ 414 adev->flags |= ATA_DFLAG_NCQ_OFF;
321 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); 415 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
416 #endif
322 } 417 }
323 return 0; 418 return 0;
324} 419}
325 420
326void mvs_scan_start(struct Scsi_Host *shost) 421void mvs_scan_start(struct Scsi_Host *shost)
327{ 422{
328 int i; 423 int i, j;
329 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; 424 unsigned short core_nr;
425 struct mvs_info *mvi;
426 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
330 427
331 for (i = 0; i < mvi->chip->n_phy; ++i) { 428 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
332 mvs_bytes_dmaed(mvi, i); 429
430 for (j = 0; j < core_nr; j++) {
431 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
432 for (i = 0; i < mvi->chip->n_phy; ++i)
433 mvs_bytes_dmaed(mvi, i);
333 } 434 }
334} 435}
335 436
@@ -350,15 +451,15 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
350 int elem, rc, i; 451 int elem, rc, i;
351 struct sas_task *task = tei->task; 452 struct sas_task *task = tei->task;
352 struct mvs_cmd_hdr *hdr = tei->hdr; 453 struct mvs_cmd_hdr *hdr = tei->hdr;
454 struct domain_device *dev = task->dev;
455 struct asd_sas_port *sas_port = dev->port;
353 struct scatterlist *sg_req, *sg_resp; 456 struct scatterlist *sg_req, *sg_resp;
354 u32 req_len, resp_len, tag = tei->tag; 457 u32 req_len, resp_len, tag = tei->tag;
355 void *buf_tmp; 458 void *buf_tmp;
356 u8 *buf_oaf; 459 u8 *buf_oaf;
357 dma_addr_t buf_tmp_dma; 460 dma_addr_t buf_tmp_dma;
358 struct mvs_prd *buf_prd; 461 void *buf_prd;
359 struct scatterlist *sg;
360 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 462 struct mvs_slot_info *slot = &mvi->slot_info[tag];
361 struct asd_sas_port *sas_port = task->dev->port;
362 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 463 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
363#if _MV_DUMP 464#if _MV_DUMP
364 u8 *buf_cmd; 465 u8 *buf_cmd;
@@ -368,18 +469,18 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
368 * DMA-map SMP request, response buffers 469 * DMA-map SMP request, response buffers
369 */ 470 */
370 sg_req = &task->smp_task.smp_req; 471 sg_req = &task->smp_task.smp_req;
371 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); 472 elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
372 if (!elem) 473 if (!elem)
373 return -ENOMEM; 474 return -ENOMEM;
374 req_len = sg_dma_len(sg_req); 475 req_len = sg_dma_len(sg_req);
375 476
376 sg_resp = &task->smp_task.smp_resp; 477 sg_resp = &task->smp_task.smp_resp;
377 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); 478 elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
378 if (!elem) { 479 if (!elem) {
379 rc = -ENOMEM; 480 rc = -ENOMEM;
380 goto err_out; 481 goto err_out;
381 } 482 }
382 resp_len = sg_dma_len(sg_resp); 483 resp_len = SB_RFB_MAX;
383 484
384 /* must be in dwords */ 485 /* must be in dwords */
385 if ((req_len & 0x3) || (resp_len & 0x3)) { 486 if ((req_len & 0x3) || (resp_len & 0x3)) {
@@ -391,7 +492,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
391 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs 492 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
392 */ 493 */
393 494
394 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ 495 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
395 buf_tmp = slot->buf; 496 buf_tmp = slot->buf;
396 buf_tmp_dma = slot->buf_dma; 497 buf_tmp_dma = slot->buf_dma;
397 498
@@ -412,20 +513,22 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
412 buf_tmp += MVS_OAF_SZ; 513 buf_tmp += MVS_OAF_SZ;
413 buf_tmp_dma += MVS_OAF_SZ; 514 buf_tmp_dma += MVS_OAF_SZ;
414 515
415 /* region 3: PRD table ********************************************* */ 516 /* region 3: PRD table *********************************** */
416 buf_prd = buf_tmp; 517 buf_prd = buf_tmp;
417 if (tei->n_elem) 518 if (tei->n_elem)
418 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); 519 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
419 else 520 else
420 hdr->prd_tbl = 0; 521 hdr->prd_tbl = 0;
421 522
422 i = sizeof(struct mvs_prd) * tei->n_elem; 523 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
423 buf_tmp += i; 524 buf_tmp += i;
424 buf_tmp_dma += i; 525 buf_tmp_dma += i;
425 526
426 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 527 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
427 slot->response = buf_tmp; 528 slot->response = buf_tmp;
428 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 529 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
530 if (mvi->flags & MVF_FLAG_SOC)
531 hdr->reserved[0] = 0;
429 532
430 /* 533 /*
431 * Fill in TX ring and command slot header 534 * Fill in TX ring and command slot header
@@ -441,17 +544,14 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
441 hdr->data_len = 0; 544 hdr->data_len = 0;
442 545
443 /* generate open address frame hdr (first 12 bytes) */ 546 /* generate open address frame hdr (first 12 bytes) */
444 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ 547 /* initiator, SMP, ftype 1h */
445 buf_oaf[1] = task->dev->linkrate & 0xf; 548 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
549 buf_oaf[1] = dev->linkrate & 0xf;
446 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ 550 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
447 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 551 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
448 552
449 /* fill in PRD (scatter/gather) table, if any */ 553 /* fill in PRD (scatter/gather) table, if any */
450 for_each_sg(task->scatter, sg, tei->n_elem, i) { 554 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
451 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
452 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
453 buf_prd++;
454 }
455 555
456#if _MV_DUMP 556#if _MV_DUMP
457 /* copy cmd table */ 557 /* copy cmd table */
@@ -462,10 +562,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
462 return 0; 562 return 0;
463 563
464err_out_2: 564err_out_2:
465 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, 565 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
466 PCI_DMA_FROMDEVICE); 566 PCI_DMA_FROMDEVICE);
467err_out: 567err_out:
468 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, 568 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
469 PCI_DMA_TODEVICE); 569 PCI_DMA_TODEVICE);
470 return rc; 570 return rc;
471} 571}
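mvs_task_prep_smp() above, like the ATA and SSP prep routines that follow, carves the fixed MVS_SLOT_BUF_SZ per-slot DMA buffer into the same four regions; only the PRD entry size now varies per chip via MVS_CHIP_DISP->prd_size(). Written out as offsets from slot->buf / slot->buf_dma:

/*
 * region 1: command table       at +0
 *                                  (MVS_SSP_CMD_SZ or MVS_ATA_CMD_SZ bytes)
 * region 2: open address frame  at +cmd_size             (MVS_OAF_SZ bytes)
 * region 3: PRD table           at +cmd_size + MVS_OAF_SZ
 *                                  (prd_size() * n_elem entries here; the
 *                                   ATA path reserves prd_size() * prd_count())
 * region 4: status buffer       at +cmd_size + MVS_OAF_SZ + prd_bytes
 *                                  (whatever space remains in the slot)
 */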
@@ -490,30 +590,41 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
490{ 590{
491 struct sas_task *task = tei->task; 591 struct sas_task *task = tei->task;
492 struct domain_device *dev = task->dev; 592 struct domain_device *dev = task->dev;
593 struct mvs_device *mvi_dev =
594 (struct mvs_device *)dev->lldd_dev;
493 struct mvs_cmd_hdr *hdr = tei->hdr; 595 struct mvs_cmd_hdr *hdr = tei->hdr;
494 struct asd_sas_port *sas_port = dev->port; 596 struct asd_sas_port *sas_port = dev->port;
495 struct mvs_slot_info *slot; 597 struct mvs_slot_info *slot;
496 struct scatterlist *sg; 598 void *buf_prd;
497 struct mvs_prd *buf_prd; 599 u32 tag = tei->tag, hdr_tag;
498 struct mvs_port *port = tei->port; 600 u32 flags, del_q;
499 u32 tag = tei->tag;
500 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
501 void *buf_tmp; 601 void *buf_tmp;
502 u8 *buf_cmd, *buf_oaf; 602 u8 *buf_cmd, *buf_oaf;
503 dma_addr_t buf_tmp_dma; 603 dma_addr_t buf_tmp_dma;
504 u32 i, req_len, resp_len; 604 u32 i, req_len, resp_len;
505 const u32 max_resp_len = SB_RFB_MAX; 605 const u32 max_resp_len = SB_RFB_MAX;
506 606
507 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) 607 if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
608 mv_dprintk("Not enough register sets for dev %d.\n",
609 mvi_dev->device_id);
508 return -EBUSY; 610 return -EBUSY;
509 611 }
510 slot = &mvi->slot_info[tag]; 612 slot = &mvi->slot_info[tag];
511 slot->tx = mvi->tx_prod; 613 slot->tx = mvi->tx_prod;
512 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | 614 del_q = TXQ_MODE_I | tag |
513 (TXQ_CMD_STP << TXQ_CMD_SHIFT) | 615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
514 (sas_port->phy_mask << TXQ_PHY_SHIFT) | 616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
515 (port->taskfileset << TXQ_SRS_SHIFT)); 617 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
516 618 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
619
620#ifndef DISABLE_HOTPLUG_DMA_FIX
621 if (task->data_dir == DMA_FROM_DEVICE)
622 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
623 else
624 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
625#else
626 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
627#endif
517 if (task->ata_task.use_ncq) 628 if (task->ata_task.use_ncq)
518 flags |= MCH_FPDMA; 629 flags |= MCH_FPDMA;
519 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { 630 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -526,10 +637,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
526 hdr->flags = cpu_to_le32(flags); 637 hdr->flags = cpu_to_le32(flags);
527 638
528 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ 639 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
529 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) 640 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
530 task->ata_task.fis.sector_count |= hdr->tags << 3; 641 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
531 else 642 else
532 hdr->tags = cpu_to_le32(tag); 643 hdr_tag = tag;
644
645 hdr->tags = cpu_to_le32(hdr_tag);
646
533 hdr->data_len = cpu_to_le32(task->total_xfer_len); 647 hdr->data_len = cpu_to_le32(task->total_xfer_len);
534 648
535 /* 649 /*
@@ -558,12 +672,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
558 672
559 /* region 3: PRD table ********************************************* */ 673 /* region 3: PRD table ********************************************* */
560 buf_prd = buf_tmp; 674 buf_prd = buf_tmp;
675
561 if (tei->n_elem) 676 if (tei->n_elem)
562 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); 677 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
563 else 678 else
564 hdr->prd_tbl = 0; 679 hdr->prd_tbl = 0;
680 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
565 681
566 i = sizeof(struct mvs_prd) * tei->n_elem;
567 buf_tmp += i; 682 buf_tmp += i;
568 buf_tmp_dma += i; 683 buf_tmp_dma += i;
569 684
@@ -573,6 +688,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
573 */ 688 */
574 slot->response = buf_tmp; 689 slot->response = buf_tmp;
575 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 690 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
691 if (mvi->flags & MVF_FLAG_SOC)
692 hdr->reserved[0] = 0;
576 693
577 req_len = sizeof(struct host_to_dev_fis); 694 req_len = sizeof(struct host_to_dev_fis);
578 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - 695 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
@@ -582,7 +699,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
582 resp_len = min(resp_len, max_resp_len); 699 resp_len = min(resp_len, max_resp_len);
583 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 700 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
584 701
585 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 702 if (likely(!task->ata_task.device_control_reg_update))
703 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
586 /* fill in command FIS and ATAPI CDB */ 704 /* fill in command FIS and ATAPI CDB */
587 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 705 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
588 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) 706 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
@@ -590,30 +708,35 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
590 task->ata_task.atapi_packet, 16); 708 task->ata_task.atapi_packet, 16);
591 709
592 /* generate open address frame hdr (first 12 bytes) */ 710 /* generate open address frame hdr (first 12 bytes) */
593 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ 711 /* initiator, STP, ftype 1h */
594 buf_oaf[1] = task->dev->linkrate & 0xf; 712 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
595 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); 713 buf_oaf[1] = dev->linkrate & 0xf;
596 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 714 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
715 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
597 716
598 /* fill in PRD (scatter/gather) table, if any */ 717 /* fill in PRD (scatter/gather) table, if any */
599 for_each_sg(task->scatter, sg, tei->n_elem, i) { 718 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
600 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 719#ifndef DISABLE_HOTPLUG_DMA_FIX
601 buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 720 if (task->data_dir == DMA_FROM_DEVICE)
602 buf_prd++; 721 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
603 } 722 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
604 723#endif
605 return 0; 724 return 0;
606} 725}
607 726
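Two details of mvs_task_prep_ata() above: the NCQ tag is folded straight into the command FIS, since READ/WRITE FPDMA QUEUED carry the tag in bits 7:3 of the sector count register, and the DISABLE_HOTPLUG_DMA_FIX branches appear to oversize the PRD count on reads so that dma_fix() can point the spare entries at the bulk_buffer "trash bucket", presumably giving the DMA engine a safe landing zone if the device disappears mid-transfer. A worked example of the tag placement:

/* Worked example, hdr_tag == 5:
 *   fis.sector_count |= 5 << 3;   bits 7:3 now hold the NCQ tag (0x28)
 *   hdr->tags = cpu_to_le32(5);   slot bookkeeping keeps the raw tag
 */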
608static int mvs_task_prep_ssp(struct mvs_info *mvi, 727static int mvs_task_prep_ssp(struct mvs_info *mvi,
609 struct mvs_task_exec_info *tei) 728 struct mvs_task_exec_info *tei, int is_tmf,
729 struct mvs_tmf_task *tmf)
610{ 730{
611 struct sas_task *task = tei->task; 731 struct sas_task *task = tei->task;
612 struct mvs_cmd_hdr *hdr = tei->hdr; 732 struct mvs_cmd_hdr *hdr = tei->hdr;
613 struct mvs_port *port = tei->port; 733 struct mvs_port *port = tei->port;
734 struct domain_device *dev = task->dev;
735 struct mvs_device *mvi_dev =
736 (struct mvs_device *)dev->lldd_dev;
737 struct asd_sas_port *sas_port = dev->port;
614 struct mvs_slot_info *slot; 738 struct mvs_slot_info *slot;
615 struct scatterlist *sg; 739 void *buf_prd;
616 struct mvs_prd *buf_prd;
617 struct ssp_frame_hdr *ssp_hdr; 740 struct ssp_frame_hdr *ssp_hdr;
618 void *buf_tmp; 741 void *buf_tmp;
619 u8 *buf_cmd, *buf_oaf, fburst = 0; 742 u8 *buf_cmd, *buf_oaf, fburst = 0;
@@ -621,12 +744,13 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
621 u32 flags; 744 u32 flags;
622 u32 resp_len, req_len, i, tag = tei->tag; 745 u32 resp_len, req_len, i, tag = tei->tag;
623 const u32 max_resp_len = SB_RFB_MAX; 746 const u32 max_resp_len = SB_RFB_MAX;
624 u8 phy_mask; 747 u32 phy_mask;
625 748
626 slot = &mvi->slot_info[tag]; 749 slot = &mvi->slot_info[tag];
627 750
628 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : 751 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
629 task->dev->port->phy_mask; 752 sas_port->phy_mask) & TXQ_PHY_MASK;
753
630 slot->tx = mvi->tx_prod; 754 slot->tx = mvi->tx_prod;
631 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | 755 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
632 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | 756 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
@@ -640,7 +764,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
640 hdr->flags = cpu_to_le32(flags | 764 hdr->flags = cpu_to_le32(flags |
641 (tei->n_elem << MCH_PRD_LEN_SHIFT) | 765 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
642 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); 766 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
643
644 hdr->tags = cpu_to_le32(tag); 767 hdr->tags = cpu_to_le32(tag);
645 hdr->data_len = cpu_to_le32(task->total_xfer_len); 768 hdr->data_len = cpu_to_le32(task->total_xfer_len);
646 769
@@ -674,13 +797,15 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
674 else 797 else
675 hdr->prd_tbl = 0; 798 hdr->prd_tbl = 0;
676 799
677 i = sizeof(struct mvs_prd) * tei->n_elem; 800 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
678 buf_tmp += i; 801 buf_tmp += i;
679 buf_tmp_dma += i; 802 buf_tmp_dma += i;
680 803
681 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 804 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
682 slot->response = buf_tmp; 805 slot->response = buf_tmp;
683 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 806 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
807 if (mvi->flags & MVF_FLAG_SOC)
808 hdr->reserved[0] = 0;
684 809
685 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - 810 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
686 sizeof(struct mvs_err_info) - i; 811 sizeof(struct mvs_err_info) - i;
@@ -692,57 +817,105 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
692 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); 817 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
693 818
694 /* generate open address frame hdr (first 12 bytes) */ 819 /* generate open address frame hdr (first 12 bytes) */
695 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ 820 /* initiator, SSP, ftype 1h */
696 buf_oaf[1] = task->dev->linkrate & 0xf; 821 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
697 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); 822 buf_oaf[1] = dev->linkrate & 0xf;
698 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); 823 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
824 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
699 825
700 /* fill in SSP frame header (Command Table.SSP frame header) */ 826 /* fill in SSP frame header (Command Table.SSP frame header) */
701 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; 827 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
702 ssp_hdr->frame_type = SSP_COMMAND; 828
703 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, 829 if (is_tmf)
830 ssp_hdr->frame_type = SSP_TASK;
831 else
832 ssp_hdr->frame_type = SSP_COMMAND;
833
834 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
704 HASHED_SAS_ADDR_SIZE); 835 HASHED_SAS_ADDR_SIZE);
705 memcpy(ssp_hdr->hashed_src_addr, 836 memcpy(ssp_hdr->hashed_src_addr,
706 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); 837 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
707 ssp_hdr->tag = cpu_to_be16(tag); 838 ssp_hdr->tag = cpu_to_be16(tag);
708 839
709 /* fill in command frame IU */ 840 /* fill in IU for TASK and Command Frame */
710 buf_cmd += sizeof(*ssp_hdr); 841 buf_cmd += sizeof(*ssp_hdr);
711 memcpy(buf_cmd, &task->ssp_task.LUN, 8); 842 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
712 buf_cmd[9] = fburst | task->ssp_task.task_attr |
713 (task->ssp_task.task_prio << 3);
714 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
715 843
716 /* fill in PRD (scatter/gather) table, if any */ 844 if (ssp_hdr->frame_type != SSP_TASK) {
717 for_each_sg(task->scatter, sg, tei->n_elem, i) { 845 buf_cmd[9] = fburst | task->ssp_task.task_attr |
718 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 846 (task->ssp_task.task_prio << 3);
719 buf_prd->len = cpu_to_le32(sg_dma_len(sg)); 847 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
720 buf_prd++; 848 } else{
849 buf_cmd[10] = tmf->tmf;
850 switch (tmf->tmf) {
851 case TMF_ABORT_TASK:
852 case TMF_QUERY_TASK:
853 buf_cmd[12] =
854 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
855 buf_cmd[13] =
856 tmf->tag_of_task_to_be_managed & 0xff;
857 break;
858 default:
859 break;
860 }
721 } 861 }
722 862 /* fill in PRD (scatter/gather) table, if any */
863 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
723 return 0; 864 return 0;
724} 865}
725 866
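mvs_task_prep_ssp() now emits either an SSP COMMAND or an SSP TASK frame; for a TASK frame, byte 10 of the IU carries the TMF function and bytes 12-13 the big-endian tag of the task being managed. A hypothetical caller-side sketch using only names visible in this patch; the sas_task itself must already be prepared as an SSP request for the target, and the exact width of tag_of_task_to_be_managed is assumed:

static int example_send_abort_task(struct sas_task *task, u16 victim_tag)
{
	struct mvs_tmf_task tmf = {
		.tmf = TMF_ABORT_TASK,
		.tag_of_task_to_be_managed = victim_tag,
	};

	/* lock = 1, is_tmf = 1: matches the new mvs_task_exec() parameters */
	return mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, 1, &tmf);
}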
726int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) 867#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
868static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
869 struct completion *completion, int lock,
870 int is_tmf, struct mvs_tmf_task *tmf)
727{ 871{
728 struct domain_device *dev = task->dev; 872 struct domain_device *dev = task->dev;
729 struct mvs_info *mvi = dev->port->ha->lldd_ha; 873 struct mvs_info *mvi;
730 struct pci_dev *pdev = mvi->pdev; 874 struct mvs_device *mvi_dev;
731 void __iomem *regs = mvi->regs;
732 struct mvs_task_exec_info tei; 875 struct mvs_task_exec_info tei;
733 struct sas_task *t = task; 876 struct sas_task *t = task;
734 struct mvs_slot_info *slot; 877 struct mvs_slot_info *slot;
735 u32 tag = 0xdeadbeef, rc, n_elem = 0; 878 u32 tag = 0xdeadbeef, rc, n_elem = 0;
736 unsigned long flags;
737 u32 n = num, pass = 0; 879 u32 n = num, pass = 0;
880 unsigned long flags = 0;
738 881
739 spin_lock_irqsave(&mvi->lock, flags); 882 if (!dev->port) {
883 struct task_status_struct *tsm = &t->task_status;
884
885 tsm->resp = SAS_TASK_UNDELIVERED;
886 tsm->stat = SAS_PHY_DOWN;
887 t->task_done(t);
888 return 0;
889 }
890
891 mvi = mvs_find_dev_mvi(task->dev);
892
893 if (lock)
894 spin_lock_irqsave(&mvi->lock, flags);
740 do { 895 do {
741 dev = t->dev; 896 dev = t->dev;
742 tei.port = &mvi->port[dev->port->id]; 897 mvi_dev = (struct mvs_device *)dev->lldd_dev;
898 if (DEV_IS_GONE(mvi_dev)) {
899 if (mvi_dev)
900 mv_dprintk("device %d not ready.\n",
901 mvi_dev->device_id);
902 else
903 mv_dprintk("device %016llx not ready.\n",
904 SAS_ADDR(dev->sas_addr));
905
906 rc = SAS_PHY_DOWN;
907 goto out_done;
908 }
909
910 if (dev->port->id >= mvi->chip->n_phy)
911 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
912 else
913 tei.port = &mvi->port[dev->port->id];
743 914
744 if (!tei.port->port_attached) { 915 if (!tei.port->port_attached) {
745 if (sas_protocol_ata(t->task_proto)) { 916 if (sas_protocol_ata(t->task_proto)) {
917 mv_dprintk("port %d has no"
918 " attached device.\n", dev->port->id);
746 rc = SAS_PHY_DOWN; 919 rc = SAS_PHY_DOWN;
747 goto out_done; 920 goto out_done;
748 } else { 921 } else {
@@ -759,7 +932,8 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
759 932
760 if (!sas_protocol_ata(t->task_proto)) { 933 if (!sas_protocol_ata(t->task_proto)) {
761 if (t->num_scatter) { 934 if (t->num_scatter) {
762 n_elem = pci_map_sg(mvi->pdev, t->scatter, 935 n_elem = dma_map_sg(mvi->dev,
936 t->scatter,
763 t->num_scatter, 937 t->num_scatter,
764 t->data_dir); 938 t->data_dir);
765 if (!n_elem) { 939 if (!n_elem) {
@@ -776,20 +950,23 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
776 goto err_out; 950 goto err_out;
777 951
778 slot = &mvi->slot_info[tag]; 952 slot = &mvi->slot_info[tag];
953
954
779 t->lldd_task = NULL; 955 t->lldd_task = NULL;
780 slot->n_elem = n_elem; 956 slot->n_elem = n_elem;
957 slot->slot_tag = tag;
781 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 958 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
959
782 tei.task = t; 960 tei.task = t;
783 tei.hdr = &mvi->slot[tag]; 961 tei.hdr = &mvi->slot[tag];
784 tei.tag = tag; 962 tei.tag = tag;
785 tei.n_elem = n_elem; 963 tei.n_elem = n_elem;
786
787 switch (t->task_proto) { 964 switch (t->task_proto) {
788 case SAS_PROTOCOL_SMP: 965 case SAS_PROTOCOL_SMP:
789 rc = mvs_task_prep_smp(mvi, &tei); 966 rc = mvs_task_prep_smp(mvi, &tei);
790 break; 967 break;
791 case SAS_PROTOCOL_SSP: 968 case SAS_PROTOCOL_SSP:
792 rc = mvs_task_prep_ssp(mvi, &tei); 969 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
793 break; 970 break;
794 case SAS_PROTOCOL_SATA: 971 case SAS_PROTOCOL_SATA:
795 case SAS_PROTOCOL_STP: 972 case SAS_PROTOCOL_STP:
@@ -797,52 +974,61 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
797 rc = mvs_task_prep_ata(mvi, &tei); 974 rc = mvs_task_prep_ata(mvi, &tei);
798 break; 975 break;
799 default: 976 default:
800 dev_printk(KERN_ERR, &pdev->dev, 977 dev_printk(KERN_ERR, mvi->dev,
801 "unknown sas_task proto: 0x%x\n", 978 "unknown sas_task proto: 0x%x\n",
802 t->task_proto); 979 t->task_proto);
803 rc = -EINVAL; 980 rc = -EINVAL;
804 break; 981 break;
805 } 982 }
806 983
807 if (rc) 984 if (rc) {
985 mv_dprintk("rc is %x\n", rc);
808 goto err_out_tag; 986 goto err_out_tag;
809 987 }
810 slot->task = t; 988 slot->task = t;
811 slot->port = tei.port; 989 slot->port = tei.port;
812 t->lldd_task = (void *) slot; 990 t->lldd_task = (void *) slot;
813 list_add_tail(&slot->list, &slot->port->list); 991 list_add_tail(&slot->entry, &tei.port->list);
814 /* TODO: select normal or high priority */ 992 /* TODO: select normal or high priority */
815
816 spin_lock(&t->task_state_lock); 993 spin_lock(&t->task_state_lock);
817 t->task_state_flags |= SAS_TASK_AT_INITIATOR; 994 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
818 spin_unlock(&t->task_state_lock); 995 spin_unlock(&t->task_state_lock);
819 996
820 mvs_hba_memory_dump(mvi, tag, t->task_proto); 997 mvs_hba_memory_dump(mvi, tag, t->task_proto);
821 998 mvi_dev->runing_req++;
822 ++pass; 999 ++pass;
823 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 1000 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
824 if (n > 1) 1001 if (n > 1)
825 t = list_entry(t->list.next, struct sas_task, list); 1002 t = list_entry(t->list.next, struct sas_task, list);
826 } while (--n); 1003 } while (--n);
827
828 rc = 0; 1004 rc = 0;
829 goto out_done; 1005 goto out_done;
830 1006
831err_out_tag: 1007err_out_tag:
832 mvs_tag_free(mvi, tag); 1008 mvs_tag_free(mvi, tag);
833err_out: 1009err_out:
834 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); 1010
1011 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
835 if (!sas_protocol_ata(t->task_proto)) 1012 if (!sas_protocol_ata(t->task_proto))
836 if (n_elem) 1013 if (n_elem)
837 pci_unmap_sg(mvi->pdev, t->scatter, n_elem, 1014 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
838 t->data_dir); 1015 t->data_dir);
839out_done: 1016out_done:
840 if (pass) 1017 if (likely(pass)) {
841 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); 1018 MVS_CHIP_DISP->start_delivery(mvi,
842 spin_unlock_irqrestore(&mvi->lock, flags); 1019 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1020 }
1021 if (lock)
1022 spin_unlock_irqrestore(&mvi->lock, flags);
843 return rc; 1023 return rc;
844} 1024}
845 1025
1026int mvs_queue_command(struct sas_task *task, const int num,
1027 gfp_t gfp_flags)
1028{
1029 return mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL);
1030}
1031
846static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 1032static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
847{ 1033{
848 u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 1034 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
@@ -852,16 +1038,18 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
852static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, 1038static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
853 struct mvs_slot_info *slot, u32 slot_idx) 1039 struct mvs_slot_info *slot, u32 slot_idx)
854{ 1040{
1041 if (!slot->task)
1042 return;
855 if (!sas_protocol_ata(task->task_proto)) 1043 if (!sas_protocol_ata(task->task_proto))
856 if (slot->n_elem) 1044 if (slot->n_elem)
857 pci_unmap_sg(mvi->pdev, task->scatter, 1045 dma_unmap_sg(mvi->dev, task->scatter,
858 slot->n_elem, task->data_dir); 1046 slot->n_elem, task->data_dir);
859 1047
860 switch (task->task_proto) { 1048 switch (task->task_proto) {
861 case SAS_PROTOCOL_SMP: 1049 case SAS_PROTOCOL_SMP:
862 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, 1050 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
863 PCI_DMA_FROMDEVICE); 1051 PCI_DMA_FROMDEVICE);
864 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, 1052 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
865 PCI_DMA_TODEVICE); 1053 PCI_DMA_TODEVICE);
866 break; 1054 break;
867 1055
@@ -872,10 +1060,12 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
872 /* do nothing */ 1060 /* do nothing */
873 break; 1061 break;
874 } 1062 }
875 list_del(&slot->list); 1063 list_del_init(&slot->entry);
876 task->lldd_task = NULL; 1064 task->lldd_task = NULL;
877 slot->task = NULL; 1065 slot->task = NULL;
878 slot->port = NULL; 1066 slot->port = NULL;
1067 slot->slot_tag = 0xFFFFFFFF;
1068 mvs_slot_free(mvi, slot_idx);
879} 1069}
880 1070
881static void mvs_update_wideport(struct mvs_info *mvi, int i) 1071static void mvs_update_wideport(struct mvs_info *mvi, int i)
@@ -884,25 +1074,28 @@ static void mvs_update_wideport(struct mvs_info *mvi, int i)
884 struct mvs_port *port = phy->port; 1074 struct mvs_port *port = phy->port;
885 int j, no; 1075 int j, no;
886 1076
887 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) 1077 for_each_phy(port->wide_port_phymap, j, no) {
888 if (no & 1) { 1078 if (j & 1) {
889 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); 1079 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
890 mvs_write_port_cfg_data(mvi, no, 1080 PHYR_WIDE_PORT);
1081 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
891 port->wide_port_phymap); 1082 port->wide_port_phymap);
892 } else { 1083 } else {
893 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); 1084 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
894 mvs_write_port_cfg_data(mvi, no, 0); 1085 PHYR_WIDE_PORT);
1086 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1087 0);
895 } 1088 }
1089 }
896} 1090}
897 1091
898static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) 1092static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
899{ 1093{
900 u32 tmp; 1094 u32 tmp;
901 struct mvs_phy *phy = &mvi->phy[i]; 1095 struct mvs_phy *phy = &mvi->phy[i];
902 struct mvs_port *port = phy->port;; 1096 struct mvs_port *port = phy->port;
903
904 tmp = mvs_read_phy_ctl(mvi, i);
905 1097
1098 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
906 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { 1099 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
907 if (!port) 1100 if (!port)
908 phy->phy_attached = 1; 1101 phy->phy_attached = 1;
@@ -917,7 +1110,6 @@ static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
917 mvs_update_wideport(mvi, i); 1110 mvs_update_wideport(mvi, i);
918 } else if (phy->phy_type & PORT_TYPE_SATA) 1111 } else if (phy->phy_type & PORT_TYPE_SATA)
919 port->port_attached = 0; 1112 port->port_attached = 0;
920 mvs_free_reg_set(mvi, phy->port);
921 phy->port = NULL; 1113 phy->port = NULL;
922 phy->phy_attached = 0; 1114 phy->phy_attached = 0;
923 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 1115 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
@@ -932,17 +1124,21 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
932 if (!s) 1124 if (!s)
933 return NULL; 1125 return NULL;
934 1126
935 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1127 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
936 s[3] = mvs_read_port_cfg_data(mvi, i); 1128 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1129
1130 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1131 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
937 1132
938 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1133 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
939 s[2] = mvs_read_port_cfg_data(mvi, i); 1134 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
940 1135
941 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1136 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
942 s[1] = mvs_read_port_cfg_data(mvi, i); 1137 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
943 1138
944 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1139 /* Workaround: take some ATAPI devices for ATA */
945 s[0] = mvs_read_port_cfg_data(mvi, i); 1140 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1141 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
946 1142
947 return (void *)s; 1143 return (void *)s;
948} 1144}
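The workaround at the end of mvs_get_d2h_reg() keys off the ATA device signature carried in the D2H FIS: an ATAPI device reports LBA low 0x01, LBA mid 0x14, LBA high 0xEB, which is where the 0x00EB1401 constant comes from. The byte packing below is inferred from the comparison itself:

/*
 * s[1], assumed packing, least significant byte first:
 *   bits  7:0  LBA low  = 0x01
 *   bits 15:8  LBA mid  = 0x14
 *   bits 23:16 LBA high = 0xEB
 * so (s[1] & 0x00FFFFFF) == 0x00EB1401 flags an ATAPI signature, which
 * the code then rewrites per its "take some ATAPI devices for ATA" note.
 */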
@@ -952,56 +1148,53 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
952 return irq_status & PHYEV_SIG_FIS; 1148 return irq_status & PHYEV_SIG_FIS;
953} 1149}
954 1150
955static void mvs_update_phyinfo(struct mvs_info *mvi, int i, 1151void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
956 int get_st)
957{ 1152{
958 struct mvs_phy *phy = &mvi->phy[i]; 1153 struct mvs_phy *phy = &mvi->phy[i];
959 struct pci_dev *pdev = mvi->pdev; 1154 struct sas_identify_frame *id;
960 u32 tmp;
961 u64 tmp64;
962
963 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
964 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
965
966 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
967 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
968 1155
969 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); 1156 id = (struct sas_identify_frame *)phy->frame_rcvd;
970 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
971 1157
972 if (get_st) { 1158 if (get_st) {
973 phy->irq_status = mvs_read_port_irq_stat(mvi, i); 1159 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
974 phy->phy_status = mvs_is_phy_ready(mvi, i); 1160 phy->phy_status = mvs_is_phy_ready(mvi, i);
975 } 1161 }
976 1162
977 if (phy->phy_status) { 1163 if (phy->phy_status) {
978 u32 phy_st; 1164 int oob_done = 0;
979 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; 1165 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
980
981 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
982 phy_st = mvs_read_port_cfg_data(mvi, i);
983
984 sas_phy->linkrate =
985 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
986 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
987 phy->minimum_linkrate =
988 (phy->phy_status &
989 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
990 phy->maximum_linkrate =
991 (phy->phy_status &
992 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
993 1166
994 if (phy->phy_type & PORT_TYPE_SAS) { 1167 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
995 /* Updated attached_sas_addr */ 1168
996 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); 1169 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
997 phy->att_dev_sas_addr = 1170 if (phy->phy_type & PORT_TYPE_SATA) {
998 (u64) mvs_read_port_cfg_data(mvi, i) << 32; 1171 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
999 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); 1172 if (mvs_is_sig_fis_received(phy->irq_status)) {
1000 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); 1173 phy->phy_attached = 1;
1001 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); 1174 phy->att_dev_sas_addr =
1002 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); 1175 i + mvi->id * mvi->chip->n_phy;
1176 if (oob_done)
1177 sas_phy->oob_mode = SATA_OOB_MODE;
1178 phy->frame_rcvd_size =
1179 sizeof(struct dev_to_host_fis);
1180 mvs_get_d2h_reg(mvi, i, (void *)id);
1181 } else {
1182 u32 tmp;
1183 dev_printk(KERN_DEBUG, mvi->dev,
1184 "Phy%d : No sig fis\n", i);
1185 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1186 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1187 tmp | PHYEV_SIG_FIS);
1188 phy->phy_attached = 0;
1189 phy->phy_type &= ~PORT_TYPE_SATA;
1190 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1191 goto out_done;
1192 }
1193 } else if (phy->phy_type & PORT_TYPE_SAS
1194 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1195 phy->phy_attached = 1;
1003 phy->identify.device_type = 1196 phy->identify.device_type =
1004 phy->att_dev_info & PORT_DEV_TYPE_MASK; 1197 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1005 1198
1006 if (phy->identify.device_type == SAS_END_DEV) 1199 if (phy->identify.device_type == SAS_END_DEV)
1007 phy->identify.target_port_protocols = 1200 phy->identify.target_port_protocols =
@@ -1009,93 +1202,522 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
1009 else if (phy->identify.device_type != NO_DEVICE) 1202 else if (phy->identify.device_type != NO_DEVICE)
1010 phy->identify.target_port_protocols = 1203 phy->identify.target_port_protocols =
1011 SAS_PROTOCOL_SMP; 1204 SAS_PROTOCOL_SMP;
1012 if (phy_st & PHY_OOB_DTCTD) 1205 if (oob_done)
1013 sas_phy->oob_mode = SAS_OOB_MODE; 1206 sas_phy->oob_mode = SAS_OOB_MODE;
1014 phy->frame_rcvd_size = 1207 phy->frame_rcvd_size =
1015 sizeof(struct sas_identify_frame); 1208 sizeof(struct sas_identify_frame);
1016 } else if (phy->phy_type & PORT_TYPE_SATA) {
1017 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1018 if (mvs_is_sig_fis_received(phy->irq_status)) {
1019 phy->att_dev_sas_addr = i; /* temp */
1020 if (phy_st & PHY_OOB_DTCTD)
1021 sas_phy->oob_mode = SATA_OOB_MODE;
1022 phy->frame_rcvd_size =
1023 sizeof(struct dev_to_host_fis);
1024 mvs_get_d2h_reg(mvi, i,
1025 (void *)sas_phy->frame_rcvd);
1026 } else {
1027 dev_printk(KERN_DEBUG, &pdev->dev,
1028 "No sig fis\n");
1029 phy->phy_type &= ~(PORT_TYPE_SATA);
1030 goto out_done;
1031 }
1032 } 1209 }
1033 tmp64 = cpu_to_be64(phy->att_dev_sas_addr); 1210 memcpy(sas_phy->attached_sas_addr,
1034 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); 1211 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1035
1036 dev_printk(KERN_DEBUG, &pdev->dev,
1037 "phy[%d] Get Attached Address 0x%llX ,"
1038 " SAS Address 0x%llX\n",
1039 i,
1040 (unsigned long long)phy->att_dev_sas_addr,
1041 (unsigned long long)phy->dev_sas_addr);
1042 dev_printk(KERN_DEBUG, &pdev->dev,
1043 "Rate = %x , type = %d\n",
1044 sas_phy->linkrate, phy->phy_type);
1045
1046 /* workaround for HW phy decoding error on 1.5g disk drive */
1047 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
1048 tmp = mvs_read_port_vsr_data(mvi, i);
1049 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
1050 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
1051 SAS_LINK_RATE_1_5_GBPS)
1052 tmp &= ~PHY_MODE6_LATECLK;
1053 else
1054 tmp |= PHY_MODE6_LATECLK;
1055 mvs_write_port_vsr_data(mvi, i, tmp);
1056 1212
1213 if (MVS_CHIP_DISP->phy_work_around)
1214 MVS_CHIP_DISP->phy_work_around(mvi, i);
1057 } 1215 }
1216 mv_dprintk("port %d attach dev info is %x\n",
1217 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1218 mv_dprintk("port %d attach sas addr is %llx\n",
1219 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1058out_done: 1220out_done:
1059 if (get_st) 1221 if (get_st)
1060 mvs_write_port_irq_stat(mvi, i, phy->irq_status); 1222 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1061} 1223}
1062 1224
1063void mvs_port_formed(struct asd_sas_phy *sas_phy) 1225static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1064{ 1226{
1065 struct sas_ha_struct *sas_ha = sas_phy->ha; 1227 struct sas_ha_struct *sas_ha = sas_phy->ha;
1066 struct mvs_info *mvi = sas_ha->lldd_ha; 1228 struct mvs_info *mvi = NULL; int i = 0, hi;
1067 struct asd_sas_port *sas_port = sas_phy->port;
1068 struct mvs_phy *phy = sas_phy->lldd_phy; 1229 struct mvs_phy *phy = sas_phy->lldd_phy;
1069 struct mvs_port *port = &mvi->port[sas_port->id]; 1230 struct asd_sas_port *sas_port = sas_phy->port;
1070 unsigned long flags; 1231 struct mvs_port *port;
1232 unsigned long flags = 0;
1233 if (!sas_port)
1234 return;
1071 1235
1072 spin_lock_irqsave(&mvi->lock, flags); 1236 while (sas_ha->sas_phy[i]) {
1237 if (sas_ha->sas_phy[i] == sas_phy)
1238 break;
1239 i++;
1240 }
1241 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1242 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1243 if (sas_port->id >= mvi->chip->n_phy)
1244 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1245 else
1246 port = &mvi->port[sas_port->id];
1247 if (lock)
1248 spin_lock_irqsave(&mvi->lock, flags);
1073 port->port_attached = 1; 1249 port->port_attached = 1;
1074 phy->port = port; 1250 phy->port = port;
1075 port->taskfileset = MVS_ID_NOT_MAPPED;
1076 if (phy->phy_type & PORT_TYPE_SAS) { 1251 if (phy->phy_type & PORT_TYPE_SAS) {
1077 port->wide_port_phymap = sas_port->phy_mask; 1252 port->wide_port_phymap = sas_port->phy_mask;
1253 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1078 mvs_update_wideport(mvi, sas_phy->id); 1254 mvs_update_wideport(mvi, sas_phy->id);
1079 } 1255 }
1080 spin_unlock_irqrestore(&mvi->lock, flags); 1256 if (lock)
1257 spin_unlock_irqrestore(&mvi->lock, flags);
1258}
1259
1260static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1261{
1262 /*Nothing*/
1263}
1264
1265
1266void mvs_port_formed(struct asd_sas_phy *sas_phy)
1267{
1268 mvs_port_notify_formed(sas_phy, 1);
1269}
1270
1271void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1272{
1273 mvs_port_notify_deformed(sas_phy, 1);
1274}
1275
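Because one sas_ha can now front several mvs_info instances, mvs_port_notify_formed() first finds the asd_sas_phy's position in sas_ha->sas_phy[] and derives the owning controller with hi = i / n_phy (and, for the port lookup, subtracts n_phy from sas_port->id on the second chip). The sketch below shows just that index arithmetic; the modulo for the local phy index is the obvious counterpart, not something the hunk spells out.

#include <stdio.h>

/* libsas sees one flat phy array; the LLDD splits it back into
 * per-chip groups of n_phy entries each */
struct host_phy { int host; int local_phy; };

static struct host_phy map_phy(int global_phy, int n_phy)
{
        struct host_phy hp;

        hp.host      = global_phy / n_phy;   /* "hi" in the patch          */
        hp.local_phy = global_phy % n_phy;   /* phy index inside that chip */
        return hp;
}

int main(void)
{
        struct host_phy hp = map_phy(9, 8);  /* e.g. two hosts, 8 phys each */
        printf("host %d, local phy %d\n", hp.host, hp.local_phy);
        return 0;
}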
1276struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1277{
1278 u32 dev;
1279 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1280 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1281 mvi->devices[dev].device_id = dev;
1282 return &mvi->devices[dev];
1283 }
1284 }
1285
1286 if (dev == MVS_MAX_DEVICES)
1287 mv_printk("max support %d devices, ignore ..\n",
1288 MVS_MAX_DEVICES);
1289
1290 return NULL;
1291}
1292
1293void mvs_free_dev(struct mvs_device *mvi_dev)
1294{
1295 u32 id = mvi_dev->device_id;
1296 memset(mvi_dev, 0, sizeof(*mvi_dev));
1297 mvi_dev->device_id = id;
1298 mvi_dev->dev_type = NO_DEVICE;
1299 mvi_dev->dev_status = MVS_DEV_NORMAL;
1300 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1301}
1302
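mvs_alloc_dev() and mvs_free_dev() manage a fixed table of MVS_MAX_DEVICES entries: allocation is a linear scan for the first NO_DEVICE slot, and freeing wipes the entry but preserves its device_id. A stripped-down model follows, with the structure reduced to the two fields the scan needs; the caller marks the slot busy by setting dev_type, as mvs_dev_found_notify() does.

#include <stdio.h>
#include <string.h>

#define MAX_DEVICES 8           /* stand-in for MVS_MAX_DEVICES        */
#define NO_DEVICE   0           /* stand-in for the libsas NO_DEVICE   */

struct dev_entry {
        int dev_type;           /* NO_DEVICE while the slot is free    */
        unsigned int device_id; /* fixed index, survives free          */
};

static struct dev_entry devices[MAX_DEVICES];

static struct dev_entry *alloc_dev(void)
{
        for (unsigned int i = 0; i < MAX_DEVICES; i++) {
                if (devices[i].dev_type == NO_DEVICE) {
                        devices[i].device_id = i;
                        return &devices[i];
                }
        }
        return NULL;            /* table full */
}

static void free_dev(struct dev_entry *d)
{
        unsigned int id = d->device_id;

        memset(d, 0, sizeof(*d));
        d->device_id = id;      /* keep the slot's identity */
        d->dev_type = NO_DEVICE;
}

int main(void)
{
        struct dev_entry *d = alloc_dev();

        if (d) {
                d->dev_type = 1;        /* caller marks the slot busy */
                printf("got device_id %u\n", d->device_id);
                free_dev(d);
        }
        return 0;
}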
1303int mvs_dev_found_notify(struct domain_device *dev, int lock)
1304{
1305 unsigned long flags = 0;
1306 int res = 0;
1307 struct mvs_info *mvi = NULL;
1308 struct domain_device *parent_dev = dev->parent;
1309 struct mvs_device *mvi_device;
1310
1311 mvi = mvs_find_dev_mvi(dev);
1312
1313 if (lock)
1314 spin_lock_irqsave(&mvi->lock, flags);
1315
1316 mvi_device = mvs_alloc_dev(mvi);
1317 if (!mvi_device) {
1318 res = -1;
1319 goto found_out;
1320 }
1321 dev->lldd_dev = (void *)mvi_device;
1322 mvi_device->dev_type = dev->dev_type;
1323
1324 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1325 int phy_id;
1326 u8 phy_num = parent_dev->ex_dev.num_phys;
1327 struct ex_phy *phy;
1328 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1329 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1330 if (SAS_ADDR(phy->attached_sas_addr) ==
1331 SAS_ADDR(dev->sas_addr)) {
1332 mvi_device->attached_phy = phy_id;
1333 break;
1334 }
1335 }
1336
1337 if (phy_id == phy_num) {
1338 mv_printk("Error: no attached dev:%016llx"
1339 "at ex:%016llx.\n",
1340 SAS_ADDR(dev->sas_addr),
1341 SAS_ADDR(parent_dev->sas_addr));
1342 res = -1;
1343 }
1344 }
1345
1346found_out:
1347 if (lock)
1348 spin_unlock_irqrestore(&mvi->lock, flags);
1349 return res;
1350}
1351
1352int mvs_dev_found(struct domain_device *dev)
1353{
1354 return mvs_dev_found_notify(dev, 1);
1355}
1356
1357void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1358{
1359 unsigned long flags = 0;
1360 struct mvs_info *mvi;
1361 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1362
1363 mvi = mvs_find_dev_mvi(dev);
1364
1365 if (lock)
1366 spin_lock_irqsave(&mvi->lock, flags);
1367
1368 if (mvi_dev) {
1369 mv_dprintk("found dev[%d:%x] is gone.\n",
1370 mvi_dev->device_id, mvi_dev->dev_type);
1371 mvs_free_reg_set(mvi, mvi_dev);
1372 mvs_free_dev(mvi_dev);
1373 } else {
1374 mv_dprintk("found dev has gone.\n");
1375 }
1376 dev->lldd_dev = NULL;
1377
1378 if (lock)
1379 spin_unlock_irqrestore(&mvi->lock, flags);
1380}
1381
1382
1383void mvs_dev_gone(struct domain_device *dev)
1384{
1385 mvs_dev_gone_notify(dev, 1);
1386}
1387
1388static struct sas_task *mvs_alloc_task(void)
1389{
1390 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1391
1392 if (task) {
1393 INIT_LIST_HEAD(&task->list);
1394 spin_lock_init(&task->task_state_lock);
1395 task->task_state_flags = SAS_TASK_STATE_PENDING;
1396 init_timer(&task->timer);
1397 init_completion(&task->completion);
1398 }
1399 return task;
1400}
1401
1402static void mvs_free_task(struct sas_task *task)
1403{
1404 if (task) {
1405 BUG_ON(!list_empty(&task->list));
1406 kfree(task);
1407 }
1408}
1409
1410static void mvs_task_done(struct sas_task *task)
1411{
1412 if (!del_timer(&task->timer))
1413 return;
1414 complete(&task->completion);
1415}
1416
1417static void mvs_tmf_timedout(unsigned long data)
1418{
1419 struct sas_task *task = (struct sas_task *)data;
1420
1421 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1422 complete(&task->completion);
1423}
1424
1425/* XXX */
1426#define MVS_TASK_TIMEOUT 20
1427static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1428 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1429{
1430 int res, retry;
1431 struct sas_task *task = NULL;
1432
1433 for (retry = 0; retry < 3; retry++) {
1434 task = mvs_alloc_task();
1435 if (!task)
1436 return -ENOMEM;
1437
1438 task->dev = dev;
1439 task->task_proto = dev->tproto;
1440
1441 memcpy(&task->ssp_task, parameter, para_len);
1442 task->task_done = mvs_task_done;
1443
1444 task->timer.data = (unsigned long) task;
1445 task->timer.function = mvs_tmf_timedout;
1446 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1447 add_timer(&task->timer);
1448
1449 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf);
1450
1451 if (res) {
1452 del_timer(&task->timer);
1453 mv_printk("executing internel task failed:%d\n", res);
1454 goto ex_err;
1455 }
1456
1457 wait_for_completion(&task->completion);
1458 res = -TMF_RESP_FUNC_FAILED;
1459 /* Even TMF timed out, return direct. */
1460 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1461 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1462 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1463 goto ex_err;
1464 }
1465 }
1466
1467 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1468 task->task_status.stat == SAM_GOOD) {
1469 res = TMF_RESP_FUNC_COMPLETE;
1470 break;
1471 }
1472
1473 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1474 task->task_status.stat == SAS_DATA_UNDERRUN) {
1475 /* no error, but return the number of bytes of
1476 * underrun */
1477 res = task->task_status.residual;
1478 break;
1479 }
1480
1481 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1482 task->task_status.stat == SAS_DATA_OVERRUN) {
1483 mv_dprintk("blocked task error.\n");
1484 res = -EMSGSIZE;
1485 break;
1486 } else {
1487 mv_dprintk(" task to dev %016llx response: 0x%x "
1488 "status 0x%x\n",
1489 SAS_ADDR(dev->sas_addr),
1490 task->task_status.resp,
1491 task->task_status.stat);
1492 mvs_free_task(task);
1493 task = NULL;
1494
1495 }
1496 }
1497ex_err:
1498 BUG_ON(retry == 3 && task != NULL);
1499 if (task != NULL)
1500 mvs_free_task(task);
1501 return res;
1502}
1503
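mvs_exec_internal_tmf_task() arms a 20 second timer on the task, waits on its completion, and retries up to three times, turning the final task_status into a TMF return code (complete, underrun, overrun, or failure). The sketch below models only that retry-and-classify control flow; issue_and_wait() and the status enum are placeholders for the libsas task machinery, and the underrun/overrun cases are folded away.

#include <stdio.h>

enum tmf_status {
        TMF_COMPLETE,    /* SAS_TASK_COMPLETE + SAM_GOOD        */
        TMF_TIMED_OUT,   /* the timer fired, the task never ran */
        TMF_RETRYABLE,   /* any other response: free and retry  */
};

/* placeholder for "build task, add_timer, mvs_task_exec, wait_for_completion" */
static enum tmf_status issue_and_wait(int attempt)
{
        return attempt < 2 ? TMF_RETRYABLE : TMF_COMPLETE;
}

static int exec_internal_tmf(void)
{
        for (int retry = 0; retry < 3; retry++) {
                switch (issue_and_wait(retry)) {
                case TMF_COMPLETE:
                        return 0;        /* TMF_RESP_FUNC_COMPLETE */
                case TMF_TIMED_OUT:
                        return -1;       /* give up immediately    */
                case TMF_RETRYABLE:
                        continue;        /* free the task, retry   */
                }
        }
        return -1;                       /* three attempts failed  */
}

int main(void)
{
        printf("tmf rc = %d\n", exec_internal_tmf());
        return 0;
}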
1504static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1505 u8 *lun, struct mvs_tmf_task *tmf)
1506{
1507 struct sas_ssp_task ssp_task;
1508 DECLARE_COMPLETION_ONSTACK(completion);
1509 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1510 return TMF_RESP_FUNC_ESUPP;
1511
1512 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1513
1514 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1515 sizeof(ssp_task), tmf);
1516}
1517
1518
1519/* Standard mandates link reset for ATA (type 0)
1520 and hard reset for SSP (type 1) , only for RECOVERY */
1521static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1522{
1523 int rc;
1524 struct sas_phy *phy = sas_find_local_phy(dev);
1525 int reset_type = (dev->dev_type == SATA_DEV ||
1526 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1527 rc = sas_phy_reset(phy, reset_type);
1528 msleep(2000);
1529 return rc;
1530}
1531
1532/* mandatory SAM-3 */
1533int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1534{
1535 unsigned long flags;
1536 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1537 struct mvs_tmf_task tmf_task;
1538 struct mvs_info *mvi = mvs_find_dev_mvi(dev);
1539 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1540
1541 tmf_task.tmf = TMF_LU_RESET;
1542 mvi_dev->dev_status = MVS_DEV_EH;
1543 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1544 if (rc == TMF_RESP_FUNC_COMPLETE) {
1545 num = mvs_find_dev_phyno(dev, phyno);
1546 spin_lock_irqsave(&mvi->lock, flags);
1547 for (i = 0; i < num; i++)
1548 mvs_release_task(mvi, phyno[i], dev);
1549 spin_unlock_irqrestore(&mvi->lock, flags);
1550 }
1551 /* If failed, fall-through I_T_Nexus reset */
1552 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1553 mvi_dev->device_id, rc);
1554 return rc;
1081} 1555}
1082 1556
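mvs_lu_reset() marks the device as under error handling, issues TMF_LU_RESET over SSP, and only on success walks the device's phys and releases their outstanding slots while holding mvi->lock; on failure libsas escalates to the I_T nexus reset below. A control-flow sketch under those assumptions, with the three driver helpers stubbed out:

#include <stdio.h>

#define WIDE_PORT_MAX_PHY 4

/* stubs standing in for mvs_debug_issue_ssp_tmf(), mvs_find_dev_phyno()
 * and mvs_release_task(); they model only the call order */
static int  issue_lu_reset_tmf(void) { return 0; }   /* 0 == TMF complete */
static int  find_dev_phys(int phys[]) { phys[0] = 2; return 1; }
static void release_phy_tasks(int phy) { printf("release tasks on phy %d\n", phy); }

static int lu_reset(void)
{
        int phys[WIDE_PORT_MAX_PHY];
        int rc = issue_lu_reset_tmf();

        if (rc == 0) {          /* clean up only after a successful TMF */
                int n = find_dev_phys(phys);
                /* the driver holds mvi->lock around this loop */
                for (int i = 0; i < n; i++)
                        release_phy_tasks(phys[i]);
        }
        return rc;              /* failure falls through to I_T nexus reset */
}

int main(void) { return lu_reset(); }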
1083int mvs_I_T_nexus_reset(struct domain_device *dev) 1557int mvs_I_T_nexus_reset(struct domain_device *dev)
1084{ 1558{
1085 return TMF_RESP_FUNC_FAILED; 1559 unsigned long flags;
1560 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1561 struct mvs_info *mvi = mvs_find_dev_mvi(dev);
1562 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1563
1564 if (mvi_dev->dev_status != MVS_DEV_EH)
1565 return TMF_RESP_FUNC_COMPLETE;
1566 rc = mvs_debug_I_T_nexus_reset(dev);
1567 mv_printk("%s for device[%x]:rc= %d\n",
1568 __func__, mvi_dev->device_id, rc);
1569
1570 /* housekeeper */
1571 num = mvs_find_dev_phyno(dev, phyno);
1572 spin_lock_irqsave(&mvi->lock, flags);
1573 for (i = 0; i < num; i++)
1574 mvs_release_task(mvi, phyno[i], dev);
1575 spin_unlock_irqrestore(&mvi->lock, flags);
1576
1577 return rc;
1578}
1579/* optional SAM-3 */
1580int mvs_query_task(struct sas_task *task)
1581{
1582 u32 tag;
1583 struct scsi_lun lun;
1584 struct mvs_tmf_task tmf_task;
1585 int rc = TMF_RESP_FUNC_FAILED;
1586
1587 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1588 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1589 struct domain_device *dev = task->dev;
1590 struct mvs_info *mvi = mvs_find_dev_mvi(dev);
1591
1592 int_to_scsilun(cmnd->device->lun, &lun);
1593 rc = mvs_find_tag(mvi, task, &tag);
1594 if (rc == 0) {
1595 rc = TMF_RESP_FUNC_FAILED;
1596 return rc;
1597 }
1598
1599 tmf_task.tmf = TMF_QUERY_TASK;
1600 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1601
1602 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1603 switch (rc) {
1604 /* The task is still in Lun, release it then */
1605 case TMF_RESP_FUNC_SUCC:
1606 /* The task is not in Lun or failed, reset the phy */
1607 case TMF_RESP_FUNC_FAILED:
1608 case TMF_RESP_FUNC_COMPLETE:
1609 break;
1610 }
1611 }
1612 mv_printk("%s:rc= %d\n", __func__, rc);
1613 return rc;
1614}
1615
1616/* mandatory SAM-3, still need free task/slot info */
1617int mvs_abort_task(struct sas_task *task)
1618{
1619 struct scsi_lun lun;
1620 struct mvs_tmf_task tmf_task;
1621 struct domain_device *dev = task->dev;
1622 struct mvs_info *mvi = mvs_find_dev_mvi(dev);
1623 int rc = TMF_RESP_FUNC_FAILED;
1624 unsigned long flags;
1625 u32 tag;
1626 if (mvi->exp_req)
1627 mvi->exp_req--;
1628 spin_lock_irqsave(&task->task_state_lock, flags);
1629 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1630 spin_unlock_irqrestore(&task->task_state_lock, flags);
1631 rc = TMF_RESP_FUNC_COMPLETE;
1632 goto out;
1633 }
1634 spin_unlock_irqrestore(&task->task_state_lock, flags);
1635 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1636 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1637
1638 int_to_scsilun(cmnd->device->lun, &lun);
1639 rc = mvs_find_tag(mvi, task, &tag);
1640 if (rc == 0) {
1641 mv_printk("No such tag in %s\n", __func__);
1642 rc = TMF_RESP_FUNC_FAILED;
1643 return rc;
1644 }
1645
1646 tmf_task.tmf = TMF_ABORT_TASK;
1647 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1648
1649 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1650
1651 /* if successful, clear the task and callback forwards.*/
1652 if (rc == TMF_RESP_FUNC_COMPLETE) {
1653 u32 slot_no;
1654 struct mvs_slot_info *slot;
1655 struct mvs_info *mvi = mvs_find_dev_mvi(dev);
1656
1657 if (task->lldd_task) {
1658 slot = (struct mvs_slot_info *)task->lldd_task;
1659 slot_no = (u32) (slot - mvi->slot_info);
1660 mvs_slot_complete(mvi, slot_no, 1);
1661 }
1662 }
1663 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1664 task->task_proto & SAS_PROTOCOL_STP) {
1665 /* to do free register_set */
1666 } else {
1667 /* SMP */
1668
1669 }
1670out:
1671 if (rc != TMF_RESP_FUNC_COMPLETE)
1672 mv_printk("%s:rc= %d\n", __func__, rc);
1673 return rc;
1674}
1675
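When the abort TMF completes, mvs_abort_task() recovers the slot number from the slot pointer itself, slot_no = slot - mvi->slot_info, plain pointer-difference arithmetic over the slot_info array, and then finishes that slot. A tiny illustration of the idiom, with the structure reduced to a dummy field:

#include <stdio.h>

struct slot_info { unsigned int slot_tag; /* ...rest of the real struct... */ };

#define N_SLOTS 64
static struct slot_info slot_info[N_SLOTS];

/* what "slot_no = (u32)(slot - mvi->slot_info)" does in mvs_abort_task() */
static unsigned int slot_index(const struct slot_info *slot)
{
        return (unsigned int)(slot - slot_info);  /* element count, not bytes */
}

int main(void)
{
        printf("index = %u\n", slot_index(&slot_info[17]));  /* prints 17 */
        return 0;
}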
1676int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1677{
1678 int rc = TMF_RESP_FUNC_FAILED;
1679 struct mvs_tmf_task tmf_task;
1680
1681 tmf_task.tmf = TMF_ABORT_TASK_SET;
1682 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1683
1684 return rc;
1685}
1686
1687int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1688{
1689 int rc = TMF_RESP_FUNC_FAILED;
1690 struct mvs_tmf_task tmf_task;
1691
1692 tmf_task.tmf = TMF_CLEAR_ACA;
1693 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1694
1695 return rc;
1696}
1697
1698int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1699{
1700 int rc = TMF_RESP_FUNC_FAILED;
1701 struct mvs_tmf_task tmf_task;
1702
1703 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1704 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1705
1706 return rc;
1086} 1707}
1087 1708
1088static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, 1709static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1089 u32 slot_idx, int err) 1710 u32 slot_idx, int err)
1090{ 1711{
1091 struct mvs_port *port = mvi->slot_info[slot_idx].port; 1712 struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev;
1092 struct task_status_struct *tstat = &task->task_status; 1713 struct task_status_struct *tstat = &task->task_status;
1093 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; 1714 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1094 int stat = SAM_GOOD; 1715 int stat = SAM_GOOD;
1095 1716
1717
1096 resp->frame_len = sizeof(struct dev_to_host_fis); 1718 resp->frame_len = sizeof(struct dev_to_host_fis);
1097 memcpy(&resp->ending_fis[0], 1719 memcpy(&resp->ending_fis[0],
1098 SATA_RECEIVED_D2H_FIS(port->taskfileset), 1720 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1099 sizeof(struct dev_to_host_fis)); 1721 sizeof(struct dev_to_host_fis));
1100 tstat->buf_valid_size = sizeof(*resp); 1722 tstat->buf_valid_size = sizeof(*resp);
1101 if (unlikely(err)) 1723 if (unlikely(err))
@@ -1107,75 +1729,104 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1107 u32 slot_idx) 1729 u32 slot_idx)
1108{ 1730{
1109 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1731 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1732 int stat;
1110 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 1733 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1111 u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); 1734 u32 tfs = 0;
1112 int stat = SAM_CHECK_COND; 1735 enum mvs_port_type type = PORT_TYPE_SAS;
1113 1736
1114 if (err_dw1 & SLOT_BSY_ERR) { 1737 if (err_dw0 & CMD_ISS_STPD)
1115 stat = SAS_QUEUE_FULL; 1738 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1116 mvs_slot_reset(mvi, task, slot_idx); 1739
1117 } 1740 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1741
1742 stat = SAM_CHECK_COND;
1118 switch (task->task_proto) { 1743 switch (task->task_proto) {
1119 case SAS_PROTOCOL_SSP: 1744 case SAS_PROTOCOL_SSP:
1745 stat = SAS_ABORTED_TASK;
1120 break; 1746 break;
1121 case SAS_PROTOCOL_SMP: 1747 case SAS_PROTOCOL_SMP:
1748 stat = SAM_CHECK_COND;
1122 break; 1749 break;
1750
1123 case SAS_PROTOCOL_SATA: 1751 case SAS_PROTOCOL_SATA:
1124 case SAS_PROTOCOL_STP: 1752 case SAS_PROTOCOL_STP:
1125 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1753 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1126 if (err_dw0 & TFILE_ERR) 1754 {
1127 stat = mvs_sata_done(mvi, task, slot_idx, 1); 1755 if (err_dw0 == 0x80400002)
1756 mv_printk("find reserved error, why?\n");
1757
1758 task->ata_task.use_ncq = 0;
1759 stat = SAS_PROTO_RESPONSE;
1760 mvs_sata_done(mvi, task, slot_idx, 1);
1761
1762 }
1128 break; 1763 break;
1129 default: 1764 default:
1130 break; 1765 break;
1131 } 1766 }
1132 1767
1133 mvs_hexdump(16, (u8 *) slot->response, 0);
1134 return stat; 1768 return stat;
1135} 1769}
1136 1770
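The reworked mvs_slot_err() no longer hexdumps the response; it optionally issues a stop for a stalled command, reactivates the slot, and maps the failing protocol onto a libsas status: SSP errors become SAS_ABORTED_TASK, SMP stays SAM_CHECK_COND, and SATA/STP errors return SAS_PROTO_RESPONSE after the D2H FIS has been filled in. A condensed version of just that mapping, with stand-in enums for the libsas constants:

enum proto  { PROTO_SSP, PROTO_SMP, PROTO_SATA_STP };
enum status { STAT_CHECK_COND, STAT_ABORTED_TASK, STAT_PROTO_RESPONSE };

/* condensed form of the switch in mvs_slot_err() */
static enum status slot_err_status(enum proto p)
{
        switch (p) {
        case PROTO_SSP:
                return STAT_ABORTED_TASK;       /* SAS_ABORTED_TASK            */
        case PROTO_SATA_STP:
                return STAT_PROTO_RESPONSE;     /* SAS_PROTO_RESPONSE; details
                                                   live in the copied D2H FIS  */
        case PROTO_SMP:
        default:
                return STAT_CHECK_COND;         /* SAM_CHECK_COND              */
        }
}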
1137static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) 1771int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1138{ 1772{
1139 u32 slot_idx = rx_desc & RXQ_SLOT_MASK; 1773 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1140 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1774 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1141 struct sas_task *task = slot->task; 1775 struct sas_task *task = slot->task;
1776 struct mvs_device *mvi_dev = NULL;
1142 struct task_status_struct *tstat; 1777 struct task_status_struct *tstat;
1143 struct mvs_port *port; 1778
1144 bool aborted; 1779 bool aborted;
1145 void *to; 1780 void *to;
1781 enum exec_status sts;
1146 1782
1783 if (mvi->exp_req)
1784 mvi->exp_req--;
1147 if (unlikely(!task || !task->lldd_task)) 1785 if (unlikely(!task || !task->lldd_task))
1148 return -1; 1786 return -1;
1149 1787
1788 tstat = &task->task_status;
1789 mvi_dev = (struct mvs_device *)task->dev->lldd_dev;
1790
1150 mvs_hba_cq_dump(mvi); 1791 mvs_hba_cq_dump(mvi);
1151 1792
1152 spin_lock(&task->task_state_lock); 1793 spin_lock(&task->task_state_lock);
1794 task->task_state_flags &=
1795 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1796 task->task_state_flags |= SAS_TASK_STATE_DONE;
1797 /* race condition*/
1153 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 1798 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1154 if (!aborted) {
1155 task->task_state_flags &=
1156 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1157 task->task_state_flags |= SAS_TASK_STATE_DONE;
1158 }
1159 spin_unlock(&task->task_state_lock); 1799 spin_unlock(&task->task_state_lock);
1160 1800
1161 if (aborted) { 1801 memset(tstat, 0, sizeof(*tstat));
1802 tstat->resp = SAS_TASK_COMPLETE;
1803
1804 if (unlikely(aborted)) {
1805 tstat->stat = SAS_ABORTED_TASK;
1806 if (mvi_dev)
1807 mvi_dev->runing_req--;
1808 if (sas_protocol_ata(task->task_proto))
1809 mvs_free_reg_set(mvi, mvi_dev);
1810
1162 mvs_slot_task_free(mvi, task, slot, slot_idx); 1811 mvs_slot_task_free(mvi, task, slot, slot_idx);
1163 mvs_slot_free(mvi, rx_desc);
1164 return -1; 1812 return -1;
1165 } 1813 }
1166 1814
1167 port = slot->port; 1815 if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
1168 tstat = &task->task_status; 1816 mv_dprintk("port has not device.\n");
1169 memset(tstat, 0, sizeof(*tstat)); 1817 tstat->stat = SAS_PHY_DOWN;
1170 tstat->resp = SAS_TASK_COMPLETE;
1171
1172 if (unlikely(!port->port_attached || flags)) {
1173 mvs_slot_err(mvi, task, slot_idx);
1174 if (!sas_protocol_ata(task->task_proto))
1175 tstat->stat = SAS_PHY_DOWN;
1176 goto out; 1818 goto out;
1177 } 1819 }
1178 1820
1821 /*
1822 if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
1823 mv_dprintk("Find device[%016llx] RXQ_ERR %X,
1824 err info:%016llx\n",
1825 SAS_ADDR(task->dev->sas_addr),
1826 rx_desc, (u64)(*(u64 *) slot->response));
1827 }
1828 */
1829
1179 /* error info record present */ 1830 /* error info record present */
1180 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1831 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1181 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1832 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
@@ -1191,13 +1842,10 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1191 } 1842 }
1192 /* response frame present */ 1843 /* response frame present */
1193 else if (rx_desc & RXQ_RSP) { 1844 else if (rx_desc & RXQ_RSP) {
1194 struct ssp_response_iu *iu = 1845 struct ssp_response_iu *iu = slot->response +
1195 slot->response + sizeof(struct mvs_err_info); 1846 sizeof(struct mvs_err_info);
1196 sas_ssp_task_response(&mvi->pdev->dev, task, iu); 1847 sas_ssp_task_response(mvi->dev, task, iu);
1197 } 1848 } else
1198
1199 /* should never happen? */
1200 else
1201 tstat->stat = SAM_CHECK_COND; 1849 tstat->stat = SAM_CHECK_COND;
1202 break; 1850 break;
1203 1851
@@ -1225,105 +1873,245 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1225 } 1873 }
1226 1874
1227out: 1875out:
1876 if (mvi_dev)
1877 mvi_dev->runing_req--;
1878 if (sas_protocol_ata(task->task_proto))
1879 mvs_free_reg_set(mvi, mvi_dev);
1880
1228 mvs_slot_task_free(mvi, task, slot, slot_idx); 1881 mvs_slot_task_free(mvi, task, slot, slot_idx);
1229 if (unlikely(tstat->stat != SAS_QUEUE_FULL)) 1882 sts = tstat->stat;
1230 mvs_slot_free(mvi, rx_desc);
1231 1883
1232 spin_unlock(&mvi->lock); 1884 spin_unlock(&mvi->lock);
1233 task->task_done(task); 1885 if (task->task_done)
1886 task->task_done(task);
1887 else
1888 mv_dprintk("why has not task_done.\n");
1234 spin_lock(&mvi->lock); 1889 spin_lock(&mvi->lock);
1235 return tstat->stat; 1890
1891 return sts;
1236} 1892}
1237 1893
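mvs_slot_complete() now sets SAS_TASK_STATE_DONE (and clears PENDING/AT_INITIATOR) and samples SAS_TASK_STATE_ABORTED inside the same task_state_lock section, so a completion racing with an abort is observed consistently and the slot can simply be freed. A small model of that flag handshake; the lock helpers are placeholders for the spinlock:

#include <stdbool.h>
#include <stdio.h>

#define TASK_PENDING  0x1u
#define TASK_DONE     0x2u
#define TASK_ABORTED  0x4u

struct task { unsigned int flags; };

/* placeholders for spin_lock()/spin_unlock(&task->task_state_lock) */
static void state_lock(struct task *t)   { (void)t; }
static void state_unlock(struct task *t) { (void)t; }

/* mirrors the locked flag update at the top of mvs_slot_complete() */
static bool complete_task(struct task *t)
{
        bool aborted;

        state_lock(t);
        t->flags &= ~TASK_PENDING;      /* AT_INITIATOR is cleared too in the driver */
        t->flags |= TASK_DONE;
        aborted = t->flags & TASK_ABORTED;   /* sampled under the same lock */
        state_unlock(t);

        return aborted;   /* if true, the slot is freed and the result dropped */
}

int main(void)
{
        struct task t = { TASK_PENDING | TASK_ABORTED };
        printf("aborted race seen: %d\n", complete_task(&t));
        return 0;
}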
1238static void mvs_release_task(struct mvs_info *mvi, int phy_no) 1894void mvs_release_task(struct mvs_info *mvi,
1895 int phy_no, struct domain_device *dev)
1239{ 1896{
1240 struct list_head *pos, *n; 1897 int i = 0; u32 slot_idx;
1241 struct mvs_slot_info *slot; 1898 struct mvs_phy *phy;
1242 struct mvs_phy *phy = &mvi->phy[phy_no]; 1899 struct mvs_port *port;
1243 struct mvs_port *port = phy->port; 1900 struct mvs_slot_info *slot, *slot2;
1244 u32 rx_desc;
1245 1901
1902 phy = &mvi->phy[phy_no];
1903 port = phy->port;
1246 if (!port) 1904 if (!port)
1247 return; 1905 return;
1248 1906
1249 list_for_each_safe(pos, n, &port->list) { 1907 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1250 slot = container_of(pos, struct mvs_slot_info, list); 1908 struct sas_task *task;
1251 rx_desc = (u32) (slot - mvi->slot_info); 1909 slot_idx = (u32) (slot - mvi->slot_info);
1252 mvs_slot_complete(mvi, rx_desc, 1); 1910 task = slot->task;
1911
1912 if (dev && task->dev != dev)
1913 continue;
1914
1915 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1916 slot_idx, slot->slot_tag, task);
1917
1918 if (task->task_proto & SAS_PROTOCOL_SSP) {
1919 mv_printk("attached with SSP task CDB[");
1920 for (i = 0; i < 16; i++)
1921 mv_printk(" %02x", task->ssp_task.cdb[i]);
1922 mv_printk(" ]\n");
1923 }
1924
1925 mvs_slot_complete(mvi, slot_idx, 1);
1926 }
1927}
1928
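mvs_release_task() switched to list_for_each_entry_safe() and can now restrict the sweep to a single domain_device: entries whose task->dev differs are skipped, everything else is pushed through mvs_slot_complete(), which removes the slot from the port list while the walk is still in progress. A miniature model of that filtered, deletion-safe walk using an explicit singly linked list:

#include <stdio.h>
#include <stddef.h>

struct slot {
        struct slot *next;
        int dev;                /* stand-in for slot->task->dev */
        int tag;
};

/*
 * Unlink ("complete") every slot belonging to 'dev'; dev < 0 means all,
 * matching mvs_release_task(mvi, phy_no, NULL).  The next pointer is read
 * before the entry is taken off the list, which is what
 * list_for_each_entry_safe() buys the driver, since mvs_slot_complete()
 * tears the slot down while the walk is still running.
 */
static void release_tasks(struct slot **head, int dev)
{
        struct slot **pp = head, *cur, *next;

        for (cur = *head; cur; cur = next) {
                next = cur->next;
                if (dev >= 0 && cur->dev != dev) {
                        pp = &cur->next;
                        continue;
                }
                printf("complete slot tag %d (dev %d)\n", cur->tag, cur->dev);
                *pp = next;                     /* unlink the entry */
        }
}

int main(void)
{
        struct slot b = { NULL, 2, 11 };
        struct slot a = { &b,   1, 10 };
        struct slot *head = &a;

        release_tasks(&head, 2);        /* only dev 2's slot goes */
        release_tasks(&head, -1);       /* then sweep the rest    */
        return 0;
}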
1929static void mvs_phy_disconnected(struct mvs_phy *phy)
1930{
1931 phy->phy_attached = 0;
1932 phy->att_dev_info = 0;
1933 phy->att_dev_sas_addr = 0;
1934}
1935
1936static void mvs_work_queue(struct work_struct *work)
1937{
1938 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1939 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1940 struct mvs_info *mvi = mwq->mvi;
1941 unsigned long flags;
1942
1943 spin_lock_irqsave(&mvi->lock, flags);
1944 if (mwq->handler & PHY_PLUG_EVENT) {
1945 u32 phy_no = (unsigned long) mwq->data;
1946 struct sas_ha_struct *sas_ha = mvi->sas;
1947 struct mvs_phy *phy = &mvi->phy[phy_no];
1948 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1949
1950 if (phy->phy_event & PHY_PLUG_OUT) {
1951 u32 tmp;
1952 struct sas_identify_frame *id;
1953 id = (struct sas_identify_frame *)phy->frame_rcvd;
1954 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1955 phy->phy_event &= ~PHY_PLUG_OUT;
1956 if (!(tmp & PHY_READY_MASK)) {
1957 sas_phy_disconnected(sas_phy);
1958 mvs_phy_disconnected(phy);
1959 sas_ha->notify_phy_event(sas_phy,
1960 PHYE_LOSS_OF_SIGNAL);
1961 mv_dprintk("phy%d Removed Device\n", phy_no);
1962 } else {
1963 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
1964 mvs_update_phyinfo(mvi, phy_no, 1);
1965 mvs_bytes_dmaed(mvi, phy_no);
1966 mvs_port_notify_formed(sas_phy, 0);
1967 mv_dprintk("phy%d Attached Device\n", phy_no);
1968 }
1969 }
1970 }
1971 list_del(&mwq->entry);
1972 spin_unlock_irqrestore(&mvi->lock, flags);
1973 kfree(mwq);
1974}
1975
1976static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
1977{
1978 struct mvs_wq *mwq;
1979 int ret = 0;
1980
1981 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
1982 if (mwq) {
1983 mwq->mvi = mvi;
1984 mwq->data = data;
1985 mwq->handler = handler;
1986 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
1987 list_add_tail(&mwq->entry, &mvi->wq_list);
1988 schedule_delayed_work(&mwq->work_q, HZ * 2);
1989 } else
1990 ret = -ENOMEM;
1991
1992 return ret;
1993}
1994
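Hot-plug handling moved out of the interrupt path: mvs_handle_event() allocates an mvs_wq node with GFP_ATOMIC, links it onto mvi->wq_list and schedules mvs_work_queue() two seconds later, which re-reads the phy state before telling libsas whether the device vanished or re-appeared. The sketch below keeps only the queue-now, handle-later shape; the scheduling is reduced to an explicit run_deferred_work() call and list ordering is not modeled.

#include <stdio.h>
#include <stdlib.h>

#define PHY_PLUG_EVENT 1

/* stand-in for struct mvs_wq: one queued event */
struct wq_item {
        struct wq_item *next;
        int phy_no;             /* mwq->data in the driver    */
        int handler;            /* mwq->handler               */
};

static struct wq_item *wq_list; /* mvi->wq_list               */

/* mvs_handle_event(): called with the lock held, IRQ context */
static int handle_event(int phy_no, int handler)
{
        struct wq_item *it = malloc(sizeof(*it));   /* GFP_ATOMIC in the driver */

        if (!it)
                return -1;
        it->phy_no = phy_no;
        it->handler = handler;
        it->next = wq_list;
        wq_list = it;
        /* the driver also does schedule_delayed_work(..., 2 * HZ) here */
        return 0;
}

/* mvs_work_queue(): runs later, with time for the link to settle */
static void run_deferred_work(void)
{
        while (wq_list) {
                struct wq_item *it = wq_list;

                wq_list = it->next;
                if (it->handler & PHY_PLUG_EVENT)
                        printf("re-check phy %d, then notify libsas\n",
                               it->phy_no);
                free(it);
        }
}

int main(void)
{
        handle_event(3, PHY_PLUG_EVENT);
        run_deferred_work();
        return 0;
}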
1995static void mvs_sig_time_out(unsigned long tphy)
1996{
1997 struct mvs_phy *phy = (struct mvs_phy *)tphy;
1998 struct mvs_info *mvi = phy->mvi;
1999 u8 phy_no;
2000
2001 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
2002 if (&mvi->phy[phy_no] == phy) {
2003 mv_dprintk("Get signature time out, reset phy %d\n",
2004 phy_no+mvi->id*mvi->chip->n_phy);
2005 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2006 }
1253 } 2007 }
1254} 2008}
1255 2009
1256static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 2010static void mvs_sig_remove_timer(struct mvs_phy *phy)
1257{ 2011{
1258 struct pci_dev *pdev = mvi->pdev; 2012 if (phy->timer.function)
1259 struct sas_ha_struct *sas_ha = &mvi->sas; 2013 del_timer(&phy->timer);
2014 phy->timer.function = NULL;
2015}
2016
2017void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2018{
2019 u32 tmp;
2020 struct sas_ha_struct *sas_ha = mvi->sas;
1260 struct mvs_phy *phy = &mvi->phy[phy_no]; 2021 struct mvs_phy *phy = &mvi->phy[phy_no];
1261 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2022 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1262 2023
1263 phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); 2024 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2025 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2026 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2027 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
2028 phy->irq_status);
2029
1264 /* 2030 /*
1265 * events is port event now , 2031 * events is port event now ,
1266 * we need check the interrupt status which belongs to per port. 2032 * we need check the interrupt status which belongs to per port.
1267 */ 2033 */
1268 dev_printk(KERN_DEBUG, &pdev->dev,
1269 "Port %d Event = %X\n",
1270 phy_no, phy->irq_status);
1271
1272 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1273 mvs_release_task(mvi, phy_no);
1274 if (!mvs_is_phy_ready(mvi, phy_no)) {
1275 sas_phy_disconnected(sas_phy);
1276 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1277 dev_printk(KERN_INFO, &pdev->dev,
1278 "Port %d Unplug Notice\n", phy_no);
1279 2034
1280 } else 2035 if (phy->irq_status & PHYEV_DCDR_ERR)
1281 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); 2036 mv_dprintk("port %d STP decoding error.\n",
1282 } 2037 phy_no+mvi->id*mvi->chip->n_phy);
1283 if (!(phy->irq_status & PHYEV_DEC_ERR)) { 2038
1284 if (phy->irq_status & PHYEV_COMWAKE) { 2039 if (phy->irq_status & PHYEV_POOF) {
1285 u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); 2040 if (!(phy->phy_event & PHY_PLUG_OUT)) {
1286 mvs_write_port_irq_mask(mvi, phy_no, 2041 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
1287 tmp | PHYEV_SIG_FIS); 2042 int ready;
2043 mvs_release_task(mvi, phy_no, NULL);
2044 phy->phy_event |= PHY_PLUG_OUT;
2045 mvs_handle_event(mvi,
2046 (void *)(unsigned long)phy_no,
2047 PHY_PLUG_EVENT);
2048 ready = mvs_is_phy_ready(mvi, phy_no);
2049 if (!ready)
2050 mv_dprintk("phy%d Unplug Notice\n",
2051 phy_no +
2052 mvi->id * mvi->chip->n_phy);
2053 if (ready || dev_sata) {
2054 if (MVS_CHIP_DISP->stp_reset)
2055 MVS_CHIP_DISP->stp_reset(mvi,
2056 phy_no);
2057 else
2058 MVS_CHIP_DISP->phy_reset(mvi,
2059 phy_no, 0);
2060 return;
2061 }
1288 } 2062 }
1289 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 2063 }
1290 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
1291 if (phy->phy_status) {
1292 mvs_detect_porttype(mvi, phy_no);
1293
1294 if (phy->phy_type & PORT_TYPE_SATA) {
1295 u32 tmp = mvs_read_port_irq_mask(mvi,
1296 phy_no);
1297 tmp &= ~PHYEV_SIG_FIS;
1298 mvs_write_port_irq_mask(mvi,
1299 phy_no, tmp);
1300 }
1301 2064
1302 mvs_update_phyinfo(mvi, phy_no, 0); 2065 if (phy->irq_status & PHYEV_COMWAKE) {
1303 sas_ha->notify_phy_event(sas_phy, 2066 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
1304 PHYE_OOB_DONE); 2067 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
1305 mvs_bytes_dmaed(mvi, phy_no); 2068 tmp | PHYEV_SIG_FIS);
1306 } else { 2069 if (phy->timer.function == NULL) {
1307 dev_printk(KERN_DEBUG, &pdev->dev, 2070 phy->timer.data = (unsigned long)phy;
1308 "plugin interrupt but phy is gone\n"); 2071 phy->timer.function = mvs_sig_time_out;
1309 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, 2072 phy->timer.expires = jiffies + 10*HZ;
1310 NULL); 2073 add_timer(&phy->timer);
2074 }
2075 }
2076 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2077 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2078 mvs_sig_remove_timer(phy);
2079 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2080 if (phy->phy_status) {
2081 mdelay(10);
2082 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2083 if (phy->phy_type & PORT_TYPE_SATA) {
2084 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2085 mvi, phy_no);
2086 tmp &= ~PHYEV_SIG_FIS;
2087 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2088 phy_no, tmp);
2089 }
2090 mvs_update_phyinfo(mvi, phy_no, 0);
2091 mvs_bytes_dmaed(mvi, phy_no);
2092 /* whether driver is going to handle hot plug */
2093 if (phy->phy_event & PHY_PLUG_OUT) {
2094 mvs_port_notify_formed(sas_phy, 0);
2095 phy->phy_event &= ~PHY_PLUG_OUT;
1311 } 2096 }
1312 } else if (phy->irq_status & PHYEV_BROAD_CH) { 2097 } else {
1313 mvs_release_task(mvi, phy_no); 2098 mv_dprintk("plugin interrupt but phy%d is gone\n",
1314 sas_ha->notify_port_event(sas_phy, 2099 phy_no + mvi->id*mvi->chip->n_phy);
1315 PORTE_BROADCAST_RCVD);
1316 } 2100 }
2101 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2102 mv_dprintk("port %d broadcast change.\n",
2103 phy_no + mvi->id*mvi->chip->n_phy);
2104 /* exception for Samsung disk drive*/
2105 mdelay(1000);
2106 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
1317 } 2107 }
1318 mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); 2108 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
1319} 2109}
1320 2110
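For SATA phys, COMWAKE now arms a one-shot ten second timer (mvs_sig_time_out) that resets the phy if the signature FIS never arrives, and SIG_FIS/ID_DONE cancels it through mvs_sig_remove_timer(); the guard against double-arming is simply that timer.function is still NULL. A compact model of that arm/cancel bookkeeping, with no real timers behind it:

#include <stdio.h>
#include <stddef.h>

struct phy {
        int id;
        void (*timer_fn)(struct phy *);   /* NULL means "timer not armed" */
};

static void sig_time_out(struct phy *p)
{
        printf("phy %d: no signature FIS within 10s, reset it\n", p->id);
}

/* COMWAKE path in mvs_int_port(): arm only once */
static void arm_sig_timer(struct phy *p)
{
        if (p->timer_fn == NULL)
                p->timer_fn = sig_time_out;   /* driver also sets expires = jiffies + 10*HZ */
}

/* SIG_FIS / ID_DONE path: mvs_sig_remove_timer() */
static void cancel_sig_timer(struct phy *p)
{
        p->timer_fn = NULL;                   /* the driver del_timer()s first */
}

int main(void)
{
        struct phy p = { 0, NULL };

        arm_sig_timer(&p);      /* COMWAKE seen            */
        arm_sig_timer(&p);      /* second COMWAKE: no-op   */
        cancel_sig_timer(&p);   /* SIG FIS arrived in time */
        return 0;
}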
1321static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 2111int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1322{ 2112{
1323 void __iomem *regs = mvi->regs;
1324 u32 rx_prod_idx, rx_desc; 2113 u32 rx_prod_idx, rx_desc;
1325 bool attn = false; 2114 bool attn = false;
1326 struct pci_dev *pdev = mvi->pdev;
1327 2115
1328 /* the first dword in the RX ring is special: it contains 2116 /* the first dword in the RX ring is special: it contains
1329 * a mirror of the hardware's RX producer index, so that 2117 * a mirror of the hardware's RX producer index, so that
@@ -1339,480 +2127,31 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1339 * note: if coalescing is enabled, 2127 * note: if coalescing is enabled,
1340 * it will need to read from register every time for sure 2128 * it will need to read from register every time for sure
1341 */ 2129 */
1342 if (mvi->rx_cons == rx_prod_idx) 2130 if (unlikely(mvi->rx_cons == rx_prod_idx))
1343 mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; 2131 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
1344 2132
1345 if (mvi->rx_cons == rx_prod_idx) 2133 if (mvi->rx_cons == rx_prod_idx)
1346 return 0; 2134 return 0;
1347 2135
1348 while (mvi->rx_cons != rx_prod_idx) { 2136 while (mvi->rx_cons != rx_prod_idx) {
1349
1350 /* increment our internal RX consumer pointer */ 2137 /* increment our internal RX consumer pointer */
1351 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); 2138 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
1352
1353 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); 2139 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
1354 2140
1355 if (likely(rx_desc & RXQ_DONE)) 2141 if (likely(rx_desc & RXQ_DONE))
1356 mvs_slot_complete(mvi, rx_desc, 0); 2142 mvs_slot_complete(mvi, rx_desc, 0);
1357 if (rx_desc & RXQ_ATTN) { 2143 if (rx_desc & RXQ_ATTN) {
1358 attn = true; 2144 attn = true;
1359 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1360 rx_desc);
1361 } else if (rx_desc & RXQ_ERR) { 2145 } else if (rx_desc & RXQ_ERR) {
1362 if (!(rx_desc & RXQ_DONE)) 2146 if (!(rx_desc & RXQ_DONE))
1363 mvs_slot_complete(mvi, rx_desc, 0); 2147 mvs_slot_complete(mvi, rx_desc, 0);
1364 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1365 rx_desc);
1366 } else if (rx_desc & RXQ_SLOT_RESET) { 2148 } else if (rx_desc & RXQ_SLOT_RESET) {
1367 dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
1368 rx_desc);
1369 mvs_slot_free(mvi, rx_desc); 2149 mvs_slot_free(mvi, rx_desc);
1370 } 2150 }
1371 } 2151 }
1372 2152
1373 if (attn && self_clear) 2153 if (attn && self_clear)
1374 mvs_int_full(mvi); 2154 MVS_CHIP_DISP->int_full(mvi);
1375
1376 return 0; 2155 return 0;
1377} 2156}
1378 2157
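mvs_int_rx() drains descriptors until its consumer index catches up with the producer index mirrored in the first dword of the RX ring, with both indices wrapping under the power-of-two mask MVS_RX_RING_SZ - 1 and the actual entries starting one slot after the mirror. The sketch below shows that index arithmetic with an explicit consumer cursor; it is a simplified model, not the driver's exact cursor handling.

#include <stdio.h>

#define RING_SZ 256u            /* must be a power of two (MVS_RX_RING_SZ) */

static unsigned int ring[RING_SZ + 1];  /* ring[0] mirrors the HW producer index;
                                           the entry for index k sits at ring[k + 1] */

/*
 * cons holds the last index already consumed; ring[0] holds the last index
 * the hardware filled.  Both wrap with the power-of-two mask, like
 * mvs_int_rx() does with MVS_RX_RING_SZ - 1.
 */
static void consume(unsigned int *cons)
{
        unsigned int prod = ring[0] & (RING_SZ - 1);

        while (*cons != prod) {
                *cons = (*cons + 1) & (RING_SZ - 1);
                printf("descriptor 0x%x\n", ring[*cons + 1]);
        }
}

int main(void)
{
        unsigned int cons = 0;

        ring[0] = 3;                            /* HW filled indices 1..3 */
        ring[2] = 0x11; ring[3] = 0x22; ring[4] = 0x33;
        consume(&cons);
        return 0;
}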
1379#ifndef MVS_DISABLE_NVRAM
1380static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
1381{
1382 int timeout = 1000;
1383
1384 if (addr & ~SPI_ADDR_MASK)
1385 return -EINVAL;
1386
1387 writel(addr, regs + SPI_CMD);
1388 writel(TWSI_RD, regs + SPI_CTL);
1389
1390 while (timeout-- > 0) {
1391 if (readl(regs + SPI_CTL) & TWSI_RDY) {
1392 *data = readl(regs + SPI_DATA);
1393 return 0;
1394 }
1395
1396 udelay(10);
1397 }
1398
1399 return -EBUSY;
1400}
1401
1402static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
1403 void *buf, u32 buflen)
1404{
1405 u32 addr_end, tmp_addr, i, j;
1406 u32 tmp = 0;
1407 int rc;
1408 u8 *tmp8, *buf8 = buf;
1409
1410 addr_end = addr + buflen;
1411 tmp_addr = ALIGN(addr, 4);
1412 if (addr > 0xff)
1413 return -EINVAL;
1414
1415 j = addr & 0x3;
1416 if (j) {
1417 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1418 if (rc)
1419 return rc;
1420
1421 tmp8 = (u8 *)&tmp;
1422 for (i = j; i < 4; i++)
1423 *buf8++ = tmp8[i];
1424
1425 tmp_addr += 4;
1426 }
1427
1428 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1429 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1430 if (rc)
1431 return rc;
1432
1433 memcpy(buf8, &tmp, 4);
1434 buf8 += 4;
1435 }
1436
1437 if (tmp_addr < addr_end) {
1438 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1439 if (rc)
1440 return rc;
1441
1442 tmp8 = (u8 *)&tmp;
1443 j = addr_end - tmp_addr;
1444 for (i = 0; i < j; i++)
1445 *buf8++ = tmp8[i];
1446
1447 tmp_addr += 4;
1448 }
1449
1450 return 0;
1451}
1452#endif
1453
1454int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen)
1455{
1456#ifndef MVS_DISABLE_NVRAM
1457 void __iomem *regs = mvi->regs;
1458 int rc, i;
1459 u32 sum;
1460 u8 hdr[2], *tmp;
1461 const char *msg;
1462
1463 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1464 if (rc) {
1465 msg = "nvram hdr read failed";
1466 goto err_out;
1467 }
1468 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1469 if (rc) {
1470 msg = "nvram read failed";
1471 goto err_out;
1472 }
1473
1474 if (hdr[0] != 0x5A) {
1475 /* entry id */
1476 msg = "invalid nvram entry id";
1477 rc = -ENOENT;
1478 goto err_out;
1479 }
1480
1481 tmp = buf;
1482 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1483 for (i = 0; i < buflen; i++)
1484 sum += ((u32)tmp[i]);
1485
1486 if (sum) {
1487 msg = "nvram checksum failure";
1488 rc = -EILSEQ;
1489 goto err_out;
1490 }
1491
1492 return 0;
1493
1494err_out:
1495 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1496 return rc;
1497#else
1498 /* FIXME , For SAS target mode */
1499 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1500 return 0;
1501#endif
1502}
1503
1504static void mvs_int_sata(struct mvs_info *mvi)
1505{
1506 u32 tmp;
1507 void __iomem *regs = mvi->regs;
1508 tmp = mr32(INT_STAT_SRS);
1509 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1510}
1511
1512static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
1513 u32 slot_idx)
1514{
1515 void __iomem *regs = mvi->regs;
1516 struct domain_device *dev = task->dev;
1517 struct asd_sas_port *sas_port = dev->port;
1518 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1519 u32 reg_set, phy_mask;
1520
1521 if (!sas_protocol_ata(task->task_proto)) {
1522 reg_set = 0;
1523 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1524 sas_port->phy_mask;
1525 } else {
1526 reg_set = port->taskfileset;
1527 phy_mask = sas_port->phy_mask;
1528 }
1529 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
1530 (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
1531 (phy_mask << TXQ_PHY_SHIFT) |
1532 (reg_set << TXQ_SRS_SHIFT));
1533
1534 mw32(TX_PROD_IDX, mvi->tx_prod);
1535 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1536}
1537
1538void mvs_int_full(struct mvs_info *mvi)
1539{
1540 void __iomem *regs = mvi->regs;
1541 u32 tmp, stat;
1542 int i;
1543
1544 stat = mr32(INT_STAT);
1545
1546 mvs_int_rx(mvi, false);
1547
1548 for (i = 0; i < MVS_MAX_PORTS; i++) {
1549 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1550 if (tmp)
1551 mvs_int_port(mvi, i, tmp);
1552 }
1553
1554 if (stat & CINT_SRS)
1555 mvs_int_sata(mvi);
1556
1557 mw32(INT_STAT, stat);
1558}
1559
1560#ifndef MVS_DISABLE_MSI
1561static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1562{
1563 struct mvs_info *mvi = opaque;
1564
1565#ifndef MVS_USE_TASKLET
1566 spin_lock(&mvi->lock);
1567
1568 mvs_int_rx(mvi, true);
1569
1570 spin_unlock(&mvi->lock);
1571#else
1572 tasklet_schedule(&mvi->tasklet);
1573#endif
1574 return IRQ_HANDLED;
1575}
1576#endif
1577
1578int mvs_task_abort(struct sas_task *task)
1579{
1580 int rc;
1581 unsigned long flags;
1582 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1583 struct pci_dev *pdev = mvi->pdev;
1584 int tag;
1585
1586 spin_lock_irqsave(&task->task_state_lock, flags);
1587 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1588 rc = TMF_RESP_FUNC_COMPLETE;
1589 spin_unlock_irqrestore(&task->task_state_lock, flags);
1590 goto out_done;
1591 }
1592 spin_unlock_irqrestore(&task->task_state_lock, flags);
1593
1594 switch (task->task_proto) {
1595 case SAS_PROTOCOL_SMP:
1596 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
1597 break;
1598 case SAS_PROTOCOL_SSP:
1599 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
1600 break;
1601 case SAS_PROTOCOL_SATA:
1602 case SAS_PROTOCOL_STP:
1603 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
1604 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
1605#if _MV_DUMP
1606 dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
1607 mvs_hexdump(sizeof(struct host_to_dev_fis),
1608 (void *)&task->ata_task.fis, 0);
1609 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
1610 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
1611#endif
1612 spin_lock_irqsave(&task->task_state_lock, flags);
1613 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
1614 /* TODO */
1615 ;
1616 }
1617 spin_unlock_irqrestore(&task->task_state_lock, flags);
1618 break;
1619 }
1620 default:
1621 break;
1622 }
1623
1624 if (mvs_find_tag(mvi, task, &tag)) {
1625 spin_lock_irqsave(&mvi->lock, flags);
1626 mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
1627 spin_unlock_irqrestore(&mvi->lock, flags);
1628 }
1629 if (!mvs_task_exec(task, 1, GFP_ATOMIC))
1630 rc = TMF_RESP_FUNC_COMPLETE;
1631 else
1632 rc = TMF_RESP_FUNC_FAILED;
1633out_done:
1634 return rc;
1635}
1636
1637int __devinit mvs_hw_init(struct mvs_info *mvi)
1638{
1639 void __iomem *regs = mvi->regs;
1640 int i;
1641 u32 tmp, cctl;
1642
1643 /* make sure interrupts are masked immediately (paranoia) */
1644 mw32(GBL_CTL, 0);
1645 tmp = mr32(GBL_CTL);
1646
1647 /* Reset Controller */
1648 if (!(tmp & HBA_RST)) {
1649 if (mvi->flags & MVF_PHY_PWR_FIX) {
1650 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1651 tmp &= ~PCTL_PWR_ON;
1652 tmp |= PCTL_OFF;
1653 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
1654
1655 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
1656 tmp &= ~PCTL_PWR_ON;
1657 tmp |= PCTL_OFF;
1658 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
1659 }
1660
1661 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
1662 mw32_f(GBL_CTL, HBA_RST);
1663 }
1664
1665 /* wait for reset to finish; timeout is just a guess */
1666 i = 1000;
1667 while (i-- > 0) {
1668 msleep(10);
1669
1670 if (!(mr32(GBL_CTL) & HBA_RST))
1671 break;
1672 }
1673 if (mr32(GBL_CTL) & HBA_RST) {
1674 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
1675 return -EBUSY;
1676 }
1677
1678 /* Init Chip */
1679 /* make sure RST is set; HBA_RST /should/ have done that for us */
1680 cctl = mr32(CTL);
1681 if (cctl & CCTL_RST)
1682 cctl &= ~CCTL_RST;
1683 else
1684 mw32_f(CTL, cctl | CCTL_RST);
1685
1686 /* write to device control _AND_ device status register? - A.C. */
1687 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
1688 tmp &= ~PRD_REQ_MASK;
1689 tmp |= PRD_REQ_SIZE;
1690 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
1691
1692 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1693 tmp |= PCTL_PWR_ON;
1694 tmp &= ~PCTL_OFF;
1695 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
1696
1697 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
1698 tmp |= PCTL_PWR_ON;
1699 tmp &= ~PCTL_OFF;
1700 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
1701
1702 mw32_f(CTL, cctl);
1703
1704 /* reset control */
1705 mw32(PCS, 0); /*MVS_PCS */
1706
1707 mvs_phy_hacks(mvi);
1708
1709 mw32(CMD_LIST_LO, mvi->slot_dma);
1710 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
1711
1712 mw32(RX_FIS_LO, mvi->rx_fis_dma);
1713 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
1714
1715 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
1716 mw32(TX_LO, mvi->tx_dma);
1717 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
1718
1719 mw32(RX_CFG, MVS_RX_RING_SZ);
1720 mw32(RX_LO, mvi->rx_dma);
1721 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
1722
1723 /* enable auto port detection */
1724 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
1725 msleep(1100);
1726 /* init and reset phys */
1727 for (i = 0; i < mvi->chip->n_phy; i++) {
1728 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
1729 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
1730
1731 mvs_detect_porttype(mvi, i);
1732
1733 /* set phy local SAS address */
1734 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
1735 mvs_write_port_cfg_data(mvi, i, lo);
1736 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
1737 mvs_write_port_cfg_data(mvi, i, hi);
1738
1739 /* reset phy */
1740 tmp = mvs_read_phy_ctl(mvi, i);
1741 tmp |= PHY_RST;
1742 mvs_write_phy_ctl(mvi, i, tmp);
1743 }
1744
1745 msleep(100);
1746
1747 for (i = 0; i < mvi->chip->n_phy; i++) {
1748 /* clear phy int status */
1749 tmp = mvs_read_port_irq_stat(mvi, i);
1750 tmp &= ~PHYEV_SIG_FIS;
1751 mvs_write_port_irq_stat(mvi, i, tmp);
1752
1753 /* set phy int mask */
1754 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
1755 PHYEV_ID_DONE | PHYEV_DEC_ERR;
1756 mvs_write_port_irq_mask(mvi, i, tmp);
1757
1758 msleep(100);
1759 mvs_update_phyinfo(mvi, i, 1);
1760 mvs_enable_xmt(mvi, i);
1761 }
1762
1763 /* FIXME: update wide port bitmaps */
1764
1765 /* little endian for open address and command table, etc. */
1766 /* A.C.
1767 * it seems that ( from the spec ) turning on big-endian won't
1768 * do us any good on big-endian machines, need further confirmation
1769 */
1770 cctl = mr32(CTL);
1771 cctl |= CCTL_ENDIAN_CMD;
1772 cctl |= CCTL_ENDIAN_DATA;
1773 cctl &= ~CCTL_ENDIAN_OPEN;
1774 cctl |= CCTL_ENDIAN_RSP;
1775 mw32_f(CTL, cctl);
1776
1777 /* reset CMD queue */
1778 tmp = mr32(PCS);
1779 tmp |= PCS_CMD_RST;
1780 mw32(PCS, tmp);
1781 /* interrupt coalescing may cause missing HW interrput in some case,
1782 * and the max count is 0x1ff, while our max slot is 0x200,
1783 * it will make count 0.
1784 */
1785 tmp = 0;
1786 mw32(INT_COAL, tmp);
1787
1788 tmp = 0x100;
1789 mw32(INT_COAL_TMOUT, tmp);
1790
1791 /* ladies and gentlemen, start your engines */
1792 mw32(TX_CFG, 0);
1793 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
1794 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
1795 /* enable CMD/CMPL_Q/RESP mode */
1796 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
1797
1798 /* enable completion queue interrupt */
1799 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
1800 mw32(INT_MASK, tmp);
1801
1802 /* Enable SRS interrupt */
1803 mw32(INT_MASK_SRS, 0xFF);
1804 return 0;
1805}
1806
1807void __devinit mvs_print_info(struct mvs_info *mvi)
1808{
1809 struct pci_dev *pdev = mvi->pdev;
1810 static int printed_version;
1811
1812 if (!printed_version++)
1813 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
1814
1815 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
1816 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
1817}
1818
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 7a954a95a217..75b9748ae7cc 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -1,25 +1,26 @@
1/* 1/*
2 mv_sas.h - Marvell 88SE6440 SAS/SATA support 2 * Marvell 88SE64xx/88SE94xx main function head file
3 3 *
4 Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 6 *
7 This program is free software; you can redistribute it and/or 7 * This file is licensed under GPLv2.
8 modify it under the terms of the GNU General Public License as 8 *
9 published by the Free Software Foundation; either version 2, 9 * This program is free software; you can redistribute it and/or
10 or (at your option) any later version. 10 * modify it under the terms of the GNU General Public License as
11 11 * published by the Free Software Foundation; version 2 of the
12 This program is distributed in the hope that it will be useful, 12 * License.
13 but WITHOUT ANY WARRANTY; without even the implied warranty 13 *
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 14 * This program is distributed in the hope that it will be useful,
15 See the GNU General Public License for more details. 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 You should have received a copy of the GNU General Public 17 * General Public License for more details.
18 License along with this program; see the file COPYING. If not, 18 *
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge, 19 * You should have received a copy of the GNU General Public License
20 MA 02139, USA. 20 * along with this program; if not, write to the Free Software
21 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 */ 22 * USA
23*/
23 24
24#ifndef _MV_SAS_H_ 25#ifndef _MV_SAS_H_
25#define _MV_SAS_H_ 26#define _MV_SAS_H_
@@ -42,25 +43,144 @@
42#include <linux/version.h> 43#include <linux/version.h>
43#include "mv_defs.h" 44#include "mv_defs.h"
44 45
45#define DRV_NAME "mvsas" 46#define DRV_NAME "mvsas"
46#define DRV_VERSION "0.5.2" 47#define DRV_VERSION "0.8.2"
47#define _MV_DUMP 0 48#define _MV_DUMP 0
48#define MVS_DISABLE_NVRAM
49#define MVS_DISABLE_MSI
50
51#define MVS_ID_NOT_MAPPED 0x7f 49#define MVS_ID_NOT_MAPPED 0x7f
52#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 50/* #define DISABLE_HOTPLUG_DMA_FIX */
51#define MAX_EXP_RUNNING_REQ 2
52#define WIDE_PORT_MAX_PHY 4
53#define MV_DISABLE_NCQ 0
54#define mv_printk(fmt, arg ...) \
55 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
56#ifdef MV_DEBUG
57#define mv_dprintk(format, arg...) \
58 printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
59#else
60#define mv_dprintk(format, arg...)
61#endif
62#define MV_MAX_U32 0xffffffff
63
64extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch;
68
69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV))
53 71
54#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ 72#define bit(n) ((u32)1 << n)
55 for ((__mc) = (__lseq_mask), (__lseq) = 0; \ 73
56 (__mc) != 0 && __rest; \ 74#define for_each_phy(__lseq_mask, __mc, __lseq) \
75 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
76 (__mc) != 0 ; \
57 (++__lseq), (__mc) >>= 1) 77 (++__lseq), (__mc) >>= 1)
58 78
79#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
80#define UNASSOC_D2H_FIS(id) \
81 ((void *) mvi->rx_fis + 0x100 * id)
82#define SATA_RECEIVED_FIS_LIST(reg_set) \
83 ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
84#define SATA_RECEIVED_SDB_FIS(reg_set) \
85 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
86#define SATA_RECEIVED_D2H_FIS(reg_set) \
87 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
88#define SATA_RECEIVED_PIO_FIS(reg_set) \
89 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
90#define SATA_RECEIVED_DMA_FIS(reg_set) \
91 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
92
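The SATA_RECEIVED_*_FIS() macros above locate a register set's 0x100 byte FIS area inside the shared rx_fis buffer at fis_offs + 0x100 * reg_set, then pick the individual FIS types at fixed offsets (D2H at +0x40, SDB at +0x58, and so on); mvs_sata_done() copies the ending FIS from exactly that address. A small sketch of the address computation; the fis_offs value here is illustrative, the real one comes from mvs_chip_info.

#include <stdio.h>
#include <stdint.h>

/* illustrative numbers only; the real fis_offs is per-chip */
#define FIS_OFFS        0x800u
#define FIS_AREA_SZ     0x100u
#define D2H_FIS_OFFSET  0x40u

/* where a SATA_RECEIVED_D2H_FIS(reg_set)-style macro ends up pointing */
static const uint8_t *received_d2h_fis(const uint8_t *rx_fis, unsigned int reg_set)
{
        return rx_fis + FIS_OFFS + FIS_AREA_SZ * reg_set + D2H_FIS_OFFSET;
}

int main(void)
{
        static uint8_t rx_fis[0x2000];

        printf("reg set 3: D2H FIS at offset 0x%x\n",
               (unsigned int)(received_d2h_fis(rx_fis, 3) - rx_fis));
        return 0;
}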
93enum dev_status {
94 MVS_DEV_NORMAL = 0x0,
95 MVS_DEV_EH = 0x1,
96};
97
98
99struct mvs_info;
100
101struct mvs_dispatch {
102 char *name;
103 int (*chip_init)(struct mvs_info *mvi);
104 int (*spi_init)(struct mvs_info *mvi);
105 int (*chip_ioremap)(struct mvs_info *mvi);
106 void (*chip_iounmap)(struct mvs_info *mvi);
107 irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
108 u32 (*isr_status)(struct mvs_info *mvi, int irq);
109 void (*interrupt_enable)(struct mvs_info *mvi);
110 void (*interrupt_disable)(struct mvs_info *mvi);
111
112 u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
113 void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
114
115 u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
116 void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
117 void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
118
119 u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
120 void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
121 void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
122
123 u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
124 void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
125
126 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
127 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
128
129 void (*get_sas_addr)(void *buf, u32 buflen);
130 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
131 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
132 u32 tfs);
133 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
134 u32 (*rx_update)(struct mvs_info *mvi);
135 void (*int_full)(struct mvs_info *mvi);
136 u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
137 void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
138 u32 (*prd_size)(void);
139 u32 (*prd_count)(void);
140 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
141 void (*detect_porttype)(struct mvs_info *mvi, int i);
142 int (*oob_done)(struct mvs_info *mvi, int i);
143 void (*fix_phy_info)(struct mvs_info *mvi, int i,
144 struct sas_identify_frame *id);
145 void (*phy_work_around)(struct mvs_info *mvi, int i);
146 void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
147 struct sas_phy_linkrates *rates);
148 u32 (*phy_max_link_rate)(void);
149 void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
150 void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
151 void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
152 void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
153 void (*clear_active_cmds)(struct mvs_info *mvi);
154 u32 (*spi_read_data)(struct mvs_info *mvi);
155 void (*spi_write_data)(struct mvs_info *mvi, u32 data);
156 int (*spi_buildcmd)(struct mvs_info *mvi,
157 u32 *dwCmd,
158 u8 cmd,
159 u8 read,
160 u8 length,
161 u32 addr
162 );
163 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
164 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
165#ifndef DISABLE_HOTPLUG_DMA_FIX
166 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
167#endif
168
169};
170
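struct mvs_dispatch is the seam that lets the shared mv_sas.c code drive more than one ASIC family: every register access and chip quirk goes through one of these function pointers, filled in once per chip family. The trimmed initializer below is only an illustration of the pattern; the mvs_64xx_* names on the right-hand side are assumed here, and the authoritative tables live in mv_64xx.c and mv_94xx.c.

const struct mvs_dispatch mvs_64xx_dispatch = {
        .name              = "mv64xx",
        .chip_init         = mvs_64xx_init,
        .chip_ioremap      = mvs_64xx_ioremap,
        .chip_iounmap      = mvs_64xx_iounmap,
        .isr               = mvs_64xx_isr,
        .isr_status        = mvs_64xx_isr_status,
        .interrupt_enable  = mvs_64xx_interrupt_enable,
        .interrupt_disable = mvs_64xx_interrupt_disable,
        /* ... every remaining hook is wired up the same way ... */
};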
59struct mvs_chip_info { 171struct mvs_chip_info {
60 u32 n_phy; 172 u32 n_host;
61 u32 srs_sz; 173 u32 n_phy;
62 u32 slot_width; 174 u32 fis_offs;
175 u32 fis_count;
176 u32 srs_sz;
177 u32 slot_width;
178 const struct mvs_dispatch *dispatch;
63}; 179};
180#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
181#define MVS_RX_FISL_SZ \
182 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
183#define MVS_CHIP_DISP (mvi->chip->dispatch)
64 184
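With the chip description in place, MVS_CHIP_DISP is how the common code reaches whichever dispatch table the probed chip installed. A minimal sketch; mvs_example_soft_reset is a made-up helper, not part of the patch.

static void mvs_example_soft_reset(struct mvs_info *mvi, u32 phy_id)
{
        MVS_CHIP_DISP->interrupt_disable(mvi);
        MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);       /* hard == 0 */
        MVS_CHIP_DISP->interrupt_enable(mvi);
}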
65struct mvs_err_info { 185struct mvs_err_info {
66 __le32 flags; 186 __le32 flags;
@@ -72,7 +192,7 @@ struct mvs_cmd_hdr {
72 __le32 lens; /* cmd, max resp frame len */ 192 __le32 lens; /* cmd, max resp frame len */
73 __le32 tags; /* targ port xfer tag; tag */ 193 __le32 tags; /* targ port xfer tag; tag */
74 __le32 data_len; /* data xfer len */ 194 __le32 data_len; /* data xfer len */
75 __le64 cmd_tbl; /* command table address */ 195 __le64 cmd_tbl; /* command table address */
76 __le64 open_frame; /* open addr frame address */ 196 __le64 open_frame; /* open addr frame address */
77 __le64 status_buf; /* status buffer address */ 197 __le64 status_buf; /* status buffer address */
78 __le64 prd_tbl; /* PRD tbl address */ 198 __le64 prd_tbl; /* PRD tbl address */
@@ -82,16 +202,17 @@ struct mvs_cmd_hdr {
82struct mvs_port { 202struct mvs_port {
83 struct asd_sas_port sas_port; 203 struct asd_sas_port sas_port;
84 u8 port_attached; 204 u8 port_attached;
85 u8 taskfileset;
86 u8 wide_port_phymap; 205 u8 wide_port_phymap;
87 struct list_head list; 206 struct list_head list;
88}; 207};
89 208
90struct mvs_phy { 209struct mvs_phy {
210 struct mvs_info *mvi;
91 struct mvs_port *port; 211 struct mvs_port *port;
92 struct asd_sas_phy sas_phy; 212 struct asd_sas_phy sas_phy;
93 struct sas_identify identify; 213 struct sas_identify identify;
94 struct scsi_device *sdev; 214 struct scsi_device *sdev;
215 struct timer_list timer;
95 u64 dev_sas_addr; 216 u64 dev_sas_addr;
96 u64 att_dev_sas_addr; 217 u64 att_dev_sas_addr;
97 u32 att_dev_info; 218 u32 att_dev_info;
@@ -102,15 +223,34 @@ struct mvs_phy {
102 u32 frame_rcvd_size; 223 u32 frame_rcvd_size;
103 u8 frame_rcvd[32]; 224 u8 frame_rcvd[32];
104 u8 phy_attached; 225 u8 phy_attached;
226 u8 phy_mode;
227 u8 reserved[2];
228 u32 phy_event;
105 enum sas_linkrate minimum_linkrate; 229 enum sas_linkrate minimum_linkrate;
106 enum sas_linkrate maximum_linkrate; 230 enum sas_linkrate maximum_linkrate;
107}; 231};
108 232
233struct mvs_device {
234 enum sas_dev_type dev_type;
235 struct domain_device *sas_device;
236 u32 attached_phy;
237 u32 device_id;
238 u32 runing_req;
239 u8 taskfileset;
240 u8 dev_status;
241 u16 reserved;
242 struct list_head dev_entry;
243};
244
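Per-target state that used to sit on the port, most notably the SATA taskfileset, now lives in struct mvs_device, one entry per device_id in mvi->devices[]. A hedged sketch of reaching it from a libsas entry point, assuming the driver parks the pointer in domain_device->lldd_dev as libsas LLDDs conventionally do.

struct mvs_device *mvi_dev = dev->lldd_dev;     /* struct domain_device *dev */

if (mvi_dev->dev_status == MVS_DEV_EH)
        mv_dprintk("device %u is in error handling\n", mvi_dev->device_id);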
109struct mvs_slot_info { 245struct mvs_slot_info {
110 struct list_head list; 246 struct list_head entry;
111 struct sas_task *task; 247 union {
248 struct sas_task *task;
249 void *tdata;
250 };
112 u32 n_elem; 251 u32 n_elem;
113 u32 tx; 252 u32 tx;
253 u32 slot_tag;
114 254
115 /* DMA buffer for storing cmd tbl, open addr frame, status buffer, 255 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
116 * and PRD table 256 * and PRD table
@@ -120,9 +260,10 @@ struct mvs_slot_info {
120#if _MV_DUMP 260#if _MV_DUMP
121 u32 cmd_size; 261 u32 cmd_size;
122#endif 262#endif
123
124 void *response; 263 void *response;
125 struct mvs_port *port; 264 struct mvs_port *port;
265 struct mvs_device *device;
266 void *open_frame;
126}; 267};
127 268
128struct mvs_info { 269struct mvs_info {
@@ -133,17 +274,17 @@ struct mvs_info {
133 274
134 /* our device */ 275 /* our device */
135 struct pci_dev *pdev; 276 struct pci_dev *pdev;
277 struct device *dev;
136 278
137 /* enhanced mode registers */ 279 /* enhanced mode registers */
138 void __iomem *regs; 280 void __iomem *regs;
139 281
140 /* peripheral registers */ 282 /* peripheral or soc registers */
141 void __iomem *peri_regs; 283 void __iomem *regs_ex;
142
143 u8 sas_addr[SAS_ADDR_SIZE]; 284 u8 sas_addr[SAS_ADDR_SIZE];
144 285
145 /* SCSI/SAS glue */ 286 /* SCSI/SAS glue */
146 struct sas_ha_struct sas; 287 struct sas_ha_struct *sas;
147 struct Scsi_Host *shost; 288 struct Scsi_Host *shost;
148 289
149 /* TX (delivery) DMA ring */ 290 /* TX (delivery) DMA ring */
@@ -154,7 +295,7 @@ struct mvs_info {
154 u32 tx_prod; 295 u32 tx_prod;
155 296
156 /* RX (completion) DMA ring */ 297 /* RX (completion) DMA ring */
157 __le32 *rx; 298 __le32 *rx;
158 dma_addr_t rx_dma; 299 dma_addr_t rx_dma;
159 300
160 /* RX consumer idx */ 301 /* RX consumer idx */
@@ -168,38 +309,98 @@ struct mvs_info {
168 struct mvs_cmd_hdr *slot; 309 struct mvs_cmd_hdr *slot;
169 dma_addr_t slot_dma; 310 dma_addr_t slot_dma;
170 311
312 u32 chip_id;
171 const struct mvs_chip_info *chip; 313 const struct mvs_chip_info *chip;
172 314
173 u8 tags[MVS_SLOTS]; 315 int tags_num;
174 struct mvs_slot_info slot_info[MVS_SLOTS]; 316 u8 tags[MVS_SLOTS >> 3];
175 /* further per-slot information */ 317
318 /* further per-slot information */
176 struct mvs_phy phy[MVS_MAX_PHYS]; 319 struct mvs_phy phy[MVS_MAX_PHYS];
177 struct mvs_port port[MVS_MAX_PHYS]; 320 struct mvs_port port[MVS_MAX_PHYS];
178#ifdef MVS_USE_TASKLET 321 u32 irq;
179 struct tasklet_struct tasklet; 322 u32 exp_req;
323 u32 id;
324 u64 sata_reg_set;
325 struct list_head *hba_list;
326 struct list_head soc_entry;
327 struct list_head wq_list;
328 unsigned long instance;
329 u16 flashid;
330 u32 flashsize;
331 u32 flashsectSize;
332
333 void *addon;
334 struct mvs_device devices[MVS_MAX_DEVICES];
335#ifndef DISABLE_HOTPLUG_DMA_FIX
336 void *bulk_buffer;
337 dma_addr_t bulk_buffer_dma;
338#define TRASH_BUCKET_SIZE 0x20000
180#endif 339#endif
340 struct mvs_slot_info slot_info[0];
341};
342
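slot_info is now a zero-length trailing array, so a controller instance and its slot table come from a single allocation sized at probe time. The sketch below only shows the sizing this layout implies; the real allocation is done in mv_init.c and may differ in detail.

static struct mvs_info *mvs_example_alloc_host(void)
{
        return kzalloc(sizeof(struct mvs_info) +
                       MVS_SLOTS * sizeof(struct mvs_slot_info), GFP_KERNEL);
}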
343struct mvs_prv_info{
344 u8 n_host;
345 u8 n_phy;
346 u16 reserve;
347 struct mvs_info *mvi[2];
348};
349
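struct mvs_prv_info is the per-Scsi_Host wrapper that lets one host front up to two controller cores (mvi[2]). A sketch of unwrapping it from libsas, assuming the probe path points sas_ha_struct->lldd_ha at this structure.

struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct mvs_prv_info *prv = sha->lldd_ha;
struct mvs_info *mvi = prv->mvi[0];     /* mvi[1] on dual-core parts */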
350struct mvs_wq {
351 struct delayed_work work_q;
352 struct mvs_info *mvi;
353 void *data;
354 int handler;
355 struct list_head entry;
181}; 356};
182 357
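struct mvs_wq packages a delayed_work item with its owning controller and an event code so phy events can be handled outside the interrupt path. The sketch below shows the queue/complete pattern the fields suggest; mvs_example_wq_handler, mvs_example_queue_event and the 2*HZ delay are illustrative choices (locking around mvi->wq_list is omitted), not code lifted from the patch.

static void mvs_example_wq_handler(struct work_struct *work)
{
        struct mvs_wq *mwq = container_of(work, struct mvs_wq, work_q.work);

        /* ... react to mwq->handler / mwq->data here ... */
        list_del(&mwq->entry);
        kfree(mwq);
}

static int mvs_example_queue_event(struct mvs_info *mvi, int event, void *data)
{
        struct mvs_wq *mwq = kmalloc(sizeof(*mwq), GFP_ATOMIC);

        if (!mwq)
                return -ENOMEM;
        mwq->mvi = mvi;
        mwq->data = data;
        mwq->handler = event;
        MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_example_wq_handler, mwq);
        list_add_tail(&mwq->entry, &mvi->wq_list);
        schedule_delayed_work(&mwq->work_q, 2 * HZ);
        return 0;
}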
358struct mvs_task_exec_info {
359 struct sas_task *task;
360 struct mvs_cmd_hdr *hdr;
361 struct mvs_port *port;
362 u32 tag;
363 int n_elem;
364};
365
366
367/******************** function prototype *********************/
368void mvs_get_sas_addr(void *buf, u32 buflen);
369void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
370void mvs_tag_free(struct mvs_info *mvi, u32 tag);
371void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
372int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
373void mvs_tag_init(struct mvs_info *mvi);
374void mvs_iounmap(void __iomem *regs);
375int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
376void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
183int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 377int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
184 void *funcdata); 378 void *funcdata);
379void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
380 u32 off_lo, u32 off_hi, u64 sas_addr);
381int mvs_slave_alloc(struct scsi_device *scsi_dev);
185int mvs_slave_configure(struct scsi_device *sdev); 382int mvs_slave_configure(struct scsi_device *sdev);
186void mvs_scan_start(struct Scsi_Host *shost); 383void mvs_scan_start(struct Scsi_Host *shost);
187int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 384int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
188int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags); 385int mvs_queue_command(struct sas_task *task, const int num,
189int mvs_task_abort(struct sas_task *task); 386 gfp_t gfp_flags);
387int mvs_abort_task(struct sas_task *task);
388int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
389int mvs_clear_aca(struct domain_device *dev, u8 *lun);
390int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
190void mvs_port_formed(struct asd_sas_phy *sas_phy); 391void mvs_port_formed(struct asd_sas_phy *sas_phy);
392void mvs_port_deformed(struct asd_sas_phy *sas_phy);
393int mvs_dev_found(struct domain_device *dev);
394void mvs_dev_gone(struct domain_device *dev);
395int mvs_lu_reset(struct domain_device *dev, u8 *lun);
396int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
191int mvs_I_T_nexus_reset(struct domain_device *dev); 397int mvs_I_T_nexus_reset(struct domain_device *dev);
192void mvs_int_full(struct mvs_info *mvi); 398int mvs_query_task(struct sas_task *task);
193void mvs_tag_init(struct mvs_info *mvi); 399void mvs_release_task(struct mvs_info *mvi, int phy_no,
194int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen); 400 struct domain_device *dev);
195int __devinit mvs_hw_init(struct mvs_info *mvi); 401void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
196void __devinit mvs_print_info(struct mvs_info *mvi); 402void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
197void mvs_hba_interrupt_enable(struct mvs_info *mvi); 403int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
198void mvs_hba_interrupt_disable(struct mvs_info *mvi); 404void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
199void mvs_detect_porttype(struct mvs_info *mvi, int i);
200u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port);
201void mvs_enable_xmt(struct mvs_info *mvi, int PhyId);
202void __devinit mvs_phy_hacks(struct mvs_info *mvi);
203void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port);
204
205#endif 405#endif
406
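One structural change worth calling out from the prototypes above: the tag pool shrank from one byte per slot to a packed bitmap (u8 tags[MVS_SLOTS >> 3]) driven by the mvs_tag_* helpers. A sketch of the allocation scheme this implies, assuming the helpers are thin wrappers around the kernel bitmap primitives; the real implementations are in mv_sas.c.

static int mvs_example_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
        void *bitmap = mvi->tags;
        unsigned int tag = find_first_zero_bit(bitmap, mvi->tags_num);

        if (tag >= mvi->tags_num)
                return -SAS_QUEUE_FULL;
        set_bit(tag, bitmap);
        *tag_out = tag;
        return 0;
}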