author      Brett Russ <russb@emc.com>        2005-09-30 01:36:00 -0400
committer   Jeff Garzik <jgarzik@pobox.com>   2005-10-03 22:08:19 -0400
commit      31961943e3110c5a1c36b1e0069c29f7c4380e51 (patch)
tree        d9b3b38afeb8f4e9736ba601b0265f584652a30f /drivers/scsi/sata_mv.c
parent      2b235826098bb653982894dfc3f70fd029f6c2e4 (diff)
[PATCH] libata: Marvell SATA support (DMA mode) (resend: v0.22)
This is my libata-compatible low-level driver for the Marvell SATA
family. Currently it runs in DMA mode on a 6081 chip.
The 5xxx series parts are not yet DMA capable in this driver because
their registers differ in ways that haven't been accounted for yet.
Basically, I'm focused on the 6xxx series right now. I apologize to
those seeing problems on the 5xxx series; I haven't had a chance to
look at those problems yet.
For those curious, the previous SCSI timeout and subsequent panics
were caused by an improper clear of hc_irq_cause in mv_host_intr().
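A minimal sketch of the corrected clear, as it appears in the
mv_host_intr() hunk below: on this controller a bit in HC_IRQ_CAUSE is
cleared by writing 0 to it, so writing ~hc_irq_cause acknowledges
exactly the events just read, whereas the old write of plain 0 could
also discard events that raced in after the read:

        u32 hc_irq_cause;

        /* read all pending interrupt causes for this host controller */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        if (hc_irq_cause) {
                /* ack only the bits we observed: 0 clears, 1 leaves alone */
                writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
        }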
This version is running well in my environment (6081 chips,
with/without SW RAID1) and is showing performance equal to or better
than the Marvell driver (mv_sata) in my initial tests (timed dd's of
reads/writes to/from memory/disk).
I still need to look at the causes of occasional problems such as this:
ata11: translating stat 0x35 err 0x00 to sense
ata11: status=0x35 { DeviceFault SeekComplete CorrectedError Error }
SCSI error : <10 0 0 0> return code = 0x8000002
Current sda: sense key Hardware Error
end_request: I/O error, dev sda, sector 3155010
and this, seen at init time:
ATA: abnormal status 0x80 on port 0xE093911C
but they aren't showstoppers.
Signed-off-by: Brett Russ <russb@emc.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/scsi/sata_mv.c')
-rw-r--r--   drivers/scsi/sata_mv.c   938
1 file changed, 749 insertions, 189 deletions
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..ecda7df21142 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -35,7 +35,7 @@
35 | #include <asm/io.h> | 35 | #include <asm/io.h> |
36 | 36 | ||
37 | #define DRV_NAME "sata_mv" | 37 | #define DRV_NAME "sata_mv" |
38 | #define DRV_VERSION "0.12" | 38 | #define DRV_VERSION "0.22" |
39 | 39 | ||
40 | enum { | 40 | enum { |
41 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 41 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
@@ -55,31 +55,61 @@ enum {
55 | MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ | 55 | MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ |
56 | MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, | 56 | MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, |
57 | 57 | ||
58 | MV_Q_CT = 32, | 58 | MV_USE_Q_DEPTH = ATA_DEF_QUEUE, |
59 | MV_CRQB_SZ = 32, | ||
60 | MV_CRPB_SZ = 8, | ||
61 | 59 | ||
62 | MV_DMA_BOUNDARY = 0xffffffffU, | 60 | MV_MAX_Q_DEPTH = 32, |
63 | SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), | 61 | MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, |
62 | |||
63 | /* CRQB needs alignment on a 1KB boundary. Size == 1KB | ||
64 | * CRPB needs alignment on a 256B boundary. Size == 256B | ||
65 | * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB | ||
66 | * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B | ||
67 | */ | ||
68 | MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), | ||
69 | MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), | ||
70 | MV_MAX_SG_CT = 176, | ||
71 | MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), | ||
72 | MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ), | ||
73 | |||
74 | /* Our DMA boundary is determined by an ePRD being unable to handle | ||
75 | * anything larger than 64KB | ||
76 | */ | ||
77 | MV_DMA_BOUNDARY = 0xffffU, | ||
64 | 78 | ||
65 | MV_PORTS_PER_HC = 4, | 79 | MV_PORTS_PER_HC = 4, |
66 | /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ | 80 | /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ |
67 | MV_PORT_HC_SHIFT = 2, | 81 | MV_PORT_HC_SHIFT = 2, |
68 | /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ | 82 | /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */ |
69 | MV_PORT_MASK = 3, | 83 | MV_PORT_MASK = 3, |
70 | 84 | ||
71 | /* Host Flags */ | 85 | /* Host Flags */ |
72 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 86 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
73 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 87 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
74 | MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ | 88 | MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */ |
89 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
90 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), | ||
91 | MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE | | ||
92 | MV_FLAG_GLBL_SFT_RST), | ||
75 | 93 | ||
76 | chip_504x = 0, | 94 | chip_504x = 0, |
77 | chip_508x = 1, | 95 | chip_508x = 1, |
78 | chip_604x = 2, | 96 | chip_604x = 2, |
79 | chip_608x = 3, | 97 | chip_608x = 3, |
80 | 98 | ||
99 | CRQB_FLAG_READ = (1 << 0), | ||
100 | CRQB_TAG_SHIFT = 1, | ||
101 | CRQB_CMD_ADDR_SHIFT = 8, | ||
102 | CRQB_CMD_CS = (0x2 << 11), | ||
103 | CRQB_CMD_LAST = (1 << 15), | ||
104 | |||
105 | CRPB_FLAG_STATUS_SHIFT = 8, | ||
106 | |||
107 | EPRD_FLAG_END_OF_TBL = (1 << 31), | ||
108 | |||
81 | /* PCI interface registers */ | 109 | /* PCI interface registers */ |
82 | 110 | ||
111 | PCI_COMMAND_OFS = 0xc00, | ||
112 | |||
83 | PCI_MAIN_CMD_STS_OFS = 0xd30, | 113 | PCI_MAIN_CMD_STS_OFS = 0xd30, |
84 | STOP_PCI_MASTER = (1 << 2), | 114 | STOP_PCI_MASTER = (1 << 2), |
85 | PCI_MASTER_EMPTY = (1 << 3), | 115 | PCI_MASTER_EMPTY = (1 << 3), |
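A quick check of the sizing comment in the hunk above, using the enum
values it defines (the BUILD_BUG_ON is illustrative only, not part of
the patch):

        /* CRQB queue:   32 slots   * 32B = 1024B (1KB-aligned)
         * CRPB queue:   32 slots   *  8B =  256B (256B-aligned)
         * ePRD table:  176 entries * 16B = 2816B (16B-aligned)
         * per-port DMA chunk total       = 4096B, i.e. one 4KB page
         */
        BUILD_BUG_ON(MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ != 4096);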
@@ -111,20 +141,13 @@ enum {
111 | HC_CFG_OFS = 0, | 141 | HC_CFG_OFS = 0, |
112 | 142 | ||
113 | HC_IRQ_CAUSE_OFS = 0x14, | 143 | HC_IRQ_CAUSE_OFS = 0x14, |
114 | CRBP_DMA_DONE = (1 << 0), /* shift by port # */ | 144 | CRPB_DMA_DONE = (1 << 0), /* shift by port # */ |
115 | HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ | 145 | HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ |
116 | DEV_IRQ = (1 << 8), /* shift by port # */ | 146 | DEV_IRQ = (1 << 8), /* shift by port # */ |
117 | 147 | ||
118 | /* Shadow block registers */ | 148 | /* Shadow block registers */ |
119 | SHD_PIO_DATA_OFS = 0x100, | 149 | SHD_BLK_OFS = 0x100, |
120 | SHD_FEA_ERR_OFS = 0x104, | 150 | SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ |
121 | SHD_SECT_CNT_OFS = 0x108, | ||
122 | SHD_LBA_L_OFS = 0x10C, | ||
123 | SHD_LBA_M_OFS = 0x110, | ||
124 | SHD_LBA_H_OFS = 0x114, | ||
125 | SHD_DEV_HD_OFS = 0x118, | ||
126 | SHD_CMD_STA_OFS = 0x11C, | ||
127 | SHD_CTL_AST_OFS = 0x120, | ||
128 | 151 | ||
129 | /* SATA registers */ | 152 | /* SATA registers */ |
130 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ | 153 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ |
@@ -132,6 +155,11 @@ enum {
132 | 155 | ||
133 | /* Port registers */ | 156 | /* Port registers */ |
134 | EDMA_CFG_OFS = 0, | 157 | EDMA_CFG_OFS = 0, |
158 | EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ | ||
159 | EDMA_CFG_NCQ = (1 << 5), | ||
160 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ | ||
161 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ | ||
162 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ | ||
135 | 163 | ||
136 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, | 164 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, |
137 | EDMA_ERR_IRQ_MASK_OFS = 0xc, | 165 | EDMA_ERR_IRQ_MASK_OFS = 0xc, |
@@ -161,33 +189,85 @@ enum {
161 | EDMA_ERR_LNK_DATA_TX | | 189 | EDMA_ERR_LNK_DATA_TX | |
162 | EDMA_ERR_TRANS_PROTO), | 190 | EDMA_ERR_TRANS_PROTO), |
163 | 191 | ||
192 | EDMA_REQ_Q_BASE_HI_OFS = 0x10, | ||
193 | EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ | ||
194 | EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, | ||
195 | |||
196 | EDMA_REQ_Q_OUT_PTR_OFS = 0x18, | ||
197 | EDMA_REQ_Q_PTR_SHIFT = 5, | ||
198 | |||
199 | EDMA_RSP_Q_BASE_HI_OFS = 0x1c, | ||
200 | EDMA_RSP_Q_IN_PTR_OFS = 0x20, | ||
201 | EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ | ||
202 | EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, | ||
203 | EDMA_RSP_Q_PTR_SHIFT = 3, | ||
204 | |||
164 | EDMA_CMD_OFS = 0x28, | 205 | EDMA_CMD_OFS = 0x28, |
165 | EDMA_EN = (1 << 0), | 206 | EDMA_EN = (1 << 0), |
166 | EDMA_DS = (1 << 1), | 207 | EDMA_DS = (1 << 1), |
167 | ATA_RST = (1 << 2), | 208 | ATA_RST = (1 << 2), |
168 | 209 | ||
169 | /* BDMA is 6xxx part only */ | 210 | /* Host private flags (hp_flags) */ |
170 | BDMA_CMD_OFS = 0x224, | 211 | MV_HP_FLAG_MSI = (1 << 0), |
171 | BDMA_START = (1 << 0), | ||
172 | 212 | ||
173 | MV_UNDEF = 0, | 213 | /* Port private flags (pp_flags) */ |
214 | MV_PP_FLAG_EDMA_EN = (1 << 0), | ||
215 | MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), | ||
174 | }; | 216 | }; |
175 | 217 | ||
176 | struct mv_port_priv { | 218 | /* Command ReQuest Block: 32B */ |
219 | struct mv_crqb { | ||
220 | u32 sg_addr; | ||
221 | u32 sg_addr_hi; | ||
222 | u16 ctrl_flags; | ||
223 | u16 ata_cmd[11]; | ||
224 | }; | ||
177 | 225 | ||
226 | /* Command ResPonse Block: 8B */ | ||
227 | struct mv_crpb { | ||
228 | u16 id; | ||
229 | u16 flags; | ||
230 | u32 tmstmp; | ||
178 | }; | 231 | }; |
179 | 232 | ||
180 | struct mv_host_priv { | 233 | /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ |
234 | struct mv_sg { | ||
235 | u32 addr; | ||
236 | u32 flags_size; | ||
237 | u32 addr_hi; | ||
238 | u32 reserved; | ||
239 | }; | ||
181 | 240 | ||
241 | struct mv_port_priv { | ||
242 | struct mv_crqb *crqb; | ||
243 | dma_addr_t crqb_dma; | ||
244 | struct mv_crpb *crpb; | ||
245 | dma_addr_t crpb_dma; | ||
246 | struct mv_sg *sg_tbl; | ||
247 | dma_addr_t sg_tbl_dma; | ||
248 | |||
249 | unsigned req_producer; /* cp of req_in_ptr */ | ||
250 | unsigned rsp_consumer; /* cp of rsp_out_ptr */ | ||
251 | u32 pp_flags; | ||
252 | }; | ||
253 | |||
254 | struct mv_host_priv { | ||
255 | u32 hp_flags; | ||
182 | }; | 256 | }; |
183 | 257 | ||
184 | static void mv_irq_clear(struct ata_port *ap); | 258 | static void mv_irq_clear(struct ata_port *ap); |
185 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); | 259 | static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); |
186 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 260 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
261 | static u8 mv_check_err(struct ata_port *ap); | ||
187 | static void mv_phy_reset(struct ata_port *ap); | 262 | static void mv_phy_reset(struct ata_port *ap); |
188 | static int mv_master_reset(void __iomem *mmio_base); | 263 | static void mv_host_stop(struct ata_host_set *host_set); |
264 | static int mv_port_start(struct ata_port *ap); | ||
265 | static void mv_port_stop(struct ata_port *ap); | ||
266 | static void mv_qc_prep(struct ata_queued_cmd *qc); | ||
267 | static int mv_qc_issue(struct ata_queued_cmd *qc); | ||
189 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, | 268 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, |
190 | struct pt_regs *regs); | 269 | struct pt_regs *regs); |
270 | static void mv_eng_timeout(struct ata_port *ap); | ||
191 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 271 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
192 | 272 | ||
193 | static Scsi_Host_Template mv_sht = { | 273 | static Scsi_Host_Template mv_sht = { |
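The size claims in the struct comments above can be verified the same
way; an illustrative check, not in the patch (all three structs are
naturally aligned, so no padding is added):

        BUILD_BUG_ON(sizeof(struct mv_crqb) != 32);     /* 4 + 4 + 2 + 11*2 */
        BUILD_BUG_ON(sizeof(struct mv_crpb) != 8);      /* 2 + 2 + 4 */
        BUILD_BUG_ON(sizeof(struct mv_sg)   != 16);     /* 4 * 4 (one ePRD) */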
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
196 | .ioctl = ata_scsi_ioctl, | 276 | .ioctl = ata_scsi_ioctl, |
197 | .queuecommand = ata_scsi_queuecmd, | 277 | .queuecommand = ata_scsi_queuecmd, |
198 | .eh_strategy_handler = ata_scsi_error, | 278 | .eh_strategy_handler = ata_scsi_error, |
199 | .can_queue = ATA_DEF_QUEUE, | 279 | .can_queue = MV_USE_Q_DEPTH, |
200 | .this_id = ATA_SHT_THIS_ID, | 280 | .this_id = ATA_SHT_THIS_ID, |
201 | .sg_tablesize = MV_UNDEF, | 281 | .sg_tablesize = MV_MAX_SG_CT, |
202 | .max_sectors = ATA_MAX_SECTORS, | 282 | .max_sectors = ATA_MAX_SECTORS, |
203 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | 283 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, |
204 | .emulated = ATA_SHT_EMULATED, | 284 | .emulated = ATA_SHT_EMULATED, |
205 | .use_clustering = MV_UNDEF, | 285 | .use_clustering = ATA_SHT_USE_CLUSTERING, |
206 | .proc_name = DRV_NAME, | 286 | .proc_name = DRV_NAME, |
207 | .dma_boundary = MV_DMA_BOUNDARY, | 287 | .dma_boundary = MV_DMA_BOUNDARY, |
208 | .slave_configure = ata_scsi_slave_config, | 288 | .slave_configure = ata_scsi_slave_config, |
@@ -216,15 +296,16 @@ static struct ata_port_operations mv_ops = {
216 | .tf_load = ata_tf_load, | 296 | .tf_load = ata_tf_load, |
217 | .tf_read = ata_tf_read, | 297 | .tf_read = ata_tf_read, |
218 | .check_status = ata_check_status, | 298 | .check_status = ata_check_status, |
299 | .check_err = mv_check_err, | ||
219 | .exec_command = ata_exec_command, | 300 | .exec_command = ata_exec_command, |
220 | .dev_select = ata_std_dev_select, | 301 | .dev_select = ata_std_dev_select, |
221 | 302 | ||
222 | .phy_reset = mv_phy_reset, | 303 | .phy_reset = mv_phy_reset, |
223 | 304 | ||
224 | .qc_prep = ata_qc_prep, | 305 | .qc_prep = mv_qc_prep, |
225 | .qc_issue = ata_qc_issue_prot, | 306 | .qc_issue = mv_qc_issue, |
226 | 307 | ||
227 | .eng_timeout = ata_eng_timeout, | 308 | .eng_timeout = mv_eng_timeout, |
228 | 309 | ||
229 | .irq_handler = mv_interrupt, | 310 | .irq_handler = mv_interrupt, |
230 | .irq_clear = mv_irq_clear, | 311 | .irq_clear = mv_irq_clear, |
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
232 | .scr_read = mv_scr_read, | 313 | .scr_read = mv_scr_read, |
233 | .scr_write = mv_scr_write, | 314 | .scr_write = mv_scr_write, |
234 | 315 | ||
235 | .port_start = ata_port_start, | 316 | .port_start = mv_port_start, |
236 | .port_stop = ata_port_stop, | 317 | .port_stop = mv_port_stop, |
237 | .host_stop = ata_host_stop, | 318 | .host_stop = mv_host_stop, |
238 | }; | 319 | }; |
239 | 320 | ||
240 | static struct ata_port_info mv_port_info[] = { | 321 | static struct ata_port_info mv_port_info[] = { |
241 | { /* chip_504x */ | 322 | { /* chip_504x */ |
242 | .sht = &mv_sht, | 323 | .sht = &mv_sht, |
243 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 324 | .host_flags = MV_COMMON_FLAGS, |
244 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), | 325 | .pio_mask = 0x1f, /* pio0-4 */ |
245 | .pio_mask = 0x1f, /* pio4-0 */ | 326 | .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */ |
246 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
247 | .port_ops = &mv_ops, | 327 | .port_ops = &mv_ops, |
248 | }, | 328 | }, |
249 | { /* chip_508x */ | 329 | { /* chip_508x */ |
250 | .sht = &mv_sht, | 330 | .sht = &mv_sht, |
251 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 331 | .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), |
252 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 332 | .pio_mask = 0x1f, /* pio0-4 */ |
253 | MV_FLAG_DUAL_HC), | 333 | .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */ |
254 | .pio_mask = 0x1f, /* pio4-0 */ | ||
255 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
256 | .port_ops = &mv_ops, | 334 | .port_ops = &mv_ops, |
257 | }, | 335 | }, |
258 | { /* chip_604x */ | 336 | { /* chip_604x */ |
259 | .sht = &mv_sht, | 337 | .sht = &mv_sht, |
260 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 338 | .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), |
261 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 339 | .pio_mask = 0x1f, /* pio0-4 */ |
262 | MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), | 340 | .udma_mask = 0x7f, /* udma0-6 */ |
263 | .pio_mask = 0x1f, /* pio4-0 */ | ||
264 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
265 | .port_ops = &mv_ops, | 341 | .port_ops = &mv_ops, |
266 | }, | 342 | }, |
267 | { /* chip_608x */ | 343 | { /* chip_608x */ |
268 | .sht = &mv_sht, | 344 | .sht = &mv_sht, |
269 | .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 345 | .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | |
270 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 346 | MV_FLAG_DUAL_HC), |
271 | MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | | 347 | .pio_mask = 0x1f, /* pio0-4 */ |
272 | MV_FLAG_BDMA), | 348 | .udma_mask = 0x7f, /* udma0-6 */ |
273 | .pio_mask = 0x1f, /* pio4-0 */ | ||
274 | .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ | ||
275 | .port_ops = &mv_ops, | 349 | .port_ops = &mv_ops, |
276 | }, | 350 | }, |
277 | }; | 351 | }; |
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
306 | (void) readl(addr); /* flush to avoid PCI posted write */ | 380 | (void) readl(addr); /* flush to avoid PCI posted write */ |
307 | } | 381 | } |
308 | 382 | ||
309 | static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio) | ||
310 | { | ||
311 | return ((void __iomem *)((unsigned long)port_mmio & | ||
312 | (unsigned long)SATAHC_MASK)); | ||
313 | } | ||
314 | |||
315 | static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) | 383 | static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) |
316 | { | 384 | { |
317 | return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); | 385 | return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); |
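For readers following the port arithmetic in these hunks: with
MV_PORTS_PER_HC == 4, a global port number 0-7 decodes into a host
controller index and a per-HC "hard port". The helpers below are
hypothetical, shown only to make the mapping explicit:

        static inline unsigned int mv_hc_from_port(unsigned int port)
        {
                return port >> MV_PORT_HC_SHIFT;        /* HC 0 or 1 */
        }

        static inline unsigned int mv_hardport_from_port(unsigned int port)
        {
                return port & MV_PORT_MASK;             /* hard port 0..3 */
        }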
@@ -329,24 +397,141 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
329 | return mv_port_base(ap->host_set->mmio_base, ap->port_no); | 397 | return mv_port_base(ap->host_set->mmio_base, ap->port_no); |
330 | } | 398 | } |
331 | 399 | ||
332 | static inline int mv_get_hc_count(unsigned long flags) | 400 | static inline int mv_get_hc_count(unsigned long hp_flags) |
401 | { | ||
402 | return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1); | ||
403 | } | ||
404 | |||
405 | static void mv_irq_clear(struct ata_port *ap) | ||
333 | { | 406 | { |
334 | return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); | ||
335 | } | 407 | } |
336 | 408 | ||
337 | static inline int mv_is_edma_active(struct ata_port *ap) | 409 | static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp, |
410 | struct ata_port *ap) | ||
338 | { | 411 | { |
339 | void __iomem *port_mmio = mv_ap_base(ap); | 412 | unsigned long flags; |
340 | return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); | 413 | |
414 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
415 | |||
416 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); | ||
417 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; | ||
418 | |||
419 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
341 | } | 420 | } |
342 | 421 | ||
343 | static inline int mv_port_bdma_capable(struct ata_port *ap) | 422 | static void mv_stop_dma(struct ata_port *ap) |
344 | { | 423 | { |
345 | return (ap->flags & MV_FLAG_BDMA); | 424 | void __iomem *port_mmio = mv_ap_base(ap); |
425 | struct mv_port_priv *pp = ap->private_data; | ||
426 | unsigned long flags; | ||
427 | u32 reg; | ||
428 | int i; | ||
429 | |||
430 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
431 | |||
432 | if (!(MV_PP_FLAG_EDMA_DS_ACT & pp->pp_flags) && | ||
433 | ((MV_PP_FLAG_EDMA_EN & pp->pp_flags) || | ||
434 | (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)))) { | ||
435 | /* Disable EDMA if we're not already trying to disable it | ||
436 | * and it is currently active. The disable bit auto clears. | ||
437 | */ | ||
438 | pp->pp_flags |= MV_PP_FLAG_EDMA_DS_ACT; | ||
439 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | ||
440 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
441 | } | ||
442 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
443 | |||
444 | /* now properly wait for the eDMA to stop */ | ||
445 | for (i = 1000; i > 0; i--) { | ||
446 | reg = readl(port_mmio + EDMA_CMD_OFS); | ||
447 | if (!(EDMA_EN & reg)) { | ||
448 | break; | ||
449 | } | ||
450 | udelay(100); | ||
451 | } | ||
452 | |||
453 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
454 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_DS_ACT; | ||
455 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
456 | |||
457 | if (EDMA_EN & reg) { | ||
458 | printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); | ||
459 | } | ||
346 | } | 460 | } |
347 | 461 | ||
348 | static void mv_irq_clear(struct ata_port *ap) | 462 | static void mv_dump_mem(void __iomem *start, unsigned bytes) |
349 | { | 463 | { |
464 | #ifdef ATA_DEBUG | ||
465 | int b, w; | ||
466 | for (b = 0; b < bytes; ) { | ||
467 | DPRINTK("%p: ", start + b); | ||
468 | for (w = 0; b < bytes && w < 4; w++) { | ||
469 | printk("%08x ",readl(start + b)); | ||
470 | b += sizeof(u32); | ||
471 | } | ||
472 | printk("\n"); | ||
473 | } | ||
474 | #endif | ||
475 | } | ||
476 | static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) | ||
477 | { | ||
478 | #ifdef ATA_DEBUG | ||
479 | int b, w; | ||
480 | u32 dw; | ||
481 | for (b = 0; b < bytes; ) { | ||
482 | DPRINTK("%02x: ", b); | ||
483 | for (w = 0; b < bytes && w < 4; w++) { | ||
484 | (void) pci_read_config_dword(pdev,b,&dw); | ||
485 | printk("%08x ",dw); | ||
486 | b += sizeof(u32); | ||
487 | } | ||
488 | printk("\n"); | ||
489 | } | ||
490 | #endif | ||
491 | } | ||
492 | static void mv_dump_all_regs(void __iomem *mmio_base, int port, | ||
493 | struct pci_dev *pdev) | ||
494 | { | ||
495 | #ifdef ATA_DEBUG | ||
496 | void __iomem *hc_base = mv_hc_base(mmio_base, | ||
497 | port >> MV_PORT_HC_SHIFT); | ||
498 | void __iomem *port_base; | ||
499 | int start_port, num_ports, p, start_hc, num_hcs, hc; | ||
500 | |||
501 | if (0 > port) { | ||
502 | start_hc = start_port = 0; | ||
503 | num_ports = 8; /* shld be benign for 4 port devs */ | ||
504 | num_hcs = 2; | ||
505 | } else { | ||
506 | start_hc = port >> MV_PORT_HC_SHIFT; | ||
507 | start_port = port; | ||
508 | num_ports = num_hcs = 1; | ||
509 | } | ||
510 | DPRINTK("All registers for port(s) %u-%u:\n", start_port, | ||
511 | num_ports > 1 ? num_ports - 1 : start_port); | ||
512 | |||
513 | if (NULL != pdev) { | ||
514 | DPRINTK("PCI config space regs:\n"); | ||
515 | mv_dump_pci_cfg(pdev, 0x68); | ||
516 | } | ||
517 | DPRINTK("PCI regs:\n"); | ||
518 | mv_dump_mem(mmio_base+0xc00, 0x3c); | ||
519 | mv_dump_mem(mmio_base+0xd00, 0x34); | ||
520 | mv_dump_mem(mmio_base+0xf00, 0x4); | ||
521 | mv_dump_mem(mmio_base+0x1d00, 0x6c); | ||
522 | for (hc = start_hc; hc < start_hc + num_hcs; hc++) { | ||
523 | hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT); | ||
524 | DPRINTK("HC regs (HC %i):\n", hc); | ||
525 | mv_dump_mem(hc_base, 0x1c); | ||
526 | } | ||
527 | for (p = start_port; p < start_port + num_ports; p++) { | ||
528 | port_base = mv_port_base(mmio_base, p); | ||
529 | DPRINTK("EDMA regs (port %i):\n",p); | ||
530 | mv_dump_mem(port_base, 0x54); | ||
531 | DPRINTK("SATA regs (port %i):\n",p); | ||
532 | mv_dump_mem(port_base+0x300, 0x60); | ||
533 | } | ||
534 | #endif | ||
350 | } | 535 | } |
351 | 536 | ||
352 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) | 537 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) |
@@ -389,30 +574,29 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
389 | } | 574 | } |
390 | } | 575 | } |
391 | 576 | ||
392 | static int mv_master_reset(void __iomem *mmio_base) | 577 | /* This routine only applies to 6xxx parts */ |
578 | static int mv_global_soft_reset(void __iomem *mmio_base) | ||
393 | { | 579 | { |
394 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; | 580 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; |
395 | int i, rc = 0; | 581 | int i, rc = 0; |
396 | u32 t; | 582 | u32 t; |
397 | 583 | ||
398 | VPRINTK("ENTER\n"); | ||
399 | |||
400 | /* Following procedure defined in PCI "main command and status | 584 | /* Following procedure defined in PCI "main command and status |
401 | * register" table. | 585 | * register" table. |
402 | */ | 586 | */ |
403 | t = readl(reg); | 587 | t = readl(reg); |
404 | writel(t | STOP_PCI_MASTER, reg); | 588 | writel(t | STOP_PCI_MASTER, reg); |
405 | 589 | ||
406 | for (i = 0; i < 100; i++) { | 590 | for (i = 0; i < 1000; i++) { |
407 | msleep(10); | 591 | udelay(1); |
408 | t = readl(reg); | 592 | t = readl(reg); |
409 | if (PCI_MASTER_EMPTY & t) { | 593 | if (PCI_MASTER_EMPTY & t) { |
410 | break; | 594 | break; |
411 | } | 595 | } |
412 | } | 596 | } |
413 | if (!(PCI_MASTER_EMPTY & t)) { | 597 | if (!(PCI_MASTER_EMPTY & t)) { |
414 | printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); | 598 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); |
415 | rc = 1; /* broken HW? */ | 599 | rc = 1; |
416 | goto done; | 600 | goto done; |
417 | } | 601 | } |
418 | 602 | ||
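Note that the flush wait above also changed character: the old loop
slept (msleep(10), up to ~1s total) while the new one busy-waits
(udelay(1), up to ~1ms total), which also makes it safe to call from
atomic context. A sketch of the new wait, with the timeout as defined
in this hunk:

        int i;
        u32 t;

        for (i = 0; i < 1000; i++) {
                udelay(1);                      /* busy-wait, never sleeps */
                t = readl(reg);
                if (PCI_MASTER_EMPTY & t)
                        break;                  /* PCI master queue drained */
        }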
@@ -425,39 +609,311 @@ static int mv_master_reset(void __iomem *mmio_base)
425 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); | 609 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); |
426 | 610 | ||
427 | if (!(GLOB_SFT_RST & t)) { | 611 | if (!(GLOB_SFT_RST & t)) { |
428 | printk(KERN_ERR DRV_NAME "can't set global reset\n"); | 612 | printk(KERN_ERR DRV_NAME ": can't set global reset\n"); |
429 | rc = 1; /* broken HW? */ | 613 | rc = 1; |
430 | goto done; | 614 | goto done; |
431 | } | 615 | } |
432 | 616 | ||
433 | /* clear reset */ | 617 | /* clear reset and *reenable the PCI master* (not mentioned in spec) */ |
434 | i = 5; | 618 | i = 5; |
435 | do { | 619 | do { |
436 | writel(t & ~GLOB_SFT_RST, reg); | 620 | writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); |
437 | t = readl(reg); | 621 | t = readl(reg); |
438 | udelay(1); | 622 | udelay(1); |
439 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); | 623 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); |
440 | 624 | ||
441 | if (GLOB_SFT_RST & t) { | 625 | if (GLOB_SFT_RST & t) { |
442 | printk(KERN_ERR DRV_NAME "can't clear global reset\n"); | 626 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); |
443 | rc = 1; /* broken HW? */ | 627 | rc = 1; |
444 | } | 628 | } |
445 | 629 | done: | |
446 | done: | ||
447 | VPRINTK("EXIT, rc = %i\n", rc); | ||
448 | return rc; | 630 | return rc; |
449 | } | 631 | } |
450 | 632 | ||
451 | static void mv_err_intr(struct ata_port *ap) | 633 | static void mv_host_stop(struct ata_host_set *host_set) |
452 | { | 634 | { |
453 | void __iomem *port_mmio; | 635 | struct mv_host_priv *hpriv = host_set->private_data; |
454 | u32 edma_err_cause, serr = 0; | 636 | struct pci_dev *pdev = to_pci_dev(host_set->dev); |
637 | |||
638 | if (hpriv->hp_flags & MV_HP_FLAG_MSI) { | ||
639 | pci_disable_msi(pdev); | ||
640 | } else { | ||
641 | pci_intx(pdev, 0); | ||
642 | } | ||
643 | kfree(hpriv); | ||
644 | ata_host_stop(host_set); | ||
645 | } | ||
646 | |||
647 | static int mv_port_start(struct ata_port *ap) | ||
648 | { | ||
649 | struct device *dev = ap->host_set->dev; | ||
650 | struct mv_port_priv *pp; | ||
651 | void __iomem *port_mmio = mv_ap_base(ap); | ||
652 | void *mem; | ||
653 | dma_addr_t mem_dma; | ||
654 | |||
655 | pp = kmalloc(sizeof(*pp), GFP_KERNEL); | ||
656 | if (!pp) { | ||
657 | return -ENOMEM; | ||
658 | } | ||
659 | memset(pp, 0, sizeof(*pp)); | ||
660 | |||
661 | mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, | ||
662 | GFP_KERNEL); | ||
663 | if (!mem) { | ||
664 | kfree(pp); | ||
665 | return -ENOMEM; | ||
666 | } | ||
667 | memset(mem, 0, MV_PORT_PRIV_DMA_SZ); | ||
668 | |||
669 | /* First item in chunk of DMA memory: | ||
670 | * 32-slot command request table (CRQB), 32 bytes each in size | ||
671 | */ | ||
672 | pp->crqb = mem; | ||
673 | pp->crqb_dma = mem_dma; | ||
674 | mem += MV_CRQB_Q_SZ; | ||
675 | mem_dma += MV_CRQB_Q_SZ; | ||
676 | |||
677 | /* Second item: | ||
678 | * 32-slot command response table (CRPB), 8 bytes each in size | ||
679 | */ | ||
680 | pp->crpb = mem; | ||
681 | pp->crpb_dma = mem_dma; | ||
682 | mem += MV_CRPB_Q_SZ; | ||
683 | mem_dma += MV_CRPB_Q_SZ; | ||
684 | |||
685 | /* Third item: | ||
686 | * Table of scatter-gather descriptors (ePRD), 16 bytes each | ||
687 | */ | ||
688 | pp->sg_tbl = mem; | ||
689 | pp->sg_tbl_dma = mem_dma; | ||
690 | |||
691 | writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | | ||
692 | EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS); | ||
693 | |||
694 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | ||
695 | writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, | ||
696 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
697 | |||
698 | writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
699 | writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
700 | |||
701 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | ||
702 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | ||
703 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
704 | |||
705 | pp->req_producer = pp->rsp_consumer = 0; | ||
706 | |||
707 | /* Don't turn on EDMA here...do it before DMA commands only. Else | ||
708 | * we'll be unable to send non-data, PIO, etc due to restricted access | ||
709 | * to shadow regs. | ||
710 | */ | ||
711 | ap->private_data = pp; | ||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | static void mv_port_stop(struct ata_port *ap) | ||
716 | { | ||
717 | struct device *dev = ap->host_set->dev; | ||
718 | struct mv_port_priv *pp = ap->private_data; | ||
719 | |||
720 | mv_stop_dma(ap); | ||
721 | |||
722 | ap->private_data = NULL; | ||
723 | dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); | ||
724 | kfree(pp); | ||
725 | } | ||
726 | |||
727 | static void mv_fill_sg(struct ata_queued_cmd *qc) | ||
728 | { | ||
729 | struct mv_port_priv *pp = qc->ap->private_data; | ||
730 | unsigned int i; | ||
731 | |||
732 | for (i = 0; i < qc->n_elem; i++) { | ||
733 | u32 sg_len; | ||
734 | dma_addr_t addr; | ||
735 | |||
736 | addr = sg_dma_address(&qc->sg[i]); | ||
737 | sg_len = sg_dma_len(&qc->sg[i]); | ||
738 | |||
739 | pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); | ||
740 | pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); | ||
741 | assert(0 == (sg_len & ~MV_DMA_BOUNDARY)); | ||
742 | pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len); | ||
743 | } | ||
744 | if (0 < qc->n_elem) { | ||
745 | pp->sg_tbl[qc->n_elem - 1].flags_size |= EPRD_FLAG_END_OF_TBL; | ||
746 | } | ||
747 | } | ||
748 | |||
749 | static inline unsigned mv_inc_q_index(unsigned *index) | ||
750 | { | ||
751 | *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; | ||
752 | return *index; | ||
753 | } | ||
754 | |||
755 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) | ||
756 | { | ||
757 | *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | ||
758 | (last ? CRQB_CMD_LAST : 0); | ||
759 | } | ||
760 | |||
761 | static void mv_qc_prep(struct ata_queued_cmd *qc) | ||
762 | { | ||
763 | struct ata_port *ap = qc->ap; | ||
764 | struct mv_port_priv *pp = ap->private_data; | ||
765 | u16 *cw; | ||
766 | struct ata_taskfile *tf; | ||
767 | u16 flags = 0; | ||
768 | |||
769 | if (ATA_PROT_DMA != qc->tf.protocol) { | ||
770 | return; | ||
771 | } | ||
455 | 772 | ||
456 | /* bug here b/c we got an err int on a port we don't know about, | 773 | /* the req producer index should be the same as we remember it */ |
457 | * so there's no way to clear it | 774 | assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> |
775 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
776 | pp->req_producer); | ||
777 | |||
778 | /* Fill in command request block | ||
779 | */ | ||
780 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | ||
781 | flags |= CRQB_FLAG_READ; | ||
782 | } | ||
783 | assert(MV_MAX_Q_DEPTH > qc->tag); | ||
784 | flags |= qc->tag << CRQB_TAG_SHIFT; | ||
785 | |||
786 | pp->crqb[pp->req_producer].sg_addr = | ||
787 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | ||
788 | pp->crqb[pp->req_producer].sg_addr_hi = | ||
789 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | ||
790 | pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); | ||
791 | |||
792 | cw = &pp->crqb[pp->req_producer].ata_cmd[0]; | ||
793 | tf = &qc->tf; | ||
794 | |||
795 | /* Sadly, the CRQB cannot accomodate all registers--there are | ||
796 | * only 11 bytes...so we must pick and choose required | ||
797 | * registers based on the command. So, we drop feature and | ||
798 | * hob_feature for [RW] DMA commands, but they are needed for | ||
799 | * NCQ. NCQ will drop hob_nsect. | ||
458 | */ | 800 | */ |
459 | BUG_ON(NULL == ap); | 801 | switch (tf->command) { |
460 | port_mmio = mv_ap_base(ap); | 802 | case ATA_CMD_READ: |
803 | case ATA_CMD_READ_EXT: | ||
804 | case ATA_CMD_WRITE: | ||
805 | case ATA_CMD_WRITE_EXT: | ||
806 | mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); | ||
807 | break; | ||
808 | #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ | ||
809 | case ATA_CMD_FPDMA_READ: | ||
810 | case ATA_CMD_FPDMA_WRITE: | ||
811 | mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); | ||
812 | mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); | ||
813 | break; | ||
814 | #endif /* FIXME: remove this line when NCQ added */ | ||
815 | default: | ||
816 | /* The only other commands EDMA supports in non-queued and | ||
817 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none | ||
818 | * of which are defined/used by Linux. If we get here, this | ||
819 | * driver needs work. | ||
820 | * | ||
821 | * FIXME: modify libata to give qc_prep a return value and | ||
822 | * return error here. | ||
823 | */ | ||
824 | BUG_ON(tf->command); | ||
825 | break; | ||
826 | } | ||
827 | mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); | ||
828 | mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); | ||
829 | mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); | ||
830 | mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); | ||
831 | mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); | ||
832 | mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); | ||
833 | mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); | ||
834 | mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); | ||
835 | mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ | ||
836 | |||
837 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { | ||
838 | return; | ||
839 | } | ||
840 | mv_fill_sg(qc); | ||
841 | } | ||
842 | |||
843 | static int mv_qc_issue(struct ata_queued_cmd *qc) | ||
844 | { | ||
845 | void __iomem *port_mmio = mv_ap_base(qc->ap); | ||
846 | struct mv_port_priv *pp = qc->ap->private_data; | ||
847 | u32 in_ptr; | ||
848 | |||
849 | if (ATA_PROT_DMA != qc->tf.protocol) { | ||
850 | /* We're about to send a non-EDMA capable command to the | ||
851 | * port. Turn off EDMA so there won't be problems accessing | ||
852 | * shadow block, etc registers. | ||
853 | */ | ||
854 | mv_stop_dma(qc->ap); | ||
855 | return ata_qc_issue_prot(qc); | ||
856 | } | ||
857 | |||
858 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
859 | |||
860 | /* the req producer index should be the same as we remember it */ | ||
861 | assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
862 | pp->req_producer); | ||
863 | /* until we do queuing, the queue should be empty at this point */ | ||
864 | assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
865 | ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> | ||
866 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | ||
867 | |||
868 | mv_inc_q_index(&pp->req_producer); /* now incr producer index */ | ||
869 | |||
870 | if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { | ||
871 | /* turn on EDMA if not already on */ | ||
872 | mv_start_dma(port_mmio, pp, qc->ap); | ||
873 | } | ||
874 | assert(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); | ||
875 | |||
876 | /* and write the request in pointer to kick the EDMA to life */ | ||
877 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | ||
878 | in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; | ||
879 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
880 | |||
881 | return 0; | ||
882 | } | ||
883 | |||
884 | static u8 mv_get_crpb_status(struct ata_port *ap) | ||
885 | { | ||
886 | void __iomem *port_mmio = mv_ap_base(ap); | ||
887 | struct mv_port_priv *pp = ap->private_data; | ||
888 | u32 out_ptr; | ||
889 | |||
890 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
891 | |||
892 | /* the response consumer index should be the same as we remember it */ | ||
893 | assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
894 | pp->rsp_consumer); | ||
895 | |||
896 | /* increment our consumer index... */ | ||
897 | pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); | ||
898 | |||
899 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | ||
900 | assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> | ||
901 | EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == | ||
902 | pp->rsp_consumer); | ||
903 | |||
904 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | ||
905 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | ||
906 | out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; | ||
907 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
908 | |||
909 | /* Return ATA status register for completed CRPB */ | ||
910 | return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT); | ||
911 | } | ||
912 | |||
913 | static void mv_err_intr(struct ata_port *ap) | ||
914 | { | ||
915 | void __iomem *port_mmio = mv_ap_base(ap); | ||
916 | u32 edma_err_cause, serr = 0; | ||
461 | 917 | ||
462 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 918 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
463 | 919 | ||
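All of the queue bookkeeping above (mv_qc_prep(), mv_qc_issue(),
mv_get_crpb_status()) decodes the hardware in/out pointer registers the
same way; a hypothetical helper capturing the pattern:

        /* The request/response pointer registers carry a 5-bit queue index
         * at EDMA_REQ_Q_PTR_SHIFT / EDMA_RSP_Q_PTR_SHIFT; the remaining
         * bits hold the queue base address.
         */
        static inline unsigned int mv_q_index(u32 ptr_reg, unsigned int shift)
        {
                return (ptr_reg >> shift) & MV_MAX_Q_DEPTH_MASK;  /* 0..31 */
        }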
@@ -477,8 +933,7 @@ static void mv_err_intr(struct ata_port *ap)
477 | } | 933 | } |
478 | } | 934 | } |
479 | 935 | ||
480 | /* Handle any outstanding interrupts in a single SATAHC | 936 | /* Handle any outstanding interrupts in a single SATAHC */ |
481 | */ | ||
482 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | 937 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, |
483 | unsigned int hc) | 938 | unsigned int hc) |
484 | { | 939 | { |
@@ -487,8 +942,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
487 | struct ata_port *ap; | 942 | struct ata_port *ap; |
488 | struct ata_queued_cmd *qc; | 943 | struct ata_queued_cmd *qc; |
489 | u32 hc_irq_cause; | 944 | u32 hc_irq_cause; |
490 | int shift, port, port0, hard_port; | 945 | int shift, port, port0, hard_port, handled; |
491 | u8 ata_status; | 946 | u8 ata_status = 0; |
492 | 947 | ||
493 | if (hc == 0) { | 948 | if (hc == 0) { |
494 | port0 = 0; | 949 | port0 = 0; |
@@ -499,7 +954,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
499 | /* we'll need the HC success int register in most cases */ | 954 | /* we'll need the HC success int register in most cases */ |
500 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | 955 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); |
501 | if (hc_irq_cause) { | 956 | if (hc_irq_cause) { |
502 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | 957 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); |
503 | } | 958 | } |
504 | 959 | ||
505 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", | 960 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", |
@@ -508,35 +963,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
508 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { | 963 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { |
509 | ap = host_set->ports[port]; | 964 | ap = host_set->ports[port]; |
510 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ | 965 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ |
511 | ata_status = 0xffU; | 966 | handled = 0; /* ensure ata_status is set if handled++ */ |
512 | 967 | ||
513 | if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { | 968 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { |
514 | BUG_ON(NULL == ap); | 969 | /* new CRPB on the queue; just one at a time until NCQ |
515 | /* rcv'd new resp, basic DMA complete, or ATA IRQ */ | 970 | */ |
516 | /* This is needed to clear the ATA INTRQ. | 971 | ata_status = mv_get_crpb_status(ap); |
517 | * FIXME: don't read the status reg in EDMA mode! | 972 | handled++; |
973 | } else if ((DEV_IRQ << hard_port) & hc_irq_cause) { | ||
974 | /* received ATA IRQ; read the status reg to clear INTRQ | ||
518 | */ | 975 | */ |
519 | ata_status = readb((void __iomem *) | 976 | ata_status = readb((void __iomem *) |
520 | ap->ioaddr.status_addr); | 977 | ap->ioaddr.status_addr); |
978 | handled++; | ||
521 | } | 979 | } |
522 | 980 | ||
523 | shift = port * 2; | 981 | shift = port << 1; /* (port * 2) */ |
524 | if (port >= MV_PORTS_PER_HC) { | 982 | if (port >= MV_PORTS_PER_HC) { |
525 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 983 | shift++; /* skip bit 8 in the HC Main IRQ reg */ |
526 | } | 984 | } |
527 | if ((PORT0_ERR << shift) & relevant) { | 985 | if ((PORT0_ERR << shift) & relevant) { |
528 | mv_err_intr(ap); | 986 | mv_err_intr(ap); |
529 | /* FIXME: smart to OR in ATA_ERR? */ | 987 | /* OR in ATA_ERR to ensure libata knows we took one */ |
530 | ata_status = readb((void __iomem *) | 988 | ata_status = readb((void __iomem *) |
531 | ap->ioaddr.status_addr) | ATA_ERR; | 989 | ap->ioaddr.status_addr) | ATA_ERR; |
990 | handled++; | ||
532 | } | 991 | } |
533 | 992 | ||
534 | if (ap) { | 993 | if (handled && ap) { |
535 | qc = ata_qc_from_tag(ap, ap->active_tag); | 994 | qc = ata_qc_from_tag(ap, ap->active_tag); |
536 | if (NULL != qc) { | 995 | if (NULL != qc) { |
537 | VPRINTK("port %u IRQ found for qc, " | 996 | VPRINTK("port %u IRQ found for qc, " |
538 | "ata_status 0x%x\n", port,ata_status); | 997 | "ata_status 0x%x\n", port,ata_status); |
539 | BUG_ON(0xffU == ata_status); | ||
540 | /* mark qc status appropriately */ | 998 | /* mark qc status appropriately */ |
541 | ata_qc_complete(qc, ata_status); | 999 | ata_qc_complete(qc, ata_status); |
542 | } | 1000 | } |
@@ -550,12 +1008,10 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
550 | { | 1008 | { |
551 | struct ata_host_set *host_set = dev_instance; | 1009 | struct ata_host_set *host_set = dev_instance; |
552 | unsigned int hc, handled = 0, n_hcs; | 1010 | unsigned int hc, handled = 0, n_hcs; |
553 | void __iomem *mmio; | 1011 | void __iomem *mmio = host_set->mmio_base; |
554 | u32 irq_stat; | 1012 | u32 irq_stat; |
555 | 1013 | ||
556 | mmio = host_set->mmio_base; | ||
557 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); | 1014 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); |
558 | n_hcs = mv_get_hc_count(host_set->ports[0]->flags); | ||
559 | 1015 | ||
560 | /* check the cases where we either have nothing pending or have read | 1016 | /* check the cases where we either have nothing pending or have read |
561 | * a bogus register value which can indicate HW removal or PCI fault | 1017 | * a bogus register value which can indicate HW removal or PCI fault |
@@ -564,64 +1020,87 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
564 | return IRQ_NONE; | 1020 | return IRQ_NONE; |
565 | } | 1021 | } |
566 | 1022 | ||
1023 | n_hcs = mv_get_hc_count(host_set->ports[0]->flags); | ||
567 | spin_lock(&host_set->lock); | 1024 | spin_lock(&host_set->lock); |
568 | 1025 | ||
569 | for (hc = 0; hc < n_hcs; hc++) { | 1026 | for (hc = 0; hc < n_hcs; hc++) { |
570 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); | 1027 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); |
571 | if (relevant) { | 1028 | if (relevant) { |
572 | mv_host_intr(host_set, relevant, hc); | 1029 | mv_host_intr(host_set, relevant, hc); |
573 | handled = 1; | 1030 | handled++; |
574 | } | 1031 | } |
575 | } | 1032 | } |
576 | if (PCI_ERR & irq_stat) { | 1033 | if (PCI_ERR & irq_stat) { |
577 | /* FIXME: these are all masked by default, but still need | 1034 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", |
578 | * to recover from them properly. | 1035 | readl(mmio + PCI_IRQ_CAUSE_OFS)); |
579 | */ | 1036 | |
580 | } | 1037 | VPRINTK("All regs @ PCI error\n"); |
1038 | mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev)); | ||
581 | 1039 | ||
1040 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); | ||
1041 | handled++; | ||
1042 | } | ||
582 | spin_unlock(&host_set->lock); | 1043 | spin_unlock(&host_set->lock); |
583 | 1044 | ||
584 | return IRQ_RETVAL(handled); | 1045 | return IRQ_RETVAL(handled); |
585 | } | 1046 | } |
586 | 1047 | ||
1048 | static u8 mv_check_err(struct ata_port *ap) | ||
1049 | { | ||
1050 | mv_stop_dma(ap); /* can't read shadow regs if DMA on */ | ||
1051 | return readb((void __iomem *) ap->ioaddr.error_addr); | ||
1052 | } | ||
1053 | |||
1054 | /* Part of this is taken from __sata_phy_reset and modified to not sleep | ||
1055 | * since this routine gets called from interrupt level. | ||
1056 | */ | ||
587 | static void mv_phy_reset(struct ata_port *ap) | 1057 | static void mv_phy_reset(struct ata_port *ap) |
588 | { | 1058 | { |
589 | void __iomem *port_mmio = mv_ap_base(ap); | 1059 | void __iomem *port_mmio = mv_ap_base(ap); |
590 | struct ata_taskfile tf; | 1060 | struct ata_taskfile tf; |
591 | struct ata_device *dev = &ap->device[0]; | 1061 | struct ata_device *dev = &ap->device[0]; |
592 | u32 edma = 0, bdma; | 1062 | unsigned long timeout; |
593 | 1063 | ||
594 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); | 1064 | VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); |
595 | 1065 | ||
596 | edma = readl(port_mmio + EDMA_CMD_OFS); | 1066 | mv_stop_dma(ap); |
597 | if (EDMA_EN & edma) { | ||
598 | /* disable EDMA if active */ | ||
599 | edma &= ~EDMA_EN; | ||
600 | writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS); | ||
601 | udelay(1); | ||
602 | } else if (mv_port_bdma_capable(ap) && | ||
603 | (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) { | ||
604 | /* disable BDMA if active */ | ||
605 | writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS); | ||
606 | } | ||
607 | 1067 | ||
608 | writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); | 1068 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); |
609 | udelay(25); /* allow reset propagation */ | 1069 | udelay(25); /* allow reset propagation */ |
610 | 1070 | ||
611 | /* Spec never mentions clearing the bit. Marvell's driver does | 1071 | /* Spec never mentions clearing the bit. Marvell's driver does |
612 | * clear the bit, however. | 1072 | * clear the bit, however. |
613 | */ | 1073 | */ |
614 | writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); | 1074 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
615 | 1075 | ||
616 | VPRINTK("Done. Now calling __sata_phy_reset()\n"); | 1076 | VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " |
1077 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | ||
1078 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | ||
617 | 1079 | ||
618 | /* proceed to init communications via the scr_control reg */ | 1080 | /* proceed to init communications via the scr_control reg */ |
619 | __sata_phy_reset(ap); | 1081 | scr_write_flush(ap, SCR_CONTROL, 0x301); |
1082 | mdelay(1); | ||
1083 | scr_write_flush(ap, SCR_CONTROL, 0x300); | ||
1084 | timeout = jiffies + (HZ * 1); | ||
1085 | do { | ||
1086 | mdelay(10); | ||
1087 | if ((scr_read(ap, SCR_STATUS) & 0xf) != 1) | ||
1088 | break; | ||
1089 | } while (time_before(jiffies, timeout)); | ||
620 | 1090 | ||
621 | if (ap->flags & ATA_FLAG_PORT_DISABLED) { | 1091 | VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " |
622 | VPRINTK("Port disabled pre-sig. Exiting.\n"); | 1092 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), |
1093 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | ||
1094 | |||
1095 | if (sata_dev_present(ap)) { | ||
1096 | ata_port_probe(ap); | ||
1097 | } else { | ||
1098 | printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", | ||
1099 | ap->id, scr_read(ap, SCR_STATUS)); | ||
1100 | ata_port_disable(ap); | ||
623 | return; | 1101 | return; |
624 | } | 1102 | } |
1103 | ap->cbl = ATA_CBL_SATA; | ||
625 | 1104 | ||
626 | tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); | 1105 | tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); |
627 | tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); | 1106 | tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); |
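The scr_control sequence in the hunk above is a standard SATA phy reset
done without sleeping, since this path can be entered at interrupt
level (hence mdelay rather than msleep). Annotated with the values used
in the hunk:

        scr_write_flush(ap, SCR_CONTROL, 0x301);  /* DET = 1: assert COMRESET */
        mdelay(1);
        scr_write_flush(ap, SCR_CONTROL, 0x300);  /* DET = 0: release reset */
        /* then poll SStatus for up to 1s until the DET field leaves state 1,
         * i.e. link negotiation has finished one way or the other
         */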
@@ -636,28 +1115,76 @@ static void mv_phy_reset(struct ata_port *ap)
636 | VPRINTK("EXIT\n"); | 1115 | VPRINTK("EXIT\n"); |
637 | } | 1116 | } |
638 | 1117 | ||
639 | static void mv_port_init(struct ata_ioports *port, unsigned long base) | 1118 | static void mv_eng_timeout(struct ata_port *ap) |
1119 | { | ||
1120 | struct ata_queued_cmd *qc; | ||
1121 | unsigned long flags; | ||
1122 | |||
1123 | printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); | ||
1124 | DPRINTK("All regs @ start of eng_timeout\n"); | ||
1125 | mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, | ||
1126 | to_pci_dev(ap->host_set->dev)); | ||
1127 | |||
1128 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1129 | printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", | ||
1130 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, | ||
1131 | &qc->scsicmd->cmnd); | ||
1132 | |||
1133 | mv_err_intr(ap); | ||
1134 | mv_phy_reset(ap); | ||
1135 | |||
1136 | if (!qc) { | ||
1137 | printk(KERN_ERR "ata%u: BUG: timeout without command\n", | ||
1138 | ap->id); | ||
1139 | } else { | ||
1140 | /* hack alert! We cannot use the supplied completion | ||
1141 | * function from inside the ->eh_strategy_handler() thread. | ||
1142 | * libata is the only user of ->eh_strategy_handler() in | ||
1143 | * any kernel, so the default scsi_done() assumes it is | ||
1144 | * not being called from the SCSI EH. | ||
1145 | */ | ||
1146 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
1147 | qc->scsidone = scsi_finish_command; | ||
1148 | ata_qc_complete(qc, ATA_ERR); | ||
1149 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | ||
640 | { | 1154 | { |
641 | /* PIO related setup */ | 1155 | unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; |
642 | port->data_addr = base + SHD_PIO_DATA_OFS; | 1156 | unsigned serr_ofs; |
643 | port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; | 1157 | |
644 | port->nsect_addr = base + SHD_SECT_CNT_OFS; | 1158 | /* PIO related setup |
645 | port->lbal_addr = base + SHD_LBA_L_OFS; | 1159 | */ |
646 | port->lbam_addr = base + SHD_LBA_M_OFS; | 1160 | port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); |
647 | port->lbah_addr = base + SHD_LBA_H_OFS; | 1161 | port->error_addr = |
648 | port->device_addr = base + SHD_DEV_HD_OFS; | 1162 | port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); |
649 | port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; | 1163 | port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); |
650 | port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; | 1164 | port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); |
651 | /* unused */ | 1165 | port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); |
1166 | port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); | ||
1167 | port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); | ||
1168 | port->status_addr = | ||
1169 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); | ||
1170 | /* special case: control/altstatus doesn't have ATA_REG_ address */ | ||
1171 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; | ||
1172 | |||
1173 | /* unused: */ | ||
652 | port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; | 1174 | port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; |
653 | 1175 | ||
1176 | /* Clear any currently outstanding port interrupt conditions */ | ||
1177 | serr_ofs = mv_scr_offset(SCR_ERROR); | ||
1178 | writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); | ||
1179 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
1180 | |||
654 | /* unmask all EDMA error interrupts */ | 1181 | /* unmask all EDMA error interrupts */ |
655 | writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); | 1182 | writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); |
656 | 1183 | ||
657 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", | 1184 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", |
658 | readl((void __iomem *)base + EDMA_CFG_OFS), | 1185 | readl(port_mmio + EDMA_CFG_OFS), |
659 | readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), | 1186 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), |
660 | readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); | 1187 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); |
661 | } | 1188 | } |
662 | 1189 | ||
663 | static int mv_host_init(struct ata_probe_ent *probe_ent) | 1190 | static int mv_host_init(struct ata_probe_ent *probe_ent) |
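The rewritten mv_port_init() above replaces eight hand-coded SHD_*_OFS
constants with a single base plus a 32-bit stride; the addresses come
out identical. An illustrative check:

        /* ATA_REG_STATUS == 7, so
         *   shd_base + sizeof(u32) * ATA_REG_STATUS
         *     == port_mmio + 0x100 + 0x1c == port_mmio + 0x11c,
         * which is exactly the old SHD_CMD_STA_OFS value.
         */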
@@ -666,7 +1193,8 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
666 | void __iomem *mmio = probe_ent->mmio_base; | 1193 | void __iomem *mmio = probe_ent->mmio_base; |
667 | void __iomem *port_mmio; | 1194 | void __iomem *port_mmio; |
668 | 1195 | ||
669 | if (mv_master_reset(probe_ent->mmio_base)) { | 1196 | if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) && |
1197 | mv_global_soft_reset(probe_ent->mmio_base)) { | ||
670 | rc = 1; | 1198 | rc = 1; |
671 | goto done; | 1199 | goto done; |
672 | } | 1200 | } |
@@ -676,17 +1204,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
676 | 1204 | ||
677 | for (port = 0; port < probe_ent->n_ports; port++) { | 1205 | for (port = 0; port < probe_ent->n_ports; port++) { |
678 | port_mmio = mv_port_base(mmio, port); | 1206 | port_mmio = mv_port_base(mmio, port); |
679 | mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); | 1207 | mv_port_init(&probe_ent->port[port], port_mmio); |
680 | } | 1208 | } |
681 | 1209 | ||
682 | for (hc = 0; hc < n_hc; hc++) { | 1210 | for (hc = 0; hc < n_hc; hc++) { |
683 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, | 1211 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
684 | readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), | 1212 | |
685 | readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); | 1213 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " |
1214 | "(before clear)=0x%08x\n", hc, | ||
1215 | readl(hc_mmio + HC_CFG_OFS), | ||
1216 | readl(hc_mmio + HC_IRQ_CAUSE_OFS)); | ||
1217 | |||
1218 | /* Clear any currently outstanding hc interrupt conditions */ | ||
1219 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
686 | } | 1220 | } |
687 | 1221 | ||
688 | writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); | 1222 | /* Clear any currently outstanding host interrupt conditions */ |
689 | writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); | 1223 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); |
1224 | |||
1225 | /* and unmask interrupt generation for host regs */ | ||
1226 | writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); | ||
1227 | writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); | ||
690 | 1228 | ||
691 | VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " | 1229 | VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " |
692 | "PCI int cause/mask=0x%08x/0x%08x\n", | 1230 | "PCI int cause/mask=0x%08x/0x%08x\n", |
@@ -694,11 +1232,37 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
694 | readl(mmio + HC_MAIN_IRQ_MASK_OFS), | 1232 | readl(mmio + HC_MAIN_IRQ_MASK_OFS), |
695 | readl(mmio + PCI_IRQ_CAUSE_OFS), | 1233 | readl(mmio + PCI_IRQ_CAUSE_OFS), |
696 | readl(mmio + PCI_IRQ_MASK_OFS)); | 1234 | readl(mmio + PCI_IRQ_MASK_OFS)); |
697 | 1235 | done: | |
698 | done: | ||
699 | return rc; | 1236 | return rc; |
700 | } | 1237 | } |
701 | 1238 | ||
1239 | /* FIXME: complete this */ | ||
1240 | static void mv_print_info(struct ata_probe_ent *probe_ent) | ||
1241 | { | ||
1242 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); | ||
1243 | struct mv_host_priv *hpriv = probe_ent->private_data; | ||
1244 | u8 rev_id, scc; | ||
1245 | const char *scc_s; | ||
1246 | |||
1247 | /* Use this to determine the HW stepping of the chip so we know | ||
1248 | * what errata to workaround | ||
1249 | */ | ||
1250 | pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); | ||
1251 | |||
1252 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); | ||
1253 | if (scc == 0) | ||
1254 | scc_s = "SCSI"; | ||
1255 | else if (scc == 0x01) | ||
1256 | scc_s = "RAID"; | ||
1257 | else | ||
1258 | scc_s = "unknown"; | ||
1259 | |||
1260 | printk(KERN_INFO DRV_NAME | ||
1261 | "(%s) %u slots %u ports %s mode IRQ via %s\n", | ||
1262 | pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, | ||
1263 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); | ||
1264 | } | ||
1265 | |||
702 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1266 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
703 | { | 1267 | { |
704 | static int printed_version = 0; | 1268 | static int printed_version = 0; |
@@ -706,16 +1270,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
706 | struct mv_host_priv *hpriv; | 1270 | struct mv_host_priv *hpriv; |
707 | unsigned int board_idx = (unsigned int)ent->driver_data; | 1271 | unsigned int board_idx = (unsigned int)ent->driver_data; |
708 | void __iomem *mmio_base; | 1272 | void __iomem *mmio_base; |
709 | int pci_dev_busy = 0; | 1273 | int pci_dev_busy = 0, rc; |
710 | int rc; | ||
711 | 1274 | ||
712 | if (!printed_version++) { | 1275 | if (!printed_version++) { |
713 | printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); | 1276 | printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n"); |
714 | } | 1277 | } |
715 | 1278 | ||
716 | VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number, | ||
717 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
718 | |||
719 | rc = pci_enable_device(pdev); | 1279 | rc = pci_enable_device(pdev); |
720 | if (rc) { | 1280 | if (rc) { |
721 | return rc; | 1281 | return rc; |
@@ -727,8 +1287,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
727 | goto err_out; | 1287 | goto err_out; |
728 | } | 1288 | } |
729 | 1289 | ||
730 | pci_intx(pdev, 1); | ||
731 | |||
732 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); | 1290 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); |
733 | if (probe_ent == NULL) { | 1291 | if (probe_ent == NULL) { |
734 | rc = -ENOMEM; | 1292 | rc = -ENOMEM; |
@@ -739,8 +1297,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
739 | probe_ent->dev = pci_dev_to_dev(pdev); | 1297 | probe_ent->dev = pci_dev_to_dev(pdev); |
740 | INIT_LIST_HEAD(&probe_ent->node); | 1298 | INIT_LIST_HEAD(&probe_ent->node); |
741 | 1299 | ||
742 | mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), | 1300 | mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); |
743 | pci_resource_len(pdev, MV_PRIMARY_BAR)); | ||
744 | if (mmio_base == NULL) { | 1301 | if (mmio_base == NULL) { |
745 | rc = -ENOMEM; | 1302 | rc = -ENOMEM; |
746 | goto err_out_free_ent; | 1303 | goto err_out_free_ent; |
@@ -769,37 +1326,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
769 | if (rc) { | 1326 | if (rc) { |
770 | goto err_out_hpriv; | 1327 | goto err_out_hpriv; |
771 | } | 1328 | } |
772 | /* mv_print_info(probe_ent); */ | ||
773 | 1329 | ||
774 | { | 1330 | /* Enable interrupts */ |
775 | int b, w; | 1331 | if (pci_enable_msi(pdev) == 0) { |
776 | u32 dw[4]; /* hold a line of 16b */ | 1332 | hpriv->hp_flags |= MV_HP_FLAG_MSI; |
777 | VPRINTK("PCI config space:\n"); | 1333 | } else { |
778 | for (b = 0; b < 0x40; ) { | 1334 | pci_intx(pdev, 1); |
779 | for (w = 0; w < 4; w++) { | ||
780 | (void) pci_read_config_dword(pdev,b,&dw[w]); | ||
781 | b += sizeof(*dw); | ||
782 | } | ||
783 | VPRINTK("%08x %08x %08x %08x\n", | ||
784 | dw[0],dw[1],dw[2],dw[3]); | ||
785 | } | ||
786 | } | 1335 | } |
787 | 1336 | ||
788 | /* FIXME: check ata_device_add return value */ | 1337 | mv_dump_pci_cfg(pdev, 0x68); |
789 | ata_device_add(probe_ent); | 1338 | mv_print_info(probe_ent); |
790 | kfree(probe_ent); | 1339 | |
1340 | if (ata_device_add(probe_ent) == 0) { | ||
1341 | rc = -ENODEV; /* No devices discovered */ | ||
1342 | goto err_out_dev_add; | ||
1343 | } | ||
791 | 1344 | ||
1345 | kfree(probe_ent); | ||
792 | return 0; | 1346 | return 0; |
793 | 1347 | ||
794 | err_out_hpriv: | 1348 | err_out_dev_add: |
1349 | if (MV_HP_FLAG_MSI & hpriv->hp_flags) { | ||
1350 | pci_disable_msi(pdev); | ||
1351 | } else { | ||
1352 | pci_intx(pdev, 0); | ||
1353 | } | ||
1354 | err_out_hpriv: | ||
795 | kfree(hpriv); | 1355 | kfree(hpriv); |
796 | err_out_iounmap: | 1356 | err_out_iounmap: |
797 | iounmap(mmio_base); | 1357 | pci_iounmap(pdev, mmio_base); |
798 | err_out_free_ent: | 1358 | err_out_free_ent: |
799 | kfree(probe_ent); | 1359 | kfree(probe_ent); |
800 | err_out_regions: | 1360 | err_out_regions: |
801 | pci_release_regions(pdev); | 1361 | pci_release_regions(pdev); |
802 | err_out: | 1362 | err_out: |
803 | if (!pci_dev_busy) { | 1363 | if (!pci_dev_busy) { |
804 | pci_disable_device(pdev); | 1364 | pci_disable_device(pdev); |
805 | } | 1365 | } |