Diffstat (limited to 'drivers/scsi/sata_mv.c')

 drivers/scsi/sata_mv.c | 1142 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 953 insertions(+), 189 deletions(-)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..d457f5673476 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -35,7 +35,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"0.12"
+#define DRV_VERSION	"0.24"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +55,61 @@ enum {
 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
 
-	MV_Q_CT			= 32,
-	MV_CRQB_SZ		= 32,
-	MV_CRPB_SZ		= 8,
+	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
 
-	MV_DMA_BOUNDARY		= 0xffffffffU,
-	SATAHC_MASK		= (~(MV_SATAHC_REG_SZ - 1)),
+	MV_MAX_Q_DEPTH		= 32,
+	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
+
+	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
+	 * CRPB needs alignment on a 256B boundary. Size == 256B
+	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
+	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
+	 */
+	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
+	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
+	MV_MAX_SG_CT		= 176,
+	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
+	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
+
+	/* Our DMA boundary is determined by an ePRD being unable to handle
+	 * anything larger than 64KB
+	 */
+	MV_DMA_BOUNDARY		= 0xffffU,
 
 	MV_PORTS_PER_HC		= 4,
 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
 	MV_PORT_HC_SHIFT	= 2,
-	/* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */
+	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
 	MV_PORT_MASK		= 3,
 
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
-	MV_FLAG_BDMA		= (1 << 28),  /* Basic DMA */
+	MV_FLAG_GLBL_SFT_RST	= (1 << 28),  /* Global Soft Reset support */
+	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
+	MV_6XXX_FLAGS		= (MV_FLAG_IRQ_COALESCE |
+				   MV_FLAG_GLBL_SFT_RST),
 
 	chip_504x		= 0,
 	chip_508x		= 1,
 	chip_604x		= 2,
 	chip_608x		= 3,
 
+	CRQB_FLAG_READ		= (1 << 0),
+	CRQB_TAG_SHIFT		= 1,
+	CRQB_CMD_ADDR_SHIFT	= 8,
+	CRQB_CMD_CS		= (0x2 << 11),
+	CRQB_CMD_LAST		= (1 << 15),
+
+	CRPB_FLAG_STATUS_SHIFT	= 8,
+
+	EPRD_FLAG_END_OF_TBL	= (1 << 31),
+
 	/* PCI interface registers */
 
+	PCI_COMMAND_OFS		= 0xc00,
+
 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
 	STOP_PCI_MASTER		= (1 << 2),
 	PCI_MASTER_EMPTY	= (1 << 3),
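
A note on the sizing above: the new constants are chosen so that one port's worth of DMA bookkeeping fits in exactly one 4KB page, which is what mv_port_start() later grabs in a single dma_alloc_coherent() call. A standalone sketch of the arithmetic (mirroring the enum values from this hunk; not code from the patch):

	#include <assert.h>

	enum {
		MAX_Q_DEPTH = 32,               /* MV_MAX_Q_DEPTH       */
		CRQB_Q_SZ   = 32 * MAX_Q_DEPTH, /* 32 requests  * 32B   */
		CRPB_Q_SZ   =  8 * MAX_Q_DEPTH, /* 32 responses *  8B   */
		SG_TBL_SZ   = 16 * 176,         /* 176 ePRDs    * 16B   */
	};

	int main(void)
	{
		/* 1024 + 256 + 2816 == 4096: one page per port */
		assert(CRQB_Q_SZ + CRPB_Q_SZ + SG_TBL_SZ == 4096);
		return 0;
	}

The shrunken 0xffff DMA boundary likewise follows from the hardware: a 16-bit length field in each ePRD means no scatter/gather segment may cross or exceed 64KB.
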
@@ -111,20 +141,13 @@ enum {
 	HC_CFG_OFS		= 0,
 
 	HC_IRQ_CAUSE_OFS	= 0x14,
-	CRBP_DMA_DONE		= (1 << 0),	/* shift by port # */
+	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
 	DEV_IRQ			= (1 << 8),	/* shift by port # */
 
 	/* Shadow block registers */
-	SHD_PIO_DATA_OFS	= 0x100,
-	SHD_FEA_ERR_OFS		= 0x104,
-	SHD_SECT_CNT_OFS	= 0x108,
-	SHD_LBA_L_OFS		= 0x10C,
-	SHD_LBA_M_OFS		= 0x110,
-	SHD_LBA_H_OFS		= 0x114,
-	SHD_DEV_HD_OFS		= 0x118,
-	SHD_CMD_STA_OFS		= 0x11C,
-	SHD_CTL_AST_OFS		= 0x120,
+	SHD_BLK_OFS		= 0x100,
+	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
 
 	/* SATA registers */
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
@@ -132,6 +155,11 @@ enum {
 
 	/* Port registers */
 	EDMA_CFG_OFS		= 0,
+	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
+	EDMA_CFG_NCQ		= (1 << 5),
+	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
+	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
+	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
 
 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
@@ -161,33 +189,85 @@ enum {
 				   EDMA_ERR_LNK_DATA_TX |
 				   EDMA_ERR_TRANS_PROTO),
 
+	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
+	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
+	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
+
+	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
+	EDMA_REQ_Q_PTR_SHIFT	= 5,
+
+	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
+	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
+	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
+	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
+	EDMA_RSP_Q_PTR_SHIFT	= 3,
+
 	EDMA_CMD_OFS		= 0x28,
 	EDMA_EN			= (1 << 0),
 	EDMA_DS			= (1 << 1),
 	ATA_RST			= (1 << 2),
 
-	/* BDMA is 6xxx part only */
-	BDMA_CMD_OFS		= 0x224,
-	BDMA_START		= (1 << 0),
+	/* Host private flags (hp_flags) */
+	MV_HP_FLAG_MSI		= (1 << 0),
 
-	MV_UNDEF		= 0,
+	/* Port private flags (pp_flags) */
+	MV_PP_FLAG_EDMA_EN	= (1 << 0),
+	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
 };
 
-struct mv_port_priv {
+/* Command ReQuest Block: 32B */
+struct mv_crqb {
+	u32			sg_addr;
+	u32			sg_addr_hi;
+	u16			ctrl_flags;
+	u16			ata_cmd[11];
+};
 
+/* Command ResPonse Block: 8B */
+struct mv_crpb {
+	u16			id;
+	u16			flags;
+	u32			tmstmp;
 };
 
-struct mv_host_priv {
+/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
+struct mv_sg {
+	u32			addr;
+	u32			flags_size;
+	u32			addr_hi;
+	u32			reserved;
+};
 
+struct mv_port_priv {
+	struct mv_crqb		*crqb;
+	dma_addr_t		crqb_dma;
+	struct mv_crpb		*crpb;
+	dma_addr_t		crpb_dma;
+	struct mv_sg		*sg_tbl;
+	dma_addr_t		sg_tbl_dma;
+
+	unsigned		req_producer;	/* cp of req_in_ptr */
+	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
+	u32			pp_flags;
+};
+
+struct mv_host_priv {
+	u32			hp_flags;
 };
 
 static void mv_irq_clear(struct ata_port *ap);
 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
+static u8 mv_check_err(struct ata_port *ap);
 static void mv_phy_reset(struct ata_port *ap);
-static int mv_master_reset(void __iomem *mmio_base);
+static void mv_host_stop(struct ata_host_set *host_set);
+static int mv_port_start(struct ata_port *ap);
+static void mv_port_stop(struct ata_port *ap);
+static void mv_qc_prep(struct ata_queued_cmd *qc);
+static int mv_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 				struct pt_regs *regs);
+static void mv_eng_timeout(struct ata_port *ap);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static Scsi_Host_Template mv_sht = {
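
The EDMA engine reads the CRQB and ePRD tables, and writes the CRPB table, directly in this coherent memory, so the structs above must match the byte layouts named in their comments (32B/8B/16B) with no compiler padding. A userspace sketch of that invariant, with the kernel's u16/u32 swapped for stdint types:

	#include <assert.h>
	#include <stdint.h>

	/* mirrors of the structs in the hunk above */
	struct mv_crqb { uint32_t sg_addr, sg_addr_hi; uint16_t ctrl_flags, ata_cmd[11]; };
	struct mv_crpb { uint16_t id, flags; uint32_t tmstmp; };
	struct mv_sg   { uint32_t addr, flags_size, addr_hi, reserved; };

	int main(void)
	{
		assert(sizeof(struct mv_crqb) == 32);  /* Command ReQuest Block  */
		assert(sizeof(struct mv_crpb) ==  8);  /* Command ResPonse Block */
		assert(sizeof(struct mv_sg)   == 16);  /* one ePRD entry         */
		return 0;
	}
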
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
 	.eh_strategy_handler	= ata_scsi_error,
-	.can_queue		= ATA_DEF_QUEUE,
+	.can_queue		= MV_USE_Q_DEPTH,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_UNDEF,
+	.sg_tablesize		= MV_MAX_SG_CT,
 	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= MV_UNDEF,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
@@ -216,15 +296,16 @@ static struct ata_port_operations mv_ops = {
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
+	.check_err		= mv_check_err,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
 
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
+	.qc_prep		= mv_qc_prep,
+	.qc_issue		= mv_qc_issue,
 
-	.eng_timeout		= ata_eng_timeout,
+	.eng_timeout		= mv_eng_timeout,
 
 	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
 
-	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
+	.port_start		= mv_port_start,
+	.port_stop		= mv_port_stop,
+	.host_stop		= mv_host_stop,
 };
 
 static struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
 		.sht		= &mv_sht,
-		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
-		.pio_mask	= 0x1f,	/* pio4-0 */
-		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
+		.host_flags	= MV_COMMON_FLAGS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
 		.port_ops	= &mv_ops,
 	},
 	{  /* chip_508x */
 		.sht		= &mv_sht,
-		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   MV_FLAG_DUAL_HC),
-		.pio_mask	= 0x1f,	/* pio4-0 */
-		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
+		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0,	/* 0x7f (udma0-6 disabled for now) */
 		.port_ops	= &mv_ops,
 	},
 	{  /* chip_604x */
 		.sht		= &mv_sht,
-		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA),
-		.pio_mask	= 0x1f,	/* pio4-0 */
-		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
+		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv_ops,
 	},
 	{  /* chip_608x */
 		.sht		= &mv_sht,
-		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC |
-				   MV_FLAG_BDMA),
-		.pio_mask	= 0x1f,	/* pio4-0 */
-		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
+		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				   MV_FLAG_DUAL_HC),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv_ops,
 	},
 };
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
 	(void) readl(addr);	/* flush to avoid PCI posted write */
 }
 
-static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
-{
-	return ((void __iomem *)((unsigned long)port_mmio &
-				 (unsigned long)SATAHC_MASK));
-}
-
 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 {
 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
 	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
 }
 
-static inline int mv_get_hc_count(unsigned long flags)
+static inline int mv_get_hc_count(unsigned long hp_flags)
 {
-	return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+	return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
-static inline int mv_is_edma_active(struct ata_port *ap)
+static void mv_irq_clear(struct ata_port *ap)
+{
+}
+
+/**
+ * mv_start_dma - Enable eDMA engine
+ * @base: port base address
+ * @pp: port private data
+ *
+ * Verify the local cache of the eDMA state is accurate with an
+ * assert.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
+{
+	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
+		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
+		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
+	}
+	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
+}
+
+/**
+ * mv_stop_dma - Disable eDMA engine
+ * @ap: ATA channel to manipulate
+ *
+ * Verify the local cache of the eDMA state is accurate with an
+ * assert.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_stop_dma(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
-	return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
+	struct mv_port_priv *pp	= ap->private_data;
+	u32 reg;
+	int i;
+
+	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
+		/* Disable EDMA if active.   The disable bit auto clears.
+		 */
+		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
+		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	} else {
+		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
+	}
+
+	/* now properly wait for the eDMA to stop */
+	for (i = 1000; i > 0; i--) {
+		reg = readl(port_mmio + EDMA_CMD_OFS);
+		if (!(EDMA_EN & reg)) {
+			break;
+		}
+		udelay(100);
+	}
+
+	if (EDMA_EN & reg) {
+		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
+		/* FIXME: Consider doing a reset here to recover */
+	}
 }
 
-static inline int mv_port_bdma_capable(struct ata_port *ap)
+#ifdef ATA_DEBUG
+static void mv_dump_mem(void __iomem *start, unsigned bytes)
 {
-	return (ap->flags & MV_FLAG_BDMA);
+	int b, w;
+	for (b = 0; b < bytes; ) {
+		DPRINTK("%p: ", start + b);
+		for (w = 0; b < bytes && w < 4; w++) {
+			printk("%08x ",readl(start + b));
+			b += sizeof(u32);
+		}
+		printk("\n");
+	}
 }
+#endif
 
-static void mv_irq_clear(struct ata_port *ap)
+static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
+{
+#ifdef ATA_DEBUG
+	int b, w;
+	u32 dw;
+	for (b = 0; b < bytes; ) {
+		DPRINTK("%02x: ", b);
+		for (w = 0; b < bytes && w < 4; w++) {
+			(void) pci_read_config_dword(pdev,b,&dw);
+			printk("%08x ",dw);
+			b += sizeof(u32);
+		}
+		printk("\n");
+	}
+#endif
+}
+static void mv_dump_all_regs(void __iomem *mmio_base, int port,
+			     struct pci_dev *pdev)
 {
+#ifdef ATA_DEBUG
+	void __iomem *hc_base = mv_hc_base(mmio_base,
+					   port >> MV_PORT_HC_SHIFT);
+	void __iomem *port_base;
+	int start_port, num_ports, p, start_hc, num_hcs, hc;
+
+	if (0 > port) {
+		start_hc = start_port = 0;
+		num_ports = 8;		/* shld be benign for 4 port devs */
+		num_hcs = 2;
+	} else {
+		start_hc = port >> MV_PORT_HC_SHIFT;
+		start_port = port;
+		num_ports = num_hcs = 1;
+	}
+	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
+		num_ports > 1 ? num_ports - 1 : start_port);
+
+	if (NULL != pdev) {
+		DPRINTK("PCI config space regs:\n");
+		mv_dump_pci_cfg(pdev, 0x68);
+	}
+	DPRINTK("PCI regs:\n");
+	mv_dump_mem(mmio_base+0xc00, 0x3c);
+	mv_dump_mem(mmio_base+0xd00, 0x34);
+	mv_dump_mem(mmio_base+0xf00, 0x4);
+	mv_dump_mem(mmio_base+0x1d00, 0x6c);
+	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
+		hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
+		DPRINTK("HC regs (HC %i):\n", hc);
+		mv_dump_mem(hc_base, 0x1c);
+	}
+	for (p = start_port; p < start_port + num_ports; p++) {
+		port_base = mv_port_base(mmio_base, p);
+		DPRINTK("EDMA regs (port %i):\n",p);
+		mv_dump_mem(port_base, 0x54);
+		DPRINTK("SATA regs (port %i):\n",p);
+		mv_dump_mem(port_base+0x300, 0x60);
+	}
+#endif
 }
 
 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
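
mv_stop_dma() above is the one place the driver must wait on hardware: EDMA_DS is self-clearing, but EDMA_EN can remain set briefly while the engine drains. The bounded-poll idiom it uses generalizes to a small helper (a hypothetical refactoring sketch, not part of the patch):

	/* Wait for 'bit' to clear in an MMIO register; 0 on success.
	 * With tries=1000 and 100us per try this bounds the wait at
	 * roughly 100ms, matching the loop in mv_stop_dma(). */
	static int mv_wait_bit_clear(void __iomem *reg, u32 bit,
				     int tries, unsigned int usec_per_try)
	{
		while (tries-- > 0) {
			if (!(readl(reg) & bit))
				return 0;	/* hardware acked */
			udelay(usec_per_try);
		}
		return -EBUSY;
	}
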
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 	}
 }
 
-static int mv_master_reset(void __iomem *mmio_base)
+/**
+ * mv_global_soft_reset - Perform the 6xxx global soft reset
+ * @mmio_base: base address of the HBA
+ *
+ * This routine only applies to 6xxx parts.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_global_soft_reset(void __iomem *mmio_base)
 {
 	void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
 	int i, rc = 0;
 	u32 t;
 
-	VPRINTK("ENTER\n");
-
 	/* Following procedure defined in PCI "main command and status
 	 * register" table.
 	 */
 	t = readl(reg);
 	writel(t | STOP_PCI_MASTER, reg);
 
-	for (i = 0; i < 100; i++) {
-		msleep(10);
+	for (i = 0; i < 1000; i++) {
+		udelay(1);
 		t = readl(reg);
 		if (PCI_MASTER_EMPTY & t) {
 			break;
 		}
 	}
 	if (!(PCI_MASTER_EMPTY & t)) {
-		printk(KERN_ERR DRV_NAME "PCI master won't flush\n");
-		rc = 1;	/* broken HW? */
+		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
+		rc = 1;
 		goto done;
 	}
 
@@ -425,39 +626,398 @@ static int mv_master_reset(void __iomem *mmio_base)
 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
 
 	if (!(GLOB_SFT_RST & t)) {
-		printk(KERN_ERR DRV_NAME "can't set global reset\n");
-		rc = 1;	/* broken HW? */
+		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
+		rc = 1;
 		goto done;
 	}
 
-	/* clear reset */
+	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
 	i = 5;
 	do {
-		writel(t & ~GLOB_SFT_RST, reg);
+		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
 		t = readl(reg);
 		udelay(1);
 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
 
 	if (GLOB_SFT_RST & t) {
-		printk(KERN_ERR DRV_NAME "can't clear global reset\n");
-		rc = 1;	/* broken HW? */
+		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
+		rc = 1;
 	}
-
- done:
-	VPRINTK("EXIT, rc = %i\n", rc);
+done:
 	return rc;
 }
 
-static void mv_err_intr(struct ata_port *ap)
+/**
+ * mv_host_stop - Host specific cleanup/stop routine.
+ * @host_set: host data structure
+ *
+ * Disable ints, cleanup host memory, call general purpose
+ * host_stop.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_host_stop(struct ata_host_set *host_set)
 {
-	void __iomem *port_mmio;
-	u32 edma_err_cause, serr = 0;
+	struct mv_host_priv *hpriv = host_set->private_data;
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
+		pci_disable_msi(pdev);
+	} else {
+		pci_intx(pdev, 0);
+	}
+	kfree(hpriv);
+	ata_host_stop(host_set);
+}
+
+/**
+ * mv_port_start - Port specific init/start routine.
+ * @ap: ATA channel to manipulate
+ *
+ * Allocate and point to DMA memory, init port private memory,
+ * zero indices.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+	struct mv_port_priv *pp;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	void *mem;
+	dma_addr_t mem_dma;
+
+	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp) {
+		return -ENOMEM;
+	}
+	memset(pp, 0, sizeof(*pp));
+
+	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
+				 GFP_KERNEL);
+	if (!mem) {
+		kfree(pp);
+		return -ENOMEM;
+	}
+	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
+
+	/* First item in chunk of DMA memory:
+	 * 32-slot command request table (CRQB), 32 bytes each in size
+	 */
+	pp->crqb = mem;
+	pp->crqb_dma = mem_dma;
+	mem += MV_CRQB_Q_SZ;
+	mem_dma += MV_CRQB_Q_SZ;
+
+	/* Second item:
+	 * 32-slot command response table (CRPB), 8 bytes each in size
+	 */
+	pp->crpb = mem;
+	pp->crpb_dma = mem_dma;
+	mem += MV_CRPB_Q_SZ;
+	mem_dma += MV_CRPB_Q_SZ;
+
+	/* Third item:
+	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
+	 */
+	pp->sg_tbl = mem;
+	pp->sg_tbl_dma = mem_dma;
+
+	writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
+		 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
+
+	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
+		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+	writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+
+	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
+		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+	pp->req_producer = pp->rsp_consumer = 0;
+
+	/* Don't turn on EDMA here...do it before DMA commands only.  Else
+	 * we'll be unable to send non-data, PIO, etc due to restricted access
+	 * to shadow regs.
+	 */
+	ap->private_data = pp;
+	return 0;
+}
+
+/**
+ * mv_port_stop - Port specific cleanup/stop routine.
+ * @ap: ATA channel to manipulate
+ *
+ * Stop DMA, cleanup port memory.
+ *
+ * LOCKING:
+ * This routine uses the host_set lock to protect the DMA stop.
+ */
+static void mv_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+	struct mv_port_priv *pp = ap->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	mv_stop_dma(ap);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	ap->private_data = NULL;
+	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
+	kfree(pp);
+}
+
+/**
+ * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
+ * @qc: queued command whose SG list to source from
+ *
+ * Populate the SG list and mark the last entry.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct mv_port_priv *pp = qc->ap->private_data;
+	unsigned int i;
+
+	for (i = 0; i < qc->n_elem; i++) {
+		u32 sg_len;
+		dma_addr_t addr;
+
+		addr = sg_dma_address(&qc->sg[i]);
+		sg_len = sg_dma_len(&qc->sg[i]);
+
+		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
+		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
+		pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
+	}
+	if (0 < qc->n_elem) {
+		pp->sg_tbl[qc->n_elem - 1].flags_size |= EPRD_FLAG_END_OF_TBL;
+	}
+}
+
+static inline unsigned mv_inc_q_index(unsigned *index)
+{
+	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
+	return *index;
+}
+
+static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
+{
+	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+		(last ? CRQB_CMD_LAST : 0);
+}
 
-	/* bug here b/c we got an err int on a port we don't know about,
-	 * so there's no way to clear it
+/**
+ * mv_qc_prep - Host specific command preparation.
+ * @qc: queued command to prepare
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA.  Else, it handles prep of the CRQB
+ * (command request block), does some sanity checking, and calls
+ * the SG load routine.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mv_port_priv *pp = ap->private_data;
+	u16 *cw;
+	struct ata_taskfile *tf;
+	u16 flags = 0;
+
+	if (ATA_PROT_DMA != qc->tf.protocol) {
+		return;
+	}
+
+	/* the req producer index should be the same as we remember it */
+	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	       pp->req_producer);
+
+	/* Fill in command request block
 	 */
-	BUG_ON(NULL == ap);
-	port_mmio = mv_ap_base(ap);
+	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+		flags |= CRQB_FLAG_READ;
+	}
+	assert(MV_MAX_Q_DEPTH > qc->tag);
+	flags |= qc->tag << CRQB_TAG_SHIFT;
+
+	pp->crqb[pp->req_producer].sg_addr =
+		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+	pp->crqb[pp->req_producer].sg_addr_hi =
+		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
+
+	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
+	tf = &qc->tf;
+
+	/* Sadly, the CRQB cannot accomodate all registers--there are
+	 * only 11 bytes...so we must pick and choose required
+	 * registers based on the command.  So, we drop feature and
+	 * hob_feature for [RW] DMA commands, but they are needed for
+	 * NCQ.  NCQ will drop hob_nsect.
+	 */
+	switch (tf->command) {
+	case ATA_CMD_READ:
+	case ATA_CMD_READ_EXT:
+	case ATA_CMD_WRITE:
+	case ATA_CMD_WRITE_EXT:
+		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
+		break;
+#ifdef LIBATA_NCQ	/* FIXME: remove this line when NCQ added */
+	case ATA_CMD_FPDMA_READ:
+	case ATA_CMD_FPDMA_WRITE:
+		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
+		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
+		break;
+#endif			/* FIXME: remove this line when NCQ added */
+	default:
+		/* The only other commands EDMA supports in non-queued and
+		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+		 * of which are defined/used by Linux.  If we get here, this
+		 * driver needs work.
+		 *
+		 * FIXME: modify libata to give qc_prep a return value and
+		 * return error here.
+		 */
+		BUG_ON(tf->command);
+		break;
+	}
+	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
+	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
+	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
+	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
+	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
+	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
+	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
+		return;
+	}
+	mv_fill_sg(qc);
+}
+
+/**
+ * mv_qc_issue - Initiate a command to the host
+ * @qc: queued command to start
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA.  Else, it sanity checks our local
+ * caches of the request producer/consumer indices then enables
+ * DMA and bumps the request producer index.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_qc_issue(struct ata_queued_cmd *qc)
+{
+	void __iomem *port_mmio = mv_ap_base(qc->ap);
+	struct mv_port_priv *pp = qc->ap->private_data;
+	u32 in_ptr;
+
+	if (ATA_PROT_DMA != qc->tf.protocol) {
+		/* We're about to send a non-EDMA capable command to the
+		 * port.  Turn off EDMA so there won't be problems accessing
+		 * shadow block, etc registers.
+		 */
+		mv_stop_dma(qc->ap);
+		return ata_qc_issue_prot(qc);
+	}
+
+	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+	/* the req producer index should be the same as we remember it */
+	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	       pp->req_producer);
+	/* until we do queuing, the queue should be empty at this point */
+	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	       ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+
+	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */
+
+	mv_start_dma(port_mmio, pp);
+
+	/* and write the request in pointer to kick the EDMA to life */
+	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
+	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
+	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+	return 0;
+}
+
+/**
+ * mv_get_crpb_status - get status from most recently completed cmd
+ * @ap: ATA channel to manipulate
+ *
+ * This routine is for use when the port is in DMA mode, when it
+ * will be using the CRPB (command response block) method of
+ * returning command completion information.  We assert indices
+ * are good, grab status, and bump the response consumer index to
+ * prove that we're up to date.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 mv_get_crpb_status(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+	u32 out_ptr;
+
+	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+	/* the response consumer index should be the same as we remember it */
+	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	       pp->rsp_consumer);
+
+	/* increment our consumer index... */
+	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
+
+	/* and, until we do NCQ, there should only be 1 CRPB waiting */
+	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
+		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	       pp->rsp_consumer);
+
+	/* write out our inc'd consumer index so EDMA knows we're caught up */
+	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
+	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
+	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+	/* Return ATA status register for completed CRPB */
+	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
+}
+
+/**
+ * mv_err_intr - Handle error interrupts on the port
+ * @ap: ATA channel to manipulate
+ *
+ * In most cases, just clear the interrupt and move on.  However,
+ * some cases require an eDMA reset, which is done right before
+ * the COMRESET in mv_phy_reset().  The SERR case requires a
+ * clear of pending errors in the SATA SERROR register.  Finally,
+ * if the port disabled DMA, update our cached copy to match.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_err_intr(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 edma_err_cause, serr = 0;
 
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
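
The producer/consumer handshake added here keeps a software copy of each queue index (pp->req_producer, pp->rsp_consumer) and cross-checks it against the hardware pointer registers with assert(). The registers hold byte addresses into the rings, so the slot number lives in a bitfield: bits [9:5] of the request-in pointer (32 slots of 32B each) and bits [7:3] of the response-out pointer (32 slots of 8B each). A runnable sketch of the extraction, with the register value assumed purely for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define REQ_Q_PTR_SHIFT  5	/* EDMA_REQ_Q_PTR_SHIFT: log2(32B CRQB) */
	#define Q_DEPTH_MASK     31	/* MV_MAX_Q_DEPTH_MASK                  */

	int main(void)
	{
		uint32_t in_ptr = 0x1ffff060;	/* hypothetical register value */
		unsigned slot = (in_ptr >> REQ_Q_PTR_SHIFT) & Q_DEPTH_MASK;

		printf("hardware request-in slot: %u\n", slot);	/* -> 3 */
		slot = (slot + 1) & Q_DEPTH_MASK;	/* mv_inc_q_index() wrap */
		printf("next slot to fill:       %u\n", slot);	/* -> 4 */
		return 0;
	}
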
@@ -465,8 +1025,12 @@ static void mv_err_intr(struct ata_port *ap)
 		serr = scr_read(ap, SCR_ERROR);
 		scr_write_flush(ap, SCR_ERROR, serr);
 	}
-	DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n",
-		ap->port_no, edma_err_cause, serr);
+	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
+		struct mv_port_priv *pp	= ap->private_data;
+		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	}
+	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
+		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
 
 	/* Clear EDMA now that SERR cleanup done */
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1041,21 @@ static void mv_err_intr(struct ata_port *ap)
 	}
 }
 
-/* Handle any outstanding interrupts in a single SATAHC
+/**
+ * mv_host_intr - Handle all interrupts on the given host controller
+ * @host_set: host specific structure
+ * @relevant: port error bits relevant to this host controller
+ * @hc: which host controller we're to look at
+ *
+ * Read then write clear the HC interrupt status then walk each
+ * port connected to the HC and see if it needs servicing.  Port
+ * success ints are reported in the HC interrupt status reg, the
+ * port error ints are reported in the higher level main
+ * interrupt status register and thus are passed in via the
+ * 'relevant' argument.
+ *
+ * LOCKING:
+ * Inherited from caller.
  */
 static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			 unsigned int hc)
@@ -487,8 +1065,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 	struct ata_port *ap;
 	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
-	int shift, port, port0, hard_port;
-	u8 ata_status;
+	int shift, port, port0, hard_port, handled;
+	u8 ata_status = 0;
 
 	if (hc == 0) {
 		port0 = 0;
@@ -499,7 +1077,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 	/* we'll need the HC success int register in most cases */
 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
 	if (hc_irq_cause) {
-		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
+		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 	}
 
 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,35 +1086,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
 		ap = host_set->ports[port];
 		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
-		ata_status = 0xffU;
+		handled = 0;	/* ensure ata_status is set if handled++ */
 
-		if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
-			BUG_ON(NULL == ap);
-			/* rcv'd new resp, basic DMA complete, or ATA IRQ */
-			/* This is needed to clear the ATA INTRQ.
-			 * FIXME: don't read the status reg in EDMA mode!
+		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
+			/* new CRPB on the queue; just one at a time until NCQ
+			 */
+			ata_status = mv_get_crpb_status(ap);
+			handled++;
+		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
+			/* received ATA IRQ; read the status reg to clear INTRQ
 			 */
 			ata_status = readb((void __iomem *)
 					   ap->ioaddr.status_addr);
+			handled++;
 		}
 
-		shift = port * 2;
+		shift = port << 1;		/* (port * 2) */
 		if (port >= MV_PORTS_PER_HC) {
 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
 		}
 		if ((PORT0_ERR << shift) & relevant) {
 			mv_err_intr(ap);
-			/* FIXME: smart to OR in ATA_ERR? */
+			/* OR in ATA_ERR to ensure libata knows we took one */
 			ata_status = readb((void __iomem *)
 					   ap->ioaddr.status_addr) | ATA_ERR;
+			handled++;
 		}
 
-		if (ap) {
+		if (handled && ap) {
 			qc = ata_qc_from_tag(ap, ap->active_tag);
 			if (NULL != qc) {
 				VPRINTK("port %u IRQ found for qc, "
 					"ata_status 0x%x\n", port,ata_status);
-				BUG_ON(0xffU == ata_status);
 				/* mark qc status appropriately */
 				ata_qc_complete(qc, ata_status);
 			}
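
The shift arithmetic above encodes the HC Main IRQ cause layout: each of the eight ports owns a done/err bit pair, and the second host controller's pairs start above a reserved bit 8, hence the extra shift++ for ports 4-7 (ports 0-3 land on bits 0-7, ports 4-7 on bits 9-16). Pulled out as a helper for clarity (a sketch, not in the patch):

	/* Bit position of the PORT0_ERR/PORT0_DONE pair for a 0-7 port
	 * number within the HC Main IRQ cause register. */
	static int mv_main_irq_shift(int port)
	{
		int shift = port << 1;		/* two bits per port     */
		if (port >= MV_PORTS_PER_HC)
			shift++;		/* skip bit 8 before HC1 */
		return shift;
	}
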
@@ -545,17 +1126,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 	VPRINTK("EXIT\n");
 }
 
+/**
+ * mv_interrupt -
+ * @irq: unused
+ * @dev_instance: private data; in this case the host structure
+ * @regs: unused
+ *
+ * Read the read only register to determine if any host
+ * controllers have pending interrupts.  If so, call lower level
+ * routine to handle.  Also check for PCI errors which are only
+ * reported here.
+ *
+ * LOCKING:
+ * This routine holds the host_set lock while processing pending
+ * interrupts.
+ */
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 				struct pt_regs *regs)
 {
 	struct ata_host_set *host_set = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
-	void __iomem *mmio;
+	void __iomem *mmio = host_set->mmio_base;
 	u32 irq_stat;
 
-	mmio = host_set->mmio_base;
 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
-	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
 
 	/* check the cases where we either have nothing pending or have read
 	 * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1158,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 		return IRQ_NONE;
 	}
 
+	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
 	spin_lock(&host_set->lock);
 
 	for (hc = 0; hc < n_hcs; hc++) {
 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
 		if (relevant) {
 			mv_host_intr(host_set, relevant, hc);
-			handled = 1;
+			handled++;
 		}
 	}
 	if (PCI_ERR & irq_stat) {
-		/* FIXME: these are all masked by default, but still need
-		 * to recover from them properly.
-		 */
-	}
+		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
+		       readl(mmio + PCI_IRQ_CAUSE_OFS));
 
+		DPRINTK("All regs @ PCI error\n");
+		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
+
+		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
+		handled++;
+	}
 	spin_unlock(&host_set->lock);
 
 	return IRQ_RETVAL(handled);
 }
 
+/**
+ * mv_check_err - Return the error shadow register to caller.
+ * @ap: ATA channel to manipulate
+ *
+ * Marvell requires DMA to be stopped before accessing shadow
+ * registers.  So we do that, then return the needed register.
+ *
+ * LOCKING:
+ * Inherited from caller.  FIXME: protect mv_stop_dma with lock?
+ */
+static u8 mv_check_err(struct ata_port *ap)
+{
+	mv_stop_dma(ap);	/* can't read shadow regs if DMA on */
+	return readb((void __iomem *) ap->ioaddr.error_addr);
+}
+
+/**
+ * mv_phy_reset - Perform eDMA reset followed by COMRESET
+ * @ap: ATA channel to manipulate
+ *
+ * Part of this is taken from __sata_phy_reset and modified to
+ * not sleep since this routine gets called from interrupt level.
+ *
+ * LOCKING:
+ * Inherited from caller.  This is coded to safe to call at
+ * interrupt level, i.e. it does not sleep.
+ */
 static void mv_phy_reset(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct ata_taskfile tf;
 	struct ata_device *dev = &ap->device[0];
-	u32 edma = 0, bdma;
+	unsigned long timeout;
 
 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
 
-	edma = readl(port_mmio + EDMA_CMD_OFS);
-	if (EDMA_EN & edma) {
-		/* disable EDMA if active */
-		edma &= ~EDMA_EN;
-		writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
-		udelay(1);
-	} else if (mv_port_bdma_capable(ap) &&
-		   (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
-		/* disable BDMA if active */
-		writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
-	}
+	mv_stop_dma(ap);
 
-	writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
+	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
 	udelay(25);		/* allow reset propagation */
 
 	/* Spec never mentions clearing the bit.  Marvell's driver does
 	 * clear the bit, however.
 	 */
-	writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);
+	writelfl(0, port_mmio + EDMA_CMD_OFS);
 
-	VPRINTK("Done.  Now calling __sata_phy_reset()\n");
+	VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
+		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
+		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
 
 	/* proceed to init communications via the scr_control reg */
-	__sata_phy_reset(ap);
+	scr_write_flush(ap, SCR_CONTROL, 0x301);
+	mdelay(1);
+	scr_write_flush(ap, SCR_CONTROL, 0x300);
+	timeout = jiffies + (HZ * 1);
+	do {
+		mdelay(10);
+		if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
+			break;
+	} while (time_before(jiffies, timeout));
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
-		VPRINTK("Port disabled pre-sig.  Exiting.\n");
+	VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
+		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
+		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
+
+	if (sata_dev_present(ap)) {
+		ata_port_probe(ap);
+	} else {
+		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
+		       ap->id, scr_read(ap, SCR_STATUS));
+		ata_port_disable(ap);
 		return;
 	}
+	ap->cbl = ATA_CBL_SATA;
 
 	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
 	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
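
The new reset path cannot sleep (per the LOCKING note it may run at interrupt level), so __sata_phy_reset() is replaced by an explicit COMRESET: write DET=1 into SControl, hold it at least 1ms, release it, then poll the DET field of SStatus for up to a second. The values involved are the standard SATA DET encodings, listed here from the spec for reference rather than taken from the patch:

	/* Low nibble of SStatus (DET), as polled above:
	 *   0x0  no device detected
	 *   0x1  device presence detected, no PHY communication yet
	 *   0x3  device presence detected, PHY communication established
	 *   0x4  PHY in offline mode
	 * The loop exits once DET leaves the transient 0x1 state;
	 * sata_dev_present() then tests for 0x3. */
	static inline int mv_phy_comm_up(u32 sstatus)
	{
		return (sstatus & 0xf) == 0x3;
	}
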
@@ -636,37 +1271,118 @@ static void mv_phy_reset(struct ata_port *ap)
 	VPRINTK("EXIT\n");
 }
 
-static void mv_port_init(struct ata_ioports *port, unsigned long base)
+/**
+ * mv_eng_timeout - Routine called by libata when SCSI times out I/O
+ * @ap: ATA channel to manipulate
+ *
+ * Intent is to clear all pending error conditions, reset the
+ * chip/bus, fail the command, and move on.
+ *
+ * LOCKING:
+ * This routine holds the host_set lock while failing the command.
+ */
+static void mv_eng_timeout(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+
+	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
+	DPRINTK("All regs @ start of eng_timeout\n");
+	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
+			 to_pci_dev(ap->host_set->dev));
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
+	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
+	       &qc->scsicmd->cmnd);
+
+	mv_err_intr(ap);
+	mv_phy_reset(ap);
+
+	if (!qc) {
+		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+		       ap->id);
+	} else {
+		/* hack alert!  We cannot use the supplied completion
+		 * function from inside the ->eh_strategy_handler() thread.
+		 * libata is the only user of ->eh_strategy_handler() in
+		 * any kernel, so the default scsi_done() assumes it is
+		 * not being called from the SCSI EH.
+		 */
+		spin_lock_irqsave(&ap->host_set->lock, flags);
+		qc->scsidone = scsi_finish_command;
+		ata_qc_complete(qc, ATA_ERR);
+		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	}
+}
+
+/**
+ * mv_port_init - Perform some early initialization on a single port.
+ * @port: libata data structure storing shadow register addresses
+ * @port_mmio: base address of the port
+ *
+ * Initialize shadow register mmio addresses, clear outstanding
+ * interrupts on the port, and unmask interrupts for the future
+ * start of the port.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
 {
-	/* PIO related setup */
-	port->data_addr = base + SHD_PIO_DATA_OFS;
-	port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
-	port->nsect_addr = base + SHD_SECT_CNT_OFS;
-	port->lbal_addr = base + SHD_LBA_L_OFS;
-	port->lbam_addr = base + SHD_LBA_M_OFS;
-	port->lbah_addr = base + SHD_LBA_H_OFS;
-	port->device_addr = base + SHD_DEV_HD_OFS;
-	port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
-	port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;
-	/* unused */
+	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
+	unsigned serr_ofs;
+
+	/* PIO related setup
+	 */
+	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
+	port->error_addr =
+		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
+	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
+	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
+	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
+	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
+	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
+	port->status_addr =
+		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
+	/* special case: control/altstatus doesn't have ATA_REG_ address */
+	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
+
+	/* unused: */
 	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
 
+	/* Clear any currently outstanding port interrupt conditions */
+	serr_ofs = mv_scr_offset(SCR_ERROR);
+	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
+	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
 	/* unmask all EDMA error interrupts */
-	writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);
+	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
 
 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
-		readl((void __iomem *)base + EDMA_CFG_OFS),
-		readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
-		readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
+		readl(port_mmio + EDMA_CFG_OFS),
+		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
+		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
 }
 
+/**
+ * mv_host_init - Perform some early initialization of the host.
+ * @probe_ent: early data struct representing the host
+ *
+ * If possible, do an early global reset of the host.  Then do
+ * our port init and clear/unmask all/relevant host interrupts.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
 static int mv_host_init(struct ata_probe_ent *probe_ent)
 {
 	int rc = 0, n_hc, port, hc;
 	void __iomem *mmio = probe_ent->mmio_base;
 	void __iomem *port_mmio;
 
-	if (mv_master_reset(probe_ent->mmio_base)) {
+	if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
+	    mv_global_soft_reset(probe_ent->mmio_base)) {
 		rc = 1;
 		goto done;
 	}
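
The rewritten mv_port_init() derives the shadow register addresses instead of spelling each one out: the chip exposes the taskfile as consecutive 32-bit words starting at port base + 0x100, so shd_base + 4 * ATA_REG_x reproduces every constant the old SHD_*_OFS table hard-coded (0x100 for data through 0x11C for status, with control/altstatus at the odd 0x120). A quick standalone check against the deleted values, using libata's standard taskfile register indices:

	#include <stdio.h>

	/* libata ATA_REG_* indices: DATA=0 ... STATUS=7 */
	enum { REG_DATA = 0, REG_ERR = 1, REG_NSECT = 2, REG_LBAL = 3,
	       REG_LBAM = 4, REG_LBAH = 5, REG_DEVICE = 6, REG_STATUS = 7 };

	int main(void)
	{
		unsigned shd_base = 0x100;	/* SHD_BLK_OFS */

		printf("data   0x%03x\n", shd_base + 4 * REG_DATA);	/* 0x100 */
		printf("error  0x%03x\n", shd_base + 4 * REG_ERR);	/* 0x104 */
		printf("status 0x%03x\n", shd_base + 4 * REG_STATUS);	/* 0x11c */
		printf("ctl    0x%03x\n", shd_base + 0x20);		/* 0x120 */
		return 0;
	}
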
@@ -676,17 +1392,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
 
 	for (port = 0; port < probe_ent->n_ports; port++) {
 		port_mmio = mv_port_base(mmio, port);
-		mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
+		mv_port_init(&probe_ent->port[port], port_mmio);
 	}
 
 	for (hc = 0; hc < n_hc; hc++) {
-		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
-			readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
-			readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
+		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+
+		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
+			"(before clear)=0x%08x\n", hc,
+			readl(hc_mmio + HC_CFG_OFS),
+			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
+
+		/* Clear any currently outstanding hc interrupt conditions */
+		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
 	}
 
-	writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
-	writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
+	/* Clear any currently outstanding host interrupt conditions */
+	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
+
+	/* and unmask interrupt generation for host regs */
+	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
+	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
 
 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
 		"PCI int cause/mask=0x%08x/0x%08x\n",
@@ -694,11 +1420,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
 		readl(mmio + PCI_IRQ_CAUSE_OFS),
 		readl(mmio + PCI_IRQ_MASK_OFS));
-
- done:
+done:
 	return rc;
 }
 
+/**
+ * mv_print_info - Dump key info to kernel log for perusal.
+ * @probe_ent: early data struct representing the host
+ *
+ * FIXME: complete this.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_print_info(struct ata_probe_ent *probe_ent)
+{
+	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
+	struct mv_host_priv *hpriv = probe_ent->private_data;
+	u8 rev_id, scc;
+	const char *scc_s;
+
+	/* Use this to determine the HW stepping of the chip so we know
+	 * what errata to workaround
+	 */
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+
+	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
+	if (scc == 0)
+		scc_s = "SCSI";
+	else if (scc == 0x01)
+		scc_s = "RAID";
+	else
+		scc_s = "unknown";
+
+	printk(KERN_INFO DRV_NAME
+	       "(%s) %u slots %u ports %s mode IRQ via %s\n",
+	       pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
+	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
+}
+
+/**
+ * mv_init_one - handle a positive probe of a Marvell host
+ * @pdev: PCI device found
+ * @ent: PCI device ID entry for the matched host
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
@@ -706,16 +1474,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct mv_host_priv *hpriv;
 	unsigned int board_idx = (unsigned int)ent->driver_data;
 	void __iomem *mmio_base;
-	int pci_dev_busy = 0;
-	int rc;
+	int pci_dev_busy = 0, rc;
 
 	if (!printed_version++) {
-		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+		printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
 	}
 
-	VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
-		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
 	rc = pci_enable_device(pdev);
 	if (rc) {
 		return rc;
@@ -727,8 +1491,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out;
 	}
 
-	pci_intx(pdev, 1);
-
 	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
 	if (probe_ent == NULL) {
 		rc = -ENOMEM;
@@ -739,8 +1501,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
-				    pci_resource_len(pdev, MV_PRIMARY_BAR));
+	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
@@ -769,37 +1530,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc) {
 		goto err_out_hpriv;
 	}
-/*	mv_print_info(probe_ent); */
 
-	{
-		int b, w;
-		u32 dw[4];	/* hold a line of 16b */
-		VPRINTK("PCI config space:\n");
-		for (b = 0; b < 0x40; ) {
-			for (w = 0; w < 4; w++) {
-				(void) pci_read_config_dword(pdev,b,&dw[w]);
-				b += sizeof(*dw);
-			}
-			VPRINTK("%08x %08x %08x %08x\n",
-				dw[0],dw[1],dw[2],dw[3]);
-		}
+	/* Enable interrupts */
+	if (pci_enable_msi(pdev) == 0) {
+		hpriv->hp_flags |= MV_HP_FLAG_MSI;
+	} else {
+		pci_intx(pdev, 1);
 	}
 
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
+	mv_dump_pci_cfg(pdev, 0x68);
+	mv_print_info(probe_ent);
+
+	if (ata_device_add(probe_ent) == 0) {
+		rc = -ENODEV; /* No devices discovered */
+		goto err_out_dev_add;
+	}
 
+	kfree(probe_ent);
 	return 0;
 
- err_out_hpriv:
+err_out_dev_add:
+	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
+		pci_disable_msi(pdev);
+	} else {
+		pci_intx(pdev, 0);
+	}
+err_out_hpriv:
 	kfree(hpriv);
- err_out_iounmap:
-	iounmap(mmio_base);
- err_out_free_ent:
+err_out_iounmap:
+	pci_iounmap(pdev, mmio_base);
+err_out_free_ent:
 	kfree(probe_ent);
- err_out_regions:
+err_out_regions:
 	pci_release_regions(pdev);
- err_out:
+err_out:
 	if (!pci_dev_busy) {
 		pci_disable_device(pdev);
 	}
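
With ata_device_add()'s return value now checked, the probe path gains one more rung on its goto ladder (err_out_dev_add), which also undoes whichever interrupt mode was enabled. The general shape of that unwind, reduced to a runnable toy (labels mirror the patch, the allocations are stand-ins):

	#include <stdio.h>
	#include <stdlib.h>

	/* Acquire resources in order; on failure, fall through labels
	 * that release only what already succeeded, in reverse order. */
	static int probe(void)
	{
		void *probe_ent, *hpriv;

		probe_ent = malloc(64);		/* like kmalloc(probe_ent) */
		if (!probe_ent)
			goto err_out;
		hpriv = malloc(64);		/* like kmalloc(hpriv)     */
		if (!hpriv)
			goto err_out_free_ent;

		printf("probe ok\n");
		free(hpriv);
		free(probe_ent);
		return 0;

	err_out_free_ent:
		free(probe_ent);
	err_out:
		return -1;
	}

	int main(void) { return probe(); }
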