author		Tejun Heo <htejun@gmail.com>	2007-01-03 03:32:45 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-09 17:39:31 -0500
commit		1fd7a697a37bcd484b130a71326e43cd68ced90c (patch)
tree		e1fcf2acf6698e2403367a2c4136911af5653f17 /drivers/ata
parent		726f0785b608d09bdd64bdbadc09217ebbf9920e (diff)
sata_inic162x: finally, driver for initio 162x SATA controllers, take #2
Driver for Initio 162x SATA controllers. ATA r/w, ATAPI r, hotplug
and suspend/resume work. ATAPI w (recording, that is) broken. Feel
free to fix it, but be warned, this controller is weird.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/ata')
-rw-r--r--	drivers/ata/Kconfig		|   6
-rw-r--r--	drivers/ata/Makefile		|   1
-rw-r--r--	drivers/ata/sata_inic162x.c	| 809
3 files changed, 816 insertions, 0 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index f72b3415d75f..ea102c089222 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -147,6 +147,12 @@ config SATA_VITESSE
 
 	  If unsure, say N.
 
+config SATA_INIC162X
+	tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for Initio 162x Serial ATA.
+
 config SATA_INTEL_COMBINED
 	bool
 	depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index a0df15d9e4a1..cd096f0c78a1 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SATA_SX4)		+= sata_sx4.o
 obj-$(CONFIG_SATA_NV)		+= sata_nv.o
 obj-$(CONFIG_SATA_ULI)		+= sata_uli.o
 obj-$(CONFIG_SATA_MV)		+= sata_mv.o
+obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
 obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
 
 obj-$(CONFIG_PATA_ALI)		+= pata_ali.o
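For anyone who wants to try the new driver once this patch is applied, here is a minimal configuration sketch. It assumes PCI support, CONFIG_EXPERIMENTAL, and the libata core (CONFIG_ATA) are already enabled; the exact menuconfig location varies by kernel version:

    CONFIG_ATA=m
    CONFIG_SATA_INIC162X=m

Built as a module, the driver can then be loaded with "modprobe sata_inic162x".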
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
new file mode 100644
index 000000000000..b67817e440c5
--- /dev/null
+++ b/drivers/ata/sata_inic162x.c
@@ -0,0 +1,809 @@
1 | /* | ||
2 | * sata_inic162x.c - Driver for Initio 162x SATA controllers | ||
3 | * | ||
4 | * Copyright 2006 SUSE Linux Products GmbH | ||
5 | * Copyright 2006 Tejun Heo <teheo@novell.com> | ||
6 | * | ||
7 | * This file is released under GPL v2. | ||
8 | * | ||
9 | * This controller is eccentric and easily locks up if something isn't | ||
10 | * right. Documentation is available at Initio's website but it only | ||
11 | * documents registers (not the programming model). | ||
12 | * | ||
13 | * - ATA disks work. | ||
14 | * - Hotplug works. | ||
15 | * - ATAPI read works but burning doesn't. This thing is really | ||
16 | * peculiar about ATAPI and I couldn't figure out how ATAPI PIO and | ||
17 | * ATAPI DMA WRITE should be programmed. If you've got a clue, be | ||
18 | * my guest. | ||
19 | * - Both STR and STD work. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <scsi/scsi_host.h> | ||
26 | #include <linux/libata.h> | ||
27 | #include <linux/blkdev.h> | ||
28 | #include <scsi/scsi_device.h> | ||
29 | |||
30 | #define DRV_NAME "sata_inic162x" | ||
31 | #define DRV_VERSION "0.1" | ||
32 | |||
33 | enum { | ||
34 | MMIO_BAR = 5, | ||
35 | |||
36 | NR_PORTS = 2, | ||
37 | |||
38 | HOST_CTL = 0x7c, | ||
39 | HOST_STAT = 0x7e, | ||
40 | HOST_IRQ_STAT = 0xbc, | ||
41 | HOST_IRQ_MASK = 0xbe, | ||
42 | |||
43 | PORT_SIZE = 0x40, | ||
44 | |||
45 | /* registers for ATA TF operation */ | ||
46 | PORT_TF = 0x00, | ||
47 | PORT_ALT_STAT = 0x08, | ||
48 | PORT_IRQ_STAT = 0x09, | ||
49 | PORT_IRQ_MASK = 0x0a, | ||
50 | PORT_PRD_CTL = 0x0b, | ||
51 | PORT_PRD_ADDR = 0x0c, | ||
52 | PORT_PRD_XFERLEN = 0x10, | ||
53 | |||
54 | /* IDMA register */ | ||
55 | PORT_IDMA_CTL = 0x14, | ||
56 | |||
57 | PORT_SCR = 0x20, | ||
58 | |||
59 | /* HOST_CTL bits */ | ||
60 | HCTL_IRQOFF = (1 << 8), /* global IRQ off */ | ||
61 | HCTL_PWRDWN = (1 << 13), /* power down PHYs */ | ||
62 | HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */ | ||
63 | HCTL_RPGSEL = (1 << 15), /* register page select */ | ||
64 | |||
65 | HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST | | ||
66 | HCTL_RPGSEL, | ||
67 | |||
68 | /* HOST_IRQ_(STAT|MASK) bits */ | ||
69 | HIRQ_PORT0 = (1 << 0), | ||
70 | HIRQ_PORT1 = (1 << 1), | ||
71 | HIRQ_SOFT = (1 << 14), | ||
72 | HIRQ_GLOBAL = (1 << 15), /* STAT only */ | ||
73 | |||
74 | /* PORT_IRQ_(STAT|MASK) bits */ | ||
75 | PIRQ_OFFLINE = (1 << 0), /* device unplugged */ | ||
76 | PIRQ_ONLINE = (1 << 1), /* device plugged */ | ||
77 | PIRQ_COMPLETE = (1 << 2), /* completion interrupt */ | ||
78 | PIRQ_FATAL = (1 << 3), /* fatal error */ | ||
79 | PIRQ_ATA = (1 << 4), /* ATA interrupt */ | ||
80 | PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */ | ||
81 | PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */ | ||
82 | |||
83 | PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL, | ||
84 | |||
85 | PIRQ_MASK_DMA_READ = PIRQ_REPLY | PIRQ_ATA, | ||
86 | PIRQ_MASK_OTHER = PIRQ_REPLY | PIRQ_COMPLETE, | ||
87 | PIRQ_MASK_FREEZE = 0xff, | ||
88 | |||
89 | /* PORT_PRD_CTL bits */ | ||
90 | PRD_CTL_START = (1 << 0), | ||
91 | PRD_CTL_WR = (1 << 3), | ||
92 | PRD_CTL_DMAEN = (1 << 7), /* DMA enable */ | ||
93 | |||
94 | /* PORT_IDMA_CTL bits */ | ||
95 | IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */ | ||
96 | IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */ | ||
97 | IDMA_CTL_GO = (1 << 7), /* IDMA mode go */ | ||
98 | IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */ | ||
99 | }; | ||
100 | |||
101 | struct inic_host_priv { | ||
102 | u16 cached_hctl; | ||
103 | }; | ||
104 | |||
105 | struct inic_port_priv { | ||
106 | u8 dfl_prdctl; | ||
107 | u8 cached_prdctl; | ||
108 | u8 cached_pirq_mask; | ||
109 | }; | ||
110 | |||
111 | static int inic_slave_config(struct scsi_device *sdev) | ||
112 | { | ||
113 | /* This controller is braindamaged. dma_boundary is 0xffff | ||
114 | * like others but it will lock up the whole machine HARD if | ||
115 | * a 65536 byte PRD entry is fed. Reduce maximum segment size. | ||
116 | */ | ||
117 | blk_queue_max_segment_size(sdev->request_queue, 65536 - 512); | ||
118 | |||
119 | return ata_scsi_slave_config(sdev); | ||
120 | } | ||
121 | |||
122 | static struct scsi_host_template inic_sht = { | ||
123 | .module = THIS_MODULE, | ||
124 | .name = DRV_NAME, | ||
125 | .ioctl = ata_scsi_ioctl, | ||
126 | .queuecommand = ata_scsi_queuecmd, | ||
127 | .can_queue = ATA_DEF_QUEUE, | ||
128 | .this_id = ATA_SHT_THIS_ID, | ||
129 | .sg_tablesize = LIBATA_MAX_PRD, | ||
130 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
131 | .emulated = ATA_SHT_EMULATED, | ||
132 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
133 | .proc_name = DRV_NAME, | ||
134 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
135 | .slave_configure = inic_slave_config, | ||
136 | .slave_destroy = ata_scsi_slave_destroy, | ||
137 | .bios_param = ata_std_bios_param, | ||
138 | .suspend = ata_scsi_device_suspend, | ||
139 | .resume = ata_scsi_device_resume, | ||
140 | }; | ||
141 | |||
142 | static const int scr_map[] = { | ||
143 | [SCR_STATUS] = 0, | ||
144 | [SCR_ERROR] = 1, | ||
145 | [SCR_CONTROL] = 2, | ||
146 | }; | ||
147 | |||
148 | static void __iomem * inic_port_base(struct ata_port *ap) | ||
149 | { | ||
150 | return ap->host->mmio_base + ap->port_no * PORT_SIZE; | ||
151 | } | ||
152 | |||
153 | static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask) | ||
154 | { | ||
155 | void __iomem *port_base = inic_port_base(ap); | ||
156 | struct inic_port_priv *pp = ap->private_data; | ||
157 | |||
158 | writeb(mask, port_base + PORT_IRQ_MASK); | ||
159 | pp->cached_pirq_mask = mask; | ||
160 | } | ||
161 | |||
162 | static void inic_set_pirq_mask(struct ata_port *ap, u8 mask) | ||
163 | { | ||
164 | struct inic_port_priv *pp = ap->private_data; | ||
165 | |||
166 | if (pp->cached_pirq_mask != mask) | ||
167 | __inic_set_pirq_mask(ap, mask); | ||
168 | } | ||
169 | |||
170 | static void inic_reset_port(void __iomem *port_base) | ||
171 | { | ||
172 | void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; | ||
173 | u16 ctl; | ||
174 | |||
175 | ctl = readw(idma_ctl); | ||
176 | ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO); | ||
177 | |||
178 | /* mask IRQ and assert reset */ | ||
179 | writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl); | ||
180 | readw(idma_ctl); /* flush */ | ||
181 | |||
182 | /* give it some time */ | ||
183 | msleep(1); | ||
184 | |||
185 | /* release reset */ | ||
186 | writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl); | ||
187 | |||
188 | /* clear irq */ | ||
189 | writeb(0xff, port_base + PORT_IRQ_STAT); | ||
190 | |||
191 | /* reenable ATA IRQ, turn off IDMA mode */ | ||
192 | writew(ctl, idma_ctl); | ||
193 | } | ||
194 | |||
195 | static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg) | ||
196 | { | ||
197 | void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr; | ||
198 | void __iomem *addr; | ||
199 | u32 val; | ||
200 | |||
201 | if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) | ||
202 | return 0xffffffffU; | ||
203 | |||
204 | addr = scr_addr + scr_map[sc_reg] * 4; | ||
205 | val = readl(addr); | ||
206 | |||
207 | /* this controller has stuck DIAG.N, ignore it */ | ||
208 | if (sc_reg == SCR_ERROR) | ||
209 | val &= ~SERR_PHYRDY_CHG; | ||
210 | return val; | ||
211 | } | ||
212 | |||
213 | static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) | ||
214 | { | ||
215 | void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr; | ||
216 | void __iomem *addr; | ||
217 | |||
218 | if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) | ||
219 | return; | ||
220 | |||
221 | addr = scr_addr + scr_map[sc_reg] * 4; | ||
222 | writel(val, addr); | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * In TF mode, inic162x is very similar to SFF device. TF registers | ||
227 | * function the same. DMA engine behaves similarly using the same PRD | ||
228 | * format as BMDMA but different command register, interrupt and event | ||
229 | * notification methods are used. The following inic_bmdma_*() | ||
230 | * functions do the impedance matching. | ||
231 | */ | ||
232 | static void inic_bmdma_setup(struct ata_queued_cmd *qc) | ||
233 | { | ||
234 | struct ata_port *ap = qc->ap; | ||
235 | struct inic_port_priv *pp = ap->private_data; | ||
236 | void __iomem *port_base = inic_port_base(ap); | ||
237 | int rw = qc->tf.flags & ATA_TFLAG_WRITE; | ||
238 | |||
239 | /* make sure device sees PRD table writes */ | ||
240 | wmb(); | ||
241 | |||
242 | /* load transfer length */ | ||
243 | writel(qc->nbytes, port_base + PORT_PRD_XFERLEN); | ||
244 | |||
245 | /* turn on DMA and specify data direction */ | ||
246 | pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN; | ||
247 | if (!rw) | ||
248 | pp->cached_prdctl |= PRD_CTL_WR; | ||
249 | writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); | ||
250 | |||
251 | /* issue r/w command */ | ||
252 | ap->ops->exec_command(ap, &qc->tf); | ||
253 | } | ||
254 | |||
255 | static void inic_bmdma_start(struct ata_queued_cmd *qc) | ||
256 | { | ||
257 | struct ata_port *ap = qc->ap; | ||
258 | struct inic_port_priv *pp = ap->private_data; | ||
259 | void __iomem *port_base = inic_port_base(ap); | ||
260 | |||
261 | /* start host DMA transaction */ | ||
262 | pp->cached_prdctl |= PRD_CTL_START; | ||
263 | writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); | ||
264 | } | ||
265 | |||
266 | static void inic_bmdma_stop(struct ata_queued_cmd *qc) | ||
267 | { | ||
268 | struct ata_port *ap = qc->ap; | ||
269 | struct inic_port_priv *pp = ap->private_data; | ||
270 | void __iomem *port_base = inic_port_base(ap); | ||
271 | |||
272 | /* stop DMA engine */ | ||
273 | writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); | ||
274 | } | ||
275 | |||
276 | static u8 inic_bmdma_status(struct ata_port *ap) | ||
277 | { | ||
278 | /* event is already verified by the interrupt handler */ | ||
279 | return ATA_DMA_INTR; | ||
280 | } | ||
281 | |||
282 | static void inic_irq_clear(struct ata_port *ap) | ||
283 | { | ||
284 | /* noop */ | ||
285 | } | ||
286 | |||
287 | static void inic_host_intr(struct ata_port *ap) | ||
288 | { | ||
289 | void __iomem *port_base = inic_port_base(ap); | ||
290 | struct ata_eh_info *ehi = &ap->eh_info; | ||
291 | u8 irq_stat; | ||
292 | |||
293 | /* fetch and clear irq */ | ||
294 | irq_stat = readb(port_base + PORT_IRQ_STAT); | ||
295 | writeb(irq_stat, port_base + PORT_IRQ_STAT); | ||
296 | |||
297 | if (likely(!(irq_stat & PIRQ_ERR))) { | ||
298 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); | ||
299 | |||
300 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | ||
301 | ata_chk_status(ap); /* clear ATA interrupt */ | ||
302 | return; | ||
303 | } | ||
304 | |||
305 | if (likely(ata_host_intr(ap, qc))) | ||
306 | return; | ||
307 | |||
308 | ata_chk_status(ap); /* clear ATA interrupt */ | ||
309 | ata_port_printk(ap, KERN_WARNING, "unhandled " | ||
310 | "interrupt, irq_stat=%x\n", irq_stat); | ||
311 | return; | ||
312 | } | ||
313 | |||
314 | /* error */ | ||
315 | ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat); | ||
316 | |||
317 | if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { | ||
318 | ata_ehi_hotplugged(ehi); | ||
319 | ata_port_freeze(ap); | ||
320 | } else | ||
321 | ata_port_abort(ap); | ||
322 | } | ||
323 | |||
324 | static irqreturn_t inic_interrupt(int irq, void *dev_instance) | ||
325 | { | ||
326 | struct ata_host *host = dev_instance; | ||
327 | void __iomem *mmio_base = host->mmio_base; | ||
328 | u16 host_irq_stat; | ||
329 | int i, handled = 0; | ||
330 | |||
331 | host_irq_stat = readw(mmio_base + HOST_IRQ_STAT); | ||
332 | |||
333 | if (unlikely(!(host_irq_stat & HIRQ_GLOBAL))) | ||
334 | goto out; | ||
335 | |||
336 | spin_lock(&host->lock); | ||
337 | |||
338 | for (i = 0; i < NR_PORTS; i++) { | ||
339 | struct ata_port *ap = host->ports[i]; | ||
340 | |||
341 | if (!(host_irq_stat & (HIRQ_PORT0 << i))) | ||
342 | continue; | ||
343 | |||
344 | if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) { | ||
345 | inic_host_intr(ap); | ||
346 | handled++; | ||
347 | } else { | ||
348 | if (ata_ratelimit()) | ||
349 | dev_printk(KERN_ERR, host->dev, "interrupt " | ||
350 | "from disabled port %d (0x%x)\n", | ||
351 | i, host_irq_stat); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | spin_unlock(&host->lock); | ||
356 | |||
357 | out: | ||
358 | return IRQ_RETVAL(handled); | ||
359 | } | ||
360 | |||
361 | static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) | ||
362 | { | ||
363 | struct ata_port *ap = qc->ap; | ||
364 | |||
365 | /* ATA IRQ doesn't wait for DMA transfer completion and vice | ||
366 | * versa. Mask IRQ selectively to detect command completion. | ||
367 | * Without it, ATA DMA read command can cause data corruption. | ||
368 | * | ||
369 | * Something similar might be needed for ATAPI writes. I | ||
370 | * tried a lot of combinations but couldn't find the solution. | ||
371 | */ | ||
372 | if (qc->tf.protocol == ATA_PROT_DMA && | ||
373 | !(qc->tf.flags & ATA_TFLAG_WRITE)) | ||
374 | inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ); | ||
375 | else | ||
376 | inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); | ||
377 | |||
378 | /* Issuing a command to a not-yet-initialized port locks up the | ||
379 | * controller. Most of the time, this happens for the first | ||
380 | * commands after reset, which are the ATA and ATAPI IDENTIFYs. | ||
381 | * Fast fail if stat is 0x7f or 0xff for those commands. | ||
382 | */ | ||
383 | if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || | ||
384 | qc->tf.command == ATA_CMD_ID_ATAPI)) { | ||
385 | u8 stat = ata_chk_status(ap); | ||
386 | if (stat == 0x7f || stat == 0xff) | ||
387 | return AC_ERR_HSM; | ||
388 | } | ||
389 | |||
390 | return ata_qc_issue_prot(qc); | ||
391 | } | ||
392 | |||
393 | static void inic_freeze(struct ata_port *ap) | ||
394 | { | ||
395 | void __iomem *port_base = inic_port_base(ap); | ||
396 | |||
397 | __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE); | ||
398 | |||
399 | ata_chk_status(ap); | ||
400 | writeb(0xff, port_base + PORT_IRQ_STAT); | ||
401 | |||
402 | readb(port_base + PORT_IRQ_STAT); /* flush */ | ||
403 | } | ||
404 | |||
405 | static void inic_thaw(struct ata_port *ap) | ||
406 | { | ||
407 | void __iomem *port_base = inic_port_base(ap); | ||
408 | |||
409 | ata_chk_status(ap); | ||
410 | writeb(0xff, port_base + PORT_IRQ_STAT); | ||
411 | |||
412 | __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); | ||
413 | |||
414 | readb(port_base + PORT_IRQ_STAT); /* flush */ | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * SRST and SControl hardreset don't give valid signature on this | ||
419 | * controller. Only controller specific hardreset mechanism works. | ||
420 | */ | ||
421 | static int inic_hardreset(struct ata_port *ap, unsigned int *class) | ||
422 | { | ||
423 | void __iomem *port_base = inic_port_base(ap); | ||
424 | void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; | ||
425 | const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context); | ||
426 | u16 val; | ||
427 | int rc; | ||
428 | |||
429 | /* hammer it into sane state */ | ||
430 | inic_reset_port(port_base); | ||
431 | |||
432 | if (ata_port_offline(ap)) { | ||
433 | *class = ATA_DEV_NONE; | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | val = readw(idma_ctl); | ||
438 | writew(val | IDMA_CTL_RST_ATA, idma_ctl); | ||
439 | readw(idma_ctl); /* flush */ | ||
440 | msleep(1); | ||
441 | writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); | ||
442 | |||
443 | rc = sata_phy_resume(ap, timing); | ||
444 | if (rc) { | ||
445 | ata_port_printk(ap, KERN_WARNING, "failed to resume " | ||
446 | "link for reset (errno=%d)\n", rc); | ||
447 | return rc; | ||
448 | } | ||
449 | |||
450 | msleep(150); | ||
451 | |||
452 | *class = ATA_DEV_NONE; | ||
453 | if (ata_port_online(ap)) { | ||
454 | struct ata_taskfile tf; | ||
455 | |||
456 | if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { | ||
457 | ata_port_printk(ap, KERN_WARNING, | ||
458 | "device busy after hardreset\n"); | ||
459 | return -EIO; | ||
460 | } | ||
461 | |||
462 | ata_tf_read(ap, &tf); | ||
463 | *class = ata_dev_classify(&tf); | ||
464 | if (*class == ATA_DEV_UNKNOWN) | ||
465 | *class = ATA_DEV_NONE; | ||
466 | } | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static void inic_error_handler(struct ata_port *ap) | ||
472 | { | ||
473 | void __iomem *port_base = inic_port_base(ap); | ||
474 | struct inic_port_priv *pp = ap->private_data; | ||
475 | unsigned long flags; | ||
476 | |||
477 | /* reset PIO HSM and stop DMA engine */ | ||
478 | inic_reset_port(port_base); | ||
479 | |||
480 | spin_lock_irqsave(ap->lock, flags); | ||
481 | ap->hsm_task_state = HSM_ST_IDLE; | ||
482 | writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); | ||
483 | spin_unlock_irqrestore(ap->lock, flags); | ||
484 | |||
485 | /* PIO and DMA engines have been stopped, perform recovery */ | ||
486 | ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset, | ||
487 | ata_std_postreset); | ||
488 | } | ||
489 | |||
490 | static void inic_post_internal_cmd(struct ata_queued_cmd *qc) | ||
491 | { | ||
492 | /* make DMA engine forget about the failed command */ | ||
493 | if (qc->err_mask) | ||
494 | inic_reset_port(inic_port_base(qc->ap)); | ||
495 | } | ||
496 | |||
497 | static void inic_dev_config(struct ata_port *ap, struct ata_device *dev) | ||
498 | { | ||
499 | /* inic can only handle up to LBA28 max sectors */ | ||
500 | if (dev->max_sectors > ATA_MAX_SECTORS) | ||
501 | dev->max_sectors = ATA_MAX_SECTORS; | ||
502 | } | ||
503 | |||
504 | static void init_port(struct ata_port *ap) | ||
505 | { | ||
506 | void __iomem *port_base = inic_port_base(ap); | ||
507 | |||
508 | /* Setup PRD address */ | ||
509 | writel(ap->prd_dma, port_base + PORT_PRD_ADDR); | ||
510 | } | ||
511 | |||
512 | static int inic_port_resume(struct ata_port *ap) | ||
513 | { | ||
514 | init_port(ap); | ||
515 | return 0; | ||
516 | } | ||
517 | |||
518 | static int inic_port_start(struct ata_port *ap) | ||
519 | { | ||
520 | void __iomem *port_base = inic_port_base(ap); | ||
521 | struct inic_port_priv *pp; | ||
522 | u8 tmp; | ||
523 | int rc; | ||
524 | |||
525 | /* alloc and initialize private data */ | ||
526 | pp = kzalloc(sizeof(*pp), GFP_KERNEL); | ||
527 | if (!pp) | ||
528 | return -ENOMEM; | ||
529 | ap->private_data = pp; | ||
530 | |||
531 | /* default PRD_CTL value, DMAEN, WR and START off */ | ||
532 | tmp = readb(port_base + PORT_PRD_CTL); | ||
533 | tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START); | ||
534 | pp->dfl_prdctl = tmp; | ||
535 | |||
536 | /* Alloc resources */ | ||
537 | rc = ata_port_start(ap); | ||
538 | if (rc) { | ||
539 | kfree(pp); | ||
540 | return rc; | ||
541 | } | ||
542 | |||
543 | init_port(ap); | ||
544 | |||
545 | return 0; | ||
546 | } | ||
547 | |||
548 | static void inic_port_stop(struct ata_port *ap) | ||
549 | { | ||
550 | ata_port_stop(ap); | ||
551 | kfree(ap->private_data); | ||
552 | } | ||
553 | |||
554 | static struct ata_port_operations inic_port_ops = { | ||
555 | .port_disable = ata_port_disable, | ||
556 | .tf_load = ata_tf_load, | ||
557 | .tf_read = ata_tf_read, | ||
558 | .check_status = ata_check_status, | ||
559 | .exec_command = ata_exec_command, | ||
560 | .dev_select = ata_std_dev_select, | ||
561 | |||
562 | .scr_read = inic_scr_read, | ||
563 | .scr_write = inic_scr_write, | ||
564 | |||
565 | .bmdma_setup = inic_bmdma_setup, | ||
566 | .bmdma_start = inic_bmdma_start, | ||
567 | .bmdma_stop = inic_bmdma_stop, | ||
568 | .bmdma_status = inic_bmdma_status, | ||
569 | |||
570 | .irq_handler = inic_interrupt, | ||
571 | .irq_clear = inic_irq_clear, | ||
572 | |||
573 | .qc_prep = ata_qc_prep, | ||
574 | .qc_issue = inic_qc_issue, | ||
575 | .data_xfer = ata_pio_data_xfer, | ||
576 | |||
577 | .freeze = inic_freeze, | ||
578 | .thaw = inic_thaw, | ||
579 | .error_handler = inic_error_handler, | ||
580 | .post_internal_cmd = inic_post_internal_cmd, | ||
581 | .dev_config = inic_dev_config, | ||
582 | |||
583 | .port_resume = inic_port_resume, | ||
584 | |||
585 | .port_start = inic_port_start, | ||
586 | .port_stop = inic_port_stop, | ||
587 | .host_stop = ata_pci_host_stop | ||
588 | }; | ||
589 | |||
590 | static struct ata_port_info inic_port_info = { | ||
591 | .sht = &inic_sht, | ||
592 | /* For some reason, ATA_PROT_ATAPI is broken on this | ||
593 | * controller, and no, PIO_POLLING doesn't fix it. It somehow | ||
594 | * manages to report the wrong ireason and ignoring ireason | ||
595 | * results in machine lock up. Tell libata to always prefer | ||
596 | * DMA. | ||
597 | */ | ||
598 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, | ||
599 | .pio_mask = 0x1f, /* pio0-4 */ | ||
600 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
601 | .udma_mask = 0x7f, /* udma0-6 */ | ||
602 | .port_ops = &inic_port_ops | ||
603 | }; | ||
604 | |||
605 | static int init_controller(void __iomem *mmio_base, u16 hctl) | ||
606 | { | ||
607 | int i; | ||
608 | u16 val; | ||
609 | |||
610 | hctl &= ~HCTL_KNOWN_BITS; | ||
611 | |||
612 | /* Soft reset whole controller. Spec says reset duration is 3 | ||
613 | * PCI clocks, be generous and give it 10ms. | ||
614 | */ | ||
615 | writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL); | ||
616 | readw(mmio_base + HOST_CTL); /* flush */ | ||
617 | |||
618 | for (i = 0; i < 10; i++) { | ||
619 | msleep(1); | ||
620 | val = readw(mmio_base + HOST_CTL); | ||
621 | if (!(val & HCTL_SOFTRST)) | ||
622 | break; | ||
623 | } | ||
624 | |||
625 | if (val & HCTL_SOFTRST) | ||
626 | return -EIO; | ||
627 | |||
628 | /* mask all interrupts and reset ports */ | ||
629 | for (i = 0; i < NR_PORTS; i++) { | ||
630 | void __iomem *port_base = mmio_base + i * PORT_SIZE; | ||
631 | |||
632 | writeb(0xff, port_base + PORT_IRQ_MASK); | ||
633 | inic_reset_port(port_base); | ||
634 | } | ||
635 | |||
636 | /* port IRQ is masked now, unmask global IRQ */ | ||
637 | writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL); | ||
638 | val = readw(mmio_base + HOST_IRQ_MASK); | ||
639 | val &= ~(HIRQ_PORT0 | HIRQ_PORT1); | ||
640 | writew(val, mmio_base + HOST_IRQ_MASK); | ||
641 | |||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | static int inic_pci_device_resume(struct pci_dev *pdev) | ||
646 | { | ||
647 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
648 | struct inic_host_priv *hpriv = host->private_data; | ||
649 | void __iomem *mmio_base = host->mmio_base; | ||
650 | int rc; | ||
651 | |||
652 | ata_pci_device_do_resume(pdev); | ||
653 | |||
654 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { | ||
655 | printk("XXX\n"); | ||
656 | rc = init_controller(mmio_base, hpriv->cached_hctl); | ||
657 | if (rc) | ||
658 | return rc; | ||
659 | } | ||
660 | |||
661 | ata_host_resume(host); | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
667 | { | ||
668 | static int printed_version; | ||
669 | struct ata_port_info *pinfo = &inic_port_info; | ||
670 | struct ata_probe_ent *probe_ent; | ||
671 | struct inic_host_priv *hpriv; | ||
672 | void __iomem *mmio_base; | ||
673 | int i, rc; | ||
674 | |||
675 | if (!printed_version++) | ||
676 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | ||
677 | |||
678 | rc = pci_enable_device(pdev); | ||
679 | if (rc) | ||
680 | return rc; | ||
681 | |||
682 | rc = pci_request_regions(pdev, DRV_NAME); | ||
683 | if (rc) | ||
684 | goto err_out; | ||
685 | |||
686 | rc = -ENOMEM; | ||
687 | mmio_base = pci_iomap(pdev, MMIO_BAR, 0); | ||
688 | if (!mmio_base) | ||
689 | goto err_out_regions; | ||
690 | |||
691 | /* Set dma_mask. This device doesn't support 64-bit addressing. */ | ||
692 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
693 | if (rc) { | ||
694 | dev_printk(KERN_ERR, &pdev->dev, | ||
695 | "32-bit DMA enable failed\n"); | ||
696 | goto err_out_map; | ||
697 | } | ||
698 | |||
699 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
700 | if (rc) { | ||
701 | dev_printk(KERN_ERR, &pdev->dev, | ||
702 | "32-bit consistent DMA enable failed\n"); | ||
703 | goto err_out_map; | ||
704 | } | ||
705 | |||
706 | rc = -ENOMEM; | ||
707 | probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); | ||
708 | if (!probe_ent) | ||
709 | goto err_out_map; | ||
710 | |||
711 | hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); | ||
712 | if (!hpriv) | ||
713 | goto err_out_ent; | ||
714 | |||
715 | probe_ent->dev = &pdev->dev; | ||
716 | INIT_LIST_HEAD(&probe_ent->node); | ||
717 | |||
718 | probe_ent->sht = pinfo->sht; | ||
719 | probe_ent->port_flags = pinfo->flags; | ||
720 | probe_ent->pio_mask = pinfo->pio_mask; | ||
721 | probe_ent->mwdma_mask = pinfo->mwdma_mask; | ||
722 | probe_ent->udma_mask = pinfo->udma_mask; | ||
723 | probe_ent->port_ops = pinfo->port_ops; | ||
724 | probe_ent->n_ports = NR_PORTS; | ||
725 | |||
726 | probe_ent->irq = pdev->irq; | ||
727 | probe_ent->irq_flags = SA_SHIRQ; | ||
728 | |||
729 | probe_ent->mmio_base = mmio_base; | ||
730 | |||
731 | for (i = 0; i < NR_PORTS; i++) { | ||
732 | struct ata_ioports *port = &probe_ent->port[i]; | ||
733 | unsigned long port_base = | ||
734 | (unsigned long)mmio_base + i * PORT_SIZE; | ||
735 | |||
736 | port->cmd_addr = pci_resource_start(pdev, 2 * i); | ||
737 | port->altstatus_addr = | ||
738 | port->ctl_addr = | ||
739 | pci_resource_start(pdev, 2 * i + 1) | ATA_PCI_CTL_OFS; | ||
740 | port->scr_addr = port_base + PORT_SCR; | ||
741 | |||
742 | ata_std_ports(port); | ||
743 | } | ||
744 | |||
745 | probe_ent->private_data = hpriv; | ||
746 | hpriv->cached_hctl = readw(mmio_base + HOST_CTL); | ||
747 | |||
748 | rc = init_controller(mmio_base, hpriv->cached_hctl); | ||
749 | if (rc) { | ||
750 | dev_printk(KERN_ERR, &pdev->dev, | ||
751 | "failed to initialize controller\n"); | ||
752 | goto err_out_hpriv; | ||
753 | } | ||
754 | |||
755 | pci_set_master(pdev); | ||
756 | |||
757 | rc = -ENODEV; | ||
758 | if (!ata_device_add(probe_ent)) | ||
759 | goto err_out_hpriv; | ||
760 | |||
761 | kfree(probe_ent); | ||
762 | |||
763 | return 0; | ||
764 | |||
765 | err_out_hpriv: | ||
766 | kfree(hpriv); | ||
767 | err_out_ent: | ||
768 | kfree(probe_ent); | ||
769 | err_out_map: | ||
770 | pci_iounmap(pdev, mmio_base); | ||
771 | err_out_regions: | ||
772 | pci_release_regions(pdev); | ||
773 | err_out: | ||
774 | pci_disable_device(pdev); | ||
775 | return rc; | ||
776 | } | ||
777 | |||
778 | static const struct pci_device_id inic_pci_tbl[] = { | ||
779 | { PCI_VDEVICE(INIT, 0x1622), }, | ||
780 | { }, | ||
781 | }; | ||
782 | |||
783 | static struct pci_driver inic_pci_driver = { | ||
784 | .name = DRV_NAME, | ||
785 | .id_table = inic_pci_tbl, | ||
786 | .suspend = ata_pci_device_suspend, | ||
787 | .resume = inic_pci_device_resume, | ||
788 | .probe = inic_init_one, | ||
789 | .remove = ata_pci_remove_one, | ||
790 | }; | ||
791 | |||
792 | static int __init inic_init(void) | ||
793 | { | ||
794 | return pci_register_driver(&inic_pci_driver); | ||
795 | } | ||
796 | |||
797 | static void __exit inic_exit(void) | ||
798 | { | ||
799 | pci_unregister_driver(&inic_pci_driver); | ||
800 | } | ||
801 | |||
802 | MODULE_AUTHOR("Tejun Heo"); | ||
803 | MODULE_DESCRIPTION("low-level driver for Initio 162x SATA"); | ||
804 | MODULE_LICENSE("GPL v2"); | ||
805 | MODULE_DEVICE_TABLE(pci, inic_pci_tbl); | ||
806 | MODULE_VERSION(DRV_VERSION); | ||
807 | |||
808 | module_init(inic_init); | ||
809 | module_exit(inic_exit); | ||