author		Viresh Kumar <viresh.kumar@st.com>	2011-02-22 05:16:07 -0500
committer	Jeff Garzik <jgarzik@pobox.com>		2011-03-14 02:52:46 -0400
commit		a480167b23ef9b35ec0299bb3e1b11b4ed6b3508 (patch)
tree		a445375d94ce680339b88365e0b2d1c3945492d9 /drivers/ata/pata_arasan_cf.c
parent		64b97594251bb909d74d64012a2b9e5cc32bb11d (diff)
pata_arasan_cf: Adding support for arasan compact flash host controller
The Arasan CompactFlash Device Controller has three basic modes of
operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
PC card ATA using True IDE mode.
Currently the driver supports only True IDE mode.
Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/ata/pata_arasan_cf.c')
-rw-r--r--	drivers/ata/pata_arasan_cf.c	977
1 file changed, 977 insertions, 0 deletions
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
new file mode 100644
index 000000000000..b99b3fce307f
--- /dev/null
+++ b/drivers/ata/pata_arasan_cf.c
@@ -0,0 +1,977 @@
/*
 * drivers/ata/pata_arasan_cf.c
 *
 * Arasan Compact Flash host controller source file
 *
 * Copyright (C) 2011 ST Microelectronics
 * Viresh Kumar <viresh.kumar@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/*
 * The Arasan CompactFlash Device Controller IP core has three basic modes of
 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
 * PC card ATA using True IDE mode. This driver currently supports only True
 * IDE mode.
 *
 * The Arasan CF controller shares its global irq register with the Arasan XD
 * controller.
 *
 * Tested on arch/arm/mach-spear13xx
 */

#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DRIVER_NAME	"arasan_cf"
#define TIMEOUT		msecs_to_jiffies(3000)

/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS			0x000
	#define STS_CHG			(1)
	#define BIN_AUDIO_OUT		(1 << 1)
	#define CARD_DETECT1		(1 << 2)
	#define CARD_DETECT2		(1 << 3)
	#define INP_ACK			(1 << 4)
	#define CARD_READY		(1 << 5)
	#define IO_READY		(1 << 6)
	#define B16_IO_PORT_SEL		(1 << 7)
/* IRQ */
#define IRQ_STS			0x004
/* Interrupt Enable */
#define IRQ_EN			0x008
	#define CARD_DETECT_IRQ		(1)
	#define STATUS_CHNG_IRQ		(1 << 1)
	#define MEM_MODE_IRQ		(1 << 2)
	#define IO_MODE_IRQ		(1 << 3)
	#define TRUE_IDE_MODE_IRQ	(1 << 8)
	#define PIO_XFER_ERR_IRQ	(1 << 9)
	#define BUF_AVAIL_IRQ		(1 << 10)
	#define XFER_DONE_IRQ		(1 << 11)
	#define IGNORED_IRQS	(STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
					TRUE_IDE_MODE_IRQ)
	#define TRUE_IDE_IRQS	(CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
					BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE			0x00C
	#define CARD_MODE_MASK		(0x3)
	#define MEM_MODE		(0x0)
	#define IO_MODE			(0x1)
	#define TRUE_IDE_MODE		(0x2)

	#define CARD_TYPE_MASK		(1 << 2)
	#define CF_CARD			(0)
	#define CF_PLUS_CARD		(1 << 2)

	#define CARD_RESET		(1 << 3)
	#define CFHOST_ENB		(1 << 4)
	#define OUTPUTS_TRISTATE	(1 << 5)
	#define ULTRA_DMA_ENB		(1 << 8)
	#define MULTI_WORD_DMA_ENB	(1 << 9)
	#define DRQ_BLOCK_SIZE_MASK	(0x3 << 11)
	#define DRQ_BLOCK_SIZE_512	(0)
	#define DRQ_BLOCK_SIZE_1024	(1 << 11)
	#define DRQ_BLOCK_SIZE_2048	(2 << 11)
	#define DRQ_BLOCK_SIZE_4096	(3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG			0x010
	#define CF_IF_CLK_MASK		(0XF)
/* CF Timing Mode Configuration */
#define TM_CFG			0x014
	#define MEM_MODE_TIMING_MASK	(0x3)
	#define MEM_MODE_TIMING_250NS	(0x0)
	#define MEM_MODE_TIMING_120NS	(0x1)
	#define MEM_MODE_TIMING_100NS	(0x2)
	#define MEM_MODE_TIMING_80NS	(0x3)

	#define IO_MODE_TIMING_MASK	(0x3 << 2)
	#define IO_MODE_TIMING_250NS	(0x0 << 2)
	#define IO_MODE_TIMING_120NS	(0x1 << 2)
	#define IO_MODE_TIMING_100NS	(0x2 << 2)
	#define IO_MODE_TIMING_80NS	(0x3 << 2)

	#define TRUEIDE_PIO_TIMING_MASK		(0x7 << 4)
	#define TRUEIDE_PIO_TIMING_SHIFT	4

	#define TRUEIDE_MWORD_DMA_TIMING_MASK	(0x7 << 7)
	#define TRUEIDE_MWORD_DMA_TIMING_SHIFT	7

	#define ULTRA_DMA_TIMING_MASK		(0x7 << 10)
	#define ULTRA_DMA_TIMING_SHIFT		10
/* CF Transfer Address */
#define XFER_ADDR		0x014
	#define XFER_ADDR_MASK		(0x7FF)
	#define MAX_XFER_COUNT		0x20000u
/* Transfer Control */
#define XFER_CTR		0x01C
	#define XFER_COUNT_MASK		(0x3FFFF)
	#define ADDR_INC_DISABLE	(1 << 24)
	#define XFER_WIDTH_MASK		(1 << 25)
	#define XFER_WIDTH_8B		(0)
	#define XFER_WIDTH_16B		(1 << 25)

	#define MEM_TYPE_MASK		(1 << 26)
	#define MEM_TYPE_COMMON		(0)
	#define MEM_TYPE_ATTRIBUTE	(1 << 26)

	#define MEM_IO_XFER_MASK	(1 << 27)
	#define MEM_XFER		(0)
	#define IO_XFER			(1 << 27)

	#define DMA_XFER_MODE		(1 << 28)

	#define AHB_BUS_NORMAL_PIO_OPRTN	(~(1 << 29))
	#define XFER_DIR_MASK		(1 << 30)
	#define XFER_READ		(0)
	#define XFER_WRITE		(1 << 30)

	#define XFER_START		(1 << 31)
/* Write Data Port */
#define WRITE_PORT		0x024
/* Read Data Port */
#define READ_PORT		0x028
/* ATA Data Port */
#define ATA_DATA_PORT		0x030
	#define ATA_DATA_PORT_MASK	(0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR		0x034
/* ATA Sector Count */
#define ATA_SC			0x038
/* ATA Sector Number */
#define ATA_SN			0x03C
/* ATA Cylinder Low */
#define ATA_CL			0x040
/* ATA Cylinder High */
#define ATA_CH			0x044
/* ATA Select Card/Head */
#define ATA_SH			0x048
/* ATA Status-Command */
#define ATA_STS_CMD		0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR		0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT		0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT		0x400
	#define FIFO_SIZE	0x200u
/* Global Interrupt Status */
#define GIRQ_STS		0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN		0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN		0x808
	#define GIRQ_CF		(1)
	#define GIRQ_XD		(1 << 1)

/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
	/* pointer to ata_host structure */
	struct ata_host *host;
	/* clk structure, only if HAVE_CLK is defined */
#ifdef CONFIG_HAVE_CLK
	struct clk *clk;
#endif

	/* physical base address of controller */
	dma_addr_t pbase;
	/* virtual base address of controller */
	void __iomem *vbase;
	/* irq number */
	int irq;

	/* status to be updated to framework regarding DMA transfer */
	u8 dma_status;
	/* Card is present or not */
	u8 card_present;

	/* dma specific */
	/* Completion for transfer complete interrupt from controller */
	struct completion cf_completion;
	/* Completion for DMA transfer complete. */
	struct completion dma_completion;
	/* DMA channel allocated */
	struct dma_chan *dma_chan;
	/* Mask for DMA transfers */
	dma_cap_mask_t mask;
	/* DMA transfer work */
	struct work_struct work;
	/* DMA delayed finish work */
	struct delayed_work dwork;
	/* qc to be transferred using DMA */
	struct ata_queued_cmd *qc;
};

static struct scsi_host_template arasan_cf_sht = {
	ATA_BASE_SHT(DRIVER_NAME),
	.sg_tablesize = SG_NONE,
	.dma_boundary = 0xFFFFFFFFUL,
};

static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
	struct device *dev = acdev->host->dev;

	dev_dbg(dev, ": =========== REGISTER DUMP ===========");
	dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
	dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
	dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
	dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
	dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
	dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
	dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
	dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
	dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
	dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
	dev_dbg(dev, ": =====================================");
}

/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
	/* enable should be 0 or 1 */
	writel(enable, acdev->vbase + GIRQ_STS_EN);
	writel(enable, acdev->vbase + GIRQ_SGN_EN);
}

/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
	u32 val = readl(acdev->vbase + IRQ_EN);
	/* clear & enable/disable irqs */
	if (enable) {
		writel(mask, acdev->vbase + IRQ_STS);
		writel(val | mask, acdev->vbase + IRQ_EN);
	} else
		writel(val & ~mask, acdev->vbase + IRQ_EN);
}

static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
	u32 val = readl(acdev->vbase + OP_MODE);

	writel(val | CARD_RESET, acdev->vbase + OP_MODE);
	udelay(200);
	writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}

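/* Reset the host controller by toggling CFHOST_ENB off and back on */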
static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
			acdev->vbase + OP_MODE);
}

static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
	struct ata_port *ap = acdev->host->ports[0];
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 val = readl(acdev->vbase + CFI_STS);

	/* Both CD1 & CD2 should be low if card inserted completely */
	if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
		if (acdev->card_present)
			return;
		acdev->card_present = 1;
		cf_card_reset(acdev);
	} else {
		if (!acdev->card_present)
			return;
		acdev->card_present = 0;
	}

	if (hotplugged) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	}
}

static int cf_init(struct arasan_cf_dev *acdev)
{
	struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
	unsigned long flags;
	int ret = 0;

#ifdef CONFIG_HAVE_CLK
	ret = clk_enable(acdev->clk);
	if (ret) {
		dev_dbg(acdev->host->dev, "clock enable failed");
		return ret;
	}
#endif

	spin_lock_irqsave(&acdev->host->lock, flags);
	/* configure CF interface clock */
	writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
			CF_IF_CLK_166M, acdev->vbase + CLK_CFG);

	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
	cf_ginterrupt_enable(acdev, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

static void cf_exit(struct arasan_cf_dev *acdev)
{
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	cf_ginterrupt_enable(acdev, 0);
	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
	cf_card_reset(acdev);
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
#ifdef CONFIG_HAVE_CLK
	clk_disable(acdev->clk);
#endif
}

static void dma_callback(void *dev)
{
	struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;

	complete(&acdev->dma_completion);
}

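/* dma_request_channel() filter callback: accept any memcpy-capable channel */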
static bool filter(struct dma_chan *chan, void *slave)
{
	return true;
}

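/* Hand the completed qc back to libata through the SFF interrupt handler */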
static inline void dma_complete(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;

	acdev->qc = NULL;
	ata_sff_interrupt(acdev->irq, acdev->host);

	spin_lock_irqsave(&acdev->host->lock, flags);
	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

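/* Wait for a buffer available/transfer done interrupt from the controller */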
static inline int wait4buf(struct arasan_cf_dev *acdev)
{
	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
		return -ETIMEDOUT;
	}

	/* Check if PIO Error interrupt has occurred */
	if (acdev->dma_status & ATA_DMA_ERR)
		return -EAGAIN;

	return 0;
}

static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan = acdev->dma_chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
		DMA_COMPL_SKIP_DEST_UNMAP;
	int ret = 0;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!tx) {
		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
		return -EAGAIN;
	}

	tx->callback = dma_callback;
	tx->callback_param = acdev;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(acdev->host->dev, "dma_submit_error\n");
		return ret;
	}

	chan->device->device_issue_pending(chan);

	/* Wait for DMA to complete */
	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
	dma_addr_t dest = 0, src = 0;
	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
	unsigned long flags;
	int ret = 0;

	sglen = sg_dma_len(sg);
	if (write) {
		src = sg_dma_address(sg);
		dest = acdev->pbase + EXT_WRITE_PORT;
	} else {
		dest = sg_dma_address(sg);
		src = acdev->pbase + EXT_READ_PORT;
	}

	/*
	 * For each sg:
	 * Up to MAX_XFER_COUNT of data is transferred before a transfer
	 * complete interrupt is raised. In between, a buffer available
	 * interrupt is generated after every FIFO_SIZE of data, at which
	 * point we refill the FIFO with at most FIFO_SIZE of data.
	 */
	while (sglen) {
		xfer_cnt = min(sglen, MAX_XFER_COUNT);
		spin_lock_irqsave(&acdev->host->lock, flags);
		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
			~XFER_COUNT_MASK;
		writel(xfer_ctr | xfer_cnt | XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);

		/* continue dma xfers until current sg is completed */
		while (xfer_cnt) {
			/* wait for read to complete */
			if (!write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}

			/* read/write FIFO in chunk of FIFO_SIZE */
			dma_len = min(xfer_cnt, FIFO_SIZE);
			ret = dma_xfer(acdev, src, dest, dma_len);
			if (ret) {
				dev_err(acdev->host->dev, "dma failed");
				goto fail;
			}

			if (write)
				src += dma_len;
			else
				dest += dma_len;

			sglen -= dma_len;
			xfer_cnt -= dma_len;

			/* wait for write to complete */
			if (write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}
		}
	}

fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

/*
 * This routine uses an external DMA controller to read/write data to/from the
 * FIFO of the CF controller. There are two xfer related interrupts supported
 * by the CF controller:
 * - buf_avail: generated as soon as a 512 byte buffer is available for
 *   reading, or an empty buffer is available for writing.
 * - xfer_done: generated on transfer of "xfer_size" amount of data to/from
 *   the FIFO. xfer_size is programmed in the XFER_CTR register.
 *
 * Max buffer size = FIFO_SIZE = 512 Bytes.
 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
 */
static void data_xfer(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			work);
	struct ata_queued_cmd *qc = acdev->qc;
	struct scatterlist *sg;
	unsigned long flags;
	u32 temp;
	int ret = 0;

	/* request dma channels */
	/* dma_request_channel may sleep, so calling from process context */
	acdev->dma_chan = dma_request_channel(acdev->mask, filter, NULL);
	if (!acdev->dma_chan) {
		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
		goto chan_request_fail;
	}

	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
		ret = sg_xfer(acdev, sg);
		if (ret)
			break;
	}

	dma_release_channel(acdev->dma_chan);

	/* data xferred successfully */
	if (!ret) {
		u32 status;

		spin_lock_irqsave(&acdev->host->lock, flags);
		status = ioread8(qc->ap->ioaddr.altstatus_addr);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			ata_sff_queue_delayed_work(&acdev->dwork, 1);
			return;
		}

		goto sff_intr;
	}

	cf_dumpregs(acdev);

chan_request_fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	/* error when transferring data to/from memory */
	qc->err_mask |= AC_ERR_HOST_BUS;
	qc->ap->hsm_task_state = HSM_ST_ERR;

	cf_ctrl_reset(acdev);
	spin_unlock_irqrestore(qc->ap->lock, flags);
sff_intr:
	dma_complete(acdev);
}

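/* Re-check device status after DMA; requeue while BSY/DRQ are still set */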
static void delayed_finish(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			dwork.work);
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&acdev->host->lock, flags);
	status = ioread8(qc->ap->ioaddr.altstatus_addr);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (status & (ATA_BUSY | ATA_DRQ))
		ata_sff_queue_delayed_work(&acdev->dwork, 1);
	else
		dma_complete(acdev);
}

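/* irq handler shared with the XD controller: claim it only when GIRQ_CF is set */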
static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
	struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
	unsigned long flags;
	u32 irqsts;

	irqsts = readl(acdev->vbase + GIRQ_STS);
	if (!(irqsts & GIRQ_CF))
		return IRQ_NONE;

	spin_lock_irqsave(&acdev->host->lock, flags);
	irqsts = readl(acdev->vbase + IRQ_STS);
	writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
	writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */

	/* handle only relevant interrupts */
	irqsts &= ~IGNORED_IRQS;

	if (irqsts & CARD_DETECT_IRQ) {
		cf_card_detect(acdev, 1);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return IRQ_HANDLED;
	}

	if (irqsts & PIO_XFER_ERR_IRQ) {
		acdev->dma_status = ATA_DMA_ERR;
		writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		complete(&acdev->cf_completion);
		dev_err(acdev->host->dev, "pio xfer err irq\n");
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (irqsts & BUF_AVAIL_IRQ) {
		complete(&acdev->cf_completion);
		return IRQ_HANDLED;
	}

	if (irqsts & XFER_DONE_IRQ) {
		struct ata_queued_cmd *qc = acdev->qc;

		/* Send Complete only for write */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			complete(&acdev->cf_completion);
	}

	return IRQ_HANDLED;
}

static void arasan_cf_freeze(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* stop transfer and reset controller */
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	cf_ctrl_reset(acdev);
	acdev->dma_status = ATA_DMA_ERR;

	ata_sff_dma_pause(ap);
	ata_sff_freeze(ap);
}

void arasan_cf_error_handler(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/*
	 * DMA transfers using an external DMA controller may be scheduled.
	 * Abort them before handling error. Refer data_xfer() for further
	 * details.
	 */
	cancel_work_sync(&acdev->work);
	cancel_delayed_work_sync(&acdev->dwork);
	return ata_sff_error_handler(ap);
}

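/* Program the transfer direction, issue the command and kick the DMA worker */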
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
	u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

	xfer_ctr |= write ? XFER_WRITE : XFER_READ;
	writel(xfer_ctr, acdev->vbase + XFER_CTR);

	acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
	ata_sff_queue_work(&acdev->work);
}

unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_wait_idle(ap);
	ata_sff_dev_select(ap, qc->dev->devno);
	ata_wait_idle(ap);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);
		acdev->dma_status = 0;
		acdev->qc = qc;
		arasan_cf_dma_start(acdev);
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u8 pio = adev->pio_mode - XFER_PIO_0;
	unsigned long flags;
	u32 val;

	/* Arasan ctrl supports Mode0 -> Mode6 */
	if (pio > 6) {
		dev_err(ap->dev, "Unknown PIO mode\n");
		return;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	val = readl(acdev->vbase + OP_MODE) &
		~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
	writel(val, acdev->vbase + OP_MODE);
	val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
	val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
	writel(val, acdev->vbase + TM_CFG);

	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u32 opmode, tmcfg, dma_mode = adev->dma_mode;
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	opmode = readl(acdev->vbase + OP_MODE) &
		~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
	tmcfg = readl(acdev->vbase + TM_CFG);

	if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
		opmode |= ULTRA_DMA_ENB;
		tmcfg &= ~ULTRA_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
	} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
		opmode |= MULTI_WORD_DMA_ENB;
		tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
			TRUEIDE_MWORD_DMA_TIMING_SHIFT;
	} else {
		dev_err(ap->dev, "Unknown DMA mode\n");
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return;
	}

	writel(opmode, acdev->vbase + OP_MODE);
	writel(tmcfg, acdev->vbase + TM_CFG);
	writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);

	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static struct ata_port_operations arasan_cf_ops = {
	.inherits = &ata_sff_port_ops,
	.freeze = arasan_cf_freeze,
	.error_handler = arasan_cf_error_handler,
	.qc_issue = arasan_cf_qc_issue,
	.set_piomode = arasan_cf_set_piomode,
	.set_dmamode = arasan_cf_set_dmamode,
};

static int __devinit arasan_cf_probe(struct platform_device *pdev)
{
	struct arasan_cf_dev *acdev;
	struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct ata_host *host;
	struct ata_port *ap;
	struct resource *res;
	irq_handler_t irq_handler = NULL;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
				DRIVER_NAME)) {
		dev_warn(&pdev->dev, "Failed to get memory region resource\n");
		return -ENOENT;
	}

	acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
	if (!acdev) {
		dev_warn(&pdev->dev, "kzalloc fail\n");
		return -ENOMEM;
	}

	/* if irq is 0, support only PIO */
	acdev->irq = platform_get_irq(pdev, 0);
	if (acdev->irq)
		irq_handler = arasan_cf_interrupt;
	else
		pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;

	acdev->pbase = res->start;
	acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
			resource_size(res));
	if (!acdev->vbase) {
		dev_warn(&pdev->dev, "ioremap fail\n");
		return -ENOMEM;
	}

#ifdef CONFIG_HAVE_CLK
	acdev->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(acdev->clk)) {
		dev_warn(&pdev->dev, "Clock not found\n");
		return PTR_ERR(acdev->clk);
	}
#endif

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host) {
		ret = -ENOMEM;
		dev_warn(&pdev->dev, "alloc host fail\n");
		goto free_clk;
	}

	ap = host->ports[0];
	host->private_data = acdev;
	acdev->host = host;
	ap->ops = &arasan_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->mwdma_mask = ATA_MWDMA4;
	ap->udma_mask = ATA_UDMA6;

	init_completion(&acdev->cf_completion);
	init_completion(&acdev->dma_completion);
	INIT_WORK(&acdev->work, data_xfer);
	INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
	dma_cap_set(DMA_MEMCPY, acdev->mask);

	/* Handle platform specific quirks */
	if (pdata->quirk) {
		if (pdata->quirk & CF_BROKEN_PIO) {
			ap->ops->set_piomode = NULL;
			ap->pio_mask = 0;
		}
		if (pdata->quirk & CF_BROKEN_MWDMA)
			ap->mwdma_mask = 0;
		if (pdata->quirk & CF_BROKEN_UDMA)
			ap->udma_mask = 0;
	}
	ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;

	ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
	ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
	ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
	ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
	ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
	ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
	ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;

	ata_port_desc(ap, "phy_addr %x virt_addr %p", res->start, acdev->vbase);

	ret = cf_init(acdev);
	if (ret)
		goto free_clk;

	cf_card_detect(acdev, 0);

	return ata_host_activate(host, acdev->irq, irq_handler, 0,
			&arasan_cf_sht);

free_clk:
#ifdef CONFIG_HAVE_CLK
	clk_put(acdev->clk);
#endif
	return ret;
}

static int __devexit arasan_cf_remove(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	ata_host_detach(host);
	cf_exit(acdev);
#ifdef CONFIG_HAVE_CLK
	clk_put(acdev->clk);
#endif

	return 0;
}

#ifdef CONFIG_PM
static int arasan_cf_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	if (acdev->dma_chan) {
		acdev->dma_chan->device->device_control(acdev->dma_chan,
				DMA_TERMINATE_ALL, 0);
		dma_release_channel(acdev->dma_chan);
	}
	cf_exit(acdev);
	return ata_host_suspend(host, PMSG_SUSPEND);
}

static int arasan_cf_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	cf_init(acdev);
	ata_host_resume(host);

	return 0;
}

static const struct dev_pm_ops arasan_cf_pm_ops = {
	.suspend = arasan_cf_suspend,
	.resume = arasan_cf_resume,
};
#endif

static struct platform_driver arasan_cf_driver = {
	.probe = arasan_cf_probe,
	.remove = __devexit_p(arasan_cf_remove),
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &arasan_cf_pm_ops,
#endif
	},
};

static int __init arasan_cf_init(void)
{
	return platform_driver_register(&arasan_cf_driver);
}
module_init(arasan_cf_init);

static void __exit arasan_cf_exit(void)
{
	platform_driver_unregister(&arasan_cf_driver);
}
module_exit(arasan_cf_exit);

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);