Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig         16
-rw-r--r--  drivers/ata/Makefile         1
-rw-r--r--  drivers/ata/pata_bf54x.c  1627
3 files changed, 1644 insertions, 0 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index d8046a113c37..a427945f0bb1 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -596,4 +596,20 @@ config PATA_SCC
 
 	  If unsure, say N.
 
+config PATA_BF54X
+	tristate "Blackfin 54x ATAPI support"
+	depends on BF542 || BF548 || BF549
+	help
+	  This option enables support for the built-in ATAPI controller on
+	  Blackfin 54x family chips.
+
+	  If unsure, say N.
+
+config PATA_BF54X_DMA
+	bool "DMA mode"
+	depends on PATA_BF54X
+	default y
+	help
+	  Enable DMA mode for Blackfin ATAPI controller.
+
 endif # ATA
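With both options enabled, the resulting .config would be expected to contain lines such as:

    CONFIG_PATA_BF54X=y
    CONFIG_PATA_BF54X_DMA=y

A modular build (CONFIG_PATA_BF54X=m) produces pata_bf54x.ko, matching the Makefile rule below.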
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8149c68ac2c7..c2ecba5f0b47 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
 obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
 obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
 # Should be last but one libata driver
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
new file mode 100644
index 000000000000..747549e4563a
--- /dev/null
+++ b/drivers/ata/pata_bf54x.c
@@ -0,0 +1,1627 @@
1/*
2 * File: drivers/ata/pata_bf54x.c
3 * Author: Sonic Zhang <sonic.zhang@analog.com>
4 *
5 * Created:
6 * Description:  PATA Driver for Blackfin 54x
7 *
8 * Modified:
9 * Copyright 2007 Analog Devices Inc.
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/blkdev.h>
34#include <linux/delay.h>
35#include <linux/device.h>
36#include <scsi/scsi_host.h>
37#include <linux/libata.h>
38#include <linux/platform_device.h>
39#include <asm/dma.h>
40#include <asm/gpio.h>
41#include <asm/portmux.h>
42
43#define DRV_NAME "pata-bf54x"
44#define DRV_VERSION "0.9"
45
46#define ATA_REG_CTRL 0x0E
47#define ATA_REG_ALTSTATUS ATA_REG_CTRL
48
49/* These are the offset of the controller's registers */
50#define ATAPI_OFFSET_CONTROL 0x00
51#define ATAPI_OFFSET_STATUS 0x04
52#define ATAPI_OFFSET_DEV_ADDR 0x08
53#define ATAPI_OFFSET_DEV_TXBUF 0x0c
54#define ATAPI_OFFSET_DEV_RXBUF 0x10
55#define ATAPI_OFFSET_INT_MASK 0x14
56#define ATAPI_OFFSET_INT_STATUS 0x18
57#define ATAPI_OFFSET_XFER_LEN 0x1c
58#define ATAPI_OFFSET_LINE_STATUS 0x20
59#define ATAPI_OFFSET_SM_STATE 0x24
60#define ATAPI_OFFSET_TERMINATE 0x28
61#define ATAPI_OFFSET_PIO_TFRCNT 0x2c
62#define ATAPI_OFFSET_DMA_TFRCNT 0x30
63#define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
64#define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
65#define ATAPI_OFFSET_REG_TIM_0 0x40
66#define ATAPI_OFFSET_PIO_TIM_0 0x44
67#define ATAPI_OFFSET_PIO_TIM_1 0x48
68#define ATAPI_OFFSET_MULTI_TIM_0 0x50
69#define ATAPI_OFFSET_MULTI_TIM_1 0x54
70#define ATAPI_OFFSET_MULTI_TIM_2 0x58
71#define ATAPI_OFFSET_ULTRA_TIM_0 0x60
72#define ATAPI_OFFSET_ULTRA_TIM_1 0x64
73#define ATAPI_OFFSET_ULTRA_TIM_2 0x68
74#define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
75
76
77#define ATAPI_GET_CONTROL(base)\
78 bfin_read16(base + ATAPI_OFFSET_CONTROL)
79#define ATAPI_SET_CONTROL(base, val)\
80 bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
81#define ATAPI_GET_STATUS(base)\
82 bfin_read16(base + ATAPI_OFFSET_STATUS)
83#define ATAPI_GET_DEV_ADDR(base)\
84 bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
85#define ATAPI_SET_DEV_ADDR(base, val)\
86 bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
87#define ATAPI_GET_DEV_TXBUF(base)\
88 bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
89#define ATAPI_SET_DEV_TXBUF(base, val)\
90 bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
91#define ATAPI_GET_DEV_RXBUF(base)\
92 bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
93#define ATAPI_SET_DEV_RXBUF(base, val)\
94 bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
95#define ATAPI_GET_INT_MASK(base)\
96 bfin_read16(base + ATAPI_OFFSET_INT_MASK)
97#define ATAPI_SET_INT_MASK(base, val)\
98 bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
99#define ATAPI_GET_INT_STATUS(base)\
100 bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
101#define ATAPI_SET_INT_STATUS(base, val)\
102 bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
103#define ATAPI_GET_XFER_LEN(base)\
104 bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
105#define ATAPI_SET_XFER_LEN(base, val)\
106 bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
107#define ATAPI_GET_LINE_STATUS(base)\
108 bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
109#define ATAPI_GET_SM_STATE(base)\
110 bfin_read16(base + ATAPI_OFFSET_SM_STATE)
111#define ATAPI_GET_TERMINATE(base)\
112 bfin_read16(base + ATAPI_OFFSET_TERMINATE)
113#define ATAPI_SET_TERMINATE(base, val)\
114 bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
115#define ATAPI_GET_PIO_TFRCNT(base)\
116 bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
117#define ATAPI_GET_DMA_TFRCNT(base)\
118 bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
119#define ATAPI_GET_UMAIN_TFRCNT(base)\
120 bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
121#define ATAPI_GET_UDMAOUT_TFRCNT(base)\
122 bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
123#define ATAPI_GET_REG_TIM_0(base)\
124 bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
125#define ATAPI_SET_REG_TIM_0(base, val)\
126 bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
127#define ATAPI_GET_PIO_TIM_0(base)\
128 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
129#define ATAPI_SET_PIO_TIM_0(base, val)\
130 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
131#define ATAPI_GET_PIO_TIM_1(base)\
132 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
133#define ATAPI_SET_PIO_TIM_1(base, val)\
134 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
135#define ATAPI_GET_MULTI_TIM_0(base)\
136 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
137#define ATAPI_SET_MULTI_TIM_0(base, val)\
138 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
139#define ATAPI_GET_MULTI_TIM_1(base)\
140 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
141#define ATAPI_SET_MULTI_TIM_1(base, val)\
142 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
143#define ATAPI_GET_MULTI_TIM_2(base)\
144 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
145#define ATAPI_SET_MULTI_TIM_2(base, val)\
146 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
147#define ATAPI_GET_ULTRA_TIM_0(base)\
148 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
149#define ATAPI_SET_ULTRA_TIM_0(base, val)\
150 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
151#define ATAPI_GET_ULTRA_TIM_1(base)\
152 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
153#define ATAPI_SET_ULTRA_TIM_1(base, val)\
154 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
155#define ATAPI_GET_ULTRA_TIM_2(base)\
156 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
157#define ATAPI_SET_ULTRA_TIM_2(base, val)\
158 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
159#define ATAPI_GET_ULTRA_TIM_3(base)\
160 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
161#define ATAPI_SET_ULTRA_TIM_3(base, val)\
162 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
163
164/**
165 * PIO Mode - Frequency compatibility
166 */
167/* mode: 0 1 2 3 4 */
168static const u32 pio_fsclk[] =
169{ 33333333, 33333333, 33333333, 33333333, 33333333 };
170
171/**
172 * MDMA Mode - Frequency compatibility
173 */
174/* mode: 0 1 2 */
175static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
176
177/**
178 * UDMA Mode - Frequency compatibility
179 *
180 * UDMA5 - 100 MB/s - SCLK = 133 MHz
181 * UDMA4 - 66 MB/s - SCLK >= 80 MHz
182 * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz
183 * UDMA2 - 33 MB/s - SCLK >= 40 MHz
184 */
185/* mode: 0 1 2 3 4 5 */
186static const u32 udma_fsclk[] =
187{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
188
189/**
190 * Register transfer timing table
191 */
192/* mode: 0 1 2 3 4 */
193/* Cycle Time */
194static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
195/* DIOR/DIOW to end cycle */
196static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
197/* DIOR/DIOW asserted pulse width */
198static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
199
200/**
201 * PIO timing table
202 */
203/* mode: 0 1 2 3 4 */
204/* Cycle Time */
205static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
207/* Address valid to DIOR/DIOW */
207static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
208/* DIOR/DIOW to end cycle */
209static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
210/* DIOR/DIOW asserted pulse width */
211static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
212/* DIOW data hold */
213static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
214
215/* ******************************************************************
216 * Multiword DMA timing table
217 * ******************************************************************
218 */
219/* mode: 0 1 2 */
220/* Cycle Time */
221static const u32 mdma_t0min[] = { 480, 150, 120 };
222/* DIOR/DIOW asserted pulse width */
223static const u32 mdma_tdmin[] = { 215, 80, 70 };
225/* DIOW data hold */
225static const u32 mdma_thmin[] = { 20, 15, 10 };
226/* DIOR/DIOW to DMACK hold */
227static const u32 mdma_tjmin[] = { 20, 5, 5 };
228/* DIOR negated pulse width */
229static const u32 mdma_tkrmin[] = { 50, 50, 25 };
230/* DIOW negated pulse width */
231static const u32 mdma_tkwmin[] = { 215, 50, 25 };
232/* CS[1:0] valid to DIOR/DIOW */
233static const u32 mdma_tmmin[] = { 50, 30, 25 };
234/* DMACK to read data released */
235static const u32 mdma_tzmax[] = { 20, 25, 25 };
236
237/**
238 * Ultra DMA timing table
239 */
240/* mode: 0 1 2 3 4 5 */
241static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
242static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
243static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
244static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
245static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
246
247
248static const u32 udma_tmlimin = 20;
249static const u32 udma_tzahmin = 20;
250static const u32 udma_tenvmin = 20;
251static const u32 udma_tackmin = 20;
252static const u32 udma_tssmin = 50;
253
254/**
255 *
256 * Function: num_clocks_min
257 *
258 * Description:
259 * calculate number of SCLK cycles to meet minimum timing
260 */
261static unsigned short num_clocks_min(unsigned long tmin,
262 unsigned long fsclk)
263{
264 unsigned long tmp ;
265 unsigned short result;
266
267 tmp = tmin * (fsclk/1000/1000) / 1000;
268 result = (unsigned short)tmp;
269 if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
270 result++;
271 }
272
273 return result;
274}
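/*
 * Worked example, assuming SCLK = 133333333 Hz and tmin = 25 ns:
 *
 *   tmp    = 25 * (133333333/1000/1000) / 1000 = 25 * 133 / 1000 = 3
 *   check: 3 * 1000 * 1000 = 3000000 < 25 * 133333 = 3333325, so round up
 *   result = 4 SCLK cycles
 *
 * i.e. the helper is a rounded-up conversion of a nanosecond minimum into
 * SCLK cycles, so the programmed count never undercuts the required timing.
 */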
275
276/**
277 * bfin_set_piomode - Initialize host controller PATA PIO timings
278 * @ap: Port whose timings we are configuring
279 * @adev: Device whose PIO timings we are configuring
280 *
281 * Set PIO mode for device.
282 *
283 * LOCKING:
284 * None (inherited from caller).
285 */
286
287static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
288{
289 int mode = adev->pio_mode - XFER_PIO_0;
290 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
291 unsigned int fsclk = get_sclk();
292 unsigned short teoc_reg, t2_reg, teoc_pio;
293 unsigned short t4_reg, t2_pio, t1_reg;
294 unsigned short n0, n6, t6min = 5;
295
296	/* the most restrictive timing values are t6 and tc, the DIOW data hold.
297	 * If one SCLK pulse is longer than this minimum value then register
298	 * transfers cannot be supported at this frequency.
299	 */
300 n6 = num_clocks_min(t6min, fsclk);
301 if (mode >= 0 && mode <= 4 && n6 >= 1) {
302		pr_debug("set piomode: mode=%d, fsclk=%u\n", mode, fsclk);
303 /* calculate the timing values for register transfers. */
304 while (mode > 0 && pio_fsclk[mode] > fsclk)
305 mode--;
306
307 /* DIOR/DIOW to end cycle time */
308 t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
309 /* DIOR/DIOW asserted pulse width */
310 teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
311 /* Cycle Time */
312 n0 = num_clocks_min(reg_t0min[mode], fsclk);
313
314		/* increase t2 until we meet the minimum cycle length */
315 if (t2_reg + teoc_reg < n0)
316 t2_reg = n0 - teoc_reg;
317
318 /* calculate the timing values for pio transfers. */
319
320 /* DIOR/DIOW to end cycle time */
321 t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
322 /* DIOR/DIOW asserted pulse width */
323 teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
324 /* Cycle Time */
325 n0 = num_clocks_min(pio_t0min[mode], fsclk);
326
327 /* increase t2 until we meed the minimum cycle length */
328 if (t2_pio + teoc_pio < n0)
329 t2_pio = n0 - teoc_pio;
330
331		/* Address valid to DIOR/DIOW */
332 t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
333
334 /* DIOW data hold */
335 t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
336
337 ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
338 ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
339 ATAPI_SET_PIO_TIM_1(base, teoc_pio);
340 if (mode > 2) {
341 ATAPI_SET_CONTROL(base,
342 ATAPI_GET_CONTROL(base) | IORDY_EN);
343 } else {
344 ATAPI_SET_CONTROL(base,
345 ATAPI_GET_CONTROL(base) & ~IORDY_EN);
346 }
347
348 /* Disable host ATAPI PIO interrupts */
349 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
350 & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
351 SSYNC();
352 }
353}
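/*
 * Worked example, assuming PIO mode 4 and SCLK = 133333333 Hz:
 * num_clocks_min() yields t2_reg = 4, teoc_reg = 10 and n0 = 16, so t2_reg
 * is stretched to 16 - 10 = 6 and REG_TIM_0 = (10 << 8) | 6 = 0x0a06.
 * For the data register, t2_pio = 10, teoc_pio = 4 and n0 = 16 give
 * t2_pio = 12; with t1_reg = 4 and t4_reg = 2 this programs
 * PIO_TIM_0 = (2 << 12) | (12 << 4) | 4 = 0x20c4 and PIO_TIM_1 = 4,
 * and IORDY_EN is set because mode > 2.
 */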
354
355/**
356 * bfin_set_dmamode - Initialize host controller PATA DMA timings
357 * @ap: Port whose timings we are configuring
358 * @adev: Device whose DMA timings we are configuring
359 *
360 *
361 * Set UDMA mode for device.
362 *
363 * LOCKING:
364 * None (inherited from caller).
365 */
366
367static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
368{
369 int mode;
370 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
371 unsigned long fsclk = get_sclk();
372 unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
373 unsigned short tm, td, tkr, tkw, teoc, th;
374 unsigned short n0, nf, tfmin = 5;
375 unsigned short nmin, tcyc;
376
377 mode = adev->dma_mode - XFER_UDMA_0;
378 if (mode >= 0 && mode <= 5) {
379 pr_debug("set udmamode: mode=%d\n", mode);
380		/* the most restrictive timing values are t6 and tc,
381		 * the DIOW data hold. If one SCLK pulse is longer
382		 * than this minimum value then register
383		 * transfers cannot be supported at this frequency.
384		 */
385 while (mode > 0 && udma_fsclk[mode] > fsclk)
386 mode--;
387
388 nmin = num_clocks_min(udma_tmin[mode], fsclk);
389 if (nmin >= 1) {
390 /* calculate the timing values for Ultra DMA. */
391 tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
392 tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
393 tcyc_tdvs = 2;
394
395			/* increase tcyc - tdvs (tcyc_tdvs) until we meet
396 * the minimum cycle length
397 */
398 if (tdvs + tcyc_tdvs < tcyc)
399 tcyc_tdvs = tcyc - tdvs;
400
401			/* Now assign the values required for the timing
402 * registers
403 */
404 if (tcyc_tdvs < 2)
405 tcyc_tdvs = 2;
406
407 if (tdvs < 2)
408 tdvs = 2;
409
410 tack = num_clocks_min(udma_tackmin, fsclk);
411 tss = num_clocks_min(udma_tssmin, fsclk);
412 tmli = num_clocks_min(udma_tmlimin, fsclk);
413 tzah = num_clocks_min(udma_tzahmin, fsclk);
414 trp = num_clocks_min(udma_trpmin[mode], fsclk);
415 tenv = num_clocks_min(udma_tenvmin, fsclk);
416 if (tenv <= udma_tenvmax[mode]) {
417 ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
418 ATAPI_SET_ULTRA_TIM_1(base,
419 (tcyc_tdvs<<8 | tdvs));
420 ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
421 ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
422
423				/* Enable host ATAPI Ultra DMA interrupts */
424 ATAPI_SET_INT_MASK(base,
425 ATAPI_GET_INT_MASK(base)
426 | UDMAIN_DONE_MASK
427 | UDMAOUT_DONE_MASK
428 | UDMAIN_TERM_MASK
429 | UDMAOUT_TERM_MASK);
430 }
431 }
432 }
433
434 mode = adev->dma_mode - XFER_MW_DMA_0;
435 if (mode >= 0 && mode <= 2) {
436 pr_debug("set mdmamode: mode=%d\n", mode);
437 /* the most restrictive timing value is tf, the DMACK to
438 * read data released. If one SCLK pulse is longer than
439 * this maximum value then the MDMA mode
440 * cannot be supported at this frequency.
441 */
442 while (mode > 0 && mdma_fsclk[mode] > fsclk)
443 mode--;
444
445 nf = num_clocks_min(tfmin, fsclk);
446 if (nf >= 1) {
447 /* calculate the timing values for Multi-word DMA. */
448
449 /* DIOR/DIOW asserted pulse width */
450 td = num_clocks_min(mdma_tdmin[mode], fsclk);
451
452 /* DIOR negated pulse width */
453 tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
454
455 /* Cycle Time */
456 n0 = num_clocks_min(mdma_t0min[mode], fsclk);
457
458			/* increase tk until we meet the minimum cycle length */
459 if (tkw + td < n0)
460 tkw = n0 - td;
461
462 /* DIOR negated pulse width - read */
463 tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
464			/* CS[1:0] valid to DIOR/DIOW */
465 tm = num_clocks_min(mdma_tmmin[mode], fsclk);
466 /* DIOR/DIOW to DMACK hold */
467 teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
468 /* DIOW Data hold */
469 th = num_clocks_min(mdma_thmin[mode], fsclk);
470
471 ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
472 ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
473 ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
474
475 /* Enable host ATAPI Multi DMA interrupts */
476 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
477 | MULTI_DONE_MASK | MULTI_TERM_MASK);
478 SSYNC();
479 }
480 }
481 return;
482}
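/*
 * Worked example, assuming UDMA5 and SCLK = 133333333 Hz: the mode is not
 * stepped down (udma_fsclk[5] is not above fsclk) and nmin = 1, so the
 * timing values become tdvs = 2 (clamped up from 1), tcyc_tdvs = 2,
 * tack = 3, tss = 7, tmli = 3, tzah = 3, trp = 12 and tenv = 3 (within
 * udma_tenvmax[5] = 50), which programs ULTRA_TIM_0 = 0x0303,
 * ULTRA_TIM_1 = 0x0202, ULTRA_TIM_2 = 0x0307 and ULTRA_TIM_3 = 0x0c03.
 * The MWDMA half of the function is skipped because dma_mode minus
 * XFER_MW_DMA_0 falls outside the 0..2 range for a UDMA mode.
 */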
483
484/**
485 *
486 * Function: wait_complete
487 *
488 * Description: Waits for the completion interrupt from the device
489 *
490 */
491static inline void wait_complete(void __iomem *base, unsigned short mask)
492{
493 unsigned short status;
494 unsigned int i = 0;
495
496#define PATA_BF54X_WAIT_TIMEOUT 10000
497
498 for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
499 status = ATAPI_GET_INT_STATUS(base) & mask;
500 if (status)
501 break;
502 }
503
504 ATAPI_SET_INT_STATUS(base, mask);
505}
506
507/**
508 *
509 * Function: write_atapi_register
510 *
511 * Description: Writes to ATA Device Register
512 *
513 */
514
515static void write_atapi_register(void __iomem *base,
516 unsigned long ata_reg, unsigned short value)
517{
518 /* Program the ATA_DEV_TXBUF register with write data (to be
519 * written into the device).
520 */
521 ATAPI_SET_DEV_TXBUF(base, value);
522
523 /* Program the ATA_DEV_ADDR register with address of the
524 * device register (0x01 to 0x0F).
525 */
526 ATAPI_SET_DEV_ADDR(base, ata_reg);
527
528 /* Program the ATA_CTRL register with dir set to write (1)
529 */
530 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
531
532 /* ensure PIO DMA is not set */
533 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
534
535 /* and start the transfer */
536 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
537
538 /* Wait for the interrupt to indicate the end of the transfer.
539	 * (We need to wait on and clear the ATA_DEV_INT interrupt status)
540 */
541 wait_complete(base, PIO_DONE_INT);
542}
543
544/**
545 *
546 * Function: read_atapi_register
547 *
548 * Description: Reads from ATA Device Register
549 *
550 */
551
552static unsigned short read_atapi_register(void __iomem *base,
553 unsigned long ata_reg)
554{
555 /* Program the ATA_DEV_ADDR register with address of the
556 * device register (0x01 to 0x0F).
557 */
558 ATAPI_SET_DEV_ADDR(base, ata_reg);
559
560	/* Program the ATA_CTRL register with dir set to read (0)
561	 */
562 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
563
564 /* ensure PIO DMA is not set */
565 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
566
567 /* and start the transfer */
568 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
569
570 /* Wait for the interrupt to indicate the end of the transfer.
571 * (PIO_DONE interrupt is set and it doesn't seem to matter
572 * that we don't clear it)
573 */
574 wait_complete(base, PIO_DONE_INT);
575
576	/* Read the data received from the device out of the
577	 * ATA_DEV_RXBUF register.
578	 */
579 return ATAPI_GET_DEV_RXBUF(base);
580}
581
582/**
583 *
584 * Function:  write_atapi_data
585 *
586 * Description: Writes a block of data to the ATA device data register
587 *
588 */
589
590static void write_atapi_data(void __iomem *base,
591 int len, unsigned short *buf)
592{
593 int i;
594
595 /* Set transfer length to 1 */
596 ATAPI_SET_XFER_LEN(base, 1);
597
598 /* Program the ATA_DEV_ADDR register with address of the
599 * ATA_REG_DATA
600 */
601 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
602
603 /* Program the ATA_CTRL register with dir set to write (1)
604 */
605 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
606
607 /* ensure PIO DMA is not set */
608 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
609
610 for (i = 0; i < len; i++) {
611 /* Program the ATA_DEV_TXBUF register with write data (to be
612 * written into the device).
613 */
614 ATAPI_SET_DEV_TXBUF(base, buf[i]);
615
616 /* and start the transfer */
617 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
618
619 /* Wait for the interrupt to indicate the end of the transfer.
620		 * (We need to wait on and clear the ATA_DEV_INT
621 * interrupt status)
622 */
623 wait_complete(base, PIO_DONE_INT);
624 }
625}
626
627/**
628 *
629 * Function:  read_atapi_data
630 *
631 * Description: Reads a block of data from the ATA device data register
632 *
633 */
634
635static void read_atapi_data(void __iomem *base,
636 int len, unsigned short *buf)
637{
638 int i;
639
640 /* Set transfer length to 1 */
641 ATAPI_SET_XFER_LEN(base, 1);
642
643 /* Program the ATA_DEV_ADDR register with address of the
644 * ATA_REG_DATA
645 */
646 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
647
648	/* Program the ATA_CTRL register with dir set to read (0)
649	 */
650 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
651
652 /* ensure PIO DMA is not set */
653 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
654
655 for (i = 0; i < len; i++) {
656 /* and start the transfer */
657 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
658
659 /* Wait for the interrupt to indicate the end of the transfer.
660 * (PIO_DONE interrupt is set and it doesn't seem to matter
661 * that we don't clear it)
662 */
663 wait_complete(base, PIO_DONE_INT);
664
665		/* Read the received data word from the
666		 * ATA_DEV_RXBUF register.
667		 */
668 buf[i] = ATAPI_GET_DEV_RXBUF(base);
669 }
670}
671
672/**
673 * bfin_tf_load - send taskfile registers to host controller
674 * @ap: Port to which output is sent
675 * @tf: ATA taskfile register set
676 *
677 * Note: Original code is ata_tf_load().
678 */
679
680static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
681{
682 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
683 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
684
685 if (tf->ctl != ap->last_ctl) {
686 write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
687 ap->last_ctl = tf->ctl;
688 ata_wait_idle(ap);
689 }
690
691 if (is_addr) {
692 if (tf->flags & ATA_TFLAG_LBA48) {
693 write_atapi_register(base, ATA_REG_FEATURE,
694 tf->hob_feature);
695 write_atapi_register(base, ATA_REG_NSECT,
696 tf->hob_nsect);
697 write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
698 write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
699 write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
700 pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X "
701 "0x%X 0x%X\n",
702 tf->hob_feature,
703 tf->hob_nsect,
704 tf->hob_lbal,
705 tf->hob_lbam,
706 tf->hob_lbah);
707 }
708
709 write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
710 write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
711 write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
712 write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
713 write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
714 pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
715 tf->feature,
716 tf->nsect,
717 tf->lbal,
718 tf->lbam,
719 tf->lbah);
720 }
721
722 if (tf->flags & ATA_TFLAG_DEVICE) {
723 write_atapi_register(base, ATA_REG_DEVICE, tf->device);
724 pr_debug("device 0x%X\n", tf->device);
725 }
726
727 ata_wait_idle(ap);
728}
729
730/**
731 * bfin_check_status - Read device status reg & clear interrupt
732 * @ap: port where the device is
733 *
734 * Note: Original code is ata_check_status().
735 */
736
737static u8 bfin_check_status(struct ata_port *ap)
738{
739 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
740 return read_atapi_register(base, ATA_REG_STATUS);
741}
742
743/**
744 * bfin_tf_read - input device's ATA taskfile shadow registers
745 * @ap: Port from which input is read
746 * @tf: ATA taskfile register set for storing input
747 *
748 * Note: Original code is ata_tf_read().
749 */
750
751static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
752{
753 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
754
755 tf->command = bfin_check_status(ap);
756 tf->feature = read_atapi_register(base, ATA_REG_ERR);
757 tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
758 tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
759 tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
760 tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
761 tf->device = read_atapi_register(base, ATA_REG_DEVICE);
762
763 if (tf->flags & ATA_TFLAG_LBA48) {
764 write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
765 tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
766 tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
767 tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
768 tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
769 tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
770 }
771}
772
773/**
774 * bfin_exec_command - issue ATA command to host controller
775 * @ap: port to which command is being issued
776 * @tf: ATA taskfile register set
777 *
778 * Note: Original code is ata_exec_command().
779 */
780
781static void bfin_exec_command(struct ata_port *ap,
782 const struct ata_taskfile *tf)
783{
784 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
785 pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
786
787 write_atapi_register(base, ATA_REG_CMD, tf->command);
788 ata_pause(ap);
789}
790
791/**
792 * bfin_check_altstatus - Read device alternate status reg
793 * @ap: port where the device is
794 */
795
796static u8 bfin_check_altstatus(struct ata_port *ap)
797{
798 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
799 return read_atapi_register(base, ATA_REG_ALTSTATUS);
800}
801
802/**
803 * bfin_std_dev_select - Select device 0/1 on ATA bus
804 * @ap: ATA channel to manipulate
805 * @device: ATA device (numbered from zero) to select
806 *
807 * Note: Original code is ata_std_dev_select().
808 */
809
810static void bfin_std_dev_select(struct ata_port *ap, unsigned int device)
811{
812 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
813 u8 tmp;
814
815 if (device == 0)
816 tmp = ATA_DEVICE_OBS;
817 else
818 tmp = ATA_DEVICE_OBS | ATA_DEV1;
819
820 write_atapi_register(base, ATA_REG_DEVICE, tmp);
821 ata_pause(ap);
822}
823
824/**
825 * bfin_bmdma_setup - Set up IDE DMA transaction
826 * @qc: Info associated with this ATA transaction.
827 *
828 * Note: Original code is ata_bmdma_setup().
829 */
830
831static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
832{
833 unsigned short config = WDSIZE_16;
834 struct scatterlist *sg;
835
836 pr_debug("in atapi dma setup\n");
837 /* Program the ATA_CTRL register with dir */
838 if (qc->tf.flags & ATA_TFLAG_WRITE) {
839 /* fill the ATAPI DMA controller */
840 set_dma_config(CH_ATAPI_TX, config);
841 set_dma_x_modify(CH_ATAPI_TX, 2);
842 ata_for_each_sg(sg, qc) {
843 set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
844 set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
845 }
846 } else {
847 config |= WNR;
848 /* fill the ATAPI DMA controller */
849 set_dma_config(CH_ATAPI_RX, config);
850 set_dma_x_modify(CH_ATAPI_RX, 2);
851 ata_for_each_sg(sg, qc) {
852 set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
853 set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
854 }
855 }
856}
857
858/**
859 * bfin_bmdma_start - Start an IDE DMA transaction
860 * @qc: Info associated with this ATA transaction.
861 *
862 * Note: Original code is ata_bmdma_start().
863 */
864
865static void bfin_bmdma_start(struct ata_queued_cmd *qc)
866{
867 struct ata_port *ap = qc->ap;
868 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
869 struct scatterlist *sg;
870
871 pr_debug("in atapi dma start\n");
872 if (!(ap->udma_mask || ap->mwdma_mask))
873 return;
874
875 /* start ATAPI DMA controller*/
876 if (qc->tf.flags & ATA_TFLAG_WRITE) {
877 /*
878 * On blackfin arch, uncacheable memory is not
879 * allocated with flag GFP_DMA. DMA buffer from
880		 * common kernel code should be flushed if WB
881 * data cache is enabled. Otherwise, this loop
882 * is an empty loop and optimized out.
883 */
884 ata_for_each_sg(sg, qc) {
885 flush_dcache_range(sg_dma_address(sg),
886 sg_dma_address(sg) + sg_dma_len(sg));
887 }
888 enable_dma(CH_ATAPI_TX);
889 pr_debug("enable udma write\n");
890
891 /* Send ATA DMA write command */
892 bfin_exec_command(ap, &qc->tf);
893
894 /* set ATA DMA write direction */
895 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
896 | XFER_DIR));
897 } else {
898 enable_dma(CH_ATAPI_RX);
899 pr_debug("enable udma read\n");
900
901 /* Send ATA DMA read command */
902 bfin_exec_command(ap, &qc->tf);
903
904 /* set ATA DMA read direction */
905 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
906 & ~XFER_DIR));
907 }
908
909 /* Reset all transfer count */
910 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
911
912 /* Set transfer length to buffer len */
913 ata_for_each_sg(sg, qc) {
914 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
915 }
916
917 /* Enable ATA DMA operation*/
918 if (ap->udma_mask)
919 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
920 | ULTRA_START);
921 else
922 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
923 | MULTI_START);
924}
925
926/**
927 * bfin_bmdma_stop - Stop IDE DMA transfer
928 * @qc: Command we are ending DMA for
929 */
930
931static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
932{
933 struct ata_port *ap = qc->ap;
934 struct scatterlist *sg;
935
936 pr_debug("in atapi dma stop\n");
937 if (!(ap->udma_mask || ap->mwdma_mask))
938 return;
939
940 /* stop ATAPI DMA controller*/
941 if (qc->tf.flags & ATA_TFLAG_WRITE)
942 disable_dma(CH_ATAPI_TX);
943 else {
944 disable_dma(CH_ATAPI_RX);
945 if (ap->hsm_task_state & HSM_ST_LAST) {
946 /*
947 * On blackfin arch, uncacheable memory is not
948 * allocated with flag GFP_DMA. DMA buffer from
949			 * common kernel code should be invalidated if
950 * data cache is enabled. Otherwise, this loop
951 * is an empty loop and optimized out.
952 */
953 ata_for_each_sg(sg, qc) {
954 invalidate_dcache_range(
955 sg_dma_address(sg),
956 sg_dma_address(sg)
957 + sg_dma_len(sg));
958 }
959 }
960 }
961}
962
963/**
964 * bfin_devchk - PATA device presence detection
965 * @ap: ATA channel to examine
966 * @device: Device to examine (starting at zero)
967 *
968 * Note: Original code is ata_devchk().
969 */
970
971static unsigned int bfin_devchk(struct ata_port *ap,
972 unsigned int device)
973{
974 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
975 u8 nsect, lbal;
976
977 bfin_std_dev_select(ap, device);
978
979 write_atapi_register(base, ATA_REG_NSECT, 0x55);
980 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
981
982 write_atapi_register(base, ATA_REG_NSECT, 0xaa);
983 write_atapi_register(base, ATA_REG_LBAL, 0x55);
984
985 write_atapi_register(base, ATA_REG_NSECT, 0x55);
986 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
987
988 nsect = read_atapi_register(base, ATA_REG_NSECT);
989 lbal = read_atapi_register(base, ATA_REG_LBAL);
990
991 if ((nsect == 0x55) && (lbal == 0xaa))
992 return 1; /* we found a device */
993
994 return 0; /* nothing found */
995}
996
997/**
998 * bfin_bus_post_reset - PATA device post reset
999 *
1000 * Note: Original code is ata_bus_post_reset().
1001 */
1002
1003static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1004{
1005 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1006 unsigned int dev0 = devmask & (1 << 0);
1007 unsigned int dev1 = devmask & (1 << 1);
1008 unsigned long timeout;
1009
1010 /* if device 0 was found in ata_devchk, wait for its
1011 * BSY bit to clear
1012 */
1013 if (dev0)
1014 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1015
1016 /* if device 1 was found in ata_devchk, wait for
1017 * register access, then wait for BSY to clear
1018 */
1019 timeout = jiffies + ATA_TMOUT_BOOT;
1020 while (dev1) {
1021 u8 nsect, lbal;
1022
1023 bfin_std_dev_select(ap, 1);
1024 nsect = read_atapi_register(base, ATA_REG_NSECT);
1025 lbal = read_atapi_register(base, ATA_REG_LBAL);
1026 if ((nsect == 1) && (lbal == 1))
1027 break;
1028 if (time_after(jiffies, timeout)) {
1029 dev1 = 0;
1030 break;
1031 }
1032 msleep(50); /* give drive a breather */
1033 }
1034 if (dev1)
1035 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1036
1037 /* is all this really necessary? */
1038 bfin_std_dev_select(ap, 0);
1039 if (dev1)
1040 bfin_std_dev_select(ap, 1);
1041 if (dev0)
1042 bfin_std_dev_select(ap, 0);
1043}
1044
1045/**
1046 * bfin_bus_softreset - PATA device software reset
1047 *
1048 * Note: Original code is ata_bus_softreset().
1049 */
1050
1051static unsigned int bfin_bus_softreset(struct ata_port *ap,
1052 unsigned int devmask)
1053{
1054 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1055
1056 /* software reset. causes dev0 to be selected */
1057 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1058 udelay(20);
1059 write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
1060 udelay(20);
1061 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1062
1063 /* spec mandates ">= 2ms" before checking status.
1064 * We wait 150ms, because that was the magic delay used for
1065 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1066 * between when the ATA command register is written, and then
1067 * status is checked. Because waiting for "a while" before
1068 * checking status is fine, post SRST, we perform this magic
1069 * delay here as well.
1070 *
1071	 * Old drivers/ide uses the 2 ms rule and then waits for ready
1072 */
1073 msleep(150);
1074
1075 /* Before we perform post reset processing we want to see if
1076 * the bus shows 0xFF because the odd clown forgets the D7
1077 * pulldown resistor.
1078 */
1079 if (bfin_check_status(ap) == 0xFF)
1080 return 0;
1081
1082 bfin_bus_post_reset(ap, devmask);
1083
1084 return 0;
1085}
1086
1087/**
1088 * bfin_std_softreset - reset host port via ATA SRST
1089 * @ap: port to reset
1090 * @classes: resulting classes of attached devices
1091 *
1092 * Note: Original code is ata_std_softreset().
1093 */
1094
1095static int bfin_std_softreset(struct ata_port *ap, unsigned int *classes,
1096 unsigned long deadline)
1097{
1098 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1099 unsigned int devmask = 0, err_mask;
1100 u8 err;
1101
1102 if (ata_port_offline(ap)) {
1103 classes[0] = ATA_DEV_NONE;
1104 goto out;
1105 }
1106
1107 /* determine if device 0/1 are present */
1108 if (bfin_devchk(ap, 0))
1109 devmask |= (1 << 0);
1110 if (slave_possible && bfin_devchk(ap, 1))
1111 devmask |= (1 << 1);
1112
1113 /* select device 0 again */
1114 bfin_std_dev_select(ap, 0);
1115
1116 /* issue bus reset */
1117 err_mask = bfin_bus_softreset(ap, devmask);
1118 if (err_mask) {
1119 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
1120 err_mask);
1121 return -EIO;
1122 }
1123
1124 /* determine by signature whether we have ATA or ATAPI devices */
1125 classes[0] = ata_dev_try_classify(ap, 0, &err);
1126 if (slave_possible && err != 0x81)
1127 classes[1] = ata_dev_try_classify(ap, 1, &err);
1128
1129 out:
1130 return 0;
1131}
1132
1133/**
1134 * bfin_bmdma_status - Read IDE DMA status
1135 * @ap: Port associated with this ATA transaction.
1136 */
1137
1138static unsigned char bfin_bmdma_status(struct ata_port *ap)
1139{
1140 unsigned char host_stat = 0;
1141 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1142 unsigned short int_status = ATAPI_GET_INT_STATUS(base);
1143
1144 if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) {
1145 host_stat = ATA_DMA_ACTIVE;
1146 }
1147 if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) {
1148 host_stat = ATA_DMA_INTR;
1149 }
1150 if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) {
1151 host_stat = ATA_DMA_ERR;
1152 }
1153
1154 return host_stat;
1155}
1156
1157/**
1158 * bfin_data_xfer - Transfer data by PIO
1159 * @adev: device for this I/O
1160 * @buf: data buffer
1161 * @buflen: buffer length
1162 * @write_data: read/write
1163 *
1164 * Note: Original code is ata_data_xfer().
1165 */
1166
1167static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf,
1168 unsigned int buflen, int write_data)
1169{
1170 struct ata_port *ap = adev->ap;
1171 unsigned int words = buflen >> 1;
1172 unsigned short *buf16 = (u16 *) buf;
1173 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1174
1175 /* Transfer multiple of 2 bytes */
1176 if (write_data) {
1177 write_atapi_data(base, words, buf16);
1178 } else {
1179 read_atapi_data(base, words, buf16);
1180 }
1181
1182 /* Transfer trailing 1 byte, if any. */
1183 if (unlikely(buflen & 0x01)) {
1184 unsigned short align_buf[1] = { 0 };
1185 unsigned char *trailing_buf = buf + buflen - 1;
1186
1187 if (write_data) {
1188 memcpy(align_buf, trailing_buf, 1);
1189 write_atapi_data(base, 1, align_buf);
1190 } else {
1191 read_atapi_data(base, 1, align_buf);
1192 memcpy(trailing_buf, align_buf, 1);
1193 }
1194 }
1195}
1196
1197/**
1198 * bfin_irq_clear - Clear ATAPI interrupt.
1199 * @ap: Port associated with this ATA transaction.
1200 *
1201 * Note: Original code is ata_bmdma_irq_clear().
1202 */
1203
1204static void bfin_irq_clear(struct ata_port *ap)
1205{
1206 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1207
1208 pr_debug("in atapi irq clear\n");
1209 ATAPI_SET_INT_STATUS(base, 0x1FF);
1210}
1211
1212/**
1213 * bfin_irq_on - Enable interrupts on a port.
1214 * @ap: Port on which interrupts are enabled.
1215 *
1216 * Note: Original code is ata_irq_on().
1217 */
1218
1219static unsigned char bfin_irq_on(struct ata_port *ap)
1220{
1221 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1222 u8 tmp;
1223
1224 pr_debug("in atapi irq on\n");
1225 ap->ctl &= ~ATA_NIEN;
1226 ap->last_ctl = ap->ctl;
1227
1228 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1229 tmp = ata_wait_idle(ap);
1230
1231 bfin_irq_clear(ap);
1232
1233 return tmp;
1234}
1235
1236/**
1237 * bfin_irq_ack - Acknowledge a device interrupt.
1238 * @ap: Port on which interrupts are enabled.
1239 *
1240 * Note: Original code is ata_irq_ack().
1241 */
1242
1243static unsigned char bfin_irq_ack(struct ata_port *ap, unsigned int chk_drq)
1244{
1245 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1246 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
1247 unsigned char status;
1248
1249 pr_debug("in atapi irq ack\n");
1250 status = ata_busy_wait(ap, bits, 1000);
1251 if (status & bits)
1252 if (ata_msg_err(ap))
1253 dev_err(ap->dev, "abnormal status 0x%X\n", status);
1254
1255 /* get controller status; clear intr, err bits */
1256 ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
1257 | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
1258 | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
1259
1260 return bfin_bmdma_status(ap);
1261}
1262
1263/**
1264 * bfin_bmdma_freeze - Freeze DMA controller port
1265 * @ap: port to freeze
1266 *
1267 * Note: Original code is ata_bmdma_freeze().
1268 */
1269
1270static void bfin_bmdma_freeze(struct ata_port *ap)
1271{
1272 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1273
1274 pr_debug("in atapi dma freeze\n");
1275 ap->ctl |= ATA_NIEN;
1276 ap->last_ctl = ap->ctl;
1277
1278 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1279
1280 /* Under certain circumstances, some controllers raise IRQ on
1281 * ATA_NIEN manipulation. Also, many controllers fail to mask
1282 * previously pending IRQ on ATA_NIEN assertion. Clear it.
1283 */
1284 ata_chk_status(ap);
1285
1286 bfin_irq_clear(ap);
1287}
1288
1289/**
1290 * bfin_bmdma_thaw - Thaw DMA controller port
1291 * @ap: port to thaw
1292 *
1293 * Note: Original code is ata_bmdma_thaw().
1294 */
1295
1296void bfin_bmdma_thaw(struct ata_port *ap)
1297{
1298 bfin_check_status(ap);
1299 bfin_irq_clear(ap);
1300 bfin_irq_on(ap);
1301}
1302
1303/**
1304 * bfin_std_postreset - standard postreset callback
1305 * @ap: the target ata_port
1306 * @classes: classes of attached devices
1307 *
1308 * Note: Original code is ata_std_postreset().
1309 */
1310
1311static void bfin_std_postreset(struct ata_port *ap, unsigned int *classes)
1312{
1313 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1314
1315 /* re-enable interrupts */
1316 bfin_irq_on(ap);
1317
1318 /* is double-select really necessary? */
1319 if (classes[0] != ATA_DEV_NONE)
1320 bfin_std_dev_select(ap, 1);
1321 if (classes[1] != ATA_DEV_NONE)
1322 bfin_std_dev_select(ap, 0);
1323
1324 /* bail out if no device is present */
1325 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
1326 return;
1327 }
1328
1329 /* set up device control */
1330 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1331}
1332
1333/**
1334 * bfin_error_handler - Stock error handler for DMA controller
1335 * @ap: port to handle error for
1336 */
1337
1338static void bfin_error_handler(struct ata_port *ap)
1339{
1340 ata_bmdma_drive_eh(ap, ata_std_prereset, bfin_std_softreset, NULL,
1341 bfin_std_postreset);
1342}
1343
1344static void bfin_port_stop(struct ata_port *ap)
1345{
1346 pr_debug("in atapi port stop\n");
1347 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
1348 free_dma(CH_ATAPI_RX);
1349 free_dma(CH_ATAPI_TX);
1350 }
1351}
1352
1353static int bfin_port_start(struct ata_port *ap)
1354{
1355 pr_debug("in atapi port start\n");
1356 if (!(ap->udma_mask || ap->mwdma_mask))
1357 return 0;
1358
1359 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
1360 if (request_dma(CH_ATAPI_TX,
1361 "BFIN ATAPI TX DMA") >= 0)
1362 return 0;
1363
1364 free_dma(CH_ATAPI_RX);
1365 }
1366
1367 ap->udma_mask = 0;
1368 ap->mwdma_mask = 0;
1369 dev_err(ap->dev, "Unable to request ATAPI DMA!"
1370		" Continuing in PIO mode.\n");
1371
1372 return 0;
1373}
1374
1375static struct scsi_host_template bfin_sht = {
1376 .module = THIS_MODULE,
1377 .name = DRV_NAME,
1378 .ioctl = ata_scsi_ioctl,
1379 .queuecommand = ata_scsi_queuecmd,
1380 .can_queue = ATA_DEF_QUEUE,
1381 .this_id = ATA_SHT_THIS_ID,
1382 .sg_tablesize = SG_NONE,
1383 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
1384 .emulated = ATA_SHT_EMULATED,
1385 .use_clustering = ATA_SHT_USE_CLUSTERING,
1386 .proc_name = DRV_NAME,
1387 .dma_boundary = ATA_DMA_BOUNDARY,
1388 .slave_configure = ata_scsi_slave_config,
1389 .slave_destroy = ata_scsi_slave_destroy,
1390 .bios_param = ata_std_bios_param,
1391#ifdef CONFIG_PM
1392 .resume = ata_scsi_device_resume,
1393 .suspend = ata_scsi_device_suspend,
1394#endif
1395};
1396
1397static const struct ata_port_operations bfin_pata_ops = {
1398 .port_disable = ata_port_disable,
1399 .set_piomode = bfin_set_piomode,
1400 .set_dmamode = bfin_set_dmamode,
1401
1402 .tf_load = bfin_tf_load,
1403 .tf_read = bfin_tf_read,
1404 .exec_command = bfin_exec_command,
1405 .check_status = bfin_check_status,
1406 .check_altstatus = bfin_check_altstatus,
1407 .dev_select = bfin_std_dev_select,
1408
1409 .bmdma_setup = bfin_bmdma_setup,
1410 .bmdma_start = bfin_bmdma_start,
1411 .bmdma_stop = bfin_bmdma_stop,
1412 .bmdma_status = bfin_bmdma_status,
1413 .data_xfer = bfin_data_xfer,
1414
1415 .qc_prep = ata_noop_qc_prep,
1416 .qc_issue = ata_qc_issue_prot,
1417
1418 .freeze = bfin_bmdma_freeze,
1419 .thaw = bfin_bmdma_thaw,
1420 .error_handler = bfin_error_handler,
1421 .post_internal_cmd = bfin_bmdma_stop,
1422
1423 .irq_handler = ata_interrupt,
1424 .irq_clear = bfin_irq_clear,
1425 .irq_on = bfin_irq_on,
1426 .irq_ack = bfin_irq_ack,
1427
1428 .port_start = bfin_port_start,
1429 .port_stop = bfin_port_stop,
1430};
1431
1432static struct ata_port_info bfin_port_info[] = {
1433 {
1434 .sht = &bfin_sht,
1435 .flags = ATA_FLAG_SLAVE_POSS
1436 | ATA_FLAG_MMIO
1437 | ATA_FLAG_NO_LEGACY,
1438 .pio_mask = 0x1f, /* pio0-4 */
1439 .mwdma_mask = 0,
1440#ifdef CONFIG_PATA_BF54X_DMA
1441 .udma_mask = ATA_UDMA5,
1442#else
1443 .udma_mask = 0,
1444#endif
1445 .port_ops = &bfin_pata_ops,
1446 },
1447};
1448
1449/**
1450 * bfin_reset_controller - initialize BF54x ATAPI controller.
1451 */
1452
1453static int bfin_reset_controller(struct ata_host *host)
1454{
1455 void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
1456 int count;
1457 unsigned short status;
1458
1459 /* Disable all ATAPI interrupts */
1460 ATAPI_SET_INT_MASK(base, 0);
1461 SSYNC();
1462
1463	/* Assert the RESET signal for 25 us */
1464 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
1465 udelay(30);
1466
1467	/* Negate the RESET signal for 2 ms */
1468 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
1469 msleep(2);
1470
1471 /* Wait on Busy flag to clear */
1472 count = 10000000;
1473 do {
1474 status = read_atapi_register(base, ATA_REG_STATUS);
1475 } while (count-- && (status & ATA_BUSY));
1476
1477 /* Enable only ATAPI Device interrupt */
1478 ATAPI_SET_INT_MASK(base, 1);
1479 SSYNC();
1480
1481 return (!count);
1482}
1483
1484/**
1485 * atapi_io_port - define atapi peripheral port pins.
1486 */
1487static unsigned short atapi_io_port[] = {
1488 P_ATAPI_RESET,
1489 P_ATAPI_DIOR,
1490 P_ATAPI_DIOW,
1491 P_ATAPI_CS0,
1492 P_ATAPI_CS1,
1493 P_ATAPI_DMACK,
1494 P_ATAPI_DMARQ,
1495 P_ATAPI_INTRQ,
1496 P_ATAPI_IORDY,
1497 0
1498};
1499
1500/**
1501 * bfin_atapi_probe - attach a bfin atapi interface
1502 * @pdev: platform device
1503 *
1504 * Register a bfin atapi interface.
1505 *
1506 *
1507 * Platform devices are expected to contain 2 resources per port:
1508 *
1509 *	- Register base (IORESOURCE_MEM)
1510 * - IRQ (IORESOURCE_IRQ)
1511 *
1512 */
1513static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1514{
1515 int board_idx = 0;
1516 struct resource *res;
1517 struct ata_host *host;
1518 const struct ata_port_info *ppi[] =
1519 { &bfin_port_info[board_idx], NULL };
1520
1521 /*
1522 * Simple resource validation ..
1523 */
1524 if (unlikely(pdev->num_resources != 2)) {
1525 dev_err(&pdev->dev, "invalid number of resources\n");
1526 return -EINVAL;
1527 }
1528
1529 /*
1530 * Get the register base first
1531 */
1532 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1533 if (res == NULL)
1534 return -EINVAL;
1535
1536 /*
1537 * Now that that's out of the way, wire up the port..
1538 */
1539 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1540 if (!host)
1541 return -ENOMEM;
1542
1543 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1544
1545 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1546		dev_err(&pdev->dev, "Requesting peripherals failed\n");
1547 return -EFAULT;
1548 }
1549
1550 if (bfin_reset_controller(host)) {
1551 peripheral_free_list(atapi_io_port);
1552		dev_err(&pdev->dev, "Failed to reset ATAPI device\n");
1553 return -EFAULT;
1554 }
1555
1556 if (ata_host_activate(host, platform_get_irq(pdev, 0),
1557 ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
1558 peripheral_free_list(atapi_io_port);
1559		dev_err(&pdev->dev, "Failed to attach ATAPI device\n");
1560 return -ENODEV;
1561 }
1562
1563 return 0;
1564}
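/*
 * A minimal board-file sketch of the platform device this probe expects,
 * assuming an illustrative MMR base and interrupt name; the real values
 * come from the BF54x Hardware Reference Manual and the board's
 * <asm/irq.h>.
 */
#if 0
static struct resource bfin_atapi_resources[] = {
	{
		.start	= 0xFFC03800,		/* assumed ATAPI register base */
		.end	= 0xFFC03800 + 0x6D,	/* covers offsets up to ULTRA_TIM_3 */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= IRQ_ATAPI_ERR,	/* assumed interrupt name */
		.end	= IRQ_ATAPI_ERR,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_atapi_device = {
	.name		= "pata-bf54x",		/* must match DRV_NAME */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(bfin_atapi_resources),
	.resource	= bfin_atapi_resources,
};
#endif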
1565
1566/**
1567 * bfin_atapi_remove - unplug a bfin atapi interface
1568 * @pdev: platform device
1569 *
1570 * A bfin atapi device has been unplugged. Perform the needed
1571 * cleanup. Also called on module unload for any active devices.
1572 */
1573static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1574{
1575 struct device *dev = &pdev->dev;
1576 struct ata_host *host = dev_get_drvdata(dev);
1577
1578 ata_host_detach(host);
1579
1580 peripheral_free_list(atapi_io_port);
1581
1582 return 0;
1583}
1584
1585#ifdef CONFIG_PM
1586int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
1587{
1588 return 0;
1589}
1590
1591int bfin_atapi_resume(struct platform_device *pdev)
1592{
1593 return 0;
1594}
1595#endif
1596
1597static struct platform_driver bfin_atapi_driver = {
1598 .probe = bfin_atapi_probe,
1599 .remove = __devexit_p(bfin_atapi_remove),
1600 .driver = {
1601 .name = DRV_NAME,
1602 .owner = THIS_MODULE,
1603#ifdef CONFIG_PM
1604 .suspend = bfin_atapi_suspend,
1605 .resume = bfin_atapi_resume,
1606#endif
1607 },
1608};
1609
1610static int __init bfin_atapi_init(void)
1611{
1612 pr_info("register bfin atapi driver\n");
1613 return platform_driver_register(&bfin_atapi_driver);
1614}
1615
1616static void __exit bfin_atapi_exit(void)
1617{
1618 platform_driver_unregister(&bfin_atapi_driver);
1619}
1620
1621module_init(bfin_atapi_init);
1622module_exit(bfin_atapi_exit);
1623
1624MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
1625MODULE_DESCRIPTION("PATA driver for Blackfin 54x ATAPI controller");
1626MODULE_LICENSE("GPL");
1627MODULE_VERSION(DRV_VERSION);