author		David S. Miller <davem@davemloft.net>	2008-08-27 20:01:57 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-29 05:13:10 -0400
commit		334ae614772b1147435dce9be3911f9040dff0d9 (patch)
tree		a443e687646ab25744b20e43963f728870122e97
parent		7f06a3b2c162573c924f425053227a52b4bd7cb1 (diff)
sparc: Kill SBUS DVMA layer.
This thing was completely pointless.

Just find the OF device in the parent of drivers that want to program
this device, and map the DMA regs inside such drivers too.

This also moves the dummy claim_dma_lock() and release_dma_lock()
implementation to floppy_32.h, which makes it handle this issue just
like floppy_64.h does.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	arch/sparc/include/asm/dma.h		141
-rw-r--r--	arch/sparc/include/asm/dma_32.h		288
-rw-r--r--	arch/sparc/include/asm/dma_64.h		205
-rw-r--r--	arch/sparc/include/asm/floppy_32.h	11
-rw-r--r--	arch/sparc/kernel/sparc_ksyms.c		1
-rw-r--r--	arch/sparc64/kernel/sparc64_ksyms.c	1
-rw-r--r--	drivers/net/sunlance.c			51
-rw-r--r--	drivers/sbus/Makefile			2
-rw-r--r--	drivers/sbus/dvma.c			136
-rw-r--r--	drivers/sbus/sbus.c			2
-rw-r--r--	drivers/scsi/esp_scsi.h			3
-rw-r--r--	drivers/scsi/sun_esp.c			122
12 files changed, 246 insertions, 717 deletions
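For readers unfamiliar with the OF device interfaces this patch switches the drivers to, the following is a minimal, hedged sketch of the pattern applied in sunlance.c and sun_esp.c below: look up the of_device of the parent "ledma"/"espdma"/"dma" node and map its register resource directly in the driver, instead of walking the removed global dma_chain. The helper name example_map_parent_dma() and the exact include lines are illustrative assumptions; the calls themselves (of_find_device_by_node, of_ioremap, resource_size) are the ones the diff uses.

#include <linux/of_device.h>
#include <asm/prom.h>

/* Illustrative only, not part of the patch: map the DMA registers of a
 * device's parent OF node (e.g. "ledma" or "espdma") from within the
 * driver itself.
 */
static void __iomem * __devinit example_map_parent_dma(struct device_node *dp,
                                                        struct of_device **dma_ret)
{
        struct of_device *dma_of;

        if (!dp->parent)
                return NULL;

        dma_of = of_find_device_by_node(dp->parent);
        if (!dma_of)
                return NULL;

        *dma_ret = dma_of;
        return of_ioremap(&dma_of->resource[0], 0,
                          resource_size(&dma_of->resource[0]), "dma");
}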
diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h
index aa1d90ac04c5..493563446790 100644
--- a/arch/sparc/include/asm/dma.h
+++ b/arch/sparc/include/asm/dma.h
@@ -1,8 +1,139 @@
-#ifndef ___ASM_SPARC_DMA_H
-#define ___ASM_SPARC_DMA_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma_64.h>
+#ifndef _ASM_SPARC_DMA_H
+#define _ASM_SPARC_DMA_H
+
+/* These are irrelevant for Sparc DMA, but we leave it in so that
5 * things can compile.
6 */
7#define MAX_DMA_CHANNELS 8
8#define DMA_MODE_READ 1
9#define DMA_MODE_WRITE 2
10#define MAX_DMA_ADDRESS (~0UL)
11
12/* Useful constants */
13#define SIZE_16MB (16*1024*1024)
14#define SIZE_64K (64*1024)
15
16/* SBUS DMA controller reg offsets */
17#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
18#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
19#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
20#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
21
22/* Fields in the cond_reg register */
23/* First, the version identification bits */
24#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
25#define DMA_VERS0 0x00000000 /* Sunray DMA version */
26#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
27#define DMA_VERS1 0x80000000 /* DMA rev 1 */
28#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
29#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
30#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
31
32#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
33#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
34#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
35#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
36#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
37#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
38#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
39#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
40#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
41#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
42#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
43#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
44#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
45#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
46#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
47#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
48#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
49#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
50#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
51#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
52#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
53#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
54#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
55#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
56#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
57#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
58#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
59#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
60#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
61#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
62#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
63#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
64#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
65#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
66#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
67#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
68#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
69#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
70#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
71#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
72#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
73
74/* Values describing the burst-size property from the PROM */
75#define DMA_BURST1 0x01
76#define DMA_BURST2 0x02
77#define DMA_BURST4 0x04
78#define DMA_BURST8 0x08
79#define DMA_BURST16 0x10
80#define DMA_BURST32 0x20
81#define DMA_BURST64 0x40
82#define DMA_BURSTBITS 0x7f
83
84/* From PCI */
85
86#ifdef CONFIG_PCI
87extern int isa_dma_bridge_buggy;
 #else
-#include <asm/dma_32.h>
+#define isa_dma_bridge_buggy   (0)
 #endif
91
92#ifdef CONFIG_SPARC32
93
94#include <asm/sbus.h>
95
96/* Routines for data transfer buffers. */
97BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
98BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
99
100#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
101#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
102
103/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
104BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
105BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
106BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
107BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
108
109#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
110#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
111#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
112#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
113
114/*
115 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
116 *
117 * The mmu_map_dma_area establishes two mappings in one go.
118 * These mappings point to pages normally mapped at 'va' (linear address).
119 * First mapping is for CPU visible address at 'a', uncached.
120 * This is an alias, but it works because it is an uncached mapping.
121 * Second mapping is for device visible address, or "bus" address.
122 * The bus address is returned at '*pba'.
123 *
124 * These functions seem distinct, but are hard to split. On sun4c,
125 * at least for now, 'a' is equal to bus address, and retured in *pba.
126 * On sun4m, page attributes depend on the CPU type, so we have to
127 * know if we are mapping RAM or I/O, so it has to be an additional argument
128 * to a separate mapping function for CPU visible mappings.
129 */
130BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
131BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
132BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
133
134#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
135#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
136#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
 #endif
138
139#endif /* !(_ASM_SPARC_DMA_H) */
diff --git a/arch/sparc/include/asm/dma_32.h b/arch/sparc/include/asm/dma_32.h
deleted file mode 100644
index cf7189c0079b..000000000000
--- a/arch/sparc/include/asm/dma_32.h
+++ /dev/null
@@ -1,288 +0,0 @@
1/* include/asm/dma.h
2 *
3 * Copyright 1995 (C) David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _ASM_SPARC_DMA_H
7#define _ASM_SPARC_DMA_H
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11
12#include <asm/vac-ops.h> /* for invalidate's, etc. */
13#include <asm/sbus.h>
14#include <asm/delay.h>
15#include <asm/oplib.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <linux/spinlock.h>
19
20struct page;
21extern spinlock_t dma_spin_lock;
22
23static inline unsigned long claim_dma_lock(void)
24{
25 unsigned long flags;
26 spin_lock_irqsave(&dma_spin_lock, flags);
27 return flags;
28}
29
30static inline void release_dma_lock(unsigned long flags)
31{
32 spin_unlock_irqrestore(&dma_spin_lock, flags);
33}
34
35/* These are irrelevant for Sparc DMA, but we leave it in so that
36 * things can compile.
37 */
38#define MAX_DMA_CHANNELS 8
39#define MAX_DMA_ADDRESS (~0UL)
40#define DMA_MODE_READ 1
41#define DMA_MODE_WRITE 2
42
43/* Useful constants */
44#define SIZE_16MB (16*1024*1024)
45#define SIZE_64K (64*1024)
46
47/* SBUS DMA controller reg offsets */
48#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
49#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
50#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
51#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
52
53/* DVMA chip revisions */
54enum dvma_rev {
55 dvmarev0,
56 dvmaesc1,
57 dvmarev1,
58 dvmarev2,
59 dvmarev3,
60 dvmarevplus,
61 dvmahme
62};
63
64#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
65
66/* Linux DMA information structure, filled during probe. */
67struct sbus_dma {
68 struct sbus_dma *next;
69 struct sbus_dev *sdev;
70 void __iomem *regs;
71
72 /* Status, misc info */
73 int node; /* Prom node for this DMA device */
74 int running; /* Are we doing DMA now? */
75 int allocated; /* Are we "owned" by anyone yet? */
76
77 /* Transfer information. */
78 unsigned long addr; /* Start address of current transfer */
79 int nbytes; /* Size of current transfer */
80 int realbytes; /* For splitting up large transfers, etc. */
81
82 /* DMA revision */
83 enum dvma_rev revision;
84};
85
86extern struct sbus_dma *dma_chain;
87
88/* Broken hardware... */
89#ifdef CONFIG_SUN4
90/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
91 * Or is rev0 present only on sun4 boxes? -jj */
92#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
93#else
94#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
95#endif
96#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
97
98/* Main routines in dma.c */
99extern void dvma_init(struct sbus_bus *);
100
101/* Fields in the cond_reg register */
102/* First, the version identification bits */
103#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
104#define DMA_VERS0 0x00000000 /* Sunray DMA version */
105#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
106#define DMA_VERS1 0x80000000 /* DMA rev 1 */
107#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
108#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
109#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
110
111#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
112#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
113#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
114#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
115#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
116#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
117#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
118#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
119#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
120#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
121#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
122#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
123#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
124#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
125#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
126#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
127#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
128#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
129#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
130#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
131#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
132#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
133#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
134#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
135#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
136#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
137#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
138#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
139#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
140#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
141#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
142#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
143#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
144#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
145#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
146#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
147#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
148#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
149#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
150#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
151#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
152#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
153#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
154
155/* Values describing the burst-size property from the PROM */
156#define DMA_BURST1 0x01
157#define DMA_BURST2 0x02
158#define DMA_BURST4 0x04
159#define DMA_BURST8 0x08
160#define DMA_BURST16 0x10
161#define DMA_BURST32 0x20
162#define DMA_BURST64 0x40
163#define DMA_BURSTBITS 0x7f
164
165/* Determine highest possible final transfer address given a base */
166#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
167
168/* Yes, I hack a lot of elisp in my spare time... */
169#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
170#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
171#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
172#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
173#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
174#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
175#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
176#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
177#define DMA_BEGINDMA_W(regs) \
178 ((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
179#define DMA_BEGINDMA_R(regs) \
180 ((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
181
182/* For certain DMA chips, we need to disable ints upon irq entry
183 * and turn them back on when we are done. So in any ESP interrupt
184 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
185 * when leaving the handler. You have been warned...
186 */
187#define DMA_IRQ_ENTRY(dma, dregs) do { \
188 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
189 } while (0)
190
191#define DMA_IRQ_EXIT(dma, dregs) do { \
192 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
193 } while(0)
194
195#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
196/* Pause until counter runs out or BIT isn't set in the DMA condition
197 * register.
198 */
199static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
200 unsigned long bit)
201{
202 int ctr = 50000; /* Let's find some bugs ;) */
203
204 /* Busy wait until the bit is not set any more */
205 while((regs->cond_reg&bit) && (ctr>0)) {
206 ctr--;
207 __delay(5);
208 }
209
210 /* Check for bogus outcome. */
211 if(!ctr)
212 panic("DMA timeout");
213}
214
215/* Reset the friggin' thing... */
216#define DMA_RESET(dma) do { \
217 struct sparc_dma_registers *regs = dma->regs; \
218 /* Let the current FIFO drain itself */ \
219 sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
220 /* Reset the logic */ \
221 regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
222 __delay(400); /* let the bits set ;) */ \
223 regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
224 sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
225 /* Enable FAST transfers if available */ \
226 if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
227 dma->running = 0; \
228} while(0)
229#endif
230
231#define for_each_dvma(dma) \
232 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
233
234extern int get_dma_list(char *);
235extern int request_dma(unsigned int, __const__ char *);
236extern void free_dma(unsigned int);
237
238/* From PCI */
239
240#ifdef CONFIG_PCI
241extern int isa_dma_bridge_buggy;
242#else
243#define isa_dma_bridge_buggy (0)
244#endif
245
246/* Routines for data transfer buffers. */
247BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
248BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
249
250#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
251#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
252
253/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
254BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
255BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
256BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
257BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
258
259#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
260#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
261#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
262#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
263
264/*
265 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
266 *
267 * The mmu_map_dma_area establishes two mappings in one go.
268 * These mappings point to pages normally mapped at 'va' (linear address).
269 * First mapping is for CPU visible address at 'a', uncached.
270 * This is an alias, but it works because it is an uncached mapping.
271 * Second mapping is for device visible address, or "bus" address.
272 * The bus address is returned at '*pba'.
273 *
274 * These functions seem distinct, but are hard to split. On sun4c,
275 * at least for now, 'a' is equal to bus address, and retured in *pba.
276 * On sun4m, page attributes depend on the CPU type, so we have to
277 * know if we are mapping RAM or I/O, so it has to be an additional argument
278 * to a separate mapping function for CPU visible mappings.
279 */
280BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
281BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
282BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
283
284#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
285#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
286#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
287
288#endif /* !(_ASM_SPARC_DMA_H) */
diff --git a/arch/sparc/include/asm/dma_64.h b/arch/sparc/include/asm/dma_64.h
deleted file mode 100644
index 46a8aecffc02..000000000000
--- a/arch/sparc/include/asm/dma_64.h
+++ /dev/null
@@ -1,205 +0,0 @@
1/*
2 * include/asm/dma.h
3 *
4 * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _ASM_SPARC64_DMA_H
8#define _ASM_SPARC64_DMA_H
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/spinlock.h>
13
14#include <asm/sbus.h>
15#include <asm/delay.h>
16#include <asm/oplib.h>
17
18/* These are irrelevant for Sparc DMA, but we leave it in so that
19 * things can compile.
20 */
21#define MAX_DMA_CHANNELS 8
22#define DMA_MODE_READ 1
23#define DMA_MODE_WRITE 2
24#define MAX_DMA_ADDRESS (~0UL)
25
26/* Useful constants */
27#define SIZE_16MB (16*1024*1024)
28#define SIZE_64K (64*1024)
29
30/* SBUS DMA controller reg offsets */
31#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
32#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
33#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
34#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
35
36/* DVMA chip revisions */
37enum dvma_rev {
38 dvmarev0,
39 dvmaesc1,
40 dvmarev1,
41 dvmarev2,
42 dvmarev3,
43 dvmarevplus,
44 dvmahme
45};
46
47#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
48
49/* Linux DMA information structure, filled during probe. */
50struct sbus_dma {
51 struct sbus_dma *next;
52 struct sbus_dev *sdev;
53 void __iomem *regs;
54
55 /* Status, misc info */
56 int node; /* Prom node for this DMA device */
57 int running; /* Are we doing DMA now? */
58 int allocated; /* Are we "owned" by anyone yet? */
59
60 /* Transfer information. */
61 u32 addr; /* Start address of current transfer */
62 int nbytes; /* Size of current transfer */
63 int realbytes; /* For splitting up large transfers, etc. */
64
65 /* DMA revision */
66 enum dvma_rev revision;
67};
68
69extern struct sbus_dma *dma_chain;
70
71/* Broken hardware... */
72#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
73#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
74
75/* Main routines in dma.c */
76extern void dvma_init(struct sbus_bus *);
77
78/* Fields in the cond_reg register */
79/* First, the version identification bits */
80#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
81#define DMA_VERS0 0x00000000 /* Sunray DMA version */
82#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
83#define DMA_VERS1 0x80000000 /* DMA rev 1 */
84#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
85#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
86#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
87
88#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
89#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
90#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
91#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
92#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
93#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
94#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
95#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
96#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
97#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
98#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
99#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
100#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
101#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
102#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
103#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
104#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
105#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
106#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
107#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
108#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
109#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
110#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
111#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
112#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
113#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
114#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
115#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
116#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
117#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
118#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
119#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
120#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
121#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
122#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
123#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
124#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
125#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
126#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
127#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
128#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
129
130/* Values describing the burst-size property from the PROM */
131#define DMA_BURST1 0x01
132#define DMA_BURST2 0x02
133#define DMA_BURST4 0x04
134#define DMA_BURST8 0x08
135#define DMA_BURST16 0x10
136#define DMA_BURST32 0x20
137#define DMA_BURST64 0x40
138#define DMA_BURSTBITS 0x7f
139
140/* Determine highest possible final transfer address given a base */
141#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
142
143/* Yes, I hack a lot of elisp in my spare time... */
144#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
145#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
146#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
147#define DMA_OFF(__regs) \
148do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
149 tmp &= ~DMA_ENABLE; \
150 sbus_writel(tmp, (__regs) + DMA_CSR); \
151} while(0)
152#define DMA_INTSOFF(__regs) \
153do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
154 tmp &= ~DMA_INT_ENAB; \
155 sbus_writel(tmp, (__regs) + DMA_CSR); \
156} while(0)
157#define DMA_INTSON(__regs) \
158do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
159 tmp |= DMA_INT_ENAB; \
160 sbus_writel(tmp, (__regs) + DMA_CSR); \
161} while(0)
162#define DMA_PUNTFIFO(__regs) \
163do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
164 tmp |= DMA_FIFO_INV; \
165 sbus_writel(tmp, (__regs) + DMA_CSR); \
166} while(0)
167#define DMA_SETSTART(__regs, __addr) \
168 sbus_writel((u32)(__addr), (__regs) + DMA_ADDR);
169#define DMA_BEGINDMA_W(__regs) \
170do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
171 tmp |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB); \
172 sbus_writel(tmp, (__regs) + DMA_CSR); \
173} while(0)
174#define DMA_BEGINDMA_R(__regs) \
175do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
176 tmp |= (DMA_ENABLE|DMA_INT_ENAB); \
177 tmp &= ~DMA_ST_WRITE; \
178 sbus_writel(tmp, (__regs) + DMA_CSR); \
179} while(0)
180
181/* For certain DMA chips, we need to disable ints upon irq entry
182 * and turn them back on when we are done. So in any ESP interrupt
183 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
184 * when leaving the handler. You have been warned...
185 */
186#define DMA_IRQ_ENTRY(dma, dregs) do { \
187 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
188 } while (0)
189
190#define DMA_IRQ_EXIT(dma, dregs) do { \
191 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
192 } while(0)
193
194#define for_each_dvma(dma) \
195 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
196
197/* From PCI */
198
199#ifdef CONFIG_PCI
200extern int isa_dma_bridge_buggy;
201#else
202#define isa_dma_bridge_buggy (0)
203#endif
204
205#endif /* !(_ASM_SPARC64_DMA_H) */
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index ae3f00bf22ff..ff2b91c6eeff 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -385,4 +385,15 @@ static int sparc_eject(void)
 
 #define EXTRA_FLOPPY_PARAMS
 
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+#define claim_dma_lock() \
+({ unsigned long flags; \
+        spin_lock_irqsave(&dma_spin_lock, flags); \
+        flags; \
+})
+
+#define release_dma_lock(__flags) \
+        spin_unlock_irqrestore(&dma_spin_lock, __flags);
+
 #endif /* !(__ASM_SPARC_FLOPPY_H) */
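For context on the claim_dma_lock()/release_dma_lock() helpers added above: their only caller is the generic floppy driver, which brackets its DMA programming with them roughly as in the hedged sketch below (paraphrased; the fd_* hooks come from <asm/floppy.h>, the function name is illustrative, and none of this is part of the patch).

#include <asm/floppy.h>

/* Hedged sketch of the expected call pattern: claim_dma_lock() must
 * evaluate to the saved IRQ flags that release_dma_lock() restores.
 */
static void example_setup_floppy_dma(char *buf, unsigned long count, int mode)
{
        unsigned long flags;

        flags = claim_dma_lock();
        fd_disable_dma();
        fd_set_dma_mode(mode);
        fd_set_dma_addr(buf);
        fd_set_dma_count(count);
        fd_enable_dma();
        release_dma_lock(flags);
}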
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index b23cea5ca5d1..8a392d3d89e4 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -154,7 +154,6 @@ EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
 
 #ifdef CONFIG_SBUS
 EXPORT_SYMBOL(sbus_root);
-EXPORT_SYMBOL(dma_chain);
 EXPORT_SYMBOL(sbus_set_sbus64);
 EXPORT_SYMBOL(sbus_alloc_consistent);
 EXPORT_SYMBOL(sbus_free_consistent);
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 3b2890c207f3..8e6ac5c1b7bd 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -161,7 +161,6 @@ EXPORT_SYMBOL(auxio_set_lte);
 #endif
 #ifdef CONFIG_SBUS
 EXPORT_SYMBOL(sbus_root);
-EXPORT_SYMBOL(dma_chain);
 EXPORT_SYMBOL(sbus_set_sbus64);
 EXPORT_SYMBOL(sbus_alloc_consistent);
 EXPORT_SYMBOL(sbus_free_consistent);
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 4e994f87469e..24ffecb1ce23 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -248,7 +248,7 @@ struct lance_private {
         int rx_new, tx_new;
         int rx_old, tx_old;
 
-        struct sbus_dma *ledma;        /* If set this points to ledma  */
+        struct of_device *ledma;       /* If set this points to ledma  */
         char tpe;                      /* cable-selection is TPE       */
         char auto_select;              /* cable-selection by carrier   */
         char burst_sizes;              /* ledma SBus burst sizes       */
@@ -1273,6 +1273,12 @@ static void lance_free_hwresources(struct lance_private *lp)
 {
         if (lp->lregs)
                 sbus_iounmap(lp->lregs, LANCE_REG_SIZE);
+        if (lp->dregs) {
+                struct of_device *ledma = lp->ledma;
+
+                of_iounmap(&ledma->resource[0], lp->dregs,
+                           resource_size(&ledma->resource[0]));
+        }
         if (lp->init_block_iomem) {
                 sbus_iounmap(lp->init_block_iomem,
                              sizeof(struct lance_init_block));
@@ -1309,7 +1315,7 @@ static const struct ethtool_ops sparc_lance_ethtool_ops = {
 };
 
 static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
-                                           struct sbus_dma *ledma,
+                                           struct of_device *ledma,
                                            struct sbus_dev *lebuffer)
 {
         static unsigned version_printed;
@@ -1345,6 +1351,18 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
                 goto fail;
         }
 
+        lp->ledma = ledma;
+        if (lp->ledma) {
+                lp->dregs = of_ioremap(&ledma->resource[0], 0,
+                                       resource_size(&ledma->resource[0]),
+                                       "ledma");
+                if (!lp->dregs) {
+                        printk(KERN_ERR "SunLance: Cannot map "
+                               "ledma registers.\n");
+                        goto fail;
+                }
+        }
+
         lp->sdev = sdev;
         if (lebuffer) {
                 /* sanity check */
@@ -1383,11 +1401,10 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
                                              LE_C3_BCON));
 
         lp->name = lancestr;
-        lp->ledma = ledma;
 
         lp->burst_sizes = 0;
         if (lp->ledma) {
-                struct device_node *ledma_dp = ledma->sdev->ofdev.node;
+                struct device_node *ledma_dp = ledma->node;
                 const char *prop;
                 unsigned int sbmask;
                 u32 csr;
@@ -1435,8 +1452,6 @@ no_link_test:
                 lp->tpe = 1;
         }
 
-        lp->dregs = ledma->regs;
-
         /* Reset ledma */
         csr = sbus_readl(lp->dregs + DMA_CSR);
         sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
@@ -1486,18 +1501,6 @@ fail:
         return -ENODEV;
 }
 
-/* On 4m, find the associated dma for the lance chip */
-static struct sbus_dma * __devinit find_ledma(struct sbus_dev *sdev)
-{
-        struct sbus_dma *p;
-
-        for_each_dvma(p) {
-                if (p->sdev == sdev)
-                        return p;
-        }
-        return NULL;
-}
-
 #ifdef CONFIG_SUN4
 
 #include <asm/sun4paddr.h>
@@ -1541,13 +1544,13 @@ static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_
         int err;
 
         if (sdev->parent) {
-                struct of_device *parent = &sdev->parent->ofdev;
-
-                if (!strcmp(parent->node->name, "ledma")) {
-                        struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
+                struct device_node *parent_node = sdev->parent->ofdev.node;
+                struct of_device *parent;
 
-                        err = sparc_lance_probe_one(sdev, ledma, NULL);
-                } else if (!strcmp(parent->node->name, "lebuffer")) {
+                parent = of_find_device_by_node(parent_node);
+                if (parent && !strcmp(parent->node->name, "ledma")) {
+                        err = sparc_lance_probe_one(sdev, parent, NULL);
+                } else if (parent && !strcmp(parent->node->name, "lebuffer")) {
                         err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
                 } else
                         err = sparc_lance_probe_one(sdev, NULL, NULL);
diff --git a/drivers/sbus/Makefile b/drivers/sbus/Makefile
index 7b1d24d95308..56f73318eba2 100644
--- a/drivers/sbus/Makefile
+++ b/drivers/sbus/Makefile
@@ -3,7 +3,7 @@
 #
 
 ifneq ($(ARCH),m68k)
-obj-y := sbus.o dvma.o
+obj-y := sbus.o
 endif
 
 obj-$(CONFIG_SBUSCHAR) += char/
diff --git a/drivers/sbus/dvma.c b/drivers/sbus/dvma.c
deleted file mode 100644
index ab0d2de3324c..000000000000
--- a/drivers/sbus/dvma.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/* dvma.c: Routines that are used to access DMA on the Sparc SBus.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/string.h>
7#include <linux/kernel.h>
8#include <linux/slab.h>
9#include <linux/init.h>
10#include <linux/delay.h>
11
12#include <asm/oplib.h>
13#include <asm/io.h>
14#include <asm/dma.h>
15#include <asm/sbus.h>
16
17struct sbus_dma *dma_chain;
18
19static void __init init_one_dvma(struct sbus_dma *dma, int num_dma)
20{
21 printk("dma%d: ", num_dma);
22
23 dma->next = NULL;
24 dma->running = 0; /* No transfers going on as of yet */
25 dma->allocated = 0; /* No one has allocated us yet */
26 switch(sbus_readl(dma->regs + DMA_CSR)&DMA_DEVICE_ID) {
27 case DMA_VERS0:
28 dma->revision = dvmarev0;
29 printk("Revision 0 ");
30 break;
31 case DMA_ESCV1:
32 dma->revision = dvmaesc1;
33 printk("ESC Revision 1 ");
34 break;
35 case DMA_VERS1:
36 dma->revision = dvmarev1;
37 printk("Revision 1 ");
38 break;
39 case DMA_VERS2:
40 dma->revision = dvmarev2;
41 printk("Revision 2 ");
42 break;
43 case DMA_VERHME:
44 dma->revision = dvmahme;
45 printk("HME DVMA gate array ");
46 break;
47 case DMA_VERSPLUS:
48 dma->revision = dvmarevplus;
49 printk("Revision 1 PLUS ");
50 break;
51 default:
52 printk("unknown dma version %08x",
53 sbus_readl(dma->regs + DMA_CSR) & DMA_DEVICE_ID);
54 dma->allocated = 1;
55 break;
56 }
57 printk("\n");
58}
59
60/* Probe this SBus DMA module(s) */
61void __init dvma_init(struct sbus_bus *sbus)
62{
63 struct sbus_dev *this_dev;
64 struct sbus_dma *dma;
65 struct sbus_dma *dchain;
66 static int num_dma = 0;
67
68 for_each_sbusdev(this_dev, sbus) {
69 char *name = this_dev->prom_name;
70 int hme = 0;
71
72 if(!strcmp(name, "SUNW,fas"))
73 hme = 1;
74 else if(strcmp(name, "dma") &&
75 strcmp(name, "ledma") &&
76 strcmp(name, "espdma"))
77 continue;
78
79 /* Found one... */
80 dma = kmalloc(sizeof(struct sbus_dma), GFP_ATOMIC);
81
82 dma->sdev = this_dev;
83
84 /* Put at end of dma chain */
85 dchain = dma_chain;
86 if(dchain) {
87 while(dchain->next)
88 dchain = dchain->next;
89 dchain->next = dma;
90 } else {
91 /* We're the first in line */
92 dma_chain = dma;
93 }
94
95 dma->regs = sbus_ioremap(&dma->sdev->resource[0], 0,
96 dma->sdev->resource[0].end - dma->sdev->resource[0].start + 1,
97 "dma");
98
99 dma->node = dma->sdev->prom_node;
100
101 init_one_dvma(dma, num_dma++);
102 }
103}
104
105#ifdef CONFIG_SUN4
106
107#include <asm/sun4paddr.h>
108
109void __init sun4_dvma_init(void)
110{
111 struct sbus_dma *dma;
112 struct resource r;
113
114 if(sun4_dma_physaddr) {
115 dma = kmalloc(sizeof(struct sbus_dma), GFP_ATOMIC);
116
117 /* No SBUS */
118 dma->sdev = NULL;
119
120 /* Only one DMA device */
121 dma_chain = dma;
122
123 memset(&r, 0, sizeof(r));
124 r.start = sun4_dma_physaddr;
125 dma->regs = sbus_ioremap(&r, 0, PAGE_SIZE, "dma");
126
127 /* No prom node */
128 dma->node = 0x0;
129
130 init_one_dvma(dma, 0);
131 } else {
132 dma_chain = NULL;
133 }
134}
135
136#endif
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 53e5e7bf545c..69491625d869 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -285,8 +285,6 @@ static void __init build_one_sbus(struct device_node *dp, int num_sbus)
         }
 
         sbus_fixup_all_regs(sbus->devices);
-
-        dvma_init(sbus);
 }
 
 static int __init sbus_init(void)
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index bb43a1388188..28e22acf87ea 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -521,7 +521,8 @@ struct esp {
 
         struct completion *eh_reset;
 
-        struct sbus_dma *dma;
+        void *dma;
+        int dmarev;
 };
 
 /* A front-end driver for the ESP chip should do the following in
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index f9cf70151366..d110b94f111e 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -1,6 +1,6 @@
 /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  *
- * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -30,39 +30,48 @@
 #define dma_write32(VAL, REG) \
         sbus_writel((VAL), esp->dma_regs + (REG))
 
-static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
-{
-        struct sbus_dev *sdev = esp->dev;
-        struct sbus_dma *dma;
+/* DVMA chip revisions */
+enum dvma_rev {
+        dvmarev0,
+        dvmaesc1,
+        dvmarev1,
+        dvmarev2,
+        dvmarev3,
+        dvmarevplus,
+        dvmahme
+};
 
-        if (dma_sdev != NULL) {
-                for_each_dvma(dma) {
-                        if (dma->sdev == dma_sdev)
-                                break;
-                }
-        } else {
-                for_each_dvma(dma) {
-                        if (dma->sdev == NULL)
-                                break;
+static int __devinit esp_sbus_setup_dma(struct esp *esp,
+                                        struct of_device *dma_of)
+{
+        esp->dma = dma_of;
 
-                        /* If bus + slot are the same and it has the
-                         * correct OBP name, it's ours.
-                         */
-                        if (sdev->bus == dma->sdev->bus &&
-                            sdev->slot == dma->sdev->slot &&
-                            (!strcmp(dma->sdev->prom_name, "dma") ||
-                             !strcmp(dma->sdev->prom_name, "espdma")))
-                                break;
-                }
-        }
+        esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
+                                   resource_size(&dma_of->resource[0]),
+                                   "espdma");
+        if (!esp->dma_regs)
+                return -ENOMEM;
 
-        if (dma == NULL) {
-                printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
-                       sdev->ofdev.node->full_name);
-                return -ENODEV;
+        switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
+        case DMA_VERS0:
+                esp->dmarev = dvmarev0;
+                break;
+        case DMA_ESCV1:
+                esp->dmarev = dvmaesc1;
+                break;
+        case DMA_VERS1:
+                esp->dmarev = dvmarev1;
+                break;
+        case DMA_VERS2:
+                esp->dmarev = dvmarev2;
+                break;
+        case DMA_VERHME:
+                esp->dmarev = dvmahme;
+                break;
+        case DMA_VERSPLUS:
+                esp->dmarev = dvmarevplus;
+                break;
         }
-        esp->dma = dma;
-        esp->dma_regs = dma->regs;
 
         return 0;
 
@@ -165,19 +174,18 @@ static void __devinit esp_get_clock_params(struct esp *esp)
         esp->cfreq = fmhz;
 }
 
-static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
+static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
 {
+        struct device_node *dma_dp = dma_of->node;
         struct sbus_dev *sdev = esp->dev;
-        struct device_node *dp = sdev->ofdev.node;
-        u8 bursts;
+        struct device_node *dp;
+        u8 bursts, val;
 
+        dp = sdev->ofdev.node;
         bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
-        if (dma) {
-                struct device_node *dma_dp = dma->ofdev.node;
-                u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
-                if (val != 0xff)
-                        bursts &= val;
-        }
+        val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
+        if (val != 0xff)
+                bursts &= val;
 
         if (sdev->bus) {
                 u8 val = of_getintprop_default(sdev->bus->ofdev.node,
@@ -194,7 +202,7 @@ static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
         esp->bursts = bursts;
 }
 
-static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
+static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
 {
         esp_get_scsi_id(esp);
         esp_get_differential(esp);
@@ -259,12 +267,12 @@ static void sbus_esp_reset_dma(struct esp *esp)
         can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
 
         /* Put the DVMA into a known state. */
-        if (esp->dma->revision != dvmahme) {
+        if (esp->dmarev != dvmahme) {
                 val = dma_read32(DMA_CSR);
                 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
                 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
         }
-        switch (esp->dma->revision) {
+        switch (esp->dmarev) {
         case dvmahme:
                 dma_write32(DMA_RESET_FAS366, DMA_CSR);
                 dma_write32(DMA_RST_SCSI, DMA_CSR);
@@ -346,14 +354,14 @@ static void sbus_esp_dma_drain(struct esp *esp)
         u32 csr;
         int lim;
 
-        if (esp->dma->revision == dvmahme)
+        if (esp->dmarev == dvmahme)
                 return;
 
         csr = dma_read32(DMA_CSR);
         if (!(csr & DMA_FIFO_ISDRAIN))
                 return;
 
-        if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
+        if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
                 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
 
         lim = 1000;
@@ -369,7 +377,7 @@ static void sbus_esp_dma_drain(struct esp *esp)
 
 static void sbus_esp_dma_invalidate(struct esp *esp)
 {
-        if (esp->dma->revision == dvmahme) {
+        if (esp->dmarev == dvmahme) {
                 dma_write32(DMA_RST_SCSI, DMA_CSR);
 
                 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
@@ -440,7 +448,7 @@ static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
         else
                 csr &= ~DMA_ST_WRITE;
         dma_write32(csr, DMA_CSR);
-        if (esp->dma->revision == dvmaesc1) {
+        if (esp->dmarev == dvmaesc1) {
                 u32 end = PAGE_ALIGN(addr + dma_count + 16U);
                 dma_write32(end - addr, DMA_COUNT);
         }
@@ -478,7 +486,7 @@ static const struct esp_driver_ops sbus_esp_ops = {
 
 static int __devinit esp_sbus_probe_one(struct device *dev,
                                         struct sbus_dev *esp_dev,
-                                        struct sbus_dev *espdma,
+                                        struct of_device *espdma,
                                         struct sbus_bus *sbus,
                                         int hme)
 {
@@ -503,7 +511,7 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
         if (hme)
                 esp->flags |= ESP_FLAG_WIDE_CAPABLE;
 
-        err = esp_sbus_find_dma(esp, espdma);
+        err = esp_sbus_setup_dma(esp, espdma);
         if (err < 0)
                 goto fail_unlink;
 
@@ -525,7 +533,7 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
          * come up with the reset bit set, so make sure that
          * is clear first.
          */
-        if (esp->dma->revision == dvmaesc1) {
+        if (esp->dmarev == dvmaesc1) {
                 u32 val = dma_read32(DMA_CSR);
 
                 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
@@ -556,26 +564,32 @@ fail:
 static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
 {
         struct sbus_dev *sdev = to_sbus_device(&dev->dev);
+        struct device_node *dma_node = NULL;
         struct device_node *dp = dev->node;
-        struct sbus_dev *dma_sdev = NULL;
+        struct of_device *dma_of = NULL;
         int hme = 0;
 
         if (dp->parent &&
             (!strcmp(dp->parent->name, "espdma") ||
              !strcmp(dp->parent->name, "dma")))
-                dma_sdev = sdev->parent;
+                dma_node = dp->parent;
         else if (!strcmp(dp->name, "SUNW,fas")) {
-                dma_sdev = sdev;
+                dma_node = sdev->ofdev.node;
                 hme = 1;
         }
+        if (dma_node)
+                dma_of = of_find_device_by_node(dma_node);
+        if (!dma_of)
+                return -ENODEV;
 
-        return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
+        return esp_sbus_probe_one(&dev->dev, sdev, dma_of,
                                   sdev->bus, hme);
 }
 
 static int __devexit esp_sbus_remove(struct of_device *dev)
 {
         struct esp *esp = dev_get_drvdata(&dev->dev);
+        struct of_device *dma_of = esp->dma;
         unsigned int irq = esp->host->irq;
         u32 val;
 
@@ -590,6 +604,8 @@ static int __devexit esp_sbus_remove(struct of_device *dev)
                                   esp->command_block,
                                   esp->command_block_dma);
         sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
+        of_iounmap(&dma_of->resource[0], esp->dma_regs,
+                   resource_size(&dma_of->resource[0]));
 
         scsi_host_put(esp->host);
 