author    David S. Miller <davem@sunset.davemloft.net>  2007-07-28 01:39:14 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-07-30 03:27:34 -0400
commit    ad7ad57c6127042c411353dddb723765964815db (patch)
tree      600484291d9cfa68d54dc9b230f5bd115f495213
parent    c7f439b99efbea74c70a5531f92566db5a6731f2 (diff)
[SPARC64]: Fix conflicts in SBUS/PCI/EBUS/ISA DMA handling.
Fully unify all of the DMA ops so that subordinate bus types to the
DMA operation providers (such as ebus, isa, of_device) can work
transparently.

Basically, we just make sure that for every system device we create,
the dev->archdata 'iommu' and 'stc' fields are filled in.

Then we have two platform variants of the DMA ops, one for SUN4U which
actually programs the real hardware, and one for SUN4V which makes
hypervisor calls.

This also fixes the crashes in parport_pc on sparc64, reported by
Meelis Roos.

Signed-off-by: David S. Miller <davem@davemloft.net>
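For readers skimming the diff, here is a minimal illustrative sketch (not taken
from the patch itself) of how the pieces fit together.  The archdata field
names, the dma_ops pointer and sun4u_dma_ops come from the code below; the
example_* helpers and the simplified wrapper are hypothetical stand-ins for the
real bus probe code and the include/asm-sparc64/dma-mapping.h wrappers.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Bus probe code (pci, ebus, isa) points each child device at its
 * parent's IOMMU and streaming-cache state, so the DMA ops never
 * need to know which bus type a device sits on.
 */
static void example_fill_archdata(struct device *dev, struct device *parent)
{
	dev->archdata.iommu = parent->archdata.iommu;
	dev->archdata.stc   = parent->archdata.stc;
}

/* The generic wrappers then dispatch through a single ops pointer,
 * which is set to &sun4u_dma_ops on bare metal or to the SUN4V
 * variant (hypervisor calls) at boot time.
 */
static dma_addr_t example_map_single(struct device *dev, void *ptr, size_t sz,
				     enum dma_data_direction dir)
{
	return dma_ops->map_single(dev, ptr, sz, dir);
}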
-rw-r--r--  arch/sparc64/kernel/Makefile                                       |    4
-rw-r--r--  arch/sparc64/kernel/ebus.c                                         |    2
-rw-r--r--  arch/sparc64/kernel/iommu.c (renamed from arch/sparc64/kernel/pci_iommu.c) |  304
-rw-r--r--  arch/sparc64/kernel/isa.c                                          |    2
-rw-r--r--  arch/sparc64/kernel/pci.c                                          |   62
-rw-r--r--  arch/sparc64/kernel/pci_fire.c                                     |   24
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c                                   |   32
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c                                    |   35
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c                                   |   42
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c                                    |  168
-rw-r--r--  arch/sparc64/kernel/sbus.c                                         |  568
-rw-r--r--  drivers/sbus/sbus.c                                                |    9
-rw-r--r--  include/asm-sparc/device.h                                         |    4
-rw-r--r--  include/asm-sparc64/dma-mapping.h                                  |  337
-rw-r--r--  include/asm-sparc64/iommu.h                                        |   11
-rw-r--r--  include/asm-sparc64/parport.h                                      |    2
-rw-r--r--  include/asm-sparc64/pci.h                                          |  152
-rw-r--r--  include/asm-sparc64/sbus.h                                         |   86
18 files changed, 620 insertions(+), 1224 deletions(-)
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index b66876bf410c..40d2f3aae91e 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,14 +8,14 @@ EXTRA_CFLAGS := -Werror
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		:= process.o setup.o cpu.o idprom.o \
-		   traps.o auxio.o una_asm.o sysfs.o \
+		   traps.o auxio.o una_asm.o sysfs.o iommu.o \
 		   irq.o ptrace.o time.o sys_sparc.o signal.o \
 		   unaligned.o central.o pci.o starfire.o semaphore.o \
 		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
 		   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
 
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
+obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o \
			    pci_psycho.o pci_sabre.o pci_schizo.o \
			    pci_sun4v.o pci_sun4v_asm.o pci_fire.o
 obj-$(CONFIG_SMP)	 += smp.o trampoline.o hvtramp.o
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 6d2956179cde..bc9ae36f7a43 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -391,6 +391,8 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
 	sd = &dev->ofdev.dev.archdata;
 	sd->prom_node = dp;
 	sd->op = &dev->ofdev;
+	sd->iommu = dev->bus->ofdev.dev.parent->archdata.iommu;
+	sd->stc = dev->bus->ofdev.dev.parent->archdata.stc;
 
 	dev->ofdev.node = dp;
 	dev->ofdev.dev.parent = &dev->bus->ofdev.dev;
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/iommu.c
index 70d2364fdfe0..b35a62167e9c 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -1,28 +1,32 @@
-/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+/* iommu.c: Generic sparc64 IOMMU support.
  *
  * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
 
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_PCI
 #include <linux/pci.h>
+#endif
 
-#include <asm/oplib.h>
+#include <asm/iommu.h>
 
 #include "iommu_common.h"
-#include "pci_impl.h"
 
-#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
+#define STC_CTXMATCH_ADDR(STC, CTX)	\
 	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
+#define STC_FLUSHFLAG_INIT(STC) \
+	(*((STC)->strbuf_flushflag) = 0UL)
+#define STC_FLUSHFLAG_SET(STC) \
+	(*((STC)->strbuf_flushflag) != 0UL)
 
-/* Accessing IOMMU and Streaming Buffer registers.
- * REG parameter is a physical address.  All registers
- * are 64-bits in size.
- */
-#define pci_iommu_read(__reg) \
+#define iommu_read(__reg) \
 ({	u64 __ret; \
 	__asm__ __volatile__("ldxa [%1] %2, %0" \
 			     : "=r" (__ret) \
@@ -30,7 +34,7 @@
30 : "memory"); \ 34 : "memory"); \
31 __ret; \ 35 __ret; \
32}) 36})
33#define pci_iommu_write(__reg, __val) \ 37#define iommu_write(__reg, __val) \
34 __asm__ __volatile__("stxa %0, [%1] %2" \ 38 __asm__ __volatile__("stxa %0, [%1] %2" \
35 : /* no outputs */ \ 39 : /* no outputs */ \
36 : "r" (__val), "r" (__reg), \ 40 : "r" (__val), "r" (__reg), \
@@ -40,19 +44,19 @@
 static void __iommu_flushall(struct iommu *iommu)
 {
 	if (iommu->iommu_flushinv) {
-		pci_iommu_write(iommu->iommu_flushinv, ~(u64)0);
+		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 	} else {
 		unsigned long tag;
 		int entry;
 
-		tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
+		tag = iommu->iommu_tags;
 		for (entry = 0; entry < 16; entry++) {
-			pci_iommu_write(tag, 0);
+			iommu_write(tag, 0);
 			tag += 8;
 		}
 
 		/* Ensure completion of previous PIO writes. */
-		(void) pci_iommu_read(iommu->write_complete_reg);
+		(void) iommu_read(iommu->write_complete_reg);
 	}
 }
 
@@ -80,7 +84,7 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 }
 
 /* Based largely upon the ppc64 iommu allocator. */
-static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
+static long arena_alloc(struct iommu *iommu, unsigned long npages)
 {
 	struct iommu_arena *arena = &iommu->arena;
 	unsigned long n, i, start, end, limit;
@@ -121,7 +125,7 @@ again:
 	return n;
 }
 
-static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
 	unsigned long i;
 
@@ -129,7 +133,8 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign
 		__clear_bit(i, arena->map);
 }
 
-void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+int iommu_table_init(struct iommu *iommu, int tsbsize,
+		     u32 dma_offset, u32 dma_addr_mask)
 {
 	unsigned long i, tsbbase, order, sz, num_tsb_entries;
 
@@ -146,8 +151,8 @@ void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32
 	sz = (sz + 7UL) & ~7UL;
 	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
 	if (!iommu->arena.map) {
-		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
-		prom_halt();
+		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
+		return -ENOMEM;
 	}
 	iommu->arena.limit = num_tsb_entries;
 
@@ -156,8 +161,8 @@ void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32
 	 */
 	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
 	if (!iommu->dummy_page) {
-		prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
+		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
+		goto out_free_map;
 	}
 	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
 	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
@@ -166,20 +171,32 @@ void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32
 	order = get_order(tsbsize);
 	tsbbase = __get_free_pages(GFP_KERNEL, order);
 	if (!tsbbase) {
-		prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
+		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
+		goto out_free_dummy_page;
 	}
 	iommu->page_table = (iopte_t *)tsbbase;
 
 	for (i = 0; i < num_tsb_entries; i++)
 		iopte_make_dummy(iommu, &iommu->page_table[i]);
+
+	return 0;
+
+out_free_dummy_page:
+	free_page(iommu->dummy_page);
+	iommu->dummy_page = 0UL;
+
+out_free_map:
+	kfree(iommu->arena.map);
+	iommu->arena.map = NULL;
+
+	return -ENOMEM;
 }
 
 static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
 	long entry;
 
-	entry = pci_arena_alloc(iommu, npages);
+	entry = arena_alloc(iommu, npages);
 	if (unlikely(entry < 0))
 		return NULL;
 
@@ -188,7 +205,7 @@ static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 
 static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
-	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
+	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 static int iommu_alloc_ctx(struct iommu *iommu)
@@ -219,11 +236,8 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 	}
 }
 
-/* Allocate and map kernel buffer of size SIZE using consistent mode
- * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
- * successful and set *DMA_ADDRP to the PCI side dma address.
- */
-static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
+static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
+				   dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct iommu *iommu;
 	iopte_t *iopte;
@@ -241,7 +255,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 		return NULL;
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
-	iommu = pdev->dev.archdata.iommu;
+	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -268,15 +282,15 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 	return ret;
 }
 
-/* Free and unmap a consistent DMA translation. */
-static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+static void dma_4u_free_coherent(struct device *dev, size_t size,
+				 void *cpu, dma_addr_t dvma)
 {
 	struct iommu *iommu;
 	iopte_t *iopte;
 	unsigned long flags, order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	iommu = pdev->dev.archdata.iommu;
+	iommu = dev->archdata.iommu;
 	iopte = iommu->page_table +
 		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
@@ -291,10 +305,8 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
 	free_pages((unsigned long)cpu, order);
 }
 
-/* Map a single buffer at PTR of SZ bytes for PCI DMA
- * in streaming mode.
- */
-static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
+				    enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -304,10 +316,10 @@ static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
 	u32 bus_addr, ret;
 	unsigned long iopte_protection;
 
-	iommu = pdev->dev.archdata.iommu;
-	strbuf = pdev->dev.archdata.stc;
+	iommu = dev->archdata.iommu;
+	strbuf = dev->archdata.stc;
 
-	if (unlikely(direction == PCI_DMA_NONE))
+	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
 	oaddr = (unsigned long)ptr;
@@ -332,7 +344,7 @@ static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
 		iopte_protection = IOPTE_CONSISTENT(ctx);
-	if (direction != PCI_DMA_TODEVICE)
+	if (direction != DMA_TO_DEVICE)
 		iopte_protection |= IOPTE_WRITE;
 
 	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
@@ -345,10 +357,12 @@ bad:
 bad_no_ctx:
 	if (printk_ratelimit())
 		WARN_ON(1);
-	return PCI_DMA_ERROR_CODE;
+	return DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
+			 u32 vaddr, unsigned long ctx, unsigned long npages,
+			 enum dma_data_direction direction)
 {
 	int limit;
 
@@ -358,22 +372,22 @@ static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vad
 		u64 val;
 
 		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
 
-		pci_iommu_write(flushreg, ctx);
-		val = pci_iommu_read(matchreg);
+		iommu_write(flushreg, ctx);
+		val = iommu_read(matchreg);
 		val &= 0xffff;
 		if (!val)
 			goto do_flush_sync;
 
 		while (val) {
 			if (val & 0x1)
-				pci_iommu_write(flushreg, ctx);
+				iommu_write(flushreg, ctx);
 			val >>= 1;
 		}
-		val = pci_iommu_read(matchreg);
+		val = iommu_read(matchreg);
 		if (unlikely(val)) {
-			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			printk(KERN_WARNING "strbuf_flush: ctx flush "
 			       "timeout matchreg[%lx] ctx[%lx]\n",
 			       val, ctx);
 			goto do_page_flush;
@@ -383,7 +397,7 @@ static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vad
 
 	do_page_flush:
 		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+			iommu_write(strbuf->strbuf_pflush, vaddr);
 	}
 
 do_flush_sync:
@@ -391,15 +405,15 @@ do_flush_sync:
 	 * the streaming cache, no flush-flag synchronization needs
 	 * to be performed.
 	 */
-	if (direction == PCI_DMA_TODEVICE)
+	if (direction == DMA_TO_DEVICE)
 		return;
 
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
+	STC_FLUSHFLAG_INIT(strbuf);
+	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) iommu_read(iommu->write_complete_reg);
 
 	limit = 100000;
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+	while (!STC_FLUSHFLAG_SET(strbuf)) {
 		limit--;
 		if (!limit)
 			break;
@@ -407,37 +421,32 @@ do_flush_sync:
 		rmb();
 	}
 	if (!limit)
-		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
 		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
 		       vaddr, ctx, npages);
 }
 
-/* Unmap a single streaming mode DMA translation. */
-static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
+				size_t sz, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, ctx, i;
 
-	if (unlikely(direction == PCI_DMA_NONE)) {
+	if (unlikely(direction == DMA_NONE)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
 		return;
 	}
 
-	iommu = pdev->dev.archdata.iommu;
-	strbuf = pdev->dev.archdata.stc;
+	iommu = dev->archdata.iommu;
+	strbuf = dev->archdata.stc;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	base = iommu->page_table +
 		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-#ifdef DEBUG_PCI_IOMMU
-	if (IOPTE_IS_DUMMY(iommu, base))
-		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
-		       bus_addr, sz, __builtin_return_address(0));
-#endif
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
@@ -449,8 +458,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
-				 npages, direction);
+		strbuf_flush(strbuf, iommu, bus_addr, ctx,
+			     npages, direction);
 
 	/* Step 2: Clear out TSB entries. */
 	for (i = 0; i < npages; i++)
@@ -467,7 +476,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 	(__pa(page_address((SG)->page)) + (SG)->offset)
 
 static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
-			   int nused, int nelems, unsigned long iopte_protection)
+			   int nused, int nelems,
+			   unsigned long iopte_protection)
 {
 	struct scatterlist *dma_sg = sg;
 	struct scatterlist *sg_end = sg + nelems;
@@ -539,12 +549,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 	}
 }
 
-/* Map a set of buffers described by SGLIST with NELEMS array
- * elements in streaming mode for PCI DMA.
- * When making changes here, inspect the assembly output. I was having
- * hard time to keep this routine out of using stack slots for holding variables.
- */
-static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
+			 int nelems, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -557,19 +563,20 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 	/* Fast path single entry scatterlists. */
 	if (nelems == 1) {
 		sglist->dma_address =
-			pci_4u_map_single(pdev,
-					  (page_address(sglist->page) + sglist->offset),
+			dma_4u_map_single(dev,
+					  (page_address(sglist->page) +
+					   sglist->offset),
 					  sglist->length, direction);
-		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
 			return 0;
 		sglist->dma_length = sglist->length;
 		return 1;
 	}
 
-	iommu = pdev->dev.archdata.iommu;
-	strbuf = pdev->dev.archdata.stc;
+	iommu = dev->archdata.iommu;
+	strbuf = dev->archdata.stc;
 
-	if (unlikely(direction == PCI_DMA_NONE))
+	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
 	/* Step 1: Prepare scatter list. */
@@ -609,7 +616,7 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
 		iopte_protection = IOPTE_CONSISTENT(ctx);
-	if (direction != PCI_DMA_TODEVICE)
+	if (direction != DMA_TO_DEVICE)
 		iopte_protection |= IOPTE_WRITE;
 
 	fill_sg(base, sglist, used, nelems, iopte_protection);
@@ -628,8 +635,8 @@ bad_no_ctx:
 	return 0;
 }
 
-/* Unmap a set of streaming mode DMA translations. */
-static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			    int nelems, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -637,14 +644,14 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 	unsigned long flags, ctx, i, npages;
 	u32 bus_addr;
 
-	if (unlikely(direction == PCI_DMA_NONE)) {
+	if (unlikely(direction == DMA_NONE)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
 	}
 
-	iommu = pdev->dev.archdata.iommu;
-	strbuf = pdev->dev.archdata.stc;
+	iommu = dev->archdata.iommu;
+	strbuf = dev->archdata.stc;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
 	for (i = 1; i < nelems; i++)
@@ -657,11 +664,6 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 	base = iommu->page_table +
 		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
-#ifdef DEBUG_PCI_IOMMU
-	if (IOPTE_IS_DUMMY(iommu, base))
-		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
-#endif
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
 	/* Record the context, if any. */
@@ -671,7 +673,7 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out the TSB entries. */
 	for (i = 0; i < npages; i++)
@@ -684,17 +686,16 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- */
-static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4u_sync_single_for_cpu(struct device *dev,
+				       dma_addr_t bus_addr, size_t sz,
+				       enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages;
 
-	iommu = pdev->dev.archdata.iommu;
-	strbuf = pdev->dev.archdata.stc;
+	iommu = dev->archdata.iommu;
+	strbuf = dev->archdata.stc;
 
 	if (!strbuf->strbuf_enabled)
 		return;
@@ -717,23 +718,22 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- */
-static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4u_sync_sg_for_cpu(struct device *dev,
+				   struct scatterlist *sglist, int nelems,
+				   enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages, i;
 	u32 bus_addr;
 
-	iommu = pdev->dev.archdata.iommu;
-	strbuf = pdev->dev.archdata.stc;
+	iommu = dev->archdata.iommu;
+	strbuf = dev->archdata.stc;
 
 	if (!strbuf->strbuf_enabled)
 		return;
@@ -759,65 +759,51 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
 		i--;
 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-const struct pci_iommu_ops pci_sun4u_iommu_ops = {
-	.alloc_consistent		= pci_4u_alloc_consistent,
-	.free_consistent		= pci_4u_free_consistent,
-	.map_single			= pci_4u_map_single,
-	.unmap_single			= pci_4u_unmap_single,
-	.map_sg				= pci_4u_map_sg,
-	.unmap_sg			= pci_4u_unmap_sg,
-	.dma_sync_single_for_cpu	= pci_4u_dma_sync_single_for_cpu,
-	.dma_sync_sg_for_cpu		= pci_4u_dma_sync_sg_for_cpu,
+const struct dma_ops sun4u_dma_ops = {
+	.alloc_coherent			= dma_4u_alloc_coherent,
+	.free_coherent			= dma_4u_free_coherent,
+	.map_single			= dma_4u_map_single,
+	.unmap_single			= dma_4u_unmap_single,
+	.map_sg				= dma_4u_map_sg,
+	.unmap_sg			= dma_4u_unmap_sg,
+	.sync_single_for_cpu		= dma_4u_sync_single_for_cpu,
+	.sync_sg_for_cpu		= dma_4u_sync_sg_for_cpu,
 };
 
-static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
-{
-	struct pci_dev *ali_isa_bridge;
-	u8 val;
+const struct dma_ops *dma_ops = &sun4u_dma_ops;
+EXPORT_SYMBOL(dma_ops);
 
-	/* ALI sound chips generate 31-bits of DMA, a special register
-	 * determines what bit 31 is emitted as.
-	 */
-	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
-					PCI_DEVICE_ID_AL_M1533,
-					NULL);
-
-	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
-	if (set_bit)
-		val |= 0x01;
-	else
-		val &= ~0x01;
-	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
-	pci_dev_put(ali_isa_bridge);
-}
-
-int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+int dma_supported(struct device *dev, u64 device_mask)
 {
-	u64 dma_addr_mask;
+	struct iommu *iommu = dev->archdata.iommu;
+	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (pdev == NULL) {
-		dma_addr_mask = 0xffffffff;
-	} else {
-		struct iommu *iommu = pdev->dev.archdata.iommu;
+	if (device_mask >= (1UL << 32UL))
+		return 0;
 
-		dma_addr_mask = iommu->dma_addr_mask;
+	if ((device_mask & dma_addr_mask) == dma_addr_mask)
+		return 1;
 
-		if (pdev->vendor == PCI_VENDOR_ID_AL &&
-		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
-		    device_mask == 0x7fffffff) {
-			ali_sound_dma_hack(pdev,
-					   (dma_addr_mask & 0x80000000) != 0);
-			return 1;
-		}
-	}
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_dma_supported(to_pci_dev(dev), device_mask);
+#endif
 
-	if (device_mask >= (1UL << 32UL))
-		return 0;
+	return 0;
+}
+EXPORT_SYMBOL(dma_supported);
 
-	return (device_mask & dma_addr_mask) == dma_addr_mask;
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EINVAL;
 }
+EXPORT_SYMBOL(dma_set_mask);
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 1a1043fcf97d..0f19dce1c905 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -90,6 +90,8 @@ static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
 	sd = &isa_dev->ofdev.dev.archdata;
 	sd->prom_node = dp;
 	sd->op = &isa_dev->ofdev;
+	sd->iommu = isa_br->ofdev.dev.parent->archdata.iommu;
+	sd->stc = isa_br->ofdev.dev.parent->archdata.stc;
 
 	isa_dev->ofdev.node = dp;
 	isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev;
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 77449a005752..3d93e9203ba2 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -283,12 +283,6 @@ int __init pcic_present(void)
 	return pci_controller_scan(pci_is_controller);
 }
 
-const struct pci_iommu_ops *pci_iommu_ops;
-EXPORT_SYMBOL(pci_iommu_ops);
-
-extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
-	pci_sun4v_iommu_ops;
-
 /* Find each controller in the system, attach and initialize
  * software state structure for each and link into the
  * pci_pbm_root.  Setup the controller enough such
@@ -296,11 +290,6 @@ extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
  */
 static void __init pci_controller_probe(void)
 {
-	if (tlb_type == hypervisor)
-		pci_iommu_ops = &pci_sun4v_iommu_ops;
-	else
-		pci_iommu_ops = &pci_sun4u_iommu_ops;
-
 	printk("PCI: Probing for controllers.\n");
 
 	pci_controller_scan(pci_controller_init);
@@ -406,6 +395,10 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 	sd->op = of_find_device_by_node(node);
 	sd->msi_num = 0xffffffff;
 
+	sd = &sd->op->dev.archdata;
+	sd->iommu = pbm->iommu;
+	sd->stc = &pbm->stc;
+
 	type = of_get_property(node, "device_type", NULL);
 	if (type == NULL)
 		type = "";
@@ -1226,4 +1219,51 @@ struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL(pci_device_to_OF_node);
 
+static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
+{
+	struct pci_dev *ali_isa_bridge;
+	u8 val;
+
+	/* ALI sound chips generate 31-bits of DMA, a special register
+	 * determines what bit 31 is emitted as.
+	 */
+	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
+					PCI_DEVICE_ID_AL_M1533,
+					NULL);
+
+	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
+	if (set_bit)
+		val |= 0x01;
+	else
+		val &= ~0x01;
+	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
+	pci_dev_put(ali_isa_bridge);
+}
+
+int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+{
+	u64 dma_addr_mask;
+
+	if (pdev == NULL) {
+		dma_addr_mask = 0xffffffff;
+	} else {
+		struct iommu *iommu = pdev->dev.archdata.iommu;
+
+		dma_addr_mask = iommu->dma_addr_mask;
+
+		if (pdev->vendor == PCI_VENDOR_ID_AL &&
+		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
+		    device_mask == 0x7fffffff) {
+			ali_sound_dma_hack(pdev,
+					   (dma_addr_mask & 0x80000000) != 0);
+			return 1;
+		}
+	}
+
+	if (device_mask >= (1UL << 32UL))
+		return 0;
+
+	return (device_mask & dma_addr_mask) == dma_addr_mask;
+}
+
 #endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 7f5d473901c4..14d67fe21ab2 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -39,12 +39,12 @@ static void pci_fire_scan_bus(struct pci_pbm_info *pbm)
 #define FIRE_IOMMU_FLUSH		0x40100UL
 #define FIRE_IOMMU_FLUSHINV		0x40108UL
 
-static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
+static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
 	struct iommu *iommu = pbm->iommu;
 	u32 vdma[2], dma_mask;
 	u64 control;
-	int tsbsize;
+	int tsbsize, err;
 
 	/* No virtual-dma property on these guys, use largest size. */
 	vdma[0] = 0xc0000000; /* base */
@@ -68,7 +68,9 @@ static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
 	 */
 	fire_write(iommu->iommu_flushinv, ~(u64)0);
 
-	pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+	if (err)
+		return err;
 
 	fire_write(iommu->iommu_tsbbase, __pa(iommu->page_table) | 0x7UL);
 
@@ -78,6 +80,8 @@ static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
 		  0x00000002 /* Bypass enable */		|
 		  0x00000001 /* Translation enable */);
 	fire_write(iommu->iommu_control, control);
+
+	return 0;
 }
 
 /* Based at pbm->controller_regs */
@@ -167,8 +171,8 @@ static void pci_fire_hw_init(struct pci_pbm_info *pbm)
 	fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
 }
 
-static void pci_fire_pbm_init(struct pci_controller_info *p,
-			      struct device_node *dp, u32 portid)
+static int pci_fire_pbm_init(struct pci_controller_info *p,
+			     struct device_node *dp, u32 portid)
 {
 	const struct linux_prom64_registers *regs;
 	struct pci_pbm_info *pbm;
@@ -203,7 +207,8 @@ static void pci_fire_pbm_init(struct pci_controller_info *p,
 	pci_get_pbm_props(pbm);
 
 	pci_fire_hw_init(pbm);
-	pci_fire_pbm_iommu_init(pbm);
+
+	return pci_fire_pbm_iommu_init(pbm);
 }
 
 static inline int portid_compare(u32 x, u32 y)
@@ -222,7 +227,8 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
 
 	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
 		if (portid_compare(pbm->portid, portid)) {
-			pci_fire_pbm_init(pbm->parent, dp, portid);
+			if (pci_fire_pbm_init(pbm->parent, dp, portid))
+				goto fatal_memory_error;
 			return;
 		}
 	}
@@ -250,7 +256,9 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
 	 */
 	pci_memspace_mask = 0x7fffffffUL;
 
-	pci_fire_pbm_init(p, dp, portid);
+	if (pci_fire_pbm_init(p, dp, portid))
+		goto fatal_memory_error;
+
 	return;
 
 fatal_memory_error:
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 598393a2df16..b6b4cfea5b5f 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -813,16 +813,19 @@ static void psycho_scan_bus(struct pci_pbm_info *pbm)
 	psycho_register_error_handlers(pbm);
 }
 
-static void psycho_iommu_init(struct pci_pbm_info *pbm)
+static int psycho_iommu_init(struct pci_pbm_info *pbm)
 {
 	struct iommu *iommu = pbm->iommu;
 	unsigned long i;
 	u64 control;
+	int err;
 
 	/* Register addresses. */
 	iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE;
 	iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH;
+	iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
+
 	/* PSYCHO's IOMMU lacks ctx flushing. */
 	iommu->iommu_ctxflush = 0;
 
@@ -845,7 +848,9 @@ static void psycho_iommu_init(struct pci_pbm_info *pbm)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
-	pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
+	err = iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
+	if (err)
+		return err;
 
 	psycho_write(pbm->controller_regs + PSYCHO_IOMMU_TSBBASE,
 		     __pa(iommu->page_table));
@@ -858,6 +863,8 @@ static void psycho_iommu_init(struct pci_pbm_info *pbm)
 	/* If necessary, hook us up for starfire IRQ translations. */
 	if (this_is_starfire)
 		starfire_hookup(pbm->portid);
+
+	return 0;
 }
 
 #define PSYCHO_IRQ_RETRY	0x1a00UL
@@ -1031,15 +1038,12 @@ void psycho_init(struct device_node *dp, char *model_name)
 	}
 
 	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
-	if (!p) {
-		prom_printf("PSYCHO: Fatal memory allocation error.\n");
-		prom_halt();
-	}
+	if (!p)
+		goto fatal_memory_error;
 	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
-	if (!iommu) {
-		prom_printf("PSYCHO: Fatal memory allocation error.\n");
-		prom_halt();
-	}
+	if (!iommu)
+		goto fatal_memory_error;
+
 	p->pbm_A.iommu = p->pbm_B.iommu = iommu;
 
 	p->pbm_A.portid = upa_portid;
@@ -1062,8 +1066,14 @@ void psycho_init(struct device_node *dp, char *model_name)
 
 	psycho_controller_hwinit(&p->pbm_A);
 
-	psycho_iommu_init(&p->pbm_A);
+	if (psycho_iommu_init(&p->pbm_A))
+		goto fatal_memory_error;
 
 	is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
 	psycho_pbm_init(p, dp, is_pbm_a);
+	return;
+
+fatal_memory_error:
+	prom_printf("PSYCHO: Fatal memory allocation error.\n");
+	prom_halt();
 }
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 22e1be5c7489..fba67c3d8809 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -672,18 +672,20 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm)
 	sabre_register_error_handlers(pbm);
 }
 
-static void sabre_iommu_init(struct pci_pbm_info *pbm,
+static int sabre_iommu_init(struct pci_pbm_info *pbm,
 			     int tsbsize, unsigned long dvma_offset,
 			     u32 dma_mask)
 {
 	struct iommu *iommu = pbm->iommu;
 	unsigned long i;
 	u64 control;
+	int err;
 
 	/* Register addresses. */
 	iommu->iommu_control = pbm->controller_regs + SABRE_IOMMU_CONTROL;
 	iommu->iommu_tsbbase = pbm->controller_regs + SABRE_IOMMU_TSBBASE;
 	iommu->iommu_flush = pbm->controller_regs + SABRE_IOMMU_FLUSH;
+	iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
 	iommu->write_complete_reg = pbm->controller_regs + SABRE_WRSYNC;
 	/* Sabre's IOMMU lacks ctx flushing. */
 	iommu->iommu_ctxflush = 0;
@@ -701,7 +703,10 @@ static void sabre_iommu_init(struct pci_pbm_info *pbm,
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
-	pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
+	err = iommu_table_init(iommu, tsbsize * 1024 * 8,
+			       dvma_offset, dma_mask);
+	if (err)
+		return err;
 
 	sabre_write(pbm->controller_regs + SABRE_IOMMU_TSBBASE,
 		    __pa(iommu->page_table));
@@ -722,6 +727,8 @@ static void sabre_iommu_init(struct pci_pbm_info *pbm,
 		break;
 	}
 	sabre_write(pbm->controller_regs + SABRE_IOMMU_CONTROL, control);
+
+	return 0;
 }
 
 static void sabre_pbm_init(struct pci_controller_info *p, struct pci_pbm_info *pbm, struct device_node *dp)
@@ -775,16 +782,12 @@ void sabre_init(struct device_node *dp, char *model_name)
 	}
 
 	p = kzalloc(sizeof(*p), GFP_ATOMIC);
-	if (!p) {
-		prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
-		prom_halt();
-	}
+	if (!p)
+		goto fatal_memory_error;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
-	if (!iommu) {
-		prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
-		prom_halt();
-	}
+	if (!iommu)
+		goto fatal_memory_error;
 	pbm = &p->pbm_A;
 	pbm->iommu = iommu;
 
@@ -847,10 +850,16 @@ void sabre_init(struct device_node *dp, char *model_name)
 		prom_halt();
 	}
 
-	sabre_iommu_init(pbm, tsbsize, vdma[0], dma_mask);
+	if (sabre_iommu_init(pbm, tsbsize, vdma[0], dma_mask))
+		goto fatal_memory_error;
 
 	/*
 	 * Look for APB underneath.
 	 */
 	sabre_pbm_init(p, pbm, dp);
+	return;
+
+fatal_memory_error:
+	prom_printf("SABRE: Fatal memory allocation error.\n");
+	prom_halt();
 }
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index ae76898bbe2b..3c30bfa1f3a3 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1148,14 +1148,14 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
 #define SCHIZO_IOMMU_FLUSH		(0x00210UL)
 #define SCHIZO_IOMMU_CTXFLUSH		(0x00218UL)
 
-static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
+static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
 	struct iommu *iommu = pbm->iommu;
 	unsigned long i, tagbase, database;
 	struct property *prop;
 	u32 vdma[2], dma_mask;
+	int tsbsize, err;
 	u64 control;
-	int tsbsize;
 
 	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
 	if (prop) {
@@ -1195,6 +1195,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
 	iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
+	iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
 	iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
 
 	/* We use the main control/status register of SCHIZO as the write
@@ -1219,7 +1220,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
-	pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+	if (err)
+		return err;
 
 	schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
 
@@ -1236,6 +1239,8 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	control |= SCHIZO_IOMMU_CTRL_ENAB;
 	schizo_write(iommu->iommu_control, control);
+
+	return 0;
 }
 
 #define SCHIZO_PCI_IRQ_RETRY		(0x1a00UL)
@@ -1328,14 +1333,14 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
 	}
 }
 
-static void schizo_pbm_init(struct pci_controller_info *p,
+static int schizo_pbm_init(struct pci_controller_info *p,
 			    struct device_node *dp, u32 portid,
 			    int chip_type)
 {
 	const struct linux_prom64_registers *regs;
 	struct pci_pbm_info *pbm;
 	const char *chipset_name;
-	int is_pbm_a;
+	int is_pbm_a, err;
 
 	switch (chip_type) {
 	case PBM_CHIP_TYPE_TOMATILLO:
@@ -1406,8 +1411,13 @@ static void schizo_pbm_init(struct pci_controller_info *p,
 
 	pci_get_pbm_props(pbm);
 
-	schizo_pbm_iommu_init(pbm);
+	err = schizo_pbm_iommu_init(pbm);
+	if (err)
+		return err;
+
 	schizo_pbm_strbuf_init(pbm);
+
+	return 0;
 }
 
 static inline int portid_compare(u32 x, u32 y, int chip_type)
@@ -1431,34 +1441,38 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
 
 	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
 		if (portid_compare(pbm->portid, portid, chip_type)) {
-			schizo_pbm_init(pbm->parent, dp, portid, chip_type);
+			if (schizo_pbm_init(pbm->parent, dp,
+					    portid, chip_type))
+				goto fatal_memory_error;
 			return;
 		}
 	}
 
 	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
 	if (!p)
-		goto memfail;
+		goto fatal_memory_error;
 
 	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
 	if (!iommu)
-		goto memfail;
+		goto fatal_memory_error;
 
 	p->pbm_A.iommu = iommu;
 
 	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
 	if (!iommu)
-		goto memfail;
+		goto fatal_memory_error;
 
 	p->pbm_B.iommu = iommu;
 
 	/* Like PSYCHO we have a 2GB aligned area for memory space. */
 	pci_memspace_mask = 0x7fffffffUL;
 
-	schizo_pbm_init(p, dp, portid, chip_type);
+	if (schizo_pbm_init(p, dp, portid, chip_type))
+		goto fatal_memory_error;
+
 	return;
 
-memfail:
+fatal_memory_error:
 	prom_printf("SCHIZO: Fatal memory allocation error.\n");
 	prom_halt();
 }
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 639cf06ca372..466f4aa8fc82 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -33,30 +33,30 @@ static unsigned long vpci_minor = 1;
 #define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
 
 struct iommu_batch {
-	struct pci_dev	*pdev;		/* Device mapping is for.	*/
+	struct device	*dev;		/* Device mapping is for.	*/
 	unsigned long	prot;		/* IOMMU page protections	*/
 	unsigned long	entry;		/* Index into IOTSB.		*/
 	u64		*pglist;	/* List of physical pages	*/
 	unsigned long	npages;		/* Number of pages in list.	*/
 };
 
-static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
 
 /* Interrupts must be disabled.  */
-static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
+static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
 {
-	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
-	p->pdev		= pdev;
+	p->dev		= dev;
 	p->prot		= prot;
 	p->entry	= entry;
 	p->npages	= 0;
 }
 
 /* Interrupts must be disabled.  */
-static long pci_iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p)
 {
-	struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
+	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
 	unsigned long devhandle = pbm->devhandle;
 	unsigned long prot = p->prot;
 	unsigned long entry = p->entry;
@@ -70,7 +70,7 @@ static long pci_iommu_batch_flush(struct iommu_batch *p)
 					  npages, prot, __pa(pglist));
 		if (unlikely(num < 0)) {
 			if (printk_ratelimit())
-				printk("pci_iommu_batch_flush: IOMMU map of "
+				printk("iommu_batch_flush: IOMMU map of "
 				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
 				       "status %ld\n",
 				       devhandle, HV_PCI_TSBID(0, entry),
@@ -90,30 +90,30 @@ static long pci_iommu_batch_flush(struct iommu_batch *p)
90} 90}
91 91
92/* Interrupts must be disabled. */ 92/* Interrupts must be disabled. */
93static inline long pci_iommu_batch_add(u64 phys_page) 93static inline long iommu_batch_add(u64 phys_page)
94{ 94{
95 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 95 struct iommu_batch *p = &__get_cpu_var(iommu_batch);
96 96
97 BUG_ON(p->npages >= PGLIST_NENTS); 97 BUG_ON(p->npages >= PGLIST_NENTS);
98 98
99 p->pglist[p->npages++] = phys_page; 99 p->pglist[p->npages++] = phys_page;
100 if (p->npages == PGLIST_NENTS) 100 if (p->npages == PGLIST_NENTS)
101 return pci_iommu_batch_flush(p); 101 return iommu_batch_flush(p);
102 102
103 return 0; 103 return 0;
104} 104}
105 105
106/* Interrupts must be disabled. */ 106/* Interrupts must be disabled. */
107static inline long pci_iommu_batch_end(void) 107static inline long iommu_batch_end(void)
108{ 108{
109 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 109 struct iommu_batch *p = &__get_cpu_var(iommu_batch);
110 110
111 BUG_ON(p->npages >= PGLIST_NENTS); 111 BUG_ON(p->npages >= PGLIST_NENTS);
112 112
113 return pci_iommu_batch_flush(p); 113 return iommu_batch_flush(p);
114} 114}
115 115
116static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages) 116static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
117{ 117{
118 unsigned long n, i, start, end, limit; 118 unsigned long n, i, start, end, limit;
119 int pass; 119 int pass;
@@ -152,7 +152,8 @@ again:
152 return n; 152 return n;
153} 153}
154 154
155static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) 155static void arena_free(struct iommu_arena *arena, unsigned long base,
156 unsigned long npages)
156{ 157{
157 unsigned long i; 158 unsigned long i;
158 159
@@ -160,7 +161,8 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign
160 __clear_bit(i, arena->map); 161 __clear_bit(i, arena->map);
161} 162}
162 163
163static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) 164static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
165 dma_addr_t *dma_addrp, gfp_t gfp)
164{ 166{
165 struct iommu *iommu; 167 struct iommu *iommu;
166 unsigned long flags, order, first_page, npages, n; 168 unsigned long flags, order, first_page, npages, n;
@@ -180,10 +182,10 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
180 182
181 memset((char *)first_page, 0, PAGE_SIZE << order); 183 memset((char *)first_page, 0, PAGE_SIZE << order);
182 184
183 iommu = pdev->dev.archdata.iommu; 185 iommu = dev->archdata.iommu;
184 186
185 spin_lock_irqsave(&iommu->lock, flags); 187 spin_lock_irqsave(&iommu->lock, flags);
186 entry = pci_arena_alloc(&iommu->arena, npages); 188 entry = arena_alloc(&iommu->arena, npages);
187 spin_unlock_irqrestore(&iommu->lock, flags); 189 spin_unlock_irqrestore(&iommu->lock, flags);
188 190
189 if (unlikely(entry < 0L)) 191 if (unlikely(entry < 0L))
@@ -196,18 +198,18 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
196 198
197 local_irq_save(flags); 199 local_irq_save(flags);
198 200
199 pci_iommu_batch_start(pdev, 201 iommu_batch_start(dev,
200 (HV_PCI_MAP_ATTR_READ | 202 (HV_PCI_MAP_ATTR_READ |
201 HV_PCI_MAP_ATTR_WRITE), 203 HV_PCI_MAP_ATTR_WRITE),
202 entry); 204 entry);
203 205
204 for (n = 0; n < npages; n++) { 206 for (n = 0; n < npages; n++) {
205 long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); 207 long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
206 if (unlikely(err < 0L)) 208 if (unlikely(err < 0L))
207 goto iommu_map_fail; 209 goto iommu_map_fail;
208 } 210 }
209 211
210 if (unlikely(pci_iommu_batch_end() < 0L)) 212 if (unlikely(iommu_batch_end() < 0L))
211 goto iommu_map_fail; 213 goto iommu_map_fail;
212 214
213 local_irq_restore(flags); 215 local_irq_restore(flags);
@@ -217,7 +219,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
217iommu_map_fail: 219iommu_map_fail:
218 /* Interrupts are disabled. */ 220 /* Interrupts are disabled. */
219 spin_lock(&iommu->lock); 221 spin_lock(&iommu->lock);
220 pci_arena_free(&iommu->arena, entry, npages); 222 arena_free(&iommu->arena, entry, npages);
221 spin_unlock_irqrestore(&iommu->lock, flags); 223 spin_unlock_irqrestore(&iommu->lock, flags);
222 224
223arena_alloc_fail: 225arena_alloc_fail:
@@ -225,7 +227,8 @@ arena_alloc_fail:
225 return NULL; 227 return NULL;
226} 228}
227 229
228static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) 230static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
231 dma_addr_t dvma)
229{ 232{
230 struct pci_pbm_info *pbm; 233 struct pci_pbm_info *pbm;
231 struct iommu *iommu; 234 struct iommu *iommu;
@@ -233,14 +236,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
233 u32 devhandle; 236 u32 devhandle;
234 237
235 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 238 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
236 iommu = pdev->dev.archdata.iommu; 239 iommu = dev->archdata.iommu;
237 pbm = pdev->dev.archdata.host_controller; 240 pbm = dev->archdata.host_controller;
238 devhandle = pbm->devhandle; 241 devhandle = pbm->devhandle;
239 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 242 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
240 243
241 spin_lock_irqsave(&iommu->lock, flags); 244 spin_lock_irqsave(&iommu->lock, flags);
242 245
243 pci_arena_free(&iommu->arena, entry, npages); 246 arena_free(&iommu->arena, entry, npages);
244 247
245 do { 248 do {
246 unsigned long num; 249 unsigned long num;
@@ -258,7 +261,8 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
258 free_pages((unsigned long)cpu, order); 261 free_pages((unsigned long)cpu, order);
259} 262}
260 263
261static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) 264static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
265 enum dma_data_direction direction)
262{ 266{
263 struct iommu *iommu; 267 struct iommu *iommu;
264 unsigned long flags, npages, oaddr; 268 unsigned long flags, npages, oaddr;
@@ -267,9 +271,9 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
267 unsigned long prot; 271 unsigned long prot;
268 long entry; 272 long entry;
269 273
270 iommu = pdev->dev.archdata.iommu; 274 iommu = dev->archdata.iommu;
271 275
272 if (unlikely(direction == PCI_DMA_NONE)) 276 if (unlikely(direction == DMA_NONE))
273 goto bad; 277 goto bad;
274 278
275 oaddr = (unsigned long)ptr; 279 oaddr = (unsigned long)ptr;
@@ -277,7 +281,7 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
277 npages >>= IO_PAGE_SHIFT; 281 npages >>= IO_PAGE_SHIFT;
278 282
279 spin_lock_irqsave(&iommu->lock, flags); 283 spin_lock_irqsave(&iommu->lock, flags);
280 entry = pci_arena_alloc(&iommu->arena, npages); 284 entry = arena_alloc(&iommu->arena, npages);
281 spin_unlock_irqrestore(&iommu->lock, flags); 285 spin_unlock_irqrestore(&iommu->lock, flags);
282 286
283 if (unlikely(entry < 0L)) 287 if (unlikely(entry < 0L))
@@ -288,19 +292,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
288 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 292 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
289 base_paddr = __pa(oaddr & IO_PAGE_MASK); 293 base_paddr = __pa(oaddr & IO_PAGE_MASK);
290 prot = HV_PCI_MAP_ATTR_READ; 294 prot = HV_PCI_MAP_ATTR_READ;
291 if (direction != PCI_DMA_TODEVICE) 295 if (direction != DMA_TO_DEVICE)
292 prot |= HV_PCI_MAP_ATTR_WRITE; 296 prot |= HV_PCI_MAP_ATTR_WRITE;
293 297
294 local_irq_save(flags); 298 local_irq_save(flags);
295 299
296 pci_iommu_batch_start(pdev, prot, entry); 300 iommu_batch_start(dev, prot, entry);
297 301
298 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { 302 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
299 long err = pci_iommu_batch_add(base_paddr); 303 long err = iommu_batch_add(base_paddr);
300 if (unlikely(err < 0L)) 304 if (unlikely(err < 0L))
301 goto iommu_map_fail; 305 goto iommu_map_fail;
302 } 306 }
303 if (unlikely(pci_iommu_batch_end() < 0L)) 307 if (unlikely(iommu_batch_end() < 0L))
304 goto iommu_map_fail; 308 goto iommu_map_fail;
305 309
306 local_irq_restore(flags); 310 local_irq_restore(flags);
@@ -310,18 +314,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
310bad: 314bad:
311 if (printk_ratelimit()) 315 if (printk_ratelimit())
312 WARN_ON(1); 316 WARN_ON(1);
313 return PCI_DMA_ERROR_CODE; 317 return DMA_ERROR_CODE;
314 318
315iommu_map_fail: 319iommu_map_fail:
316 /* Interrupts are disabled. */ 320 /* Interrupts are disabled. */
317 spin_lock(&iommu->lock); 321 spin_lock(&iommu->lock);
318 pci_arena_free(&iommu->arena, entry, npages); 322 arena_free(&iommu->arena, entry, npages);
319 spin_unlock_irqrestore(&iommu->lock, flags); 323 spin_unlock_irqrestore(&iommu->lock, flags);
320 324
321 return PCI_DMA_ERROR_CODE; 325 return DMA_ERROR_CODE;
322} 326}
323 327
324static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 328static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
329 size_t sz, enum dma_data_direction direction)
325{ 330{
326 struct pci_pbm_info *pbm; 331 struct pci_pbm_info *pbm;
327 struct iommu *iommu; 332 struct iommu *iommu;
@@ -329,14 +334,14 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
329 long entry; 334 long entry;
330 u32 devhandle; 335 u32 devhandle;
331 336
332 if (unlikely(direction == PCI_DMA_NONE)) { 337 if (unlikely(direction == DMA_NONE)) {
333 if (printk_ratelimit()) 338 if (printk_ratelimit())
334 WARN_ON(1); 339 WARN_ON(1);
335 return; 340 return;
336 } 341 }
337 342
338 iommu = pdev->dev.archdata.iommu; 343 iommu = dev->archdata.iommu;
339 pbm = pdev->dev.archdata.host_controller; 344 pbm = dev->archdata.host_controller;
340 devhandle = pbm->devhandle; 345 devhandle = pbm->devhandle;
341 346
342 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 347 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
@@ -346,7 +351,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
346 spin_lock_irqsave(&iommu->lock, flags); 351 spin_lock_irqsave(&iommu->lock, flags);
347 352
348 entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; 353 entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
349 pci_arena_free(&iommu->arena, entry, npages); 354 arena_free(&iommu->arena, entry, npages);
350 355
351 do { 356 do {
352 unsigned long num; 357 unsigned long num;
@@ -363,7 +368,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
363#define SG_ENT_PHYS_ADDRESS(SG) \ 368#define SG_ENT_PHYS_ADDRESS(SG) \
364 (__pa(page_address((SG)->page)) + (SG)->offset) 369 (__pa(page_address((SG)->page)) + (SG)->offset)
365 370
366static inline long fill_sg(long entry, struct pci_dev *pdev, 371static inline long fill_sg(long entry, struct device *dev,
367 struct scatterlist *sg, 372 struct scatterlist *sg,
368 int nused, int nelems, unsigned long prot) 373 int nused, int nelems, unsigned long prot)
369{ 374{
@@ -374,7 +379,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
374 379
375 local_irq_save(flags); 380 local_irq_save(flags);
376 381
377 pci_iommu_batch_start(pdev, prot, entry); 382 iommu_batch_start(dev, prot, entry);
378 383
379 for (i = 0; i < nused; i++) { 384 for (i = 0; i < nused; i++) {
380 unsigned long pteval = ~0UL; 385 unsigned long pteval = ~0UL;
@@ -415,7 +420,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
415 while (len > 0) { 420 while (len > 0) {
416 long err; 421 long err;
417 422
418 err = pci_iommu_batch_add(pteval); 423 err = iommu_batch_add(pteval);
419 if (unlikely(err < 0L)) 424 if (unlikely(err < 0L))
420 goto iommu_map_failed; 425 goto iommu_map_failed;
421 426
@@ -446,7 +451,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
446 dma_sg++; 451 dma_sg++;
447 } 452 }
448 453
449 if (unlikely(pci_iommu_batch_end() < 0L)) 454 if (unlikely(iommu_batch_end() < 0L))
450 goto iommu_map_failed; 455 goto iommu_map_failed;
451 456
452 local_irq_restore(flags); 457 local_irq_restore(flags);
@@ -457,7 +462,8 @@ iommu_map_failed:
457 return -1L; 462 return -1L;
458} 463}
459 464
460static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 465static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
466 int nelems, enum dma_data_direction direction)
461{ 467{
462 struct iommu *iommu; 468 struct iommu *iommu;
463 unsigned long flags, npages, prot; 469 unsigned long flags, npages, prot;
@@ -469,18 +475,19 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
469 /* Fast path single entry scatterlists. */ 475 /* Fast path single entry scatterlists. */
470 if (nelems == 1) { 476 if (nelems == 1) {
471 sglist->dma_address = 477 sglist->dma_address =
472 pci_4v_map_single(pdev, 478 dma_4v_map_single(dev,
473 (page_address(sglist->page) + sglist->offset), 479 (page_address(sglist->page) +
480 sglist->offset),
474 sglist->length, direction); 481 sglist->length, direction);
475 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) 482 if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
476 return 0; 483 return 0;
477 sglist->dma_length = sglist->length; 484 sglist->dma_length = sglist->length;
478 return 1; 485 return 1;
479 } 486 }
480 487
481 iommu = pdev->dev.archdata.iommu; 488 iommu = dev->archdata.iommu;
482 489
483 if (unlikely(direction == PCI_DMA_NONE)) 490 if (unlikely(direction == DMA_NONE))
484 goto bad; 491 goto bad;
485 492
486 /* Step 1: Prepare scatter list. */ 493 /* Step 1: Prepare scatter list. */
@@ -488,7 +495,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
488 495
489 /* Step 2: Allocate a cluster and context, if necessary. */ 496 /* Step 2: Allocate a cluster and context, if necessary. */
490 spin_lock_irqsave(&iommu->lock, flags); 497 spin_lock_irqsave(&iommu->lock, flags);
491 entry = pci_arena_alloc(&iommu->arena, npages); 498 entry = arena_alloc(&iommu->arena, npages);
492 spin_unlock_irqrestore(&iommu->lock, flags); 499 spin_unlock_irqrestore(&iommu->lock, flags);
493 500
494 if (unlikely(entry < 0L)) 501 if (unlikely(entry < 0L))
@@ -510,10 +517,10 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
510 517
511 /* Step 4: Create the mappings. */ 518 /* Step 4: Create the mappings. */
512 prot = HV_PCI_MAP_ATTR_READ; 519 prot = HV_PCI_MAP_ATTR_READ;
513 if (direction != PCI_DMA_TODEVICE) 520 if (direction != DMA_TO_DEVICE)
514 prot |= HV_PCI_MAP_ATTR_WRITE; 521 prot |= HV_PCI_MAP_ATTR_WRITE;
515 522
516 err = fill_sg(entry, pdev, sglist, used, nelems, prot); 523 err = fill_sg(entry, dev, sglist, used, nelems, prot);
517 if (unlikely(err < 0L)) 524 if (unlikely(err < 0L))
518 goto iommu_map_failed; 525 goto iommu_map_failed;
519 526
@@ -526,13 +533,14 @@ bad:
526 533
527iommu_map_failed: 534iommu_map_failed:
528 spin_lock_irqsave(&iommu->lock, flags); 535 spin_lock_irqsave(&iommu->lock, flags);
529 pci_arena_free(&iommu->arena, entry, npages); 536 arena_free(&iommu->arena, entry, npages);
530 spin_unlock_irqrestore(&iommu->lock, flags); 537 spin_unlock_irqrestore(&iommu->lock, flags);
531 538
532 return 0; 539 return 0;
533} 540}
534 541
535static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 542static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
543 int nelems, enum dma_data_direction direction)
536{ 544{
537 struct pci_pbm_info *pbm; 545 struct pci_pbm_info *pbm;
538 struct iommu *iommu; 546 struct iommu *iommu;
@@ -540,13 +548,13 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
540 long entry; 548 long entry;
541 u32 devhandle, bus_addr; 549 u32 devhandle, bus_addr;
542 550
543 if (unlikely(direction == PCI_DMA_NONE)) { 551 if (unlikely(direction == DMA_NONE)) {
544 if (printk_ratelimit()) 552 if (printk_ratelimit())
545 WARN_ON(1); 553 WARN_ON(1);
546 } 554 }
547 555
548 iommu = pdev->dev.archdata.iommu; 556 iommu = dev->archdata.iommu;
549 pbm = pdev->dev.archdata.host_controller; 557 pbm = dev->archdata.host_controller;
550 devhandle = pbm->devhandle; 558 devhandle = pbm->devhandle;
551 559
552 bus_addr = sglist->dma_address & IO_PAGE_MASK; 560 bus_addr = sglist->dma_address & IO_PAGE_MASK;
@@ -562,7 +570,7 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
562 570
563 spin_lock_irqsave(&iommu->lock, flags); 571 spin_lock_irqsave(&iommu->lock, flags);
564 572
565 pci_arena_free(&iommu->arena, entry, npages); 573 arena_free(&iommu->arena, entry, npages);
566 574
567 do { 575 do {
568 unsigned long num; 576 unsigned long num;
@@ -576,25 +584,29 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
576 spin_unlock_irqrestore(&iommu->lock, flags); 584 spin_unlock_irqrestore(&iommu->lock, flags);
577} 585}
578 586
579static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 587static void dma_4v_sync_single_for_cpu(struct device *dev,
588 dma_addr_t bus_addr, size_t sz,
589 enum dma_data_direction direction)
580{ 590{
581 /* Nothing to do... */ 591 /* Nothing to do... */
582} 592}
583 593
584static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 594static void dma_4v_sync_sg_for_cpu(struct device *dev,
595 struct scatterlist *sglist, int nelems,
596 enum dma_data_direction direction)
585{ 597{
586 /* Nothing to do... */ 598 /* Nothing to do... */
587} 599}
588 600
589const struct pci_iommu_ops pci_sun4v_iommu_ops = { 601const struct dma_ops sun4v_dma_ops = {
590 .alloc_consistent = pci_4v_alloc_consistent, 602 .alloc_coherent = dma_4v_alloc_coherent,
591 .free_consistent = pci_4v_free_consistent, 603 .free_coherent = dma_4v_free_coherent,
592 .map_single = pci_4v_map_single, 604 .map_single = dma_4v_map_single,
593 .unmap_single = pci_4v_unmap_single, 605 .unmap_single = dma_4v_unmap_single,
594 .map_sg = pci_4v_map_sg, 606 .map_sg = dma_4v_map_sg,
595 .unmap_sg = pci_4v_unmap_sg, 607 .unmap_sg = dma_4v_unmap_sg,
596 .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu, 608 .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
597 .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, 609 .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
598}; 610};
599 611
600static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm) 612static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
@@ -1186,6 +1198,8 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
1186 } 1198 }
1187 printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n", 1199 printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
1188 vpci_major, vpci_minor); 1200 vpci_major, vpci_minor);
1201
1202 dma_ops = &sun4v_dma_ops;
1189 } 1203 }
1190 1204
1191 prop = of_find_property(dp, "reg", NULL); 1205 prop = of_find_property(dp, "reg", NULL);
@@ -1206,7 +1220,7 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
1206 if (!page) 1220 if (!page)
1207 goto fatal_memory_error; 1221 goto fatal_memory_error;
1208 1222
1209 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; 1223 per_cpu(iommu_batch, i).pglist = (u64 *) page;
1210 } 1224 }
1211 1225
1212 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1226 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
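[Annotation, not part of the patch] The per-cpu batching that the renamed helpers above implement is unchanged: callers queue physical page addresses into the cpu-local iommu_batch, and the whole list is handed to the hypervisor in one map call either when PGLIST_NENTS entries accumulate or when the caller finishes, which is why every path runs with interrupts disabled. A condensed sketch of the calling pattern shared by dma_4v_alloc_coherent(), dma_4v_map_single() and fill_sg() -- helper names are from the patch; prot, entry, base_paddr and npages are assumed to have been computed by the caller as in dma_4v_map_single():

	unsigned long flags;
	long i, err;

	local_irq_save(flags);			/* iommu_batch is per-cpu state */

	iommu_batch_start(dev, prot, entry);	/* device, HV_PCI_MAP_ATTR_* bits, IOTSB slot */

	for (i = 0; i < npages; i++) {
		/* Queue one page; the batch flushes to the hypervisor when the pglist fills. */
		err = iommu_batch_add(base_paddr + (i * IO_PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	err = iommu_batch_end();		/* flush whatever is still queued */
	if (unlikely(err < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);
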
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index a1fd9bcc0b87..d1fb13ba02b5 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -26,11 +26,6 @@
26 26
27#define MAP_BASE ((u32)0xc0000000) 27#define MAP_BASE ((u32)0xc0000000)
28 28
29struct sbus_info {
30 struct iommu iommu;
31 struct strbuf strbuf;
32};
33
34/* Offsets from iommu_regs */ 29/* Offsets from iommu_regs */
35#define SYSIO_IOMMUREG_BASE 0x2400UL 30#define SYSIO_IOMMUREG_BASE 0x2400UL
36#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */ 31#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
@@ -44,19 +39,6 @@ struct sbus_info {
44 39
45#define IOMMU_DRAM_VALID (1UL << 30UL) 40#define IOMMU_DRAM_VALID (1UL << 30UL)
46 41
47static void __iommu_flushall(struct iommu *iommu)
48{
49 unsigned long tag;
50 int entry;
51
52 tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
53 for (entry = 0; entry < 16; entry++) {
54 upa_writeq(0, tag);
55 tag += 8UL;
56 }
57 upa_readq(iommu->write_complete_reg);
58}
59
60/* Offsets from strbuf_regs */ 42/* Offsets from strbuf_regs */
61#define SYSIO_STRBUFREG_BASE 0x2800UL 43#define SYSIO_STRBUFREG_BASE 0x2800UL
62#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */ 44#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
@@ -69,511 +51,10 @@ static void __iommu_flushall(struct iommu *iommu)
69 51
70#define STRBUF_TAG_VALID 0x02UL 52#define STRBUF_TAG_VALID 0x02UL
71 53
72static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
73{
74 unsigned long n;
75 int limit;
76
77 n = npages;
78 while (n--)
79 upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
80
81 /* If the device could not have possibly put dirty data into
82 * the streaming cache, no flush-flag synchronization needs
83 * to be performed.
84 */
85 if (direction == SBUS_DMA_TODEVICE)
86 return;
87
88 *(strbuf->strbuf_flushflag) = 0UL;
89
90 /* Whoopee cushion! */
91 upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
92 upa_readq(iommu->write_complete_reg);
93
94 limit = 100000;
95 while (*(strbuf->strbuf_flushflag) == 0UL) {
96 limit--;
97 if (!limit)
98 break;
99 udelay(1);
100 rmb();
101 }
102 if (!limit)
103 printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
104 "vaddr[%08x] npages[%ld]\n",
105 base, npages);
106}
107
108/* Based largely upon the ppc64 iommu allocator. */
109static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
110{
111 struct iommu_arena *arena = &iommu->arena;
112 unsigned long n, i, start, end, limit;
113 int pass;
114
115 limit = arena->limit;
116 start = arena->hint;
117 pass = 0;
118
119again:
120 n = find_next_zero_bit(arena->map, limit, start);
121 end = n + npages;
122 if (unlikely(end >= limit)) {
123 if (likely(pass < 1)) {
124 limit = start;
125 start = 0;
126 __iommu_flushall(iommu);
127 pass++;
128 goto again;
129 } else {
130 /* Scanned the whole thing, give up. */
131 return -1;
132 }
133 }
134
135 for (i = n; i < end; i++) {
136 if (test_bit(i, arena->map)) {
137 start = i + 1;
138 goto again;
139 }
140 }
141
142 for (i = n; i < end; i++)
143 __set_bit(i, arena->map);
144
145 arena->hint = end;
146
147 return n;
148}
149
150static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
151{
152 unsigned long i;
153
154 for (i = base; i < (base + npages); i++)
155 __clear_bit(i, arena->map);
156}
157
158static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
159{
160 unsigned long tsbbase, order, sz, num_tsb_entries;
161
162 num_tsb_entries = tsbsize / sizeof(iopte_t);
163
164 /* Setup initial software IOMMU state. */
165 spin_lock_init(&iommu->lock);
166 iommu->page_table_map_base = MAP_BASE;
167
168 /* Allocate and initialize the free area map. */
169 sz = num_tsb_entries / 8;
170 sz = (sz + 7UL) & ~7UL;
171 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
172 if (!iommu->arena.map) {
173 prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
174 prom_halt();
175 }
176 iommu->arena.limit = num_tsb_entries;
177
178 /* Now allocate and setup the IOMMU page table itself. */
179 order = get_order(tsbsize);
180 tsbbase = __get_free_pages(GFP_KERNEL, order);
181 if (!tsbbase) {
182 prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
183 prom_halt();
184 }
185 iommu->page_table = (iopte_t *)tsbbase;
186 memset(iommu->page_table, 0, tsbsize);
187}
188
189static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
190{
191 long entry;
192
193 entry = sbus_arena_alloc(iommu, npages);
194 if (unlikely(entry < 0))
195 return NULL;
196
197 return iommu->page_table + entry;
198}
199
200static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
201{
202 sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
203}
204
205void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
206{
207 struct sbus_info *info;
208 struct iommu *iommu;
209 iopte_t *iopte;
210 unsigned long flags, order, first_page;
211 void *ret;
212 int npages;
213
214 size = IO_PAGE_ALIGN(size);
215 order = get_order(size);
216 if (order >= 10)
217 return NULL;
218
219 first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
220 if (first_page == 0UL)
221 return NULL;
222 memset((char *)first_page, 0, PAGE_SIZE << order);
223
224 info = sdev->bus->iommu;
225 iommu = &info->iommu;
226
227 spin_lock_irqsave(&iommu->lock, flags);
228 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
229 spin_unlock_irqrestore(&iommu->lock, flags);
230
231 if (unlikely(iopte == NULL)) {
232 free_pages(first_page, order);
233 return NULL;
234 }
235
236 *dvma_addr = (iommu->page_table_map_base +
237 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
238 ret = (void *) first_page;
239 npages = size >> IO_PAGE_SHIFT;
240 first_page = __pa(first_page);
241 while (npages--) {
242 iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
243 IOPTE_WRITE |
244 (first_page & IOPTE_PAGE));
245 iopte++;
246 first_page += IO_PAGE_SIZE;
247 }
248
249 return ret;
250}
251
252void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
253{
254 struct sbus_info *info;
255 struct iommu *iommu;
256 iopte_t *iopte;
257 unsigned long flags, order, npages;
258
259 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
260 info = sdev->bus->iommu;
261 iommu = &info->iommu;
262 iopte = iommu->page_table +
263 ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
264
265 spin_lock_irqsave(&iommu->lock, flags);
266
267 free_npages(iommu, dvma - iommu->page_table_map_base, npages);
268
269 spin_unlock_irqrestore(&iommu->lock, flags);
270
271 order = get_order(size);
272 if (order < 10)
273 free_pages((unsigned long)cpu, order);
274}
275
276dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
277{
278 struct sbus_info *info;
279 struct iommu *iommu;
280 iopte_t *base;
281 unsigned long flags, npages, oaddr;
282 unsigned long i, base_paddr;
283 u32 bus_addr, ret;
284 unsigned long iopte_protection;
285
286 info = sdev->bus->iommu;
287 iommu = &info->iommu;
288
289 if (unlikely(direction == SBUS_DMA_NONE))
290 BUG();
291
292 oaddr = (unsigned long)ptr;
293 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
294 npages >>= IO_PAGE_SHIFT;
295
296 spin_lock_irqsave(&iommu->lock, flags);
297 base = alloc_npages(iommu, npages);
298 spin_unlock_irqrestore(&iommu->lock, flags);
299
300 if (unlikely(!base))
301 BUG();
302
303 bus_addr = (iommu->page_table_map_base +
304 ((base - iommu->page_table) << IO_PAGE_SHIFT));
305 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
306 base_paddr = __pa(oaddr & IO_PAGE_MASK);
307
308 iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
309 if (direction != SBUS_DMA_TODEVICE)
310 iopte_protection |= IOPTE_WRITE;
311
312 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
313 iopte_val(*base) = iopte_protection | base_paddr;
314
315 return ret;
316}
317
318void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
319{
320 struct sbus_info *info = sdev->bus->iommu;
321 struct iommu *iommu = &info->iommu;
322 struct strbuf *strbuf = &info->strbuf;
323 iopte_t *base;
324 unsigned long flags, npages, i;
325
326 if (unlikely(direction == SBUS_DMA_NONE))
327 BUG();
328
329 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
330 npages >>= IO_PAGE_SHIFT;
331 base = iommu->page_table +
332 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
333
334 bus_addr &= IO_PAGE_MASK;
335
336 spin_lock_irqsave(&iommu->lock, flags);
337 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
338 for (i = 0; i < npages; i++)
339 iopte_val(base[i]) = 0UL;
340 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
341 spin_unlock_irqrestore(&iommu->lock, flags);
342}
343
344#define SG_ENT_PHYS_ADDRESS(SG) \
345 (__pa(page_address((SG)->page)) + (SG)->offset)
346
347static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
348 int nused, int nelems, unsigned long iopte_protection)
349{
350 struct scatterlist *dma_sg = sg;
351 struct scatterlist *sg_end = sg + nelems;
352 int i;
353
354 for (i = 0; i < nused; i++) {
355 unsigned long pteval = ~0UL;
356 u32 dma_npages;
357
358 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
359 dma_sg->dma_length +
360 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
361 do {
362 unsigned long offset;
363 signed int len;
364
365 /* If we are here, we know we have at least one
366 * more page to map. So walk forward until we
367 * hit a page crossing, and begin creating new
368 * mappings from that spot.
369 */
370 for (;;) {
371 unsigned long tmp;
372
373 tmp = SG_ENT_PHYS_ADDRESS(sg);
374 len = sg->length;
375 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
376 pteval = tmp & IO_PAGE_MASK;
377 offset = tmp & (IO_PAGE_SIZE - 1UL);
378 break;
379 }
380 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
381 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
382 offset = 0UL;
383 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
384 break;
385 }
386 sg++;
387 }
388
389 pteval = iopte_protection | (pteval & IOPTE_PAGE);
390 while (len > 0) {
391 *iopte++ = __iopte(pteval);
392 pteval += IO_PAGE_SIZE;
393 len -= (IO_PAGE_SIZE - offset);
394 offset = 0;
395 dma_npages--;
396 }
397
398 pteval = (pteval & IOPTE_PAGE) + len;
399 sg++;
400
401 /* Skip over any tail mappings we've fully mapped,
402 * adjusting pteval along the way. Stop when we
403 * detect a page crossing event.
404 */
405 while (sg < sg_end &&
406 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
407 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
408 ((pteval ^
409 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
410 pteval += sg->length;
411 sg++;
412 }
413 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
414 pteval = ~0UL;
415 } while (dma_npages != 0);
416 dma_sg++;
417 }
418}
419
420int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
421{
422 struct sbus_info *info;
423 struct iommu *iommu;
424 unsigned long flags, npages, iopte_protection;
425 iopte_t *base;
426 u32 dma_base;
427 struct scatterlist *sgtmp;
428 int used;
429
430 /* Fast path single entry scatterlists. */
431 if (nelems == 1) {
432 sglist->dma_address =
433 sbus_map_single(sdev,
434 (page_address(sglist->page) + sglist->offset),
435 sglist->length, direction);
436 sglist->dma_length = sglist->length;
437 return 1;
438 }
439
440 info = sdev->bus->iommu;
441 iommu = &info->iommu;
442
443 if (unlikely(direction == SBUS_DMA_NONE))
444 BUG();
445
446 npages = prepare_sg(sglist, nelems);
447
448 spin_lock_irqsave(&iommu->lock, flags);
449 base = alloc_npages(iommu, npages);
450 spin_unlock_irqrestore(&iommu->lock, flags);
451
452 if (unlikely(base == NULL))
453 BUG();
454
455 dma_base = iommu->page_table_map_base +
456 ((base - iommu->page_table) << IO_PAGE_SHIFT);
457
458 /* Normalize DVMA addresses. */
459 used = nelems;
460
461 sgtmp = sglist;
462 while (used && sgtmp->dma_length) {
463 sgtmp->dma_address += dma_base;
464 sgtmp++;
465 used--;
466 }
467 used = nelems - used;
468
469 iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
470 if (direction != SBUS_DMA_TODEVICE)
471 iopte_protection |= IOPTE_WRITE;
472
473 fill_sg(base, sglist, used, nelems, iopte_protection);
474
475#ifdef VERIFY_SG
476 verify_sglist(sglist, nelems, base, npages);
477#endif
478
479 return used;
480}
481
482void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
483{
484 struct sbus_info *info;
485 struct iommu *iommu;
486 struct strbuf *strbuf;
487 iopte_t *base;
488 unsigned long flags, i, npages;
489 u32 bus_addr;
490
491 if (unlikely(direction == SBUS_DMA_NONE))
492 BUG();
493
494 info = sdev->bus->iommu;
495 iommu = &info->iommu;
496 strbuf = &info->strbuf;
497
498 bus_addr = sglist->dma_address & IO_PAGE_MASK;
499
500 for (i = 1; i < nelems; i++)
501 if (sglist[i].dma_length == 0)
502 break;
503 i--;
504 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
505 bus_addr) >> IO_PAGE_SHIFT;
506
507 base = iommu->page_table +
508 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
509
510 spin_lock_irqsave(&iommu->lock, flags);
511 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
512 for (i = 0; i < npages; i++)
513 iopte_val(base[i]) = 0UL;
514 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
515 spin_unlock_irqrestore(&iommu->lock, flags);
516}
517
518void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
519{
520 struct sbus_info *info;
521 struct iommu *iommu;
522 struct strbuf *strbuf;
523 unsigned long flags, npages;
524
525 info = sdev->bus->iommu;
526 iommu = &info->iommu;
527 strbuf = &info->strbuf;
528
529 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
530 npages >>= IO_PAGE_SHIFT;
531 bus_addr &= IO_PAGE_MASK;
532
533 spin_lock_irqsave(&iommu->lock, flags);
534 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
535 spin_unlock_irqrestore(&iommu->lock, flags);
536}
537
538void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
539{
540}
541
542void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
543{
544 struct sbus_info *info;
545 struct iommu *iommu;
546 struct strbuf *strbuf;
547 unsigned long flags, npages, i;
548 u32 bus_addr;
549
550 info = sdev->bus->iommu;
551 iommu = &info->iommu;
552 strbuf = &info->strbuf;
553
554 bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
555 for (i = 0; i < nelems; i++) {
556 if (!sglist[i].dma_length)
557 break;
558 }
559 i--;
560 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
561 - bus_addr) >> IO_PAGE_SHIFT;
562
563 spin_lock_irqsave(&iommu->lock, flags);
564 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
565 spin_unlock_irqrestore(&iommu->lock, flags);
566}
567
568void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
569{
570}
571
572/* Enable 64-bit DVMA mode for the given device. */ 54/* Enable 64-bit DVMA mode for the given device. */
573void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) 55void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
574{ 56{
575 struct sbus_info *info = sdev->bus->iommu; 57 struct iommu *iommu = sdev->ofdev.dev.archdata.iommu;
576 struct iommu *iommu = &info->iommu;
577 int slot = sdev->slot; 58 int slot = sdev->slot;
578 unsigned long cfg_reg; 59 unsigned long cfg_reg;
579 u64 val; 60 u64 val;
@@ -713,8 +194,7 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
713unsigned int sbus_build_irq(void *buscookie, unsigned int ino) 194unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
714{ 195{
715 struct sbus_bus *sbus = (struct sbus_bus *)buscookie; 196 struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
716 struct sbus_info *info = sbus->iommu; 197 struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
717 struct iommu *iommu = &info->iommu;
718 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; 198 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
719 unsigned long imap, iclr; 199 unsigned long imap, iclr;
720 int sbus_level = 0; 200 int sbus_level = 0;
@@ -776,8 +256,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
776static irqreturn_t sysio_ue_handler(int irq, void *dev_id) 256static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
777{ 257{
778 struct sbus_bus *sbus = dev_id; 258 struct sbus_bus *sbus = dev_id;
779 struct sbus_info *info = sbus->iommu; 259 struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
780 struct iommu *iommu = &info->iommu;
781 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; 260 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
782 unsigned long afsr_reg, afar_reg; 261 unsigned long afsr_reg, afar_reg;
783 unsigned long afsr, afar, error_bits; 262 unsigned long afsr, afar, error_bits;
@@ -849,8 +328,7 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
849static irqreturn_t sysio_ce_handler(int irq, void *dev_id) 328static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
850{ 329{
851 struct sbus_bus *sbus = dev_id; 330 struct sbus_bus *sbus = dev_id;
852 struct sbus_info *info = sbus->iommu; 331 struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
853 struct iommu *iommu = &info->iommu;
854 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; 332 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
855 unsigned long afsr_reg, afar_reg; 333 unsigned long afsr_reg, afar_reg;
856 unsigned long afsr, afar, error_bits; 334 unsigned long afsr, afar, error_bits;
@@ -927,8 +405,7 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
927static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) 405static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
928{ 406{
929 struct sbus_bus *sbus = dev_id; 407 struct sbus_bus *sbus = dev_id;
930 struct sbus_info *info = sbus->iommu; 408 struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
931 struct iommu *iommu = &info->iommu;
932 unsigned long afsr_reg, afar_reg, reg_base; 409 unsigned long afsr_reg, afar_reg, reg_base;
933 unsigned long afsr, afar, error_bits; 410 unsigned long afsr, afar, error_bits;
934 int reported; 411 int reported;
@@ -995,8 +472,7 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
995 472
996static void __init sysio_register_error_handlers(struct sbus_bus *sbus) 473static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
997{ 474{
998 struct sbus_info *info = sbus->iommu; 475 struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
999 struct iommu *iommu = &info->iommu;
1000 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; 476 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
1001 unsigned int irq; 477 unsigned int irq;
1002 u64 control; 478 u64 control;
@@ -1041,7 +517,6 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1041{ 517{
1042 const struct linux_prom64_registers *pr; 518 const struct linux_prom64_registers *pr;
1043 struct device_node *dp; 519 struct device_node *dp;
1044 struct sbus_info *info;
1045 struct iommu *iommu; 520 struct iommu *iommu;
1046 struct strbuf *strbuf; 521 struct strbuf *strbuf;
1047 unsigned long regs, reg_base; 522 unsigned long regs, reg_base;
@@ -1054,25 +529,28 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1054 529
1055 pr = of_get_property(dp, "reg", NULL); 530 pr = of_get_property(dp, "reg", NULL);
1056 if (!pr) { 531 if (!pr) {
1057 prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n"); 532 prom_printf("sbus_iommu_init: Cannot map SYSIO "
533 "control registers.\n");
1058 prom_halt(); 534 prom_halt();
1059 } 535 }
1060 regs = pr->phys_addr; 536 regs = pr->phys_addr;
1061 537
1062 info = kzalloc(sizeof(*info), GFP_ATOMIC); 538 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
1063 if (info == NULL) { 539 if (!iommu)
1064 prom_printf("sbus_iommu_init: Fatal error, " 540 goto fatal_memory_error;
1065 "kmalloc(info) failed\n"); 541 strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
1066 prom_halt(); 542 if (!strbuf)
1067 } 543 goto fatal_memory_error;
1068 544
1069 iommu = &info->iommu; 545 sbus->ofdev.dev.archdata.iommu = iommu;
1070 strbuf = &info->strbuf; 546 sbus->ofdev.dev.archdata.stc = strbuf;
1071 547
1072 reg_base = regs + SYSIO_IOMMUREG_BASE; 548 reg_base = regs + SYSIO_IOMMUREG_BASE;
1073 iommu->iommu_control = reg_base + IOMMU_CONTROL; 549 iommu->iommu_control = reg_base + IOMMU_CONTROL;
1074 iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE; 550 iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
1075 iommu->iommu_flush = reg_base + IOMMU_FLUSH; 551 iommu->iommu_flush = reg_base + IOMMU_FLUSH;
552 iommu->iommu_tags = iommu->iommu_control +
553 (IOMMU_TAGDIAG - IOMMU_CONTROL);
1076 554
1077 reg_base = regs + SYSIO_STRBUFREG_BASE; 555 reg_base = regs + SYSIO_STRBUFREG_BASE;
1078 strbuf->strbuf_control = reg_base + STRBUF_CONTROL; 556 strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
@@ -1093,14 +571,12 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1093 */ 571 */
1094 iommu->write_complete_reg = regs + 0x2000UL; 572 iommu->write_complete_reg = regs + 0x2000UL;
1095 573
1096 /* Link into SYSIO software state. */
1097 sbus->iommu = info;
1098
1099 printk("SYSIO: UPA portID %x, at %016lx\n", 574 printk("SYSIO: UPA portID %x, at %016lx\n",
1100 sbus->portid, regs); 575 sbus->portid, regs);
1101 576
1102 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ 577 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
1103 sbus_iommu_table_init(iommu, IO_TSB_SIZE); 578 if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff))
579 goto fatal_memory_error;
1104 580
1105 control = upa_readq(iommu->iommu_control); 581 control = upa_readq(iommu->iommu_control);
1106 control = ((7UL << 16UL) | 582 control = ((7UL << 16UL) |
@@ -1157,6 +633,10 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1157 starfire_hookup(sbus->portid); 633 starfire_hookup(sbus->portid);
1158 634
1159 sysio_register_error_handlers(sbus); 635 sysio_register_error_handlers(sbus);
636 return;
637
638fatal_memory_error:
639 prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
1160} 640}
1161 641
1162void sbus_fill_device_irq(struct sbus_dev *sdev) 642void sbus_fill_device_irq(struct sbus_dev *sdev)
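[Annotation, not part of the patch] With the private arena allocator, the streaming-buffer flush and the sbus_{alloc,free,map,unmap,sync}_* entry points deleted, this file is reduced to SYSIO probing and error-interrupt handling; the state it used to keep in struct sbus_info now hangs off the device, where bus-independent DMA code can find it. The lookup is the same one the patch now uses in sbus_set_sbus64() and the sysio_*_handler() routines (sketch only; "sdev" stands for any struct sbus_dev whose archdata was filled by sbus_iommu_init() and propagated to children in drivers/sbus/sbus.c below):

	struct iommu  *iommu  = sdev->ofdev.dev.archdata.iommu;	/* SYSIO IOMMU regs + arena */
	struct strbuf *strbuf = sdev->ofdev.dev.archdata.stc;	/* streaming-cache flush state */
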
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 2553629ec15d..c37d7c2587ff 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -210,6 +210,10 @@ static void __init walk_children(struct device_node *dp, struct sbus_dev *parent
210 210
211 sdev->bus = sbus; 211 sdev->bus = sbus;
212 sdev->parent = parent; 212 sdev->parent = parent;
213 sdev->ofdev.dev.archdata.iommu =
214 sbus->ofdev.dev.archdata.iommu;
215 sdev->ofdev.dev.archdata.stc =
216 sbus->ofdev.dev.archdata.stc;
213 217
214 fill_sbus_device(dp, sdev); 218 fill_sbus_device(dp, sdev);
215 219
@@ -269,6 +273,11 @@ static void __init build_one_sbus(struct device_node *dp, int num_sbus)
269 273
270 sdev->bus = sbus; 274 sdev->bus = sbus;
271 sdev->parent = NULL; 275 sdev->parent = NULL;
276 sdev->ofdev.dev.archdata.iommu =
277 sbus->ofdev.dev.archdata.iommu;
278 sdev->ofdev.dev.archdata.stc =
279 sbus->ofdev.dev.archdata.stc;
280
272 fill_sbus_device(dev_dp, sdev); 281 fill_sbus_device(dev_dp, sdev);
273 282
274 walk_children(dev_dp, sdev, sbus); 283 walk_children(dev_dp, sdev, sbus);
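[Annotation, not part of the patch] Copying the bus's archdata pointers into every child at enumeration time, as build_one_sbus() and walk_children() now do, is what lets leaf drivers stay ignorant of which IOMMU serves them. Any other bus layer sitting under an IOMMU could opt into the same path in the same way; a purely hypothetical probe helper (names are illustrative, not from the patch) would only need:

	static void example_attach_child(struct my_bus_dev *child,
					 struct device *bus_dev)
	{
		/* Inherit the parent bus's IOMMU and streaming-cache state so
		 * the generic DMA ops can service this device directly.
		 */
		child->ofdev.dev.archdata.iommu = bus_dev->archdata.iommu;
		child->ofdev.dev.archdata.stc   = bus_dev->archdata.stc;
	}
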
diff --git a/include/asm-sparc/device.h b/include/asm-sparc/device.h
index 4a56d84d69c4..c0a7786d65f7 100644
--- a/include/asm-sparc/device.h
+++ b/include/asm-sparc/device.h
@@ -10,6 +10,10 @@ struct device_node;
10struct of_device; 10struct of_device;
11 11
12struct dev_archdata { 12struct dev_archdata {
13 void *iommu;
14 void *stc;
15 void *host_controller;
16
13 struct device_node *prom_node; 17 struct device_node *prom_node;
14 struct of_device *op; 18 struct of_device *op;
15}; 19};
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index c58ec1661df8..0a1006692bb2 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -1,307 +1,134 @@
1#ifndef _ASM_SPARC64_DMA_MAPPING_H 1#ifndef _ASM_SPARC64_DMA_MAPPING_H
2#define _ASM_SPARC64_DMA_MAPPING_H 2#define _ASM_SPARC64_DMA_MAPPING_H
3 3
4 4#include <linux/scatterlist.h>
5#ifdef CONFIG_PCI
6
7/* we implement the API below in terms of the existing PCI one,
8 * so include it */
9#include <linux/pci.h>
10/* need struct page definitions */
11#include <linux/mm.h> 5#include <linux/mm.h>
12 6
13#include <asm/of_device.h> 7#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
14 8
15static inline int 9struct dma_ops {
16dma_supported(struct device *dev, u64 mask) 10 void *(*alloc_coherent)(struct device *dev, size_t size,
17{ 11 dma_addr_t *dma_handle, gfp_t flag);
18 BUG_ON(dev->bus != &pci_bus_type); 12 void (*free_coherent)(struct device *dev, size_t size,
19 13 void *cpu_addr, dma_addr_t dma_handle);
20 return pci_dma_supported(to_pci_dev(dev), mask); 14 dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
21} 15 size_t size,
22 16 enum dma_data_direction direction);
23static inline int 17 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
24dma_set_mask(struct device *dev, u64 dma_mask) 18 size_t size,
25{ 19 enum dma_data_direction direction);
26 BUG_ON(dev->bus != &pci_bus_type); 20 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
27 21 enum dma_data_direction direction);
28 return pci_set_dma_mask(to_pci_dev(dev), dma_mask); 22 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
29} 23 int nhwentries,
30 24 enum dma_data_direction direction);
31static inline void * 25 void (*sync_single_for_cpu)(struct device *dev,
32dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 26 dma_addr_t dma_handle, size_t size,
33 gfp_t flag) 27 enum dma_data_direction direction);
34{ 28 void (*sync_single_for_device)(struct device *dev,
35 BUG_ON(dev->bus != &pci_bus_type); 29 dma_addr_t dma_handle, size_t size,
36 30 enum dma_data_direction direction);
37 return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag); 31 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
38} 32 int nelems,
39 33 enum dma_data_direction direction);
40static inline void 34 void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
41dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 35 int nelems,
42 dma_addr_t dma_handle) 36 enum dma_data_direction direction);
43{ 37};
44 BUG_ON(dev->bus != &pci_bus_type); 38extern const struct dma_ops *dma_ops;
45 39
46 pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle); 40extern int dma_supported(struct device *dev, u64 mask);
47} 41extern int dma_set_mask(struct device *dev, u64 dma_mask);
48
49static inline dma_addr_t
50dma_map_single(struct device *dev, void *cpu_addr, size_t size,
51 enum dma_data_direction direction)
52{
53 BUG_ON(dev->bus != &pci_bus_type);
54
55 return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
56}
57
58static inline void
59dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
60 enum dma_data_direction direction)
61{
62 BUG_ON(dev->bus != &pci_bus_type);
63
64 pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
65}
66
67static inline dma_addr_t
68dma_map_page(struct device *dev, struct page *page,
69 unsigned long offset, size_t size,
70 enum dma_data_direction direction)
71{
72 BUG_ON(dev->bus != &pci_bus_type);
73
74 return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
75}
76
77static inline void
78dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
79 enum dma_data_direction direction)
80{
81 BUG_ON(dev->bus != &pci_bus_type);
82
83 pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
84}
85
86static inline int
87dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
88 enum dma_data_direction direction)
89{
90 BUG_ON(dev->bus != &pci_bus_type);
91
92 return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
93}
94
95static inline void
96dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
97 enum dma_data_direction direction)
98{
99 BUG_ON(dev->bus != &pci_bus_type);
100
101 pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
102}
103
104static inline void
105dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
106 enum dma_data_direction direction)
107{
108 BUG_ON(dev->bus != &pci_bus_type);
109
110 pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
111 size, (int)direction);
112}
113
114static inline void
115dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
116 enum dma_data_direction direction)
117{
118 BUG_ON(dev->bus != &pci_bus_type);
119
120 pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
121 size, (int)direction);
122}
123
124static inline void
125dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
126 enum dma_data_direction direction)
127{
128 BUG_ON(dev->bus != &pci_bus_type);
129
130 pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
131}
132
133static inline void
134dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
135 enum dma_data_direction direction)
136{
137 BUG_ON(dev->bus != &pci_bus_type);
138
139 pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
140}
141
142static inline int
143dma_mapping_error(dma_addr_t dma_addr)
144{
145 return pci_dma_mapping_error(dma_addr);
146}
147
148#else
149
150struct device;
151struct page;
152struct scatterlist;
153
154static inline int
155dma_supported(struct device *dev, u64 mask)
156{
157 BUG();
158 return 0;
159}
160
161static inline int
162dma_set_mask(struct device *dev, u64 dma_mask)
163{
164 BUG();
165 return 0;
166}
167 42
168static inline void *dma_alloc_coherent(struct device *dev, size_t size, 43static inline void *dma_alloc_coherent(struct device *dev, size_t size,
169 dma_addr_t *dma_handle, gfp_t flag) 44 dma_addr_t *dma_handle, gfp_t flag)
170{ 45{
171 BUG(); 46 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
172 return NULL;
173} 47}
174 48
175static inline void dma_free_coherent(struct device *dev, size_t size, 49static inline void dma_free_coherent(struct device *dev, size_t size,
176 void *vaddr, dma_addr_t dma_handle) 50 void *cpu_addr, dma_addr_t dma_handle)
177{ 51{
178 BUG(); 52 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
179} 53}
180 54
181static inline dma_addr_t 55static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
182dma_map_single(struct device *dev, void *cpu_addr, size_t size, 56 size_t size,
183 enum dma_data_direction direction) 57 enum dma_data_direction direction)
184{ 58{
185 BUG(); 59 return dma_ops->map_single(dev, cpu_addr, size, direction);
186 return 0;
187} 60}
188 61
189static inline void 62static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
190dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 63 size_t size,
191 enum dma_data_direction direction) 64 enum dma_data_direction direction)
192{ 65{
193 BUG(); 66 dma_ops->unmap_single(dev, dma_addr, size, direction);
194} 67}
195 68
196static inline dma_addr_t 69static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
197dma_map_page(struct device *dev, struct page *page, 70 unsigned long offset, size_t size,
198 unsigned long offset, size_t size, 71 enum dma_data_direction direction)
199 enum dma_data_direction direction)
200{ 72{
201 BUG(); 73 return dma_ops->map_single(dev, page_address(page) + offset,
202 return 0; 74 size, direction);
203} 75}
204 76
205static inline void 77static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
206dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 78 size_t size,
207 enum dma_data_direction direction) 79 enum dma_data_direction direction)
208{ 80{
209 BUG(); 81 dma_ops->unmap_single(dev, dma_address, size, direction);
210} 82}
211 83
212static inline int 84static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
213dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 85 int nents, enum dma_data_direction direction)
214 enum dma_data_direction direction)
215{ 86{
216 BUG(); 87 return dma_ops->map_sg(dev, sg, nents, direction);
217 return 0;
218} 88}
219 89
220static inline void 90static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
221dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, 91 int nents, enum dma_data_direction direction)
222 enum dma_data_direction direction)
223{ 92{
224 BUG(); 93 dma_ops->unmap_sg(dev, sg, nents, direction);
225} 94}
226 95
227static inline void 96static inline void dma_sync_single_for_cpu(struct device *dev,
228dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, 97 dma_addr_t dma_handle, size_t size,
229 enum dma_data_direction direction) 98 enum dma_data_direction direction)
230{ 99{
231 BUG(); 100 dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
232} 101}
233 102
234static inline void 103static inline void dma_sync_single_for_device(struct device *dev,
235dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, 104 dma_addr_t dma_handle,
236 enum dma_data_direction direction) 105 size_t size,
106 enum dma_data_direction direction)
237{ 107{
238 BUG(); 108 dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
239} 109}
240 110
241static inline void 111static inline void dma_sync_sg_for_cpu(struct device *dev,
242dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, 112 struct scatterlist *sg, int nelems,
243 enum dma_data_direction direction) 113 enum dma_data_direction direction)
244{ 114{
245 BUG(); 115 dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
246} 116}
247 117
248static inline void 118static inline void dma_sync_sg_for_device(struct device *dev,
249dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, 119 struct scatterlist *sg, int nelems,
250 enum dma_data_direction direction) 120 enum dma_data_direction direction)
251{ 121{
252 BUG(); 122 dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
253} 123}
254 124
255static inline int 125static inline int dma_mapping_error(dma_addr_t dma_addr)
256dma_mapping_error(dma_addr_t dma_addr)
257{ 126{
258 BUG(); 127 return (dma_addr == DMA_ERROR_CODE);
259 return 0;
260} 128}
261 129
262#endif /* PCI */
263
264
265/* Now for the API extensions over the pci_ one */
266
267#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 130#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
268#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 131#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
269#define dma_is_consistent(d, h) (1) 132#define dma_is_consistent(d, h) (1)
270 133
271static inline int
272dma_get_cache_alignment(void)
273{
274 /* no easy way to get cache size on all processors, so return
275 * the maximum possible, to be safe */
276 return (1 << INTERNODE_CACHE_SHIFT);
277}
278
279static inline void
280dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
281 unsigned long offset, size_t size,
282 enum dma_data_direction direction)
283{
284 /* just sync everything, that's all the pci API can do */
285 dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
286}
287
288static inline void
289dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
290 unsigned long offset, size_t size,
291 enum dma_data_direction direction)
292{
293 /* just sync everything, that's all the pci API can do */
294 dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
295}
296
297static inline void
298dma_cache_sync(struct device *dev, void *vaddr, size_t size,
299 enum dma_data_direction direction)
300{
301 /* could define this in terms of the dma_cache ... operations,
302 * but if you get this on a platform, you should convert the platform
303 * to using the generic device DMA API */
304 BUG();
305}
306
307#endif /* _ASM_SPARC64_DMA_MAPPING_H */ 134#endif /* _ASM_SPARC64_DMA_MAPPING_H */
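[Annotation, not part of the patch] The header now routes everything through one table of indirect operations instead of the old "wrap the PCI calls under CONFIG_PCI, BUG() otherwise" scheme, so callers use the generic DMA API regardless of which bus the device sits on. A minimal driver-side sketch using only the inlines defined above (dev, buf and len are placeholders; failure is detected via dma_mapping_error(), i.e. comparison against DMA_ERROR_CODE from this header):

	#include <linux/dma-mapping.h>

	static int example_map_buffer(struct device *dev, void *buf, size_t len,
				      dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(*handle))
			return -ENOMEM;		/* op returned DMA_ERROR_CODE */
		return 0;
	}

	static void example_unmap_buffer(struct device *dev, dma_addr_t handle,
					 size_t len)
	{
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	}
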
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h
index 0b1813f41045..9eac6676caf1 100644
--- a/include/asm-sparc64/iommu.h
+++ b/include/asm-sparc64/iommu.h
@@ -1,7 +1,6 @@
1/* $Id: iommu.h,v 1.10 2001/03/08 09:55:56 davem Exp $ 1/* iommu.h: Definitions for the sun5 IOMMU.
2 * iommu.h: Definitions for the sun5 IOMMU.
3 * 2 *
4 * Copyright (C) 1996, 1999 David S. Miller (davem@caip.rutgers.edu) 3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
5 */ 4 */
6#ifndef _SPARC64_IOMMU_H 5#ifndef _SPARC64_IOMMU_H
7#define _SPARC64_IOMMU_H 6#define _SPARC64_IOMMU_H
@@ -33,6 +32,7 @@ struct iommu {
33 unsigned long iommu_tsbbase; 32 unsigned long iommu_tsbbase;
34 unsigned long iommu_flush; 33 unsigned long iommu_flush;
35 unsigned long iommu_flushinv; 34 unsigned long iommu_flushinv;
35 unsigned long iommu_tags;
36 unsigned long iommu_ctxflush; 36 unsigned long iommu_ctxflush;
37 unsigned long write_complete_reg; 37 unsigned long write_complete_reg;
38 unsigned long dummy_page; 38 unsigned long dummy_page;
@@ -54,4 +54,7 @@ struct strbuf {
54 volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)]; 54 volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
55}; 55};
56 56
57#endif /* !(_SPARC_IOMMU_H) */ 57extern int iommu_table_init(struct iommu *iommu, int tsbsize,
58 u32 dma_offset, u32 dma_addr_mask);
59
60#endif /* !(_SPARC64_IOMMU_H) */
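
For illustration, a hedged sketch of a call to the new iommu_table_init() declared above; the caller name and every numeric argument are placeholders (not values taken from any of the touched controller drivers), and the usual 0-on-success convention is assumed.

/* Sketch only: set up one IOMMU's translation table through the new
 * shared helper.  The tsbsize, dma_offset and dma_addr_mask arguments
 * are placeholder values; real callers pass controller-specific numbers.
 */
#include <asm/iommu.h>

static int example_iommu_setup(struct iommu *iommu)
{
	int err;

	err = iommu_table_init(iommu, 8192, 0xc0000000, 0xffffffff);
	if (err)
		return err;

	/* the IOMMU can now back streaming and consistent mappings
	 * for the devices behind it */
	return 0;
}
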
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h
index 600afe5ae2e3..8116e8f6062c 100644
--- a/include/asm-sparc64/parport.h
+++ b/include/asm-sparc64/parport.h
@@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
117 if (!strcmp(parent->name, "dma")) { 117 if (!strcmp(parent->name, "dma")) {
118 p = parport_pc_probe_port(base, base + 0x400, 118 p = parport_pc_probe_port(base, base + 0x400,
119 op->irqs[0], PARPORT_DMA_NOFIFO, 119 op->irqs[0], PARPORT_DMA_NOFIFO,
120 op->dev.parent); 120 op->dev.parent->parent);
121 if (!p) 121 if (!p)
122 return -ENOMEM; 122 return -ENOMEM;
123 dev_set_drvdata(&op->dev, p); 123 dev_set_drvdata(&op->dev, p);
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index e11ac100f043..1393e57d50fb 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -3,8 +3,7 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <linux/fs.h> 6#include <linux/dma-mapping.h>
7#include <linux/mm.h>
8 7
9/* Can be used to override the logic in pci_scan_bus for skipping 8/* Can be used to override the logic in pci_scan_bus for skipping
10 * already-configured bus numbers - to be used for buggy BIOSes 9 * already-configured bus numbers - to be used for buggy BIOSes
@@ -30,80 +29,42 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
30 /* We don't do dynamic PCI IRQ allocation */ 29 /* We don't do dynamic PCI IRQ allocation */
31} 30}
32 31
33/* Dynamic DMA mapping stuff.
34 */
35
36/* The PCI address space does not equal the physical memory 32/* The PCI address space does not equal the physical memory
37 * address space. The networking and block device layers use 33 * address space. The networking and block device layers use
38 * this boolean for bounce buffer decisions. 34 * this boolean for bounce buffer decisions.
39 */ 35 */
40#define PCI_DMA_BUS_IS_PHYS (0) 36#define PCI_DMA_BUS_IS_PHYS (0)
41 37
42#include <asm/scatterlist.h> 38static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
43 39 dma_addr_t *dma_handle)
44struct pci_dev;
45
46struct pci_iommu_ops {
47 void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *, gfp_t);
48 void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
49 dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
50 void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
51 int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
52 void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
53 void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
54 void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
55};
56
57extern const struct pci_iommu_ops *pci_iommu_ops;
58
59/* Allocate and map kernel buffer using consistent mode DMA for a device.
60 * hwdev should be valid struct pci_dev pointer for PCI devices.
61 */
62static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
63{ 40{
64 return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle, GFP_ATOMIC); 41 return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
65} 42}
66 43
67/* Free and unmap a consistent DMA buffer. 44static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
68 * cpu_addr is what was returned from pci_alloc_consistent, 45 void *vaddr, dma_addr_t dma_handle)
69 * size must be the same as what as passed into pci_alloc_consistent,
70 * and likewise dma_addr must be the same as what *dma_addrp was set to.
71 *
72 * References to the memory and mappings associated with cpu_addr/dma_addr
73 * past this call are illegal.
74 */
75static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
76{ 46{
77 return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle); 47 return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
78} 48}
79 49
80/* Map a single buffer of the indicated size for DMA in streaming mode. 50static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
81 * The 32-bit bus address to use is returned. 51 size_t size, int direction)
82 *
83 * Once the device is given the dma address, the device owns this memory
84 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
85 */
86static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
87{ 52{
88 return pci_iommu_ops->map_single(hwdev, ptr, size, direction); 53 return dma_map_single(&pdev->dev, ptr, size,
54 (enum dma_data_direction) direction);
89} 55}
90 56
91/* Unmap a single streaming mode DMA translation. The dma_addr and size 57static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
92 * must match what was provided for in a previous pci_map_single call. All 58 size_t size, int direction)
93 * other usages are undefined.
94 *
95 * After this call, reads by the cpu to the buffer are guaranteed to see
96 * whatever the device wrote there.
97 */
98static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
99{ 59{
100 pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction); 60 dma_unmap_single(&pdev->dev, dma_addr, size,
61 (enum dma_data_direction) direction);
101} 62}
102 63
103/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
104#define pci_map_page(dev, page, off, size, dir) \ 64#define pci_map_page(dev, page, off, size, dir) \
105 pci_map_single(dev, (page_address(page) + (off)), size, dir) 65 pci_map_single(dev, (page_address(page) + (off)), size, dir)
106#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir) 66#define pci_unmap_page(dev,addr,sz,dir) \
67 pci_unmap_single(dev,addr,sz,dir)
107 68
108/* pci_unmap_{single,page} is not a nop, thus... */ 69/* pci_unmap_{single,page} is not a nop, thus... */
109#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
@@ -119,75 +80,48 @@ static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
119#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
120 (((PTR)->LEN_NAME) = (VAL)) 81 (((PTR)->LEN_NAME) = (VAL))
121 82
122/* Map a set of buffers described by scatterlist in streaming 83static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
123 * mode for DMA. This is the scatter-gather version of the 84 int nents, int direction)
124 * above pci_map_single interface. Here the scatter gather list
125 * elements are each tagged with the appropriate dma address
126 * and length. They are obtained via sg_dma_{address,length}(SG).
127 *
128 * NOTE: An implementation may be able to use a smaller number of
129 * DMA address/length pairs than there are SG table elements.
130 * (for example via virtual mapping capabilities)
131 * The routine returns the number of addr/length pairs actually
132 * used, at most nents.
133 *
134 * Device ownership issues as mentioned above for pci_map_single are
135 * the same here.
136 */
137static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
138{ 85{
139 return pci_iommu_ops->map_sg(hwdev, sg, nents, direction); 86 return dma_map_sg(&pdev->dev, sg, nents,
87 (enum dma_data_direction) direction);
140} 88}
141 89
142/* Unmap a set of streaming mode DMA translations. 90static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
143 * Again, cpu read rules concerning calls here are the same as for 91 int nents, int direction)
144 * pci_unmap_single() above.
145 */
146static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
147{ 92{
148 pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction); 93 dma_unmap_sg(&pdev->dev, sg, nents,
94 (enum dma_data_direction) direction);
149} 95}
150 96
151/* Make physical memory consistent for a single 97static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
152 * streaming mode DMA translation after a transfer. 98 dma_addr_t dma_handle,
153 * 99 size_t size, int direction)
154 * If you perform a pci_map_single() but wish to interrogate the
155 * buffer using the cpu, yet do not wish to teardown the PCI dma
156 * mapping, you must call this function before doing so. At the
157 * next point you give the PCI dma address back to the card, you
158 * must first perform a pci_dma_sync_for_device, and then the
159 * device again owns the buffer.
160 */
161static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
162{ 100{
163 pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction); 101 dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
102 (enum dma_data_direction) direction);
164} 103}
165 104
166static inline void 105static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
167pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, 106 dma_addr_t dma_handle,
168 size_t size, int direction) 107 size_t size, int direction)
169{ 108{
170 /* No flushing needed to sync cpu writes to the device. */ 109 /* No flushing needed to sync cpu writes to the device. */
171 BUG_ON(direction == PCI_DMA_NONE);
172} 110}
173 111
174/* Make physical memory consistent for a set of streaming 112static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
175 * mode DMA translations after a transfer. 113 struct scatterlist *sg,
176 * 114 int nents, int direction)
177 * The same as pci_dma_sync_single_* but for a scatter-gather list,
178 * same rules and usage.
179 */
180static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
181{ 115{
182 pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction); 116 dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
117 (enum dma_data_direction) direction);
183} 118}
184 119
185static inline void 120static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
186pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, 121 struct scatterlist *sg,
187 int nelems, int direction) 122 int nelems, int direction)
188{ 123{
189 /* No flushing needed to sync cpu writes to the device. */ 124 /* No flushing needed to sync cpu writes to the device. */
190 BUG_ON(direction == PCI_DMA_NONE);
191} 125}
192 126
193/* Return whether the given PCI device DMA address mask can 127/* Return whether the given PCI device DMA address mask can
@@ -206,11 +140,9 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
206#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) 140#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
207#define PCI64_ADDR_BASE 0xfffc000000000000UL 141#define PCI64_ADDR_BASE 0xfffc000000000000UL
208 142
209#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
210
211static inline int pci_dma_mapping_error(dma_addr_t dma_addr) 143static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
212{ 144{
213 return (dma_addr == PCI_DMA_ERROR_CODE); 145 return dma_mapping_error(dma_addr);
214} 146}
215 147
216#ifdef CONFIG_PCI 148#ifdef CONFIG_PCI
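
With that, pci.h is reduced to thin wrappers: each pci_* DMA helper simply forwards to its dma_* counterpart on &pdev->dev. A small, hypothetical driver fragment using the unchanged pci_* names:

/* Hypothetical PCI driver fragment.  Nothing here needs to change for
 * this patch; every call below is now an inline over the generic DMA API.
 */
#include <linux/pci.h>
#include <linux/errno.h>

static int example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(handle))	/* now just dma_mapping_error() */
		return -ENOMEM;

	/* hand 'handle' to the device; no extra sync is needed before the
	 * device reads it, since the *_for_device hooks above are no-ops */

	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}
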
diff --git a/include/asm-sparc64/sbus.h b/include/asm-sparc64/sbus.h
index 7efd49d31bb8..0151cad486f3 100644
--- a/include/asm-sparc64/sbus.h
+++ b/include/asm-sparc64/sbus.h
@@ -1,7 +1,6 @@
1/* $Id: sbus.h,v 1.14 2000/02/18 13:50:55 davem Exp $ 1/* sbus.h: Defines for the Sun SBus.
2 * sbus.h: Defines for the Sun SBus.
3 * 2 *
4 * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5
7#ifndef _SPARC64_SBUS_H 6#ifndef _SPARC64_SBUS_H
@@ -69,7 +68,6 @@ struct sbus_dev {
69/* This struct describes the SBus(s) found on this machine. */ 68/* This struct describes the SBus(s) found on this machine. */
70struct sbus_bus { 69struct sbus_bus {
71 struct of_device ofdev; 70 struct of_device ofdev;
72 void *iommu; /* Opaque IOMMU cookie */
73 struct sbus_dev *devices; /* Tree of SBUS devices */ 71 struct sbus_dev *devices; /* Tree of SBUS devices */
74 struct sbus_bus *next; /* Next SBUS in system */ 72 struct sbus_bus *next; /* Next SBUS in system */
75 int prom_node; /* OBP node of SBUS */ 73 int prom_node; /* OBP node of SBUS */
@@ -102,9 +100,18 @@ extern struct sbus_bus *sbus_root;
102extern void sbus_set_sbus64(struct sbus_dev *, int); 100extern void sbus_set_sbus64(struct sbus_dev *, int);
103extern void sbus_fill_device_irq(struct sbus_dev *); 101extern void sbus_fill_device_irq(struct sbus_dev *);
104 102
105/* These yield IOMMU mappings in consistent mode. */ 103static inline void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size,
106extern void *sbus_alloc_consistent(struct sbus_dev *, size_t, dma_addr_t *dma_addrp); 104 dma_addr_t *dma_handle)
107extern void sbus_free_consistent(struct sbus_dev *, size_t, void *, dma_addr_t); 105{
106 return dma_alloc_coherent(&sdev->ofdev.dev, size,
107 dma_handle, GFP_ATOMIC);
108}
109
110static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
111 void *vaddr, dma_addr_t dma_handle)
112{
113 return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
114}
108 115
109#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL 116#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
110#define SBUS_DMA_TODEVICE DMA_TO_DEVICE 117#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
@@ -112,18 +119,67 @@ extern void sbus_free_consistent(struct sbus_dev *, size_t, void *, dma_addr_t);
112#define SBUS_DMA_NONE DMA_NONE 119#define SBUS_DMA_NONE DMA_NONE
113 120
114/* All the rest use streaming mode mappings. */ 121/* All the rest use streaming mode mappings. */
115extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int); 122static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
116extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int); 123 size_t size, int direction)
117extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int); 124{
118extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int); 125 return dma_map_single(&sdev->ofdev.dev, ptr, size,
126 (enum dma_data_direction) direction);
127}
128
129static inline void sbus_unmap_single(struct sbus_dev *sdev,
130 dma_addr_t dma_addr, size_t size,
131 int direction)
132{
133 dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
134 (enum dma_data_direction) direction);
135}
136
137static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
138 int nents, int direction)
139{
140 return dma_map_sg(&sdev->ofdev.dev, sg, nents,
141 (enum dma_data_direction) direction);
142}
143
144static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
145 int nents, int direction)
146{
147 dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
148 (enum dma_data_direction) direction);
149}
119 150
120/* Finally, allow explicit synchronization of streamable mappings. */ 151/* Finally, allow explicit synchronization of streamable mappings. */
121extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int); 152static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
153 dma_addr_t dma_handle,
154 size_t size, int direction)
155{
156 dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
157 (enum dma_data_direction) direction);
158}
122#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu 159#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
123extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int); 160
124extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int); 161static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
162 dma_addr_t dma_handle,
163 size_t size, int direction)
164{
165 /* No flushing needed to sync cpu writes to the device. */
166}
167
168static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
169 struct scatterlist *sg,
170 int nents, int direction)
171{
172 dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
173 (enum dma_data_direction) direction);
174}
125#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu 175#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
126extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int); 176
177static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
178 struct scatterlist *sg,
179 int nents, int direction)
180{
181 /* No flushing needed to sync cpu writes to the device. */
182}
127 183
128extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *); 184extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
129extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *); 185extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
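
The sbus_* helpers follow the same pattern: each inline resolves its mapping through &sdev->ofdev.dev, so SBUS traffic reaches the same dma_* entry points as PCI. A minimal, hypothetical SBUS driver fragment using the wrappers defined above:

/* Hypothetical SBUS driver fragment.  The historical sbus_* names are
 * kept, but every call now forwards to the generic DMA API on
 * &sdev->ofdev.dev.
 */
#include <asm/sbus.h>

static void example_sbus_io(struct sbus_dev *sdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = sbus_map_single(sdev, buf, len, SBUS_DMA_BIDIRECTIONAL);

	/* ... let the device DMA into and out of the buffer ... */

	sbus_dma_sync_single_for_cpu(sdev, handle, len, SBUS_DMA_BIDIRECTIONAL);
	sbus_unmap_single(sdev, handle, len, SBUS_DMA_BIDIRECTIONAL);
}
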