author    David S. Miller <davem@huronp11.davemloft.net>    2008-02-08 21:05:46 -0500
committer David S. Miller <davem@davemloft.net>    2008-02-09 06:15:04 -0500
commit    d284142cbad66832d5072a0aebeca7bd9ca841b7 (patch)
tree      e5c5ad6271b3a61e28f1767b744e0696af0cd1a4
parent    19814ea24e9d80583504e336340ab4590841b0b1 (diff)
[SPARC64]: IOMMU allocations using iommu-helper layer.
Signed-off-by: David S. Miller <davem@davemloft.net>
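
[Editorial note] This patch replaces the two hand-rolled arena allocators (arena_alloc()/arena_free() in iommu.c and pci_sun4v.c) with a shared pair, iommu_range_alloc()/iommu_range_free(), built on the generic iommu-helper bitmap routines iommu_area_alloc()/iommu_area_free(). Failure is now reported as DMA_ERROR_CODE rather than a negative index, and the allocator honors the device's DMA segment boundary. A minimal sketch of the resulting call pattern, modeled on alloc_npages()/dma_4u_alloc_coherent() in the diff below; the wrapper function itself is hypothetical, not part of the patch:

	/* Illustrative sketch only: how a mapping routine obtains IOMMU
	 * page table entries after this patch.  Names follow the patch.
	 */
	static iopte_t *example_alloc(struct device *dev, struct iommu *iommu,
				      unsigned long npages)
	{
		unsigned long entry, flags;
		iopte_t *base = NULL;

		spin_lock_irqsave(&iommu->lock, flags);
		/* NULL handle: no scatterlist batching, start from arena->hint */
		entry = iommu_range_alloc(dev, iommu, npages, NULL);
		spin_unlock_irqrestore(&iommu->lock, flags);

		if (entry != DMA_ERROR_CODE)
			base = iommu->page_table + entry;
		return base;
	}

Freeing is symmetric but takes the bus address rather than a table index: iommu_range_free(iommu, bus_addr, npages), called under the same lock.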
-rw-r--r--  arch/sparc64/Kconfig                  4
-rw-r--r--  arch/sparc64/kernel/iommu.c         125
-rw-r--r--  arch/sparc64/kernel/iommu_common.h    8
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c      84
-rw-r--r--  include/asm-sparc64/iommu.h           1
5 files changed, 112 insertions(+), 110 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index b810f2b7526a..4ac22f4f9798 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -40,6 +40,10 @@ config MMU
 	bool
 	default y
 
+config IOMMU_HELPER
+	bool
+	default y
+
 config QUICKLIST
 	bool
 	default y
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 5623a4d59dff..90a5907080a1 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -1,6 +1,6 @@
 /* iommu.c: Generic sparc64 IOMMU support.
  *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
 
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/iommu-helper.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -41,7 +42,7 @@
41 "i" (ASI_PHYS_BYPASS_EC_E)) 42 "i" (ASI_PHYS_BYPASS_EC_E))
42 43
43/* Must be invoked under the IOMMU lock. */ 44/* Must be invoked under the IOMMU lock. */
44static void __iommu_flushall(struct iommu *iommu) 45static void iommu_flushall(struct iommu *iommu)
45{ 46{
46 if (iommu->iommu_flushinv) { 47 if (iommu->iommu_flushinv) {
47 iommu_write(iommu->iommu_flushinv, ~(u64)0); 48 iommu_write(iommu->iommu_flushinv, ~(u64)0);
@@ -83,54 +84,91 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }
 
-/* Based largely upon the ppc64 iommu allocator. */
-static long arena_alloc(struct iommu *iommu, unsigned long npages)
+/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
+ * facility it must all be done in one pass while under the iommu lock.
+ *
+ * On sun4u platforms, we only flush the IOMMU once every time we've passed
+ * over the entire page table doing allocations.  Therefore we only ever advance
+ * the hint and cannot backtrack it.
+ */
+unsigned long iommu_range_alloc(struct device *dev,
+				struct iommu *iommu,
+				unsigned long npages,
+				unsigned long *handle)
 {
+	unsigned long n, end, start, limit, boundary_size;
 	struct iommu_arena *arena = &iommu->arena;
-	unsigned long n, i, start, end, limit;
-	int pass;
+	int pass = 0;
+
+	/* This allocator was derived from x86_64's bit string search */
+
+	/* Sanity check */
+	if (unlikely(npages == 0)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return DMA_ERROR_CODE;
+	}
+
+	if (handle && *handle)
+		start = *handle;
+	else
+		start = arena->hint;
 
 	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
 
-again:
-	n = find_next_zero_bit(arena->map, limit, start);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
+	/* The case below can happen if we have a small segment appended
+	 * to a large, or when the previous alloc was at the very end of
+	 * the available space. If so, go back to the beginning and flush.
+	 */
+	if (start >= limit) {
+		start = 0;
+		if (iommu->flush_all)
+			iommu->flush_all(iommu);
+	}
+
+ again:
+
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << IO_PAGE_SHIFT);
+	else
+		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
+
+	n = iommu_area_alloc(arena->map, limit, start, npages, 0,
+			     boundary_size >> IO_PAGE_SHIFT, 0);
+	if (n == -1) {
 		if (likely(pass < 1)) {
-			limit = start;
+			/* First failure, rescan from the beginning.  */
 			start = 0;
-			__iommu_flushall(iommu);
+			if (iommu->flush_all)
+				iommu->flush_all(iommu);
 			pass++;
 			goto again;
 		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
+			/* Second failure, give up */
+			return DMA_ERROR_CODE;
 		}
 	}
 
-	for (i = n; i < end; i++) {
-		if (test_bit(i, arena->map)) {
-			start = i + 1;
-			goto again;
-		}
-	}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, arena->map);
+	end = n + npages;
 
 	arena->hint = end;
 
+	/* Update handle for SG allocations */
+	if (handle)
+		*handle = end;
+
 	return n;
 }
 
-static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
 {
-	unsigned long i;
+	struct iommu_arena *arena = &iommu->arena;
+	unsigned long entry;
 
-	for (i = base; i < (base + npages); i++)
-		__clear_bit(i, arena->map);
+	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+
+	iommu_area_free(arena->map, entry, npages);
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
@@ -156,6 +194,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	}
 	iommu->arena.limit = num_tsb_entries;
 
+	if (tlb_type != hypervisor)
+		iommu->flush_all = iommu_flushall;
+
 	/* Allocate and initialize the dummy page which we
 	 * set inactive IO PTEs to point to.
 	 */
@@ -192,22 +233,18 @@ out_free_map:
 	return -ENOMEM;
 }
 
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+				    unsigned long npages)
 {
-	long entry;
+	unsigned long entry;
 
-	entry = arena_alloc(iommu, npages);
-	if (unlikely(entry < 0))
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
+	if (unlikely(entry == DMA_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
-	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
-}
-
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
 	int lowest = iommu->ctx_lowest_free;
@@ -258,7 +295,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(iopte == NULL)) {
@@ -296,7 +333,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	free_npages(iommu, dvma - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, dvma, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -327,7 +364,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	base = alloc_npages(iommu, npages);
+	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
@@ -465,7 +502,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	iommu_free_ctx(iommu, ctx);
 
@@ -503,7 +540,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	base = alloc_npages(iommu, npages);
+	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
@@ -592,7 +629,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	iommu_free_ctx(iommu, ctx);
 
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 8390f043ffff..0713bd58499c 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -58,4 +58,12 @@ static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
 	return npages;
 }
 
+extern unsigned long iommu_range_alloc(struct device *dev,
+				       struct iommu *iommu,
+				       unsigned long npages,
+				       unsigned long *handle);
+extern void iommu_range_free(struct iommu *iommu,
+			     dma_addr_t dma_addr,
+			     unsigned long npages);
+
 #endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 61baf8dc095e..c8b6199a5dc4 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1,6 +1,6 @@
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -113,54 +113,6 @@ static inline long iommu_batch_end(void)
 	return iommu_batch_flush(p);
 }
 
-static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
-{
-	unsigned long n, i, start, end, limit;
-	int pass;
-
-	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
-
-again:
-	n = find_next_zero_bit(arena->map, limit, start);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
-		if (likely(pass < 1)) {
-			limit = start;
-			start = 0;
-			pass++;
-			goto again;
-		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
-		}
-	}
-
-	for (i = n; i < end; i++) {
-		if (test_bit(i, arena->map)) {
-			start = i + 1;
-			goto again;
-		}
-	}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, arena->map);
-
-	arena->hint = end;
-
-	return n;
-}
-
-static void arena_free(struct iommu_arena *arena, unsigned long base,
-		       unsigned long npages)
-{
-	unsigned long i;
-
-	for (i = base; i < (base + npages); i++)
-		__clear_bit(i, arena->map);
-}
-
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp)
 {
@@ -185,11 +137,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = arena_alloc(&iommu->arena, npages);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	if (unlikely(entry < 0L))
-		goto arena_alloc_fail;
+	if (unlikely(entry == DMA_ERROR_CODE))
+		goto range_alloc_fail;
 
 	*dma_addrp = (iommu->page_table_map_base +
 		      (entry << IO_PAGE_SHIFT));
@@ -219,10 +171,10 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 iommu_map_fail:
 	/* Interrupts are disabled.  */
 	spin_lock(&iommu->lock);
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, *dma_addrp, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-arena_alloc_fail:
+range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
@@ -243,7 +195,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, dvma, npages);
 
 	do {
 		unsigned long num;
@@ -281,10 +233,10 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = arena_alloc(&iommu->arena, npages);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	if (unlikely(entry < 0L))
+	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
 	bus_addr = (iommu->page_table_map_base +
@@ -319,7 +271,7 @@ bad:
 iommu_map_fail:
 	/* Interrupts are disabled.  */
 	spin_lock(&iommu->lock);
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return DMA_ERROR_CODE;
@@ -350,9 +302,9 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
+	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 	do {
 		unsigned long num;
 
@@ -369,10 +321,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			  int nelems, enum dma_data_direction direction)
 {
 	unsigned long flags, npages, i, prot;
+	u32 dma_base, orig_dma_base;
 	struct scatterlist *sg;
 	struct iommu *iommu;
 	long entry, err;
-	u32 dma_base;
 
 	/* Fast path single entry scatterlists.  */
 	if (nelems == 1) {
@@ -393,13 +345,13 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	npages = calc_npages(sglist, nelems);
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = arena_alloc(&iommu->arena, npages);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	if (unlikely(entry < 0L))
+	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
-	dma_base = iommu->page_table_map_base +
+	orig_dma_base = dma_base = iommu->page_table_map_base +
 		(entry << IO_PAGE_SHIFT);
 
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -449,7 +401,7 @@ bad:
 
 iommu_map_failed:
 	spin_lock_irqsave(&iommu->lock, flags);
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, orig_dma_base, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
@@ -481,7 +433,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	do {
 		unsigned long num;
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h
index 9eac6676caf1..46325ddee23b 100644
--- a/include/asm-sparc64/iommu.h
+++ b/include/asm-sparc64/iommu.h
@@ -26,6 +26,7 @@ struct iommu_arena {
 struct iommu {
 	spinlock_t		lock;
 	struct iommu_arena	arena;
+	void			(*flush_all)(struct iommu *);
 	iopte_t			*page_table;
 	u32			page_table_map_base;
 	unsigned long		iommu_control;
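
[Editorial note] struct iommu gains a flush_all callback here. iommu_table_init() installs iommu_flushall() only when tlb_type != hypervisor, so on sun4u the allocator flushes the hardware IOMMU each time it wraps back to the start of the arena, while the sun4v paths in pci_sun4v.c, which never touch the flush registers directly, simply leave the pointer NULL. Condensed from iommu_range_alloc() above (not new code), the wrap case reduces to:

	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)	/* set only on sun4u */
			iommu->flush_all(iommu);
	}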