diff options
author | David S. Miller <davem@huronp11.davemloft.net> | 2008-02-08 21:05:46 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-02-09 06:15:04 -0500 |
commit | d284142cbad66832d5072a0aebeca7bd9ca841b7 (patch) | |
tree | e5c5ad6271b3a61e28f1767b744e0696af0cd1a4 /arch/sparc64/kernel/pci_sun4v.c | |
parent | 19814ea24e9d80583504e336340ab4590841b0b1 (diff) |
[SPARC64]: IOMMU allocations using iommu-helper layer.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/pci_sun4v.c')
-rw-r--r-- | arch/sparc64/kernel/pci_sun4v.c | 84 |
1 file changed, 18 insertions, 66 deletions
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 61baf8dc095e..c8b6199a5dc4 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* pci_sun4v.c: SUN4V specific PCI controller support. | 1 | /* pci_sun4v.c: SUN4V specific PCI controller support. |
2 | * | 2 | * |
3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
@@ -113,54 +113,6 @@ static inline long iommu_batch_end(void) | |||
113 | return iommu_batch_flush(p); | 113 | return iommu_batch_flush(p); |
114 | } | 114 | } |
115 | 115 | ||
116 | static long arena_alloc(struct iommu_arena *arena, unsigned long npages) | ||
117 | { | ||
118 | unsigned long n, i, start, end, limit; | ||
119 | int pass; | ||
120 | |||
121 | limit = arena->limit; | ||
122 | start = arena->hint; | ||
123 | pass = 0; | ||
124 | |||
125 | again: | ||
126 | n = find_next_zero_bit(arena->map, limit, start); | ||
127 | end = n + npages; | ||
128 | if (unlikely(end >= limit)) { | ||
129 | if (likely(pass < 1)) { | ||
130 | limit = start; | ||
131 | start = 0; | ||
132 | pass++; | ||
133 | goto again; | ||
134 | } else { | ||
135 | /* Scanned the whole thing, give up. */ | ||
136 | return -1; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | for (i = n; i < end; i++) { | ||
141 | if (test_bit(i, arena->map)) { | ||
142 | start = i + 1; | ||
143 | goto again; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | for (i = n; i < end; i++) | ||
148 | __set_bit(i, arena->map); | ||
149 | |||
150 | arena->hint = end; | ||
151 | |||
152 | return n; | ||
153 | } | ||
154 | |||
155 | static void arena_free(struct iommu_arena *arena, unsigned long base, | ||
156 | unsigned long npages) | ||
157 | { | ||
158 | unsigned long i; | ||
159 | |||
160 | for (i = base; i < (base + npages); i++) | ||
161 | __clear_bit(i, arena->map); | ||
162 | } | ||
163 | |||
164 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | 116 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, |
165 | dma_addr_t *dma_addrp, gfp_t gfp) | 117 | dma_addr_t *dma_addrp, gfp_t gfp) |
166 | { | 118 | { |
@@ -185,11 +137,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
185 | iommu = dev->archdata.iommu; | 137 | iommu = dev->archdata.iommu; |
186 | 138 | ||
187 | spin_lock_irqsave(&iommu->lock, flags); | 139 | spin_lock_irqsave(&iommu->lock, flags); |
188 | entry = arena_alloc(&iommu->arena, npages); | 140 | entry = iommu_range_alloc(dev, iommu, npages, NULL); |
189 | spin_unlock_irqrestore(&iommu->lock, flags); | 141 | spin_unlock_irqrestore(&iommu->lock, flags); |
190 | 142 | ||
191 | if (unlikely(entry < 0L)) | 143 | if (unlikely(entry == DMA_ERROR_CODE)) |
192 | goto arena_alloc_fail; | 144 | goto range_alloc_fail; |
193 | 145 | ||
194 | *dma_addrp = (iommu->page_table_map_base + | 146 | *dma_addrp = (iommu->page_table_map_base + |
195 | (entry << IO_PAGE_SHIFT)); | 147 | (entry << IO_PAGE_SHIFT)); |
@@ -219,10 +171,10 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
219 | iommu_map_fail: | 171 | iommu_map_fail: |
220 | /* Interrupts are disabled. */ | 172 | /* Interrupts are disabled. */ |
221 | spin_lock(&iommu->lock); | 173 | spin_lock(&iommu->lock); |
222 | arena_free(&iommu->arena, entry, npages); | 174 | iommu_range_free(iommu, *dma_addrp, npages); |
223 | spin_unlock_irqrestore(&iommu->lock, flags); | 175 | spin_unlock_irqrestore(&iommu->lock, flags); |
224 | 176 | ||
225 | arena_alloc_fail: | 177 | range_alloc_fail: |
226 | free_pages(first_page, order); | 178 | free_pages(first_page, order); |
227 | return NULL; | 179 | return NULL; |
228 | } | 180 | } |
@@ -243,7 +195,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
243 | 195 | ||
244 | spin_lock_irqsave(&iommu->lock, flags); | 196 | spin_lock_irqsave(&iommu->lock, flags); |
245 | 197 | ||
246 | arena_free(&iommu->arena, entry, npages); | 198 | iommu_range_free(iommu, dvma, npages); |
247 | 199 | ||
248 | do { | 200 | do { |
249 | unsigned long num; | 201 | unsigned long num; |
@@ -281,10 +233,10 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz, | |||
281 | npages >>= IO_PAGE_SHIFT; | 233 | npages >>= IO_PAGE_SHIFT; |
282 | 234 | ||
283 | spin_lock_irqsave(&iommu->lock, flags); | 235 | spin_lock_irqsave(&iommu->lock, flags); |
284 | entry = arena_alloc(&iommu->arena, npages); | 236 | entry = iommu_range_alloc(dev, iommu, npages, NULL); |
285 | spin_unlock_irqrestore(&iommu->lock, flags); | 237 | spin_unlock_irqrestore(&iommu->lock, flags); |
286 | 238 | ||
287 | if (unlikely(entry < 0L)) | 239 | if (unlikely(entry == DMA_ERROR_CODE)) |
288 | goto bad; | 240 | goto bad; |
289 | 241 | ||
290 | bus_addr = (iommu->page_table_map_base + | 242 | bus_addr = (iommu->page_table_map_base + |
@@ -319,7 +271,7 @@ bad: | |||
319 | iommu_map_fail: | 271 | iommu_map_fail: |
320 | /* Interrupts are disabled. */ | 272 | /* Interrupts are disabled. */ |
321 | spin_lock(&iommu->lock); | 273 | spin_lock(&iommu->lock); |
322 | arena_free(&iommu->arena, entry, npages); | 274 | iommu_range_free(iommu, bus_addr, npages); |
323 | spin_unlock_irqrestore(&iommu->lock, flags); | 275 | spin_unlock_irqrestore(&iommu->lock, flags); |
324 | 276 | ||
325 | return DMA_ERROR_CODE; | 277 | return DMA_ERROR_CODE; |
@@ -350,9 +302,9 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr, | |||
350 | 302 | ||
351 | spin_lock_irqsave(&iommu->lock, flags); | 303 | spin_lock_irqsave(&iommu->lock, flags); |
352 | 304 | ||
353 | entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; | 305 | iommu_range_free(iommu, bus_addr, npages); |
354 | arena_free(&iommu->arena, entry, npages); | ||
355 | 306 | ||
307 | entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; | ||
356 | do { | 308 | do { |
357 | unsigned long num; | 309 | unsigned long num; |
358 | 310 | ||
@@ -369,10 +321,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
369 | int nelems, enum dma_data_direction direction) | 321 | int nelems, enum dma_data_direction direction) |
370 | { | 322 | { |
371 | unsigned long flags, npages, i, prot; | 323 | unsigned long flags, npages, i, prot; |
324 | u32 dma_base, orig_dma_base; | ||
372 | struct scatterlist *sg; | 325 | struct scatterlist *sg; |
373 | struct iommu *iommu; | 326 | struct iommu *iommu; |
374 | long entry, err; | 327 | long entry, err; |
375 | u32 dma_base; | ||
376 | 328 | ||
377 | /* Fast path single entry scatterlists. */ | 329 | /* Fast path single entry scatterlists. */ |
378 | if (nelems == 1) { | 330 | if (nelems == 1) { |
@@ -393,13 +345,13 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
393 | npages = calc_npages(sglist, nelems); | 345 | npages = calc_npages(sglist, nelems); |
394 | 346 | ||
395 | spin_lock_irqsave(&iommu->lock, flags); | 347 | spin_lock_irqsave(&iommu->lock, flags); |
396 | entry = arena_alloc(&iommu->arena, npages); | 348 | entry = iommu_range_alloc(dev, iommu, npages, NULL); |
397 | spin_unlock_irqrestore(&iommu->lock, flags); | 349 | spin_unlock_irqrestore(&iommu->lock, flags); |
398 | 350 | ||
399 | if (unlikely(entry < 0L)) | 351 | if (unlikely(entry == DMA_ERROR_CODE)) |
400 | goto bad; | 352 | goto bad; |
401 | 353 | ||
402 | dma_base = iommu->page_table_map_base + | 354 | orig_dma_base = dma_base = iommu->page_table_map_base + |
403 | (entry << IO_PAGE_SHIFT); | 355 | (entry << IO_PAGE_SHIFT); |
404 | 356 | ||
405 | prot = HV_PCI_MAP_ATTR_READ; | 357 | prot = HV_PCI_MAP_ATTR_READ; |
@@ -449,7 +401,7 @@ bad: | |||
449 | 401 | ||
450 | iommu_map_failed: | 402 | iommu_map_failed: |
451 | spin_lock_irqsave(&iommu->lock, flags); | 403 | spin_lock_irqsave(&iommu->lock, flags); |
452 | arena_free(&iommu->arena, entry, npages); | 404 | iommu_range_free(iommu, orig_dma_base, npages); |
453 | spin_unlock_irqrestore(&iommu->lock, flags); | 405 | spin_unlock_irqrestore(&iommu->lock, flags); |
454 | 406 | ||
455 | return 0; | 407 | return 0; |
@@ -481,7 +433,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
481 | 433 | ||
482 | spin_lock_irqsave(&iommu->lock, flags); | 434 | spin_lock_irqsave(&iommu->lock, flags); |
483 | 435 | ||
484 | arena_free(&iommu->arena, entry, npages); | 436 | iommu_range_free(iommu, bus_addr, npages); |
485 | 437 | ||
486 | do { | 438 | do { |
487 | unsigned long num; | 439 | unsigned long num; |