author     Anton Blanchard <anton@samba.org>                2012-06-03 15:44:25 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-07-03 00:14:47 -0400
commit     d362213722c8875b40d712796392682968ce685e (patch)
tree       cd982b44d3a1a41a52a57ed88fe3897f98001259 /arch/powerpc/kernel
parent     67ca141567519a6b0ec81850a7b6569b6d8c2b52 (diff)
powerpc/iommu: Push spinlock into iommu_range_alloc and __iommu_free
In preparation for IOMMU pools, push the spinlock into iommu_range_alloc
and __iommu_free.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
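The shape of the change, in brief: before this patch, every caller of
iommu_range_alloc() wrapped the call in tbl->it_lock; afterwards the
allocator takes and drops the lock itself, and the __iommu_free_locked()
variant (which asserted the lock was already held) is removed in favour of
plain __iommu_free(). A minimal sketch distilled from the hunks below, not
a verbatim excerpt:

        /* Before: callers such as iommu_alloc() held the table lock. */
        spin_lock_irqsave(&(tbl->it_lock), flags);
        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* After: locking is internal to iommu_range_alloc(), so a later
         * patch can replace the single it_lock with per-pool locks
         * without touching every caller. */
        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);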
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/iommu.c | 41
1 file changed, 8 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d855cfc0732d..70a212cec587 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -71,6 +71,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;
+       unsigned long flags;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

@@ -83,6 +84,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
                return DMA_ERROR_CODE;
        }

+       spin_lock_irqsave(&(tbl->it_lock), flags);
+
        if (handle && *handle)
                start = *handle;
        else
@@ -136,6 +139,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
                        goto again;
                } else {
                        /* Third failure, give up */
+                       spin_unlock_irqrestore(&(tbl->it_lock), flags);
                        return DMA_ERROR_CODE;
                }
        }
@@ -156,6 +160,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
        if (handle)
                *handle = end;

+       spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return n;
 }

@@ -165,13 +170,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                            unsigned long mask, unsigned int align_order,
                            struct dma_attrs *attrs)
 {
-       unsigned long entry, flags;
+       unsigned long entry;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

-       spin_lock_irqsave(&(tbl->it_lock), flags);
        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                return DMA_ERROR_CODE;
@@ -232,23 +235,6 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
        return true;
 }

-static void __iommu_free_locked(struct iommu_table *tbl, dma_addr_t dma_addr,
-                               unsigned int npages)
-{
-       unsigned long entry, free_entry;
-
-       BUG_ON(!spin_is_locked(&tbl->it_lock));
-
-       entry = dma_addr >> IOMMU_PAGE_SHIFT;
-       free_entry = entry - tbl->it_offset;
-
-       if (!iommu_free_check(tbl, dma_addr, npages))
-               return;
-
-       ppc_md.tce_free(tbl, entry, npages);
-       bitmap_clear(tbl->it_map, free_entry, npages);
-}
-
 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
 {
@@ -287,7 +273,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct dma_attrs *attrs)
 {
        dma_addr_t dma_next = 0, dma_addr;
-       unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
@@ -309,8 +294,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,

        DBG("sg mapping %d elements:\n", nelems);

-       spin_lock_irqsave(&(tbl->it_lock), flags);
-
        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;
@@ -393,8 +376,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
@@ -419,14 +400,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE);
-                       __iommu_free_locked(tbl, vaddr, npages);
+                       __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
 }

@@ -436,15 +416,12 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                    struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
-       unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

-       spin_lock_irqsave(&(tbl->it_lock), flags);
-
        sg = sglist;
        while (nelems--) {
                unsigned int npages;
@@ -454,7 +431,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE);
-               __iommu_free_locked(tbl, dma_handle, npages);
+               __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

@@ -464,8 +441,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
-
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }

 static void iommu_table_clear(struct iommu_table *tbl)
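For readers outside the kernel tree, the same "push the lock down" refactoring
can be sketched in self-contained userspace C. All names here (range_table,
range_alloc, range_free) are hypothetical, and a pthread mutex stands in for
the kernel spinlock; the point is only that callers need no external locking
once alloc and free manage the lock themselves:

        #include <pthread.h>
        #include <stdio.h>
        #include <string.h>

        #define TABLE_PAGES 64

        /* Simplified stand-in for struct iommu_table: a bitmap allocator
         * whose lock lives inside the table, as after this patch. */
        struct range_table {
                pthread_mutex_t lock;           /* plays the role of it_lock */
                unsigned char map[TABLE_PAGES]; /* 1 = page in use */
        };

        /* The lock is taken here, not by callers. */
        static long range_alloc(struct range_table *tbl, unsigned int npages)
        {
                long n = -1;
                unsigned int i, j;

                pthread_mutex_lock(&tbl->lock);
                for (i = 0; i + npages <= TABLE_PAGES; i++) {
                        for (j = 0; j < npages && !tbl->map[i + j]; j++)
                                ;
                        if (j == npages) {      /* found a free run */
                                memset(&tbl->map[i], 1, npages);
                                n = i;
                                break;
                        }
                }
                pthread_mutex_unlock(&tbl->lock);
                return n;
        }

        /* Likewise safe to call without the lock held (the kernel version
         * also validates the range first; see iommu_free_check above). */
        static void range_free(struct range_table *tbl, long entry,
                               unsigned int npages)
        {
                pthread_mutex_lock(&tbl->lock);
                memset(&tbl->map[entry], 0, npages);
                pthread_mutex_unlock(&tbl->lock);
        }

        int main(void)
        {
                struct range_table tbl = { .lock = PTHREAD_MUTEX_INITIALIZER };
                long a = range_alloc(&tbl, 4);
                long b = range_alloc(&tbl, 2);

                printf("a=%ld b=%ld\n", a, b);  /* expect a=0 b=4 */
                range_free(&tbl, a, 4);
                range_free(&tbl, b, 2);
                return 0;
        }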