path: root/lib/swiotlb.c
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c  149
1 file changed, 86 insertions(+), 63 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 025922807e6e..d568894df8cc 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
 
 #define OFFSET(val,align) ((unsigned long)	\
 	( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
-static inline unsigned int is_span_boundary(unsigned int index,
-					    unsigned int nslots,
-					    unsigned long offset_slots,
-					    unsigned long max_slots)
-{
-	unsigned long offset = (offset_slots + index) & (max_slots - 1);
-	return offset + nslots > max_slots;
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
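The helper deleted above is not lost: the removal pairs with the new #include <linux/iommu-helper.h>, and the same check is now provided by the IOMMU helpers as iommu_is_span_boundary(), which map_single() calls in the next hunk. A minimal standalone sketch of the check's semantics (illustrative userspace C, not the kernel code; max_slots must be a power of two for the mask to work):

#include <assert.h>

/*
 * Standalone restatement of the removed helper: returns nonzero when a
 * candidate allocation of 'nslots' slots starting at 'index' would cross
 * a 'max_slots' boundary.  'offset_slots' is the slot offset of the
 * IO TLB pool itself relative to the boundary.
 */
static unsigned int span_crosses_boundary(unsigned int index,
					  unsigned int nslots,
					  unsigned long offset_slots,
					  unsigned long max_slots)
{
	unsigned long offset = (offset_slots + index) & (max_slots - 1);
	return offset + nslots > max_slots;
}

int main(void)
{
	/* 4 slots ending exactly on an 8-slot boundary: allowed. */
	assert(!span_crosses_boundary(4, 4, 0, 8));
	/* 4 slots starting 2 short of the boundary: crosses it. */
	assert(span_crosses_boundary(6, 4, 0, 8));
	return 0;
}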
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 * request and allocate a buffer from that IO TLB pool.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		index = ALIGN(io_tlb_index, stride);
-		if (index >= io_tlb_nslabs)
-			index = 0;
-		wrap = index;
-
-		do {
-			while (is_span_boundary(index, nslots, offset_slots,
-						max_slots)) {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-				if (index == wrap)
-					goto not_found;
-			}
-
-			/*
-			 * If we find a slot that indicates we have 'nslots'
-			 * number of contiguous buffers, we allocate the
-			 * buffers from that slot and mark the entries as '0'
-			 * indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				int count = 0;
-
-				for (i = index; i < (int) (index + nslots); i++)
-					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in
-				 * the next round.
-				 */
-				io_tlb_index = ((index + nslots) < io_tlb_nslabs
-						? (index + nslots) : 0);
-
-				goto found;
-			}
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		} while (index != wrap);
-
- not_found:
-		spin_unlock_irqrestore(&io_tlb_lock, flags);
-		return NULL;
-	}
- found:
+	index = ALIGN(io_tlb_index, stride);
+	if (index >= io_tlb_nslabs)
+		index = 0;
+	wrap = index;
+
+	do {
+		while (iommu_is_span_boundary(index, nslots, offset_slots,
+					      max_slots)) {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+			if (index == wrap)
+				goto not_found;
+		}
+
+		/*
+		 * If we find a slot that indicates we have 'nslots' number of
+		 * contiguous buffers, we allocate the buffers from that slot
+		 * and mark the entries as '0' indicating unavailable.
+		 */
+		if (io_tlb_list[index] >= nslots) {
+			int count = 0;
+
+			for (i = index; i < (int) (index + nslots); i++)
+				io_tlb_list[i] = 0;
+			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+				io_tlb_list[i] = ++count;
+			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+			/*
+			 * Update the indices to avoid searching in the next
+			 * round.
+			 */
+			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+					? (index + nslots) : 0);
+
+			goto found;
+		}
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
+	} while (index != wrap);
+
+not_found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	return NULL;
+found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
 	/*
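For readers new to this allocator: io_tlb_list[i] counts the contiguous free slots starting at slot i, so the backward loop in the hunk above renumbers the free run that now ends just before the allocation. A toy model of that bookkeeping (illustrative userspace C; it drops the IO_TLB_SEGSIZE boundary test for brevity):

#include <stdio.h>

/*
 * Toy model of the io_tlb_list bookkeeping: list[i] is the number of
 * contiguous free slots starting at i.  Allocating nslots at 'index'
 * zeroes the allocated entries and renumbers the free run immediately
 * before them, mirroring the two loops in map_single() above.
 */
#define NSLABS 8

static void alloc_slots(int *list, int index, int nslots)
{
	int count = 0, i;

	for (i = index; i < index + nslots; i++)
		list[i] = 0;		/* mark allocated */
	for (i = index - 1; i >= 0 && list[i]; i--)
		list[i] = ++count;	/* shrink the preceding free run */
}

int main(void)
{
	int list[NSLABS] = { 8, 7, 6, 5, 4, 3, 2, 1 }, i;

	alloc_slots(list, 4, 2);	/* take slots 4 and 5 */
	for (i = 0; i < NSLABS; i++)
		printf("%d ", list[i]);	/* prints: 4 3 2 1 0 0 2 1 */
	return 0;
}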
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 
 	return dev_addr;
 }
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
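The new entry point threads a struct dma_attrs pointer through, and the old swiotlb_map_single() becomes a wrapper passing NULL, so existing callers are unaffected. A hypothetical caller sketch, assuming the DEFINE_DMA_ATTRS()/dma_set_attr() helpers and DMA_ATTR_WRITE_BARRIER from the companion dma-attributes patches in this series (none of which appear in this diff):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical example, not part of this patch: map a buffer with a
 * write-barrier attribute via the new *_attrs entry point.  Assumes the
 * swiotlb prototypes are visible via the arch's dma-mapping headers.
 */
static dma_addr_t map_with_barrier(struct device *hwdev, void *ptr,
				   size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	return swiotlb_map_single_attrs(hwdev, ptr, size, DMA_TO_DEVICE,
					&attrs);
}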
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
  * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-		     int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+			   size_t size, int dir, struct dma_attrs *attrs)
 {
 	char *dma_addr = bus_to_virt(dev_addr);
 
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
 
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
+{
+	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 				       SYNC_FOR_DEVICE);
 }
 
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	void *addr;
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			/* Don't panic here, we expect map_sg users
 			   to do proper error handling. */
 			swiotlb_full(hwdev, sg->length, dir, 0);
-			swiotlb_unmap_sg(hwdev, sgl, i, dir);
+			swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+					       attrs);
 			sgl[0].dma_length = 0;
 			return 0;
 		}
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	}
 	return nelems;
 }
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+	       int dir)
+{
+	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+		       int nelems, int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 	}
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		 int dir)
+{
+	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
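Taken together, the scatter-gather pair mirrors the single-buffer pair: each legacy function forwards to its _attrs twin with a NULL attribute pointer. A hypothetical round-trip sketch of a caller (not from this patch; it relies only on the entry points and the failure convention shown in the hunks above):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical example: map, use, and unmap a scatterlist through the
 * new attrs-aware entry points.  Passing NULL attrs gives exactly the
 * behaviour of the legacy swiotlb_map_sg()/swiotlb_unmap_sg() wrappers.
 */
static int do_sg_dma(struct device *hwdev, struct scatterlist *sgl,
		     int nelems)
{
	int mapped = swiotlb_map_sg_attrs(hwdev, sgl, nelems,
					  DMA_BIDIRECTIONAL, NULL);
	if (!mapped)
		return -ENOMEM;	/* map_sg users must handle failure */

	/* ... device DMA happens here ... */

	swiotlb_unmap_sg_attrs(hwdev, sgl, mapped, DMA_BIDIRECTIONAL, NULL);
	return 0;
}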