Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile        |  1 +
-rw-r--r--  lib/iommu-helper.c  | 80 ++++++++++++++++++++
-rw-r--r--  lib/radix-tree.c    | 15 +++++++----
-rw-r--r--  lib/swiotlb.c       | 41 ++++++++++-----
4 files changed, 127 insertions(+), 10 deletions(-)
diff --git a/lib/Makefile b/lib/Makefile
index 543f2502b60a..a18062e4633f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SMP) += pcounter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
new file mode 100644
index 000000000000..495575a59ca6
--- /dev/null
+++ b/lib/iommu-helper.c
@@ -0,0 +1,80 @@
+/*
+ * IOMMU helper functions for the free area management
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+static unsigned long find_next_zero_area(unsigned long *map,
+					 unsigned long size,
+					 unsigned long start,
+					 unsigned int nr,
+					 unsigned long align_mask)
+{
+	unsigned long index, end, i;
+again:
+	index = find_next_zero_bit(map, size, start);
+
+	/* Align allocation */
+	index = (index + align_mask) & ~align_mask;
+
+	end = index + nr;
+	if (end >= size)
+		return -1;
+	for (i = index; i < end; i++) {
+		if (test_bit(i, map)) {
+			start = i+1;
+			goto again;
+		}
+	}
+	return index;
+}
+
+static inline void set_bit_area(unsigned long *map, unsigned long i,
+				int len)
+{
+	unsigned long end = i + len;
+	while (i < end) {
+		__set_bit(i, map);
+		i++;
+	}
+}
+
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+				   unsigned long shift,
+				   unsigned long boundary_size)
+{
+	shift = (shift + index) & (boundary_size - 1);
+	return shift + nr > boundary_size;
+}
+
+unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+			       unsigned long start, unsigned int nr,
+			       unsigned long shift, unsigned long boundary_size,
+			       unsigned long align_mask)
+{
+	unsigned long index;
+again:
+	index = find_next_zero_area(map, size, start, nr, align_mask);
+	if (index != -1) {
+		if (is_span_boundary(index, nr, shift, boundary_size)) {
+			/* we could do more effectively */
+			start = index + 1;
+			goto again;
+		}
+		set_bit_area(map, index, nr);
+	}
+	return index;
+}
+EXPORT_SYMBOL(iommu_area_alloc);
+
+void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
+{
+	unsigned long end = start + nr;
+
+	while (start < end) {
+		__clear_bit(start, map);
+		start++;
+	}
+}
+EXPORT_SYMBOL(iommu_area_free);
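
The new allocator above is a first-fit search over a bitmap of I/O page slots: find a zero area, align it, and retry past any candidate that crosses a power-of-two segment boundary. Below is a minimal, self-contained user-space sketch of that logic; the kernel bitmap helpers (find_next_zero_bit, test_bit, __set_bit) are replaced by a plain array, and all names and sizes are illustrative stand-ins, not kernel API.

/*
 * Hypothetical user-space model of iommu_area_alloc(). SLOTS, the
 * bitmap array and area_alloc() are made up for this sketch.
 */
#include <stdio.h>

#define SLOTS 64
static unsigned char bitmap[SLOTS];	/* 1 = slot in use */

static long find_next_zero_area(unsigned long start, unsigned int nr,
				unsigned long align_mask)
{
	unsigned long index, end, i;
again:
	for (index = start; index < SLOTS && bitmap[index]; index++)
		;					/* first free slot */
	index = (index + align_mask) & ~align_mask;	/* align candidate */
	end = index + nr;
	if (end >= SLOTS)
		return -1;
	for (i = index; i < end; i++)
		if (bitmap[i]) {	/* area not fully free, retry past it */
			start = i + 1;
			goto again;
		}
	return index;
}

static long area_alloc(unsigned int nr, unsigned long shift,
		       unsigned long boundary_size, unsigned long align_mask)
{
	unsigned long start = 0, i;
	long index;
again:
	index = find_next_zero_area(start, nr, align_mask);
	if (index != -1) {
		/* same test as is_span_boundary(): does [index, index+nr)
		 * cross a multiple of boundary_size (a power of two)? */
		if (((shift + index) & (boundary_size - 1)) + nr > boundary_size) {
			start = index + 1;
			goto again;
		}
		for (i = index; i < index + nr; i++)
			bitmap[i] = 1;
	}
	return index;
}

int main(void)
{
	/* 16-slot segments: no area may cross a multiple of 16 */
	printf("a = %ld\n", area_alloc(8, 0, 16, 0));	/* expect 0  */
	printf("b = %ld\n", area_alloc(12, 0, 16, 0));	/* expect 16 */
	printf("c = %ld\n", area_alloc(4, 0, 16, 3));	/* expect 8  */
	return 0;
}

Running it prints a = 0, b = 16, c = 8: the 12-slot request is pushed into the next 16-slot segment because 8..19 would straddle slot 16, and the aligned 4-slot request lands on the first free 4-aligned index.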
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 48c250fe2233..65f0e758ec38 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -95,14 +95,17 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 static struct radix_tree_node *
 radix_tree_node_alloc(struct radix_tree_root *root)
 {
-	struct radix_tree_node *ret;
+	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
 
-	ret = kmem_cache_alloc(radix_tree_node_cachep,
-			set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
-	if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
+	if (!(gfp_mask & __GFP_WAIT)) {
 		struct radix_tree_preload *rtp;
 
+		/*
+		 * Provided the caller has preloaded here, we will always
+		 * succeed in getting a node here (and never reach
+		 * kmem_cache_alloc)
+		 */
 		rtp = &__get_cpu_var(radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
@@ -110,6 +113,10 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 			rtp->nr--;
 		}
 	}
+	if (ret == NULL)
+		ret = kmem_cache_alloc(radix_tree_node_cachep,
+			set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
 }
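
The reordering above means an atomic (!__GFP_WAIT) caller now consults its per-CPU preload pool before touching the slab, so a caller that has run radix_tree_preload() cannot fail here; kmem_cache_alloc() becomes the fallback for everyone else. A hedged sketch of that pattern, with a plain struct and malloc() standing in for the per-CPU variable and the slab cache:

/*
 * Hypothetical model of the reordered path: non-sleeping callers take
 * a node from a preloaded pool first, falling back to the general
 * allocator only when the pool is empty.
 */
#include <stdio.h>
#include <stdlib.h>

#define POOL_MAX 4

struct pool {
	int nr;
	void *nodes[POOL_MAX];
};

static struct pool preloads;	/* stands in for the per-CPU pool */

static void *node_alloc(int can_sleep)
{
	void *ret = NULL;

	if (!can_sleep && preloads.nr) {
		/* a preloaded caller always succeeds here */
		ret = preloads.nodes[--preloads.nr];
	}
	if (ret == NULL)	/* fallback, mirrors kmem_cache_alloc() */
		ret = malloc(64);
	return ret;
}

int main(void)
{
	preloads.nodes[preloads.nr++] = malloc(64);	/* "preload" one node */
	void *a = node_alloc(0);	/* atomic: comes from the pool */
	void *b = node_alloc(0);	/* pool empty: falls back to malloc */
	printf("pool left: %d\n", preloads.nr);
	free(a);
	free(b);
	return 0;
}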
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1a8050ade861..4bb5a11e18a2 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -282,6 +282,15 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
+static inline unsigned int is_span_boundary(unsigned int index,
+					    unsigned int nslots,
+					    unsigned long offset_slots,
+					    unsigned long max_slots)
+{
+	unsigned long offset = (offset_slots + index) & (max_slots - 1);
+	return offset + nslots > max_slots;
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -292,6 +301,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
+	unsigned long start_dma_addr;
+	unsigned long mask;
+	unsigned long offset_slots;
+	unsigned long max_slots;
+
+	mask = dma_get_seg_boundary(hwdev);
+	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+
+	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * For mappings greater than a page, we limit the stride (and
@@ -311,10 +330,17 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
 	{
-		wrap = index = ALIGN(io_tlb_index, stride);
-
+		index = ALIGN(io_tlb_index, stride);
 		if (index >= io_tlb_nslabs)
-			wrap = index = 0;
+			index = 0;
+
+		while (is_span_boundary(index, nslots, offset_slots,
+					max_slots)) {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		}
+		wrap = index;
 
 		do {
 			/*
@@ -341,9 +367,12 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 
 			goto found;
 		}
-		index += stride;
-		if (index >= io_tlb_nslabs)
-			index = 0;
+		do {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		} while (is_span_boundary(index, nslots, offset_slots,
+					  max_slots));
 	} while (index != wrap);
 
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
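
Both swiotlb hunks apply the same span check so that the bounce-buffer slots handed to a device never cross the DMA segment boundary reported by dma_get_seg_boundary(): the slot search now steps over any candidate run that would straddle a segment. A self-contained sketch of the slot arithmetic, assuming the in-tree IO_TLB_SHIFT of 11 (2 KB slots); the mask and start address are illustrative values, not queried from a device:

/*
 * Worked example of the swiotlb boundary arithmetic. With a 64 KB
 * boundary mask, max_slots = 32 and offset_slots = 7, so an 8-slot
 * area at index 1 fits inside one segment while one at index 20
 * would straddle the segment edge.
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned int is_span_boundary(unsigned int index, unsigned int nslots,
				     unsigned long offset_slots,
				     unsigned long max_slots)
{
	unsigned long offset = (offset_slots + index) & (max_slots - 1);
	return offset + nslots > max_slots;
}

int main(void)
{
	unsigned long mask = 0xffff;		/* 64 KB segment boundary */
	unsigned long start_dma_addr = 0x3800;	/* illustrative tlb start & mask */
	unsigned long offset_slots, max_slots;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	printf("offset_slots=%lu max_slots=%lu\n", offset_slots, max_slots);
	printf("index 1, 8 slots: %s\n",	/* (7+1)+8 = 16 <= 32: ok */
	       is_span_boundary(1, 8, offset_slots, max_slots) ?
	       "crosses boundary" : "ok");
	printf("index 20, 8 slots: %s\n",	/* (7+20)+8 = 35 > 32: crosses */
	       is_span_boundary(20, 8, offset_slots, max_slots) ?
	       "crosses boundary" : "ok");
	return 0;
}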