Diffstat (limited to 'lib')

-rw-r--r--	lib/Makefile		  2
-rw-r--r--	lib/find_next_bit.c	 22
-rw-r--r--	lib/idr.c		 10
-rw-r--r--	lib/inflate.c		  3
-rw-r--r--	lib/iomap.c		  2
-rw-r--r--	lib/ratelimit.c		 51
-rw-r--r--	lib/swiotlb.c		149

7 files changed, 156 insertions(+), 83 deletions(-)
diff --git a/lib/Makefile b/lib/Makefile
index 2d7001b7f5a4..0ae4eb047aac 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o
+	 proportions.o prio_heap.o ratelimit.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d3f5784807b4..24c59ded47a0 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -20,8 +20,8 @@
 /*
  * Find the next set bit in a memory region.
  */
-unsigned long __find_next_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -58,14 +58,14 @@ found_first:
 found_middle:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_next_bit);
+EXPORT_SYMBOL(find_next_bit);
 
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
  */
-unsigned long __find_next_zero_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -102,15 +102,14 @@ found_first:
 found_middle:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_next_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
 
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
 /*
  * Find the first set bit in a memory region.
  */
-unsigned long __find_first_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -131,13 +130,12 @@ unsigned long __find_first_bit(const unsigned long *addr,
 found:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_first_bit);
+EXPORT_SYMBOL(find_first_bit);
 
 /*
  * Find the first cleared bit in a memory region.
  */
-unsigned long __find_first_zero_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -158,7 +156,7 @@ unsigned long __find_first_zero_bit(const unsigned long *addr,
 found:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_first_zero_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef __BIG_ENDIAN
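Usage note: with the leading underscores dropped, these become the canonical generic bitmap search helpers. A minimal sketch of the usual iteration idiom (the `bitmap` array and `nbits` bound are illustrative, not part of this patch; find_first_bit()/find_next_bit() return `nbits` when no further set bit exists):

	unsigned long bit;

	for (bit = find_first_bit(bitmap, nbits);
	     bit < nbits;
	     bit = find_next_bit(bitmap, nbits, bit + 1)) {
		/* ... process set bit `bit` ... */
	}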
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
 
-static int init_id_cache(void)
+void __init idr_init_cache(void)
 {
-	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache",
-			sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
-	return 0;
+	idr_layer_cache = kmem_cache_create("idr_layer_cache",
+				sizeof(struct idr_layer), 0, SLAB_PANIC,
+				idr_cache_ctor);
 }
 
 /**
@@ -602,7 +601,6 @@ static int init_id_cache(void)
  */
 void idr_init(struct idr *idp)
 {
-	init_id_cache();
 	memset(idp, 0, sizeof(struct idr));
 	spin_lock_init(&idp->lock);
 }
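Note: since idr_init() no longer creates the slab cache lazily, idr_init_cache() must now run once during early boot, before the first IDR user; SLAB_PANIC turns a failed cache creation into an immediate panic rather than a silently NULL idr_layer_cache. A hedged sketch of the expected call site (the start_kernel() placement is assumed from the companion init/main.c change, which is outside this 'lib' diffstat):

	/* init/main.c (assumed companion change, not shown in this diff) */
	asmlinkage void __init start_kernel(void)
	{
		/* ... earlier boot setup ... */
		idr_init_cache();	/* must precede any idr_init()/idr_get_new() caller */
		/* ... */
	}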
diff --git a/lib/inflate.c b/lib/inflate.c
index 845f91d3ac12..9762294be062 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -811,6 +811,9 @@ DEBG("<dyn");
 	ll = malloc(sizeof(*ll) * (286+30));  /* literal/length and distance code lengths */
 #endif
 
+	if (ll == NULL)
+		return 1;
+
 	/* make local bit buffer */
 	b = bb;
 	k = bk;
diff --git a/lib/iomap.c b/lib/iomap.c
index dd6ca48fe6b0..37a3ea4cac9f 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap);
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
 	resource_size_t start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
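Note: this matters on 32-bit kernels with 64-bit resources, where resource_size_t is wider than unsigned long. A short illustration of the failure mode being fixed (the BAR size is hypothetical):

	/* With 64-bit resources on a 32-bit kernel, a 4 GiB BAR has
	 * pci_resource_len() == 0x100000000ULL.  Stored into the old
	 * `unsigned long len` this truncates to 0, so the following
	 * `if (!len || !start)` would wrongly make pci_iomap() fail. */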
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644
index 000000000000..485e3040dcd4
--- /dev/null
+++ b/lib/ratelimit.c
@@ -0,0 +1,51 @@
+/*
+ * ratelimit.c - Do something with rate limit.
+ *
+ * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * __ratelimit - rate limiting
+ * @ratelimit_jiffies: minimum time in jiffies between two callbacks
+ * @ratelimit_burst: number of callbacks we do before ratelimiting
+ *
+ * This enforces a rate limit: not more than @ratelimit_burst callbacks
+ * in every ratelimit_jiffies
+ */
+int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+{
+	static DEFINE_SPINLOCK(ratelimit_lock);
+	static unsigned toks = 10 * 5 * HZ;
+	static unsigned long last_msg;
+	static int missed;
+	unsigned long flags;
+	unsigned long now = jiffies;
+
+	spin_lock_irqsave(&ratelimit_lock, flags);
+	toks += now - last_msg;
+	last_msg = now;
+	if (toks > (ratelimit_burst * ratelimit_jiffies))
+		toks = ratelimit_burst * ratelimit_jiffies;
+	if (toks >= ratelimit_jiffies) {
+		int lost = missed;
+
+		missed = 0;
+		toks -= ratelimit_jiffies;
+		spin_unlock_irqrestore(&ratelimit_lock, flags);
+		if (lost)
+			printk(KERN_WARNING "%s: %d messages suppressed\n",
+				__func__, lost);
+		return 1;
+	}
+	missed++;
+	spin_unlock_irqrestore(&ratelimit_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(__ratelimit);
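Usage note: __ratelimit() returns 1 when the caller may proceed and 0 when the event should be suppressed, so callers wrap their output in the test. The `10 * 5 * HZ` seed for `toks` mirrors the printk defaults this code was isolated from (a burst of 10 every 5*HZ); note also that `toks`, `last_msg` and `missed` are static, so every caller of this function shares one token bucket. A minimal sketch (the message and limits are illustrative):

	if (__ratelimit(5 * HZ, 10))
		printk(KERN_WARNING "something noisy happened\n");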
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 025922807e6e..d568894df8cc 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
 
 #define OFFSET(val,align) ((unsigned long)	\
 	( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
-static inline unsigned int is_span_boundary(unsigned int index,
-					    unsigned int nslots,
-					    unsigned long offset_slots,
-					    unsigned long max_slots)
-{
-	unsigned long offset = (offset_slots + index) & (max_slots - 1);
-	return offset + nslots > max_slots;
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 * request and allocate a buffer from that IO TLB pool.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		index = ALIGN(io_tlb_index, stride);
-		if (index >= io_tlb_nslabs)
-			index = 0;
-		wrap = index;
-
-		do {
-			while (is_span_boundary(index, nslots, offset_slots,
-						max_slots)) {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-				if (index == wrap)
-					goto not_found;
-			}
-
-			/*
-			 * If we find a slot that indicates we have 'nslots'
-			 * number of contiguous buffers, we allocate the
-			 * buffers from that slot and mark the entries as '0'
-			 * indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				int count = 0;
-
-				for (i = index; i < (int) (index + nslots); i++)
-					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in
-				 * the next round.
-				 */
-				io_tlb_index = ((index + nslots) < io_tlb_nslabs
-						? (index + nslots) : 0);
-
-				goto found;
-			}
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		} while (index != wrap);
-
-	  not_found:
-		spin_unlock_irqrestore(&io_tlb_lock, flags);
-		return NULL;
-	}
-  found:
+	index = ALIGN(io_tlb_index, stride);
+	if (index >= io_tlb_nslabs)
+		index = 0;
+	wrap = index;
+
+	do {
+		while (iommu_is_span_boundary(index, nslots, offset_slots,
+					      max_slots)) {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+			if (index == wrap)
+				goto not_found;
+		}
+
+		/*
+		 * If we find a slot that indicates we have 'nslots' number of
+		 * contiguous buffers, we allocate the buffers from that slot
+		 * and mark the entries as '0' indicating unavailable.
+		 */
+		if (io_tlb_list[index] >= nslots) {
+			int count = 0;
+
+			for (i = index; i < (int) (index + nslots); i++)
+				io_tlb_list[i] = 0;
+			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+				io_tlb_list[i] = ++count;
+			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+			/*
+			 * Update the indices to avoid searching in the next
+			 * round.
+			 */
+			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+					? (index + nslots) : 0);
+
+			goto found;
+		}
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
+	} while (index != wrap);
+
+not_found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	return NULL;
+found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
 	/*
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 
 	return dev_addr;
 }
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
  * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-		     int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+			   size_t size, int dir, struct dma_attrs *attrs)
 {
 	char *dma_addr = bus_to_virt(dev_addr);
 
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
 
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
+{
+	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 			    SYNC_FOR_DEVICE);
 }
 
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	void *addr;
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			/* Don't panic here, we expect map_sg users
 			   to do proper error handling. */
 			swiotlb_full(hwdev, sg->length, dir, 0);
-			swiotlb_unmap_sg(hwdev, sgl, i, dir);
+			swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+					       attrs);
 			sgl[0].dma_length = 0;
 			return 0;
 		}
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	}
 	return nelems;
 }
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+	       int dir)
+{
+	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+		       int nelems, int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 	}
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		 int dir)
+{
+	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
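Usage note: each new *_attrs variant is exported, while the original entry points remain as thin wrappers passing `attrs = NULL`, so existing callers keep their behaviour and attribute-aware code gains the extra parameter. A hedged sketch of calling the scatter-gather variant, assuming the struct dma_attrs helpers from <linux/dma-attrs.h> introduced in the same patch series (the device, scatterlist and attribute choice are illustrative, and handle_mapping_failure() is a hypothetical error path):

	struct dma_attrs attrs;
	int mapped;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	mapped = swiotlb_map_sg_attrs(hwdev, sgl, nelems, DMA_TO_DEVICE, &attrs);
	if (!mapped)
		handle_mapping_failure();	/* map_sg users must do their own error handling */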