Diffstat (limited to 'lib')
-rw-r--r--   lib/.gitignore                      6
-rw-r--r--   lib/Kconfig.debug                  27
-rw-r--r--   lib/Makefile                        2
-rw-r--r--   lib/bitmap.c                      166
-rw-r--r--   lib/extable.c                       3
-rw-r--r--   lib/genalloc.c                     14
-rw-r--r--   lib/idr.c                          50
-rw-r--r--   lib/kobject.c                       3
-rw-r--r--   lib/kobject_uevent.c                6
-rw-r--r--   lib/radix-tree.c                   53
-rw-r--r--   lib/reed_solomon/Makefile           2
-rw-r--r--   lib/reed_solomon/decode_rs.c       36
-rw-r--r--   lib/reed_solomon/encode_rs.c       14
-rw-r--r--   lib/reed_solomon/reed_solomon.c    64
-rw-r--r--   lib/smp_processor_id.c              1
-rw-r--r--   lib/sort.c                          1
-rw-r--r--   lib/string.c                      125
-rw-r--r--   lib/swiotlb.c                     811
-rw-r--r--   lib/textsearch.c                    2
-rw-r--r--   lib/ts_bm.c                         2
-rw-r--r--   lib/ts_fsm.c                        2
-rw-r--r--   lib/ts_kmp.c                        2
-rw-r--r--   lib/vsprintf.c                      1
-rw-r--r--   lib/zlib_inflate/inflate.c          1
24 files changed, 1215 insertions, 179 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
new file mode 100644
index 000000000000..3bef1ea94c99
--- /dev/null
+++ b/lib/.gitignore
@@ -0,0 +1,6 @@
+#
+# Generated files
+#
+gen_crc32table
+crc32table.h
+
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 016e89a44ac8..156822e3cc79 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -128,7 +128,7 @@ config DEBUG_HIGHMEM
 config DEBUG_BUGVERBOSE
 	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
 	depends on BUG
-	depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV
+	depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
 	default !EMBEDDED
 	help
 	  Say Y here to make BUG() panics output the file name and line number
@@ -168,13 +168,34 @@ config DEBUG_FS
 
 	  If unsure, say N.
 
+config DEBUG_VM
+	bool "Debug VM"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to debug the virtual-memory system.
+
+	  If unsure, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
 	default y if DEBUG_INFO && UML
 	help
 	  If you say Y here the resulting kernel image will be slightly larger
-	  and slower, but it might give very useful debugging information
-	  on some architectures or you use external debuggers.
+	  and slower, but it might give very useful debugging information on
+	  some architectures or if you use external debuggers.
 	  If you don't debug the kernel, you can say N.
 
+config RCU_TORTURE_TEST
+	tristate "torture tests for RCU"
+	depends on DEBUG_KERNEL
+	default n
+	help
+	  This option provides a kernel module that runs torture tests
+	  on the RCU infrastructure.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want RCU torture tests to start automatically
+	  at boot time (you probably don't).
+	  Say M if you want the RCU torture tests to build as a module.
+	  Say N if you are unsure.
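The two new options hook into the existing debug menu. As a usage sketch (illustrative, not part of the patch), a .config fragment exercising both, with the torture tests built as a module:

    CONFIG_DEBUG_KERNEL=y
    CONFIG_DEBUG_VM=y
    # built as a module; loaded on the running kernel via "modprobe rcutorture"
    CONFIG_RCU_TORTURE_TEST=m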
diff --git a/lib/Makefile b/lib/Makefile
index 44a46750690a..8535f4d7d1c3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -44,6 +44,8 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 
+obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/bitmap.c b/lib/bitmap.c
index fb9371fdd44a..23d3b1147fe9 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -511,6 +511,172 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 }
 EXPORT_SYMBOL(bitmap_parselist);
 
+/*
+ * bitmap_pos_to_ord(buf, pos, bits)
+ *	@buf: pointer to a bitmap
+ *	@pos: a bit position in @buf (0 <= @pos < @bits)
+ *	@bits: number of valid bit positions in @buf
+ *
+ * Map the bit at position @pos in @buf (of length @bits) to the
+ * ordinal of which set bit it is.  If it is not set or if @pos
+ * is not a valid bit position, map to zero (0).
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @pos
+ * values 4 through 7 will get mapped to 0 through 3, respectively,
+ * and other @pos values will get mapped to 0.  When @pos value 7
+ * gets mapped to (returns) @ord value 3 in this example, that means
+ * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
+ *
+ * The bit positions 0 through @bits are valid positions in @buf.
+ */
+static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
+{
+	int ord = 0;
+
+	if (pos >= 0 && pos < bits) {
+		int i;
+
+		for (i = find_first_bit(buf, bits);
+		     i < pos;
+		     i = find_next_bit(buf, bits, i + 1))
+			ord++;
+		if (i > pos)
+			ord = 0;
+	}
+	return ord;
+}
+
+/**
+ * bitmap_ord_to_pos(buf, ord, bits)
+ *	@buf: pointer to bitmap
+ *	@ord: ordinal bit position (n-th set bit, n >= 0)
+ *	@bits: number of valid bit positions in @buf
+ *
+ * Map the ordinal offset of bit @ord in @buf to its position in @buf.
+ * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @ord
+ * values 0 through 3 will get mapped to 4 through 7, respectively,
+ * and all other @ord values will get mapped to 0.  When @ord value 3
+ * gets mapped to (returns) @pos value 7 in this example, that means
+ * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
+ *
+ * The bit positions 0 through @bits are valid positions in @buf.
+ */
+static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+{
+	int pos = 0;
+
+	if (ord >= 0 && ord < bits) {
+		int i;
+
+		for (i = find_first_bit(buf, bits);
+		     i < bits && ord > 0;
+		     i = find_next_bit(buf, bits, i + 1))
+			ord--;
+		if (i < bits && ord == 0)
+			pos = i;
+	}
+
+	return pos;
+}
+
+/**
+ * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
+ *	@src: subset to be remapped
+ *	@dst: remapped result
+ *	@old: defines domain of map
+ *	@new: defines range of map
+ *	@bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new.  In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * If either of the @old and @new bitmaps is empty, or if @src and @dst
+ * point to the same location, then this routine does nothing.
+ *
+ * The positions of unset bits in @old are mapped to the position of
+ * the first set bit in @new.
+ *
+ * Apply the above specified mapping to @src, placing the result in
+ * @dst, clearing any bits previously set in @dst.
+ *
+ * The resulting value of @dst will have either the same weight as
+ * @src, or less weight in the general case that the mapping wasn't
+ * injective due to the weight of @new being less than that of @old.
+ * The resulting value of @dst will never have greater weight than
+ * that of @src, except perhaps in the case that one of the above
+ * conditions was not met and this routine just returned.
+ *
+ * For example, let's say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set.  This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions to 12 (the first set bit in @new).  So if say @src
+ * comes into this routine with bits 1, 5 and 7 set, then @dst should
+ * leave with bits 12, 13 and 15 set.
+ */
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
+		const unsigned long *old, const unsigned long *new,
+		int bits)
+{
+	int s;
+
+	if (bitmap_weight(old, bits) == 0)
+		return;
+	if (bitmap_weight(new, bits) == 0)
+		return;
+	if (dst == src)		/* following doesn't handle inplace remaps */
+		return;
+
+	bitmap_zero(dst, bits);
+	for (s = find_first_bit(src, bits);
+	     s < bits;
+	     s = find_next_bit(src, bits, s + 1)) {
+		int x = bitmap_pos_to_ord(old, s, bits);
+		int y = bitmap_ord_to_pos(new, x, bits);
+		set_bit(y, dst);
+	}
+}
+EXPORT_SYMBOL(bitmap_remap);
+
+/**
+ * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
+ *	@oldbit: bit position to be mapped
+ *	@old: defines domain of map
+ *	@new: defines range of map
+ *	@bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new.  In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * The positions of unset bits in @old are mapped to the position of
+ * the first set bit in @new.
+ *
+ * Apply the above specified mapping to bit position @oldbit, returning
+ * the new bit position.
+ *
+ * For example, let's say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set.  This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions to 12 (the first set bit in @new).  So if say @oldbit
+ * is 5, then this routine returns 13.
+ */
+int bitmap_bitremap(int oldbit, const unsigned long *old,
+		const unsigned long *new, int bits)
+{
+	int x = bitmap_pos_to_ord(old, oldbit, bits);
+	return bitmap_ord_to_pos(new, x, bits);
+}
+EXPORT_SYMBOL(bitmap_bitremap);
+
 /**
  * bitmap_find_free_region - find a contiguous aligned mem region
  *	@bitmap: an array of unsigned longs corresponding to the bitmap
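As a usage illustration of the two new exports (a sketch only; the buffer names are invented, not from the patch), the 4-7 to 12-15 example from the comments above looks like this in code:

	DECLARE_BITMAP(oldmap, 16);	/* domain of the map */
	DECLARE_BITMAP(newmap, 16);	/* range of the map  */
	DECLARE_BITMAP(src, 16);
	DECLARE_BITMAP(dst, 16);
	int b;

	bitmap_zero(oldmap, 16);
	bitmap_zero(newmap, 16);
	bitmap_zero(src, 16);
	for (b = 4; b <= 7; b++)
		set_bit(b, oldmap);	/* old: bits 4 through 7 */
	for (b = 12; b <= 15; b++)
		set_bit(b, newmap);	/* new: bits 12 through 15 */
	set_bit(1, src);
	set_bit(5, src);
	set_bit(7, src);

	bitmap_remap(dst, src, oldmap, newmap, 16);
	/* dst is zeroed by bitmap_remap and comes back with bits 12, 13
	 * and 15 set: 1 -> 12 (first set bit of new), 5 -> 13, 7 -> 15 */

	b = bitmap_bitremap(5, oldmap, newmap, 16);	/* b == 13 */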
diff --git a/lib/extable.c b/lib/extable.c
index 3f677a8f0c3c..18df57c029df 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -16,9 +16,6 @@
 #include <linux/sort.h>
 #include <asm/uaccess.h>
 
-extern struct exception_table_entry __start___ex_table[];
-extern struct exception_table_entry __stop___ex_table[];
-
 #ifndef ARCH_HAS_SORT_EXTABLE
 /*
  * The exception table needs to be sorted so that the binary
diff --git a/lib/genalloc.c b/lib/genalloc.c
index d6d30d2e7166..9ce0a6a3b85a 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -95,12 +95,10 @@ unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
 	if (size > max_chunk_size)
 		return 0;
 
-	i = 0;
-
 	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	s = roundup_pow_of_two(size);
-
-	j = i;
+	i = fls(size - 1);
+	s = 1 << i;
+	j = i -= ALLOC_MIN_SHIFT;
 
 	spin_lock_irqsave(&poolp->lock, flags);
 	while (!h[j].next) {
@@ -153,10 +151,10 @@ void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
 	if (size > max_chunk_size)
 		return;
 
-	i = 0;
-
 	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	s = roundup_pow_of_two(size);
+	i = fls(size - 1);
+	s = 1 << i;
+	i -= ALLOC_MIN_SHIFT;
 
 	a = ptr;
 
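The gen_pool_alloc()/gen_pool_free() change above folds the power-of-two rounding and the free-list index into one fls() computation. It rests on the identity that, for size >= 1, fls(size - 1) is the base-2 logarithm of size rounded up to the next power of two; a sketch of the equivalence (not literal pool code):

	int order = fls(size - 1);		/* e.g. size == 5: fls(4) == 3 */
	unsigned long s = 1UL << order;		/* 8, i.e. roundup_pow_of_two(5) */
	int bucket = order - ALLOC_MIN_SHIFT;	/* free-list index, as in the patch */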
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,20 +6,20 @@
  *	Modified by George Anzinger to reuse immediately and to use
  *	find bit instructions.  Also removed _irq on spinlocks.
  *
  * Small id to pointer translation service.
  *
  * It uses a radix tree like structure as a sparse array indexed
  * by the id to obtain the pointer.  The bitmap makes allocating
  * a new id quick.
  *
  * You call it to allocate an id (an int) and associate with that id a
  * pointer or whatever, we treat it as a (void *).  You can pass this
  * id to a user for him to pass back at a later time.  You then pass
  * that id to this code and it returns your pointer.
 
  * You can release ids at any time.  When all ids are released, most of
  * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
  * don't need to go to the memory "store" during an id allocate, just
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
  */
@@ -72,12 +72,12 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
  * If the system is REALLY out of memory this function returns 0,
  * otherwise 1.
  */
-int idr_pre_get(struct idr *idp, unsigned gfp_mask)
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < IDR_FREE_MAX) {
 		struct idr_layer *new;
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
-		if(new == NULL)
+		if (new == NULL)
 			return (0);
 		free_layer(idp, new);
 	}
@@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
 		if (m == IDR_SIZE) {
 			/* no space available go back to previous layer. */
 			l++;
-			id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
+			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 			if (!(p = pa[l])) {
 				*starting_id = id;
 				return -2;
@@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
 
 	id = starting_id;
 build_up:
 	p = idp->top;
@@ -225,6 +225,7 @@ build_up:
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	int rv;
+
 	rv = idr_get_new_above_int(idp, ptr, starting_id);
 	/*
 	 * This is a cheap hack until the IDR code can be fixed to
@@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above);
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
 	int rv;
+
 	rv = idr_get_new_above_int(idp, ptr, 0);
 	/*
 	 * This is a cheap hack until the IDR code can be fixed to
@@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id)
 			free_layer(idp, **paa);
 			**paa-- = NULL;
 		}
-		if ( ! *paa )
+		if (!*paa)
 			idp->layers = 0;
-	} else {
+	} else
 		idr_remove_warning(id);
-	}
 }
 
 /**
@@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id)
 	id &= MAX_ID_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
-	if ( idp->top && idp->top->count == 1 &&
-	     (idp->layers > 1) &&
-	     idp->top->ary[0]){  // We can drop a layer
+	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
+	    idp->top->ary[0]) {  // We can drop a layer
 
 		p = idp->top->ary[0];
 		idp->top->bitmap = idp->top->count = 0;
@@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id)
 		--idp->layers;
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-
 		p = alloc_layer(idp);
 		kmem_cache_free(idr_layer_cache, p);
 		return;
@@ -346,6 +345,19 @@ void idr_remove(struct idr *idp, int id)
 EXPORT_SYMBOL(idr_remove);
 
 /**
+ * idr_destroy - release all cached layers within an idr tree
+ * @idp: idr handle
+ */
+void idr_destroy(struct idr *idp)
+{
+	while (idp->id_free_cnt) {
+		struct idr_layer *p = alloc_layer(idp);
+		kmem_cache_free(idr_layer_cache, p);
+	}
+}
+EXPORT_SYMBOL(idr_destroy);
+
+/**
  * idr_find - return pointer for given id
  * @idp: idr handle
  * @id: lookup key
@@ -378,8 +390,8 @@ void *idr_find(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_find);
 
-static void idr_cache_ctor(void * idr_layer,
-		kmem_cache_t *idr_layer_cache, unsigned long flags)
+static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+		unsigned long flags)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
@@ -387,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer,
 static int init_id_cache(void)
 {
 	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache", 
+		idr_layer_cache = kmem_cache_create("idr_layer_cache",
 			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
 	return 0;
 }
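Taken together with the pre-existing entry points, the idr API after this patch is used roughly as follows (a sketch; struct my_obj and alloc_my_obj() are invented for illustration):

	struct idr ids;
	struct my_obj *obj = alloc_my_obj();	/* hypothetical payload */
	int id, err;

	idr_init(&ids);

	if (!idr_pre_get(&ids, GFP_KERNEL))	/* refill the layer cache */
		return -ENOMEM;
	err = idr_get_new(&ids, obj, &id);	/* 0 on success; id now maps to obj */

	obj = idr_find(&ids, id);		/* translate the id back */

	idr_remove(&ids, id);
	idr_destroy(&ids);			/* drop cached layers (added above) */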
diff --git a/lib/kobject.c b/lib/kobject.c
index dd0917dd9fa9..a181abed89f6 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/stat.h>
+#include <linux/slab.h>
 
 /**
  * populate_dir - populate directory with attributes.
@@ -100,7 +101,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
  * @kobj:	kobject in question, with which to build the path
  * @gfp_mask:	the allocation type used to allocate the path
  */
-char *kobject_get_path(struct kobject *kobj, int gfp_mask)
+char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
 	char *path;
 	int len;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 04ca4429ddfa..3ab375411e38 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -54,7 +54,7 @@ static char *action_to_string(enum kobject_action action)
 static struct sock *uevent_sock;
 
 /**
- * send_uevent - notify userspace by sending event trough netlink socket
+ * send_uevent - notify userspace by sending event through netlink socket
  *
  * @signal: signal name
  * @obj: object path (kobject)
@@ -62,7 +62,7 @@ static struct sock *uevent_sock;
  * @gfp_mask:
  */
 static int send_uevent(const char *signal, const char *obj,
-		       char **envp, int gfp_mask)
+		       char **envp, gfp_t gfp_mask)
 {
 	struct sk_buff *skb;
 	char *pos;
@@ -98,7 +98,7 @@ static int send_uevent(const char *signal, const char *obj,
 }
 
 static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action,
-			     struct attribute *attr, int gfp_mask)
+			     struct attribute *attr, gfp_t gfp_mask)
 {
 	char *path;
 	char *attrpath;
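The int -> gfp_t conversions in this file, kobject.c, idr.c and radix-tree.c are part of the tree-wide move to a dedicated allocation-flags type: gfp_t carries a sparse annotation, which gives a checker run ("make C=1") a hook to complain when plain integers and allocation flags are mixed up. A hedged illustration of the kind of misuse this is meant to catch (whether it actually warns depends on the annotation in effect at this point in history, __nocast versus the later __bitwise):

	send_uevent("add", path, envp, GFP_KERNEL);	/* fine */
	send_uevent("add", path, envp, 1234);		/* flags from thin air */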
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6a8bc6e06431..88511c3805ad 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * success, return zero, with preemption disabled.  On error, return -ENOMEM
  * with preemption not disabled.
  */
-int radix_tree_preload(unsigned int __nocast gfp_mask)
+int radix_tree_preload(gfp_t gfp_mask)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -281,35 +281,60 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/**
- * radix_tree_lookup - perform lookup operation on a radix tree
- * @root:	radix tree root
- * @index:	index key
- *
- * Lookup the item at the position @index in the radix tree @root.
- */
-void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
+static inline void **__lookup_slot(struct radix_tree_root *root,
+				   unsigned long index)
 {
 	unsigned int height, shift;
-	struct radix_tree_node *slot;
+	struct radix_tree_node **slot;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
 		return NULL;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
+	slot = &root->rnode;
 
 	while (height > 0) {
-		if (slot == NULL)
+		if (*slot == NULL)
 			return NULL;
 
-		slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK];
+		slot = (struct radix_tree_node **)
+			((*slot)->slots +
+				((index >> shift) & RADIX_TREE_MAP_MASK));
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	return slot;
+	return (void **)slot;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root:	radix tree root
+ * @index:	index key
+ *
+ * Lookup the slot corresponding to the position @index in the radix tree
+ * @root.  This is useful for update-if-exists operations.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+	return __lookup_slot(root, index);
+}
+EXPORT_SYMBOL(radix_tree_lookup_slot);
+
+/**
+ * radix_tree_lookup - perform lookup operation on a radix tree
+ * @root:	radix tree root
+ * @index:	index key
+ *
+ * Lookup the item at the position @index in the radix tree @root.
+ */
+void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
+{
+	void **slot;
+
+	slot = __lookup_slot(root, index);
+	return slot != NULL ? *slot : NULL;
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
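The point of the new radix_tree_lookup_slot() is the update-if-exists pattern mentioned in its comment: one descent finds the slot, and the caller can overwrite the entry in place instead of paying for a second lookup or a full insert. A minimal sketch (tree, index, new_item and tree_lock are assumed to exist; the caller must keep concurrent modifiers out):

	void **slot;

	spin_lock(&tree_lock);
	slot = radix_tree_lookup_slot(&tree, index);
	if (slot)
		*slot = new_item;	/* replace in place, no re-descent */
	spin_unlock(&tree_lock);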
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
index 747a2de29346..c3d7136827ed 100644
--- a/lib/reed_solomon/Makefile
+++ b/lib/reed_solomon/Makefile
@@ -1,5 +1,5 @@
 #
-# This is a modified version of reed solomon lib, 
+# This is a modified version of reed solomon lib,
 #
 
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index d401decd6289..a58df56f09b6 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -1,22 +1,22 @@
 /*
  * lib/reed_solomon/decode_rs.c
  *
  * Overview:
  *   Generic Reed Solomon encoder / decoder library
  *
  * Copyright 2002, Phil Karn, KA9Q
  * May be used under the terms of the GNU General Public License (GPL)
  *
  * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de)
  *
- * $Id: decode_rs.c,v 1.6 2004/10/22 15:41:47 gleixner Exp $
+ * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
  *
  */
 
 /* Generic data width independent code which is included by the
  * wrappers.
  */
 {
 	int deg_lambda, el, deg_omega;
 	int i, j, r, k, pad;
 	int nn = rs->nn;
@@ -41,9 +41,9 @@
 	pad = nn - nroots - len;
 	if (pad < 0 || pad >= nn)
 		return -ERANGE;
 
 	/* Does the caller provide the syndrome ? */
 	if (s != NULL)
 		goto decode;
 
 	/* form the syndromes; i.e., evaluate data(x) at roots of
@@ -54,11 +54,11 @@
 	for (j = 1; j < len; j++) {
 		for (i = 0; i < nroots; i++) {
 			if (syn[i] == 0) {
 				syn[i] = (((uint16_t) data[j]) ^
 					invmsk) & msk;
 			} else {
 				syn[i] = ((((uint16_t) data[j]) ^
 					invmsk) & msk) ^
 					alpha_to[rs_modnn(rs, index_of[syn[i]] +
 						(fcr + i) * prim)];
 			}
@@ -70,7 +70,7 @@
 			if (syn[i] == 0) {
 				syn[i] = ((uint16_t) par[j]) & msk;
 			} else {
 				syn[i] = (((uint16_t) par[j]) & msk) ^
 					alpha_to[rs_modnn(rs, index_of[syn[i]] +
 						(fcr+i)*prim)];
 			}
@@ -99,14 +99,14 @@
 
 	if (no_eras > 0) {
 		/* Init lambda to be the erasure locator polynomial */
 		lambda[1] = alpha_to[rs_modnn(rs,
 					prim * (nn - 1 - eras_pos[0]))];
 		for (i = 1; i < no_eras; i++) {
 			u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
 			for (j = i + 1; j > 0; j--) {
 				tmp = index_of[lambda[j - 1]];
 				if (tmp != nn) {
 					lambda[j] ^=
 						alpha_to[rs_modnn(rs, u + tmp)];
 				}
 			}
@@ -127,8 +127,8 @@
 		discr_r = 0;
 		for (i = 0; i < r; i++) {
 			if ((lambda[i] != 0) && (s[r - i - 1] != nn)) {
 				discr_r ^=
 					alpha_to[rs_modnn(rs,
 							index_of[lambda[i]] +
 							s[r - i - 1])];
 			}
@@ -143,7 +143,7 @@
 			t[0] = lambda[0];
 			for (i = 0; i < nroots; i++) {
 				if (b[i] != nn) {
 					t[i + 1] = lambda[i + 1] ^
 						alpha_to[rs_modnn(rs, discr_r +
 								b[i])];
 				} else
@@ -229,7 +229,7 @@
 		num1 = 0;
 		for (i = deg_omega; i >= 0; i--) {
 			if (omega[i] != nn)
 				num1 ^= alpha_to[rs_modnn(rs, omega[i] +
 							i * root[j])];
 		}
 		num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
@@ -239,13 +239,13 @@
 		 * lambda_pr of lambda[i] */
 		for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) {
 			if (lambda[i + 1] != nn) {
 				den ^= alpha_to[rs_modnn(rs, lambda[i + 1] +
 						i * root[j])];
 			}
 		}
 		/* Apply error to data */
 		if (num1 != 0 && loc[j] >= pad) {
 			uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] +
 					index_of[num2] +
 					nn - index_of[den])];
 			/* Store the error correction pattern, if a
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c
index 237bf65ae886..0b5b1a6728ec 100644
--- a/lib/reed_solomon/encode_rs.c
+++ b/lib/reed_solomon/encode_rs.c
@@ -1,19 +1,19 @@
 /*
  * lib/reed_solomon/encode_rs.c
  *
  * Overview:
  *   Generic Reed Solomon encoder / decoder library
  *
  * Copyright 2002, Phil Karn, KA9Q
  * May be used under the terms of the GNU General Public License (GPL)
  *
  * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de)
  *
- * $Id: encode_rs.c,v 1.4 2004/10/22 15:41:47 gleixner Exp $
+ * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $
  *
  */
 
 /* Generic data width independent code which is included by the
  * wrappers.
  * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par)
  */
@@ -35,16 +35,16 @@
 	for (i = 0; i < len; i++) {
 		fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]];
 		/* feedback term is non-zero */
 		if (fb != nn) {
 			for (j = 1; j < nroots; j++) {
 				par[j] ^= alpha_to[rs_modnn(rs, fb +
 							genpoly[nroots - j])];
 			}
 		}
 		/* Shift */
 		memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1));
 		if (fb != nn) {
 			par[nroots - 1] = alpha_to[rs_modnn(rs,
 							fb + genpoly[0])];
 		} else {
 			par[nroots - 1] = 0;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 6604e3b1940c..f5fef948a415 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -1,22 +1,22 @@
 /*
  * lib/reed_solomon/rslib.c
  *
  * Overview:
  *   Generic Reed Solomon encoder / decoder library
  *
  * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
  *
  * Reed Solomon code lifted from reed solomon library written by Phil Karn
  * Copyright 2002 Phil Karn, KA9Q
  *
- * $Id: rslib.c,v 1.5 2004/10/22 15:41:47 gleixner Exp $
+ * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
  * Description:
  *
  * The generic Reed Solomon library provides runtime configurable
  * encoding / decoding of RS codes.
  * Each user must call init_rs to get a pointer to a rs_control
@@ -25,11 +25,11 @@
  * If a structure is generated then the polynomial arrays for
  * fast encoding / decoding are built. This can take some time so
  * make sure not to call this function from a time critical path.
  * Usually a module / driver should initialize the necessary
  * rs_control structure on module / driver init and release it
  * on exit.
  * The encoding puts the calculated syndrome into a given syndrome
  * buffer.
  * The decoding is a two step process. The first step calculates
  * the syndrome over the received (data + syndrome) and calls the
  * second stage, which does the decoding / error correction itself.
@@ -51,7 +51,7 @@ static LIST_HEAD (rslist);
 /* Protection for the list */
 static DECLARE_MUTEX(rslistlock);
 
 /**
  * rs_init - Initialize a Reed-Solomon codec
  *
  * @symsize:	symbol size, bits (1-8)
@@ -63,7 +63,7 @@ static DECLARE_MUTEX(rslistlock);
  * Allocate a control structure and the polynom arrays for faster
  * en/decoding. Fill the arrays according to the given parameters
  */
 static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
 				  int prim, int nroots)
 {
 	struct rs_control *rs;
@@ -124,15 +124,15 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
 		/* Multiply rs->genpoly[] by @**(root + x) */
 		for (j = i; j > 0; j--) {
 			if (rs->genpoly[j] != 0) {
 				rs->genpoly[j] = rs->genpoly[j -1] ^
 					rs->alpha_to[rs_modnn(rs,
 					rs->index_of[rs->genpoly[j]] + root)];
 			} else
 				rs->genpoly[j] = rs->genpoly[j - 1];
 		}
 		/* rs->genpoly[0] can never be zero */
 		rs->genpoly[0] =
 			rs->alpha_to[rs_modnn(rs,
 				rs->index_of[rs->genpoly[0]] + root)];
 	}
 	/* convert rs->genpoly[] to index form for quicker encoding */
@@ -153,7 +153,7 @@ errrs:
 }
 
 
 /**
  * free_rs - Free the rs control structure, if its not longer used
  *
  * @rs:	the control structure which is not longer used by the
@@ -173,19 +173,19 @@ void free_rs(struct rs_control *rs)
 	up(&rslistlock);
 }
 
 /**
  * init_rs - Find a matching or allocate a new rs control structure
  *
  * @symsize:	the symbol size (number of bits)
  * @gfpoly:	the extended Galois field generator polynomial coefficients,
  *		with the 0th coefficient in the low order bit. The polynomial
  *		must be primitive;
  * @fcr:	the first consecutive root of the rs code generator polynomial
  *		in index form
  * @prim:	primitive element to generate polynomial roots
  * @nroots:	RS code generator polynomial degree (number of roots)
  */
 struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 			   int nroots)
 {
 	struct list_head *tmp;
@@ -198,9 +198,9 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 		return NULL;
 	if (prim <= 0 || prim >= (1<<symsize))
 		return NULL;
-	if (nroots < 0 || nroots >= (1<<symsize) || nroots > 8)
+	if (nroots < 0 || nroots >= (1<<symsize))
 		return NULL;
 
 	down(&rslistlock);
 
 	/* Walk through the list and look for a matching entry */
@@ -211,9 +211,9 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 		if (gfpoly != rs->gfpoly)
 			continue;
 		if (fcr != rs->fcr)
 			continue;
 		if (prim != rs->prim)
 			continue;
 		if (nroots != rs->nroots)
 			continue;
 		/* We have a matching one already */
@@ -227,18 +227,18 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 		rs->users = 1;
 		list_add(&rs->list, &rslist);
 	}
 out:
 	up(&rslistlock);
 	return rs;
 }
 
 #ifdef CONFIG_REED_SOLOMON_ENC8
 /**
  * encode_rs8 - Calculate the parity for data values (8bit data width)
  *
  * @rs:	the rs control structure
  * @data:	data field of a given type
  * @len:	data length
  * @par:	parity data, must be initialized by caller (usually all 0)
  * @invmsk:	invert data mask (will be xored on data)
  *
@@ -246,7 +246,7 @@ out:
  * symbol size > 8. The calling code must take care of encoding of the
  * syndrome result for storage itself.
  */
 int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
 	       uint16_t invmsk)
 {
 #include "encode_rs.c"
@@ -255,7 +255,7 @@ EXPORT_SYMBOL_GPL(encode_rs8);
 #endif
 
 #ifdef CONFIG_REED_SOLOMON_DEC8
 /**
  * decode_rs8 - Decode codeword (8bit data width)
  *
  * @rs:	the rs control structure
@@ -273,7 +273,7 @@ EXPORT_SYMBOL_GPL(encode_rs8);
  * syndrome result and the received parity before calling this code.
  */
 int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
 	       uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
 	       uint16_t *corr)
 {
 #include "decode_rs.c"
@@ -287,13 +287,13 @@ EXPORT_SYMBOL_GPL(decode_rs8);
  *
  * @rs:	the rs control structure
  * @data:	data field of a given type
  * @len:	data length
  * @par:	parity data, must be initialized by caller (usually all 0)
  * @invmsk:	invert data mask (will be xored on data, not on parity!)
  *
  * Each field in the data array contains up to symbol size bits of valid data.
  */
 int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
 		uint16_t invmsk)
 {
 #include "encode_rs.c"
@@ -302,7 +302,7 @@ EXPORT_SYMBOL_GPL(encode_rs16);
 #endif
 
 #ifdef CONFIG_REED_SOLOMON_DEC16
 /**
  * decode_rs16 - Decode codeword (16bit data width)
  *
  * @rs:	the rs control structure
@@ -312,13 +312,13 @@ EXPORT_SYMBOL_GPL(encode_rs16);
  * @s:	syndrome data field (if NULL, syndrome is calculated)
  * @no_eras:	number of erasures
  * @eras_pos:	position of erasures, can be NULL
  * @invmsk:	invert data mask (will be xored on data, not on parity!)
  * @corr:	buffer to store correction bitmask on eras_pos
  *
  * Each field in the data array contains up to symbol size bits of valid data.
  */
 int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
 		uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
 		uint16_t *corr)
 {
 #include "decode_rs.c"
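The description block at the top of reed_solomon.c (init_rs() at driver init, parity into a caller-cleared buffer, two-step decode) corresponds to roughly the following usage. The codec parameters are illustrative only; 0x11d is a common primitive polynomial for GF(2^8), and six roots allow up to three symbol errors to be corrected:

	struct rs_control *rs = init_rs(8, 0x11d, 0, 1, 6);
	uint8_t data[512];
	uint16_t par[6];
	int corrected;

	if (rs == NULL)
		return -ENOMEM;

	memset(data, 0, sizeof(data));	/* stand-in payload */
	memset(par, 0, sizeof(par));	/* parity must be initialized by the caller */
	encode_rs8(rs, data, sizeof(data), par, 0);

	/* decode with the syndrome computed internally (s == NULL) and
	 * no known erasures; returns the number of corrected symbols,
	 * or a negative value if the codeword is uncorrectable */
	corrected = decode_rs8(rs, data, par, sizeof(data), NULL, 0, NULL, 0, NULL);

	free_rs(rs);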
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 42c08ef828c5..eddc9b3d3876 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,6 +5,7 @@
  */
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/sched.h>
 
 unsigned int debug_smp_processor_id(void)
 {
diff --git a/lib/sort.c b/lib/sort.c
index ddc4d35df289..5f3b51ffa1dc 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sort.h>
+#include <linux/slab.h>
 
 static void u32_swap(void *a, void *b, int size)
 {
diff --git a/lib/string.c b/lib/string.c
index d886ef157c12..037a48acedbb 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -36,11 +36,13 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 	/* Yes, Virginia, it had better be unsigned */
 	unsigned char c1, c2;
 
-	c1 = 0; c2 = 0;
+	c1 = c2 = 0;
 	if (len) {
 		do {
-			c1 = *s1; c2 = *s2;
-			s1++; s2++;
+			c1 = *s1;
+			c2 = *s2;
+			s1++;
+			s2++;
 			if (!c1)
 				break;
 			if (!c2)
@@ -55,7 +57,6 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 	}
 	return (int)c1 - (int)c2;
 }
-
 EXPORT_SYMBOL(strnicmp);
 #endif
 
@@ -66,7 +67,7 @@ EXPORT_SYMBOL(strnicmp);
  * @src: Where to copy the string from
  */
 #undef strcpy
-char * strcpy(char * dest,const char *src)
+char *strcpy(char *dest, const char *src)
 {
 	char *tmp = dest;
 
@@ -91,12 +92,13 @@ EXPORT_SYMBOL(strcpy);
  * count, the remainder of @dest will be padded with %NUL.
  *
  */
-char * strncpy(char * dest,const char *src,size_t count)
+char *strncpy(char *dest, const char *src, size_t count)
 {
 	char *tmp = dest;
 
 	while (count) {
-		if ((*tmp = *src) != 0) src++;
+		if ((*tmp = *src) != 0)
+			src++;
 		tmp++;
 		count--;
 	}
@@ -122,7 +124,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
 	size_t ret = strlen(src);
 
 	if (size) {
-		size_t len = (ret >= size) ? size-1 : ret;
+		size_t len = (ret >= size) ? size - 1 : ret;
 		memcpy(dest, src, len);
 		dest[len] = '\0';
 	}
@@ -138,7 +140,7 @@ EXPORT_SYMBOL(strlcpy);
  * @src: The string to append to it
 */
 #undef strcat
-char * strcat(char * dest, const char * src)
+char *strcat(char *dest, const char *src)
 {
 	char *tmp = dest;
 
@@ -146,7 +148,6 @@ char * strcat(char * dest, const char * src)
 		dest++;
 	while ((*dest++ = *src++) != '\0')
 		;
-
 	return tmp;
 }
 EXPORT_SYMBOL(strcat);
@@ -162,7 +163,7 @@ EXPORT_SYMBOL(strcat);
  * Note that in contrast to strncpy, strncat ensures the result is
  * terminated.
 */
-char * strncat(char *dest, const char *src, size_t count)
+char *strncat(char *dest, const char *src, size_t count)
 {
 	char *tmp = dest;
 
@@ -176,7 +177,6 @@ char * strncat(char *dest, const char *src, size_t count)
 			}
 		}
 	}
-
 	return tmp;
 }
 EXPORT_SYMBOL(strncat);
@@ -216,15 +216,14 @@ EXPORT_SYMBOL(strlcat);
  * @ct: Another string
 */
 #undef strcmp
-int strcmp(const char * cs,const char * ct)
+int strcmp(const char *cs, const char *ct)
 {
-	register signed char __res;
+	signed char __res;
 
 	while (1) {
 		if ((__res = *cs - *ct++) != 0 || !*cs++)
 			break;
 	}
-
 	return __res;
 }
 EXPORT_SYMBOL(strcmp);
@@ -237,16 +236,15 @@ EXPORT_SYMBOL(strcmp);
  * @ct: Another string
 * @count: The maximum number of bytes to compare
 */
-int strncmp(const char * cs,const char * ct,size_t count)
+int strncmp(const char *cs, const char *ct, size_t count)
 {
-	register signed char __res = 0;
+	signed char __res = 0;
 
 	while (count) {
 		if ((__res = *cs - *ct++) != 0 || !*cs++)
 			break;
 		count--;
 	}
-
 	return __res;
 }
 EXPORT_SYMBOL(strncmp);
@@ -258,12 +256,12 @@ EXPORT_SYMBOL(strncmp);
  * @s: The string to be searched
 * @c: The character to search for
 */
-char * strchr(const char * s, int c)
+char *strchr(const char *s, int c)
 {
-	for(; *s != (char) c; ++s)
+	for (; *s != (char)c; ++s)
264 | if (*s == '\0') | 262 | if (*s == '\0') |
265 | return NULL; | 263 | return NULL; |
266 | return (char *) s; | 264 | return (char *)s; |
267 | } | 265 | } |
268 | EXPORT_SYMBOL(strchr); | 266 | EXPORT_SYMBOL(strchr); |
269 | #endif | 267 | #endif |
@@ -274,7 +272,7 @@ EXPORT_SYMBOL(strchr); | |||
274 | * @s: The string to be searched | 272 | * @s: The string to be searched |
275 | * @c: The character to search for | 273 | * @c: The character to search for |
276 | */ | 274 | */ |
277 | char * strrchr(const char * s, int c) | 275 | char *strrchr(const char *s, int c) |
278 | { | 276 | { |
279 | const char *p = s + strlen(s); | 277 | const char *p = s + strlen(s); |
280 | do { | 278 | do { |
@@ -296,8 +294,8 @@ EXPORT_SYMBOL(strrchr); | |||
296 | char *strnchr(const char *s, size_t count, int c) | 294 | char *strnchr(const char *s, size_t count, int c) |
297 | { | 295 | { |
298 | for (; count-- && *s != '\0'; ++s) | 296 | for (; count-- && *s != '\0'; ++s) |
299 | if (*s == (char) c) | 297 | if (*s == (char)c) |
300 | return (char *) s; | 298 | return (char *)s; |
301 | return NULL; | 299 | return NULL; |
302 | } | 300 | } |
303 | EXPORT_SYMBOL(strnchr); | 301 | EXPORT_SYMBOL(strnchr); |
@@ -308,7 +306,7 @@ EXPORT_SYMBOL(strnchr); | |||
308 | * strlen - Find the length of a string | 306 | * strlen - Find the length of a string |
309 | * @s: The string to be sized | 307 | * @s: The string to be sized |
310 | */ | 308 | */ |
311 | size_t strlen(const char * s) | 309 | size_t strlen(const char *s) |
312 | { | 310 | { |
313 | const char *sc; | 311 | const char *sc; |
314 | 312 | ||
@@ -325,7 +323,7 @@ EXPORT_SYMBOL(strlen); | |||
325 | * @s: The string to be sized | 323 | * @s: The string to be sized |
326 | * @count: The maximum number of bytes to search | 324 | * @count: The maximum number of bytes to search |
327 | */ | 325 | */ |
328 | size_t strnlen(const char * s, size_t count) | 326 | size_t strnlen(const char *s, size_t count) |
329 | { | 327 | { |
330 | const char *sc; | 328 | const char *sc; |
331 | 329 | ||
@@ -358,7 +356,6 @@ size_t strspn(const char *s, const char *accept) | |||
358 | return count; | 356 | return count; |
359 | ++count; | 357 | ++count; |
360 | } | 358 | } |
361 | |||
362 | return count; | 359 | return count; |
363 | } | 360 | } |
364 | 361 | ||
@@ -384,9 +381,8 @@ size_t strcspn(const char *s, const char *reject) | |||
384 | } | 381 | } |
385 | ++count; | 382 | ++count; |
386 | } | 383 | } |
387 | |||
388 | return count; | 384 | return count; |
389 | } | 385 | } |
390 | EXPORT_SYMBOL(strcspn); | 386 | EXPORT_SYMBOL(strcspn); |
391 | 387 | ||
392 | #ifndef __HAVE_ARCH_STRPBRK | 388 | #ifndef __HAVE_ARCH_STRPBRK |
@@ -395,14 +391,14 @@ EXPORT_SYMBOL(strcspn); | |||
395 | * @cs: The string to be searched | 391 | * @cs: The string to be searched |
396 | * @ct: The characters to search for | 392 | * @ct: The characters to search for |
397 | */ | 393 | */ |
398 | char * strpbrk(const char * cs,const char * ct) | 394 | char *strpbrk(const char *cs, const char *ct) |
399 | { | 395 | { |
400 | const char *sc1,*sc2; | 396 | const char *sc1, *sc2; |
401 | 397 | ||
402 | for( sc1 = cs; *sc1 != '\0'; ++sc1) { | 398 | for (sc1 = cs; *sc1 != '\0'; ++sc1) { |
403 | for( sc2 = ct; *sc2 != '\0'; ++sc2) { | 399 | for (sc2 = ct; *sc2 != '\0'; ++sc2) { |
404 | if (*sc1 == *sc2) | 400 | if (*sc1 == *sc2) |
405 | return (char *) sc1; | 401 | return (char *)sc1; |
406 | } | 402 | } |
407 | } | 403 | } |
408 | return NULL; | 404 | return NULL; |
@@ -422,9 +418,10 @@ EXPORT_SYMBOL(strpbrk); | |||
422 | * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. | 418 | * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. |
423 | * Same semantics, slimmer shape. ;) | 419 | * Same semantics, slimmer shape. ;) |
424 | */ | 420 | */ |
425 | char * strsep(char **s, const char *ct) | 421 | char *strsep(char **s, const char *ct) |
426 | { | 422 | { |
427 | char *sbegin = *s, *end; | 423 | char *sbegin = *s; |
424 | char *end; | ||
428 | 425 | ||
429 | if (sbegin == NULL) | 426 | if (sbegin == NULL) |
430 | return NULL; | 427 | return NULL; |
@@ -433,10 +430,8 @@ char * strsep(char **s, const char *ct) | |||
433 | if (end) | 430 | if (end) |
434 | *end++ = '\0'; | 431 | *end++ = '\0'; |
435 | *s = end; | 432 | *s = end; |
436 | |||
437 | return sbegin; | 433 | return sbegin; |
438 | } | 434 | } |
439 | |||
440 | EXPORT_SYMBOL(strsep); | 435 | EXPORT_SYMBOL(strsep); |
441 | #endif | 436 | #endif |
442 | 437 | ||
@@ -449,13 +444,12 @@ EXPORT_SYMBOL(strsep); | |||
449 | * | 444 | * |
450 | * Do not use memset() to access IO space, use memset_io() instead. | 445 | * Do not use memset() to access IO space, use memset_io() instead. |
451 | */ | 446 | */ |
452 | void * memset(void * s,int c,size_t count) | 447 | void *memset(void *s, int c, size_t count) |
453 | { | 448 | { |
454 | char *xs = (char *) s; | 449 | char *xs = s; |
455 | 450 | ||
456 | while (count--) | 451 | while (count--) |
457 | *xs++ = c; | 452 | *xs++ = c; |
458 | |||
459 | return s; | 453 | return s; |
460 | } | 454 | } |
461 | EXPORT_SYMBOL(memset); | 455 | EXPORT_SYMBOL(memset); |
@@ -471,13 +465,13 @@ EXPORT_SYMBOL(memset); | |||
471 | * You should not use this function to access IO space, use memcpy_toio() | 465 | * You should not use this function to access IO space, use memcpy_toio() |
472 | * or memcpy_fromio() instead. | 466 | * or memcpy_fromio() instead. |
473 | */ | 467 | */ |
474 | void * memcpy(void * dest,const void *src,size_t count) | 468 | void *memcpy(void *dest, const void *src, size_t count) |
475 | { | 469 | { |
476 | char *tmp = (char *) dest, *s = (char *) src; | 470 | char *tmp = dest; |
471 | char *s = src; | ||
477 | 472 | ||
478 | while (count--) | 473 | while (count--) |
479 | *tmp++ = *s++; | 474 | *tmp++ = *s++; |
480 | |||
481 | return dest; | 475 | return dest; |
482 | } | 476 | } |
483 | EXPORT_SYMBOL(memcpy); | 477 | EXPORT_SYMBOL(memcpy); |
@@ -492,23 +486,24 @@ EXPORT_SYMBOL(memcpy); | |||
492 | * | 486 | * |
493 | * Unlike memcpy(), memmove() copes with overlapping areas. | 487 | * Unlike memcpy(), memmove() copes with overlapping areas. |
494 | */ | 488 | */ |
495 | void * memmove(void * dest,const void *src,size_t count) | 489 | void *memmove(void *dest, const void *src, size_t count) |
496 | { | 490 | { |
497 | char *tmp, *s; | 491 | char *tmp; |
492 | const char *s; | ||
498 | 493 | ||
499 | if (dest <= src) { | 494 | if (dest <= src) { |
500 | tmp = (char *) dest; | 495 | tmp = dest; |
501 | s = (char *) src; | 496 | s = src; |
502 | while (count--) | 497 | while (count--) |
503 | *tmp++ = *s++; | 498 | *tmp++ = *s++; |
504 | } | 499 | } else { |
505 | else { | 500 | tmp = dest; |
506 | tmp = (char *) dest + count; | 501 | tmp += count; |
507 | s = (char *) src + count; | 502 | s = src; |
503 | s += count; | ||
508 | while (count--) | 504 | while (count--) |
509 | *--tmp = *--s; | 505 | *--tmp = *--s; |
510 | } | 506 | } |
511 | |||
512 | return dest; | 507 | return dest; |
513 | } | 508 | } |
514 | EXPORT_SYMBOL(memmove); | 509 | EXPORT_SYMBOL(memmove); |
@@ -522,12 +517,12 @@ EXPORT_SYMBOL(memmove); | |||
522 | * @count: The size of the area. | 517 | * @count: The size of the area. |
523 | */ | 518 | */ |
524 | #undef memcmp | 519 | #undef memcmp |
525 | int memcmp(const void * cs,const void * ct,size_t count) | 520 | int memcmp(const void *cs, const void *ct, size_t count) |
526 | { | 521 | { |
527 | const unsigned char *su1, *su2; | 522 | const unsigned char *su1, *su2; |
528 | int res = 0; | 523 | int res = 0; |
529 | 524 | ||
530 | for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) | 525 | for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) |
531 | if ((res = *su1 - *su2) != 0) | 526 | if ((res = *su1 - *su2) != 0) |
532 | break; | 527 | break; |
533 | return res; | 528 | return res; |
@@ -545,17 +540,17 @@ EXPORT_SYMBOL(memcmp); | |||
545 | * returns the address of the first occurrence of @c, or 1 byte past | 540 | * returns the address of the first occurrence of @c, or 1 byte past |
546 | * the area if @c is not found | 541 | * the area if @c is not found |
547 | */ | 542 | */ |
548 | void * memscan(void * addr, int c, size_t size) | 543 | void *memscan(void *addr, int c, size_t size) |
549 | { | 544 | { |
550 | unsigned char * p = (unsigned char *) addr; | 545 | unsigned char *p = addr; |
551 | 546 | ||
552 | while (size) { | 547 | while (size) { |
553 | if (*p == c) | 548 | if (*p == c) |
554 | return (void *) p; | 549 | return (void *)p; |
555 | p++; | 550 | p++; |
556 | size--; | 551 | size--; |
557 | } | 552 | } |
558 | return (void *) p; | 553 | return (void *)p; |
559 | } | 554 | } |
560 | EXPORT_SYMBOL(memscan); | 555 | EXPORT_SYMBOL(memscan); |
561 | #endif | 556 | #endif |
@@ -566,18 +561,18 @@ EXPORT_SYMBOL(memscan); | |||
566 | * @s1: The string to be searched | 561 | * @s1: The string to be searched |
567 | * @s2: The string to search for | 562 | * @s2: The string to search for |
568 | */ | 563 | */ |
569 | char * strstr(const char * s1,const char * s2) | 564 | char *strstr(const char *s1, const char *s2) |
570 | { | 565 | { |
571 | int l1, l2; | 566 | int l1, l2; |
572 | 567 | ||
573 | l2 = strlen(s2); | 568 | l2 = strlen(s2); |
574 | if (!l2) | 569 | if (!l2) |
575 | return (char *) s1; | 570 | return (char *)s1; |
576 | l1 = strlen(s1); | 571 | l1 = strlen(s1); |
577 | while (l1 >= l2) { | 572 | while (l1 >= l2) { |
578 | l1--; | 573 | l1--; |
579 | if (!memcmp(s1,s2,l2)) | 574 | if (!memcmp(s1, s2, l2)) |
580 | return (char *) s1; | 575 | return (char *)s1; |
581 | s1++; | 576 | s1++; |
582 | } | 577 | } |
583 | return NULL; | 578 | return NULL; |
@@ -600,7 +595,7 @@ void *memchr(const void *s, int c, size_t n) | |||
600 | const unsigned char *p = s; | 595 | const unsigned char *p = s; |
601 | while (n-- != 0) { | 596 | while (n-- != 0) { |
602 | if ((unsigned char)c == *p++) { | 597 | if ((unsigned char)c == *p++) { |
603 | return (void *)(p-1); | 598 | return (void *)(p - 1); |
604 | } | 599 | } |
605 | } | 600 | } |
606 | return NULL; | 601 | return NULL; |
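
The strncpy()/strlcpy() kernel-doc above is a classic trap: strncpy() pads to the full count but does not guarantee a terminator, while strlcpy() always terminates and returns the source length so callers can detect truncation. A userspace sketch of the difference; lcpy() is a local clone of the strlcpy() shown in this diff, since ISO C has no such function:

/* strlcpy_demo.c - illustrative only. */
#include <stdio.h>
#include <string.h>

/* Local stand-in mirroring the kernel strlcpy() in this patch:
 * always NUL-terminates, returns strlen(src). */
static size_t lcpy(char *dest, const char *src, size_t size)
{
	size_t ret = strlen(src);

	if (size) {
		size_t len = (ret >= size) ? size - 1 : ret;
		memcpy(dest, src, len);
		dest[len] = '\0';
	}
	return ret;
}

int main(void)
{
	char a[4], b[4];
	size_t n;

	strncpy(a, "kernel", sizeof(a));	/* a[0..3] filled, no NUL */
	n = lcpy(b, "kernel", sizeof(b));	/* b = "ker", terminated */

	printf("copy: \"%s\", source was %zu bytes%s\n",
	       b, n, n >= sizeof(b) ? " (truncated)" : "");
	(void)a;	/* printing a with %s would read past the array */
	return 0;
}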
diff --git a/lib/swiotlb.c b/lib/swiotlb.c new file mode 100644 index 000000000000..57216f3544ca --- /dev/null +++ b/lib/swiotlb.c | |||
@@ -0,0 +1,811 @@ | |||
1 | /* | ||
2 | * Dynamic DMA mapping support. | ||
3 | * | ||
4 | * This implementation is for IA-64 and EM64T platforms that do not support | ||
5 | * I/O TLBs (aka DMA address translation hardware). | ||
6 | * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> | ||
7 | * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> | ||
8 | * Copyright (C) 2000, 2003 Hewlett-Packard Co | ||
9 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
10 | * | ||
11 | * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. | ||
12 | * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid | ||
13 | * unnecessary i-cache flushing. | ||
14 | * 04/07/.. ak Better overflow handling. Assorted fixes. | ||
15 | * 05/09/10 linville Add support for syncing ranges, support syncing for | ||
16 | * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. | ||
17 | */ | ||
18 | |||
19 | #include <linux/cache.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/ctype.h> | ||
27 | |||
28 | #include <asm/io.h> | ||
29 | #include <asm/dma.h> | ||
30 | #include <asm/scatterlist.h> | ||
31 | |||
32 | #include <linux/init.h> | ||
33 | #include <linux/bootmem.h> | ||
34 | |||
35 | #define OFFSET(val, align) \ | ||
36 | ((unsigned long)((val) & ((align) - 1))) | ||
37 | |||
38 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) | ||
39 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) | ||
40 | |||
41 | /* | ||
42 | * Maximum allowable number of contiguous slabs to map, | ||
43 | * must be a power of 2. What is the appropriate value? | ||
44 | * The complexity of {map,unmap}_single is linearly dependent on this value. | ||
45 | */ | ||
46 | #define IO_TLB_SEGSIZE 128 | ||
47 | |||
48 | /* | ||
49 | * log of the size of each IO TLB slab. The number of slabs is command line | ||
50 | * controllable. | ||
51 | */ | ||
52 | #define IO_TLB_SHIFT 11 | ||
53 | |||
54 | #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) | ||
55 | |||
56 | /* | ||
57 | * Minimum IO TLB size to bother booting with. Systems with mainly | ||
58 | * 64-bit capable cards will only lightly use the swiotlb. If we can't | ||
59 | * allocate a contiguous 1MB, we're probably in trouble anyway. | ||
60 | */ | ||
61 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) | ||
62 | |||
63 | /* | ||
64 | * Enumeration for sync targets | ||
65 | */ | ||
66 | enum dma_sync_target { | ||
67 | SYNC_FOR_CPU = 0, | ||
68 | SYNC_FOR_DEVICE = 1, | ||
69 | }; | ||
70 | |||
71 | int swiotlb_force; | ||
72 | |||
73 | /* | ||
74 | * Used to do a quick range check in swiotlb_unmap_single and | ||
75 | * swiotlb_sync_single_*, to see if the memory was in fact allocated by this | ||
76 | * API. | ||
77 | */ | ||
78 | static char *io_tlb_start, *io_tlb_end; | ||
79 | |||
80 | /* | ||
81 | * The number of IO TLB blocks (in groups of 64) between io_tlb_start and | ||
82 | * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. | ||
83 | */ | ||
84 | static unsigned long io_tlb_nslabs; | ||
85 | |||
86 | /* | ||
87 | * When the IOMMU overflows we return a fallback buffer. This sets the size. | ||
88 | */ | ||
89 | static unsigned long io_tlb_overflow = 32*1024; | ||
90 | |||
91 | void *io_tlb_overflow_buffer; | ||
92 | |||
93 | /* | ||
94 | * This is a free list describing the number of free entries available from | ||
95 | * each index | ||
96 | */ | ||
97 | static unsigned int *io_tlb_list; | ||
98 | static unsigned int io_tlb_index; | ||
99 | |||
100 | /* | ||
101 | * We need to save away the original address corresponding to a mapped entry | ||
102 | * for the sync operations. | ||
103 | */ | ||
104 | static unsigned char **io_tlb_orig_addr; | ||
105 | |||
106 | /* | ||
107 | * Protect the above data structures in the map and unmap calls | ||
108 | */ | ||
109 | static DEFINE_SPINLOCK(io_tlb_lock); | ||
110 | |||
111 | static int __init | ||
112 | setup_io_tlb_npages(char *str) | ||
113 | { | ||
114 | if (isdigit(*str)) { | ||
115 | io_tlb_nslabs = simple_strtoul(str, &str, 0); | ||
116 | /* avoid tail segment of size < IO_TLB_SEGSIZE */ | ||
117 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
118 | } | ||
119 | if (*str == ',') | ||
120 | ++str; | ||
121 | if (!strcmp(str, "force")) | ||
122 | swiotlb_force = 1; | ||
123 | return 1; | ||
124 | } | ||
125 | __setup("swiotlb=", setup_io_tlb_npages); | ||
126 | /* make io_tlb_overflow tunable too? */ | ||
127 | |||
128 | /* | ||
129 | * Statically reserve bounce buffer space and initialize bounce buffer data | ||
130 | * structures for the software IO TLB used to implement the DMA API. | ||
131 | */ | ||
132 | void | ||
133 | swiotlb_init_with_default_size(size_t default_size) | ||
134 | { | ||
135 | unsigned long i; | ||
136 | |||
137 | if (!io_tlb_nslabs) { | ||
138 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | ||
139 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Get IO TLB memory from the low pages | ||
144 | */ | ||
145 | io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs * | ||
146 | (1 << IO_TLB_SHIFT), 0x100000000); | ||
147 | if (!io_tlb_start) | ||
148 | panic("Cannot allocate SWIOTLB buffer"); | ||
149 | io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); | ||
150 | |||
151 | /* | ||
152 | * Allocate and initialize the free list array. This array is used | ||
153 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE | ||
154 | * between io_tlb_start and io_tlb_end. | ||
155 | */ | ||
156 | io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); | ||
157 | for (i = 0; i < io_tlb_nslabs; i++) | ||
158 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | ||
159 | io_tlb_index = 0; | ||
160 | io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); | ||
161 | |||
162 | /* | ||
163 | * Get the overflow emergency buffer | ||
164 | */ | ||
165 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); | ||
166 | printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", | ||
167 | virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); | ||
168 | } | ||
169 | |||
170 | void | ||
171 | swiotlb_init(void) | ||
172 | { | ||
173 | swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Systems with larger DMA zones (those that don't support ISA) can | ||
178 | * initialize the swiotlb later using the page allocator if needed. | ||
179 | * This should be just like above, but with some error catching. | ||
180 | */ | ||
181 | int | ||
182 | swiotlb_late_init_with_default_size(size_t default_size) | ||
183 | { | ||
184 | unsigned long i, req_nslabs = io_tlb_nslabs; | ||
185 | unsigned int order; | ||
186 | |||
187 | if (!io_tlb_nslabs) { | ||
188 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | ||
189 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * Get IO TLB memory from the low pages | ||
194 | */ | ||
195 | order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); | ||
196 | io_tlb_nslabs = SLABS_PER_PAGE << order; | ||
197 | |||
198 | while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { | ||
199 | io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, | ||
200 | order); | ||
201 | if (io_tlb_start) | ||
202 | break; | ||
203 | order--; | ||
204 | } | ||
205 | |||
206 | if (!io_tlb_start) | ||
207 | goto cleanup1; | ||
208 | |||
209 | if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { | ||
210 | printk(KERN_WARNING "Warning: only able to allocate %ld MB " | ||
211 | "for software IO TLB\n", (PAGE_SIZE << order) >> 20); | ||
212 | io_tlb_nslabs = SLABS_PER_PAGE << order; | ||
213 | } | ||
214 | io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); | ||
215 | memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); | ||
216 | |||
217 | /* | ||
218 | * Allocate and initialize the free list array. This array is used | ||
219 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE | ||
220 | * between io_tlb_start and io_tlb_end. | ||
221 | */ | ||
222 | io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, | ||
223 | get_order(io_tlb_nslabs * sizeof(int))); | ||
224 | if (!io_tlb_list) | ||
225 | goto cleanup2; | ||
226 | |||
227 | for (i = 0; i < io_tlb_nslabs; i++) | ||
228 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | ||
229 | io_tlb_index = 0; | ||
230 | |||
231 | io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, | ||
232 | get_order(io_tlb_nslabs * sizeof(char *))); | ||
233 | if (!io_tlb_orig_addr) | ||
234 | goto cleanup3; | ||
235 | |||
236 | memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); | ||
237 | |||
238 | /* | ||
239 | * Get the overflow emergency buffer | ||
240 | */ | ||
241 | io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA, | ||
242 | get_order(io_tlb_overflow)); | ||
243 | if (!io_tlb_overflow_buffer) | ||
244 | goto cleanup4; | ||
245 | |||
246 | printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " | ||
247 | "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20, | ||
248 | virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); | ||
249 | |||
250 | return 0; | ||
251 | |||
252 | cleanup4: | ||
253 | free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * | ||
254 | sizeof(char *))); | ||
255 | io_tlb_orig_addr = NULL; | ||
256 | cleanup3: | ||
257 | free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * | ||
258 | sizeof(int))); | ||
259 | io_tlb_list = NULL; | ||
260 | io_tlb_end = NULL; | ||
261 | cleanup2: | ||
262 | free_pages((unsigned long)io_tlb_start, order); | ||
263 | io_tlb_start = NULL; | ||
264 | cleanup1: | ||
265 | io_tlb_nslabs = req_nslabs; | ||
266 | return -ENOMEM; | ||
267 | } | ||
268 | |||
269 | static inline int | ||
270 | address_needs_mapping(struct device *hwdev, dma_addr_t addr) | ||
271 | { | ||
272 | dma_addr_t mask = 0xffffffff; | ||
273 | /* If the device has a mask, use it, otherwise default to 32 bits */ | ||
274 | if (hwdev && hwdev->dma_mask) | ||
275 | mask = *hwdev->dma_mask; | ||
276 | return (addr & ~mask) != 0; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * Allocates bounce buffer and returns its kernel virtual address. | ||
281 | */ | ||
282 | static void * | ||
283 | map_single(struct device *hwdev, char *buffer, size_t size, int dir) | ||
284 | { | ||
285 | unsigned long flags; | ||
286 | char *dma_addr; | ||
287 | unsigned int nslots, stride, index, wrap; | ||
288 | int i; | ||
289 | |||
290 | /* | ||
291 | * For mappings greater than a page, we limit the stride (and | ||
292 | * hence alignment) to a page size. | ||
293 | */ | ||
294 | nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | ||
295 | if (size > PAGE_SIZE) | ||
296 | stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); | ||
297 | else | ||
298 | stride = 1; | ||
299 | |||
300 | if (!nslots) | ||
301 | BUG(); | ||
302 | |||
303 | /* | ||
304 | * Find a suitable number of IO TLB entries that will fit this | ||
305 | * request and allocate a buffer from that IO TLB pool. | ||
306 | */ | ||
307 | spin_lock_irqsave(&io_tlb_lock, flags); | ||
308 | { | ||
309 | wrap = index = ALIGN(io_tlb_index, stride); | ||
310 | |||
311 | if (index >= io_tlb_nslabs) | ||
312 | wrap = index = 0; | ||
313 | |||
314 | do { | ||
315 | /* | ||
316 | * If we find a slot that indicates we have 'nslots' | ||
317 | * number of contiguous buffers, we allocate the | ||
318 | * buffers from that slot and mark the entries as '0' | ||
319 | * indicating unavailable. | ||
320 | */ | ||
321 | if (io_tlb_list[index] >= nslots) { | ||
322 | int count = 0; | ||
323 | |||
324 | for (i = index; i < (int) (index + nslots); i++) | ||
325 | io_tlb_list[i] = 0; | ||
326 | for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) | ||
327 | io_tlb_list[i] = ++count; | ||
328 | dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); | ||
329 | |||
330 | /* | ||
331 | * Update the indices to avoid searching in | ||
332 | * the next round. | ||
333 | */ | ||
334 | io_tlb_index = ((index + nslots) < io_tlb_nslabs | ||
335 | ? (index + nslots) : 0); | ||
336 | |||
337 | goto found; | ||
338 | } | ||
339 | index += stride; | ||
340 | if (index >= io_tlb_nslabs) | ||
341 | index = 0; | ||
342 | } while (index != wrap); | ||
343 | |||
344 | spin_unlock_irqrestore(&io_tlb_lock, flags); | ||
345 | return NULL; | ||
346 | } | ||
347 | found: | ||
348 | spin_unlock_irqrestore(&io_tlb_lock, flags); | ||
349 | |||
350 | /* | ||
351 | * Save away the mapping from the original address to the DMA address. | ||
352 | * This is needed when we sync the memory. Then we sync the buffer if | ||
353 | * needed. | ||
354 | */ | ||
355 | io_tlb_orig_addr[index] = buffer; | ||
356 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) | ||
357 | memcpy(dma_addr, buffer, size); | ||
358 | |||
359 | return dma_addr; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | ||
364 | */ | ||
365 | static void | ||
366 | unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | ||
367 | { | ||
368 | unsigned long flags; | ||
369 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | ||
370 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | ||
371 | char *buffer = io_tlb_orig_addr[index]; | ||
372 | |||
373 | /* | ||
374 | * First, sync the memory before unmapping the entry | ||
375 | */ | ||
376 | if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) | ||
377 | /* | ||
378 | * bounce... copy the data back into the original buffer and | ||
379 | * delete the bounce buffer. | ||
380 | */ | ||
381 | memcpy(buffer, dma_addr, size); | ||
382 | |||
383 | /* | ||
384 | * Return the buffer to the free list by setting the corresponding | ||
385 | * entries to indicate the number of contiguous entries available. | ||
386 | * While returning the entries to the free list, we merge the entries | ||
387 | * with slots below and above the pool being returned. | ||
388 | */ | ||
389 | spin_lock_irqsave(&io_tlb_lock, flags); | ||
390 | { | ||
391 | count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ? | ||
392 | io_tlb_list[index + nslots] : 0); | ||
393 | /* | ||
394 | * Step 1: return the slots to the free list, merging them | ||
395 | * with the succeeding slots | ||
396 | */ | ||
397 | for (i = index + nslots - 1; i >= index; i--) | ||
398 | io_tlb_list[i] = ++count; | ||
399 | /* | ||
400 | * Step 2: merge the returned slots with the preceding slots, | ||
401 | * if available (non-zero) | ||
402 | */ | ||
403 | for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) | ||
404 | io_tlb_list[i] = ++count; | ||
405 | } | ||
406 | spin_unlock_irqrestore(&io_tlb_lock, flags); | ||
407 | } | ||
408 | |||
409 | static void | ||
410 | sync_single(struct device *hwdev, char *dma_addr, size_t size, | ||
411 | int dir, int target) | ||
412 | { | ||
413 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | ||
414 | char *buffer = io_tlb_orig_addr[index]; | ||
415 | |||
416 | switch (target) { | ||
417 | case SYNC_FOR_CPU: | ||
418 | if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) | ||
419 | memcpy(buffer, dma_addr, size); | ||
420 | else if (dir != DMA_TO_DEVICE) | ||
421 | BUG(); | ||
422 | break; | ||
423 | case SYNC_FOR_DEVICE: | ||
424 | if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) | ||
425 | memcpy(dma_addr, buffer, size); | ||
426 | else if (dir != DMA_FROM_DEVICE) | ||
427 | BUG(); | ||
428 | break; | ||
429 | default: | ||
430 | BUG(); | ||
431 | } | ||
432 | } | ||
433 | |||
434 | void * | ||
435 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, | ||
436 | dma_addr_t *dma_handle, gfp_t flags) | ||
437 | { | ||
438 | unsigned long dev_addr; | ||
439 | void *ret; | ||
440 | int order = get_order(size); | ||
441 | |||
442 | /* | ||
443 | * XXX fix me: the DMA API should pass us an explicit DMA mask | ||
444 | * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32 | ||
445 | * bit range instead of a 16MB one). | ||
446 | */ | ||
447 | flags |= GFP_DMA; | ||
448 | |||
449 | ret = (void *)__get_free_pages(flags, order); | ||
450 | if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { | ||
451 | /* | ||
452 | * The allocated memory isn't reachable by the device. | ||
453 | * Fall back on swiotlb_map_single(). | ||
454 | */ | ||
455 | free_pages((unsigned long) ret, order); | ||
456 | ret = NULL; | ||
457 | } | ||
458 | if (!ret) { | ||
459 | /* | ||
460 | * We are either out of memory or the device can't DMA | ||
461 | * to GFP_DMA memory; fall back on | ||
462 | * swiotlb_map_single(), which will grab memory from | ||
463 | * the lowest available address range. | ||
464 | */ | ||
465 | dma_addr_t handle; | ||
466 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); | ||
467 | if (dma_mapping_error(handle)) | ||
468 | return NULL; | ||
469 | |||
470 | ret = phys_to_virt(handle); | ||
471 | } | ||
472 | |||
473 | memset(ret, 0, size); | ||
474 | dev_addr = virt_to_phys(ret); | ||
475 | |||
476 | /* Confirm address can be DMA'd by device */ | ||
477 | if (address_needs_mapping(hwdev, dev_addr)) { | ||
478 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", | ||
479 | (unsigned long long)*hwdev->dma_mask, dev_addr); | ||
480 | panic("swiotlb_alloc_coherent: allocated memory is out of " | ||
481 | "range for device"); | ||
482 | } | ||
483 | *dma_handle = dev_addr; | ||
484 | return ret; | ||
485 | } | ||
486 | |||
487 | void | ||
488 | swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | ||
489 | dma_addr_t dma_handle) | ||
490 | { | ||
491 | if (!(vaddr >= (void *)io_tlb_start | ||
492 | && vaddr < (void *)io_tlb_end)) | ||
493 | free_pages((unsigned long) vaddr, get_order(size)); | ||
494 | else | ||
495 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | ||
496 | swiotlb_unmap_single(hwdev, dma_handle, size, DMA_TO_DEVICE); | ||
497 | } | ||
498 | |||
499 | static void | ||
500 | swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | ||
501 | { | ||
502 | /* | ||
503 | * Ran out of IOMMU space for this operation. This is very bad. | ||
504 | * Unfortunately drivers cannot handle this operation properly | ||
505 | * unless they check for dma_mapping_error (most don't). | ||
506 | * When the mapping is small enough, return a static buffer to limit | ||
507 | * the damage, or panic when the transfer is too big. | ||
508 | */ | ||
509 | printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " | ||
510 | "device %s\n", size, dev ? dev->bus_id : "?"); | ||
511 | |||
512 | if (size > io_tlb_overflow && do_panic) { | ||
513 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | ||
514 | panic("DMA: Memory would be corrupted\n"); | ||
515 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) | ||
516 | panic("DMA: Random memory would be DMAed\n"); | ||
517 | } | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * Map a single buffer of the indicated size for DMA in streaming mode. The | ||
522 | * physical address to use is returned. | ||
523 | * | ||
524 | * Once the device is given the dma address, the device owns this memory until | ||
525 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. | ||
526 | */ | ||
527 | dma_addr_t | ||
528 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | ||
529 | { | ||
530 | unsigned long dev_addr = virt_to_phys(ptr); | ||
531 | void *map; | ||
532 | |||
533 | if (dir == DMA_NONE) | ||
534 | BUG(); | ||
535 | /* | ||
536 | * If the pointer passed in happens to be in the device's DMA window, | ||
537 | * we can safely return the device addr and not worry about bounce | ||
538 | * buffering it. | ||
539 | */ | ||
540 | if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) | ||
541 | return dev_addr; | ||
542 | |||
543 | /* | ||
544 | * Oh well, have to allocate and map a bounce buffer. | ||
545 | */ | ||
546 | map = map_single(hwdev, ptr, size, dir); | ||
547 | if (!map) { | ||
548 | swiotlb_full(hwdev, size, dir, 1); | ||
549 | map = io_tlb_overflow_buffer; | ||
550 | } | ||
551 | |||
552 | dev_addr = virt_to_phys(map); | ||
553 | |||
554 | /* | ||
555 | * Ensure that the address returned is DMA'ble | ||
556 | */ | ||
557 | if (address_needs_mapping(hwdev, dev_addr)) | ||
558 | panic("map_single: bounce buffer is not DMA'ble"); | ||
559 | |||
560 | return dev_addr; | ||
561 | } | ||
562 | |||
563 | /* | ||
564 | * Since DMA is i-cache coherent, any (complete) pages that were written via | ||
565 | * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to | ||
566 | * flush them when they get mapped into an executable vm-area. | ||
567 | */ | ||
568 | static void | ||
569 | mark_clean(void *addr, size_t size) | ||
570 | { | ||
571 | unsigned long pg_addr, end; | ||
572 | |||
573 | pg_addr = PAGE_ALIGN((unsigned long) addr); | ||
574 | end = (unsigned long) addr + size; | ||
575 | while (pg_addr + PAGE_SIZE <= end) { | ||
576 | struct page *page = virt_to_page(pg_addr); | ||
577 | set_bit(PG_arch_1, &page->flags); | ||
578 | pg_addr += PAGE_SIZE; | ||
579 | } | ||
580 | } | ||
581 | |||
582 | /* | ||
583 | * Unmap a single streaming mode DMA translation. The dma_addr and size must | ||
584 | * match what was provided for in a previous swiotlb_map_single call. All | ||
585 | * other usages are undefined. | ||
586 | * | ||
587 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
588 | * whatever the device wrote there. | ||
589 | */ | ||
590 | void | ||
591 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | ||
592 | int dir) | ||
593 | { | ||
594 | char *dma_addr = phys_to_virt(dev_addr); | ||
595 | |||
596 | if (dir == DMA_NONE) | ||
597 | BUG(); | ||
598 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | ||
599 | unmap_single(hwdev, dma_addr, size, dir); | ||
600 | else if (dir == DMA_FROM_DEVICE) | ||
601 | mark_clean(dma_addr, size); | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Make physical memory consistent for a single streaming mode DMA translation | ||
606 | * after a transfer. | ||
607 | * | ||
608 | * If you perform a swiotlb_map_single() but wish to interrogate the buffer | ||
609 | * using the cpu, yet do not wish to tear down the dma mapping, you must | ||
610 | * call this function before doing so. At the next point you give the dma | ||
611 | * address back to the card, you must first perform a | ||
612 | * swiotlb_dma_sync_for_device, and then the device again owns the buffer. | ||
613 | */ | ||
614 | static inline void | ||
615 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | ||
616 | size_t size, int dir, int target) | ||
617 | { | ||
618 | char *dma_addr = phys_to_virt(dev_addr); | ||
619 | |||
620 | if (dir == DMA_NONE) | ||
621 | BUG(); | ||
622 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | ||
623 | sync_single(hwdev, dma_addr, size, dir, target); | ||
624 | else if (dir == DMA_FROM_DEVICE) | ||
625 | mark_clean(dma_addr, size); | ||
626 | } | ||
627 | |||
628 | void | ||
629 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | ||
630 | size_t size, int dir) | ||
631 | { | ||
632 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); | ||
633 | } | ||
634 | |||
635 | void | ||
636 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, | ||
637 | size_t size, int dir) | ||
638 | { | ||
639 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); | ||
640 | } | ||
641 | |||
642 | /* | ||
643 | * Same as above, but for a sub-range of the mapping. | ||
644 | */ | ||
645 | static inline void | ||
646 | swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, | ||
647 | unsigned long offset, size_t size, | ||
648 | int dir, int target) | ||
649 | { | ||
650 | char *dma_addr = phys_to_virt(dev_addr) + offset; | ||
651 | |||
652 | if (dir == DMA_NONE) | ||
653 | BUG(); | ||
654 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | ||
655 | sync_single(hwdev, dma_addr, size, dir, target); | ||
656 | else if (dir == DMA_FROM_DEVICE) | ||
657 | mark_clean(dma_addr, size); | ||
658 | } | ||
659 | |||
660 | void | ||
661 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | ||
662 | unsigned long offset, size_t size, int dir) | ||
663 | { | ||
664 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, | ||
665 | SYNC_FOR_CPU); | ||
666 | } | ||
667 | |||
668 | void | ||
669 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | ||
670 | unsigned long offset, size_t size, int dir) | ||
671 | { | ||
672 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, | ||
673 | SYNC_FOR_DEVICE); | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * Map a set of buffers described by scatterlist in streaming mode for DMA. | ||
678 | * This is the scatter-gather version of the above swiotlb_map_single | ||
679 | * interface. Here the scatter gather list elements are each tagged with the | ||
680 | * appropriate dma address and length. They are obtained via | ||
681 | * sg_dma_{address,length}(SG). | ||
682 | * | ||
683 | * NOTE: An implementation may be able to use a smaller number of | ||
684 | * DMA address/length pairs than there are SG table elements. | ||
685 | * (for example via virtual mapping capabilities) | ||
686 | * The routine returns the number of addr/length pairs actually | ||
687 | * used, at most nents. | ||
688 | * | ||
689 | * Device ownership issues as mentioned above for swiotlb_map_single are the | ||
690 | * same here. | ||
691 | */ | ||
692 | int | ||
693 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, | ||
694 | int dir) | ||
695 | { | ||
696 | void *addr; | ||
697 | unsigned long dev_addr; | ||
698 | int i; | ||
699 | |||
700 | if (dir == DMA_NONE) | ||
701 | BUG(); | ||
702 | |||
703 | for (i = 0; i < nelems; i++, sg++) { | ||
704 | addr = SG_ENT_VIRT_ADDRESS(sg); | ||
705 | dev_addr = virt_to_phys(addr); | ||
706 | if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { | ||
707 | sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir)); | ||
708 | if (!sg->dma_address) { | ||
709 | /* Don't panic here; we expect map_sg users | ||
710 | to do proper error handling. */ | ||
711 | swiotlb_full(hwdev, sg->length, dir, 0); | ||
712 | swiotlb_unmap_sg(hwdev, sg - i, i, dir); | ||
713 | sg[0].dma_length = 0; | ||
714 | return 0; | ||
715 | } | ||
716 | } else | ||
717 | sg->dma_address = dev_addr; | ||
718 | sg->dma_length = sg->length; | ||
719 | } | ||
720 | return nelems; | ||
721 | } | ||
722 | |||
723 | /* | ||
724 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules | ||
725 | * concerning calls here are the same as for swiotlb_unmap_single() above. | ||
726 | */ | ||
727 | void | ||
728 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems, | ||
729 | int dir) | ||
730 | { | ||
731 | int i; | ||
732 | |||
733 | if (dir == DMA_NONE) | ||
734 | BUG(); | ||
735 | |||
736 | for (i = 0; i < nelems; i++, sg++) | ||
737 | if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) | ||
738 | unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir); | ||
739 | else if (dir == DMA_FROM_DEVICE) | ||
740 | mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); | ||
741 | } | ||
742 | |||
743 | /* | ||
744 | * Make physical memory consistent for a set of streaming mode DMA translations | ||
745 | * after a transfer. | ||
746 | * | ||
747 | * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules | ||
748 | * and usage. | ||
749 | */ | ||
750 | static inline void | ||
751 | swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, | ||
752 | int nelems, int dir, int target) | ||
753 | { | ||
754 | int i; | ||
755 | |||
756 | if (dir == DMA_NONE) | ||
757 | BUG(); | ||
758 | |||
759 | for (i = 0; i < nelems; i++, sg++) | ||
760 | if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) | ||
761 | sync_single(hwdev, (void *) sg->dma_address, | ||
762 | sg->dma_length, dir, target); | ||
763 | } | ||
764 | |||
765 | void | ||
766 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | ||
767 | int nelems, int dir) | ||
768 | { | ||
769 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); | ||
770 | } | ||
771 | |||
772 | void | ||
773 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | ||
774 | int nelems, int dir) | ||
775 | { | ||
776 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); | ||
777 | } | ||
778 | |||
779 | int | ||
780 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) | ||
781 | { | ||
782 | return (dma_addr == virt_to_phys(io_tlb_overflow_buffer)); | ||
783 | } | ||
784 | |||
785 | /* | ||
786 | * Return whether the given device DMA address mask can be supported | ||
787 | * properly. For example, if your device can only drive the low 24-bits | ||
788 | * during bus mastering, then you would pass 0x00ffffff as the mask to | ||
789 | * this function. | ||
790 | */ | ||
791 | int | ||
792 | swiotlb_dma_supported(struct device *hwdev, u64 mask) | ||
793 | { | ||
794 | return (virt_to_phys(io_tlb_end) - 1) <= mask; | ||
795 | } | ||
796 | |||
797 | EXPORT_SYMBOL(swiotlb_init); | ||
798 | EXPORT_SYMBOL(swiotlb_map_single); | ||
799 | EXPORT_SYMBOL(swiotlb_unmap_single); | ||
800 | EXPORT_SYMBOL(swiotlb_map_sg); | ||
801 | EXPORT_SYMBOL(swiotlb_unmap_sg); | ||
802 | EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); | ||
803 | EXPORT_SYMBOL(swiotlb_sync_single_for_device); | ||
804 | EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); | ||
805 | EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | ||
806 | EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); | ||
807 | EXPORT_SYMBOL(swiotlb_sync_sg_for_device); | ||
808 | EXPORT_SYMBOL(swiotlb_dma_mapping_error); | ||
809 | EXPORT_SYMBOL(swiotlb_alloc_coherent); | ||
810 | EXPORT_SYMBOL(swiotlb_free_coherent); | ||
811 | EXPORT_SYMBOL(swiotlb_dma_supported); | ||
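
Taken together, the new file is the generic bounce-buffer backend for the streaming DMA API: the swiotlb_* entry points bounce only when a buffer falls outside the device's DMA mask (or swiotlb=force is set), and the sync calls keep the bounce copy coherent mid-transfer. A hedged sketch of the lifecycle as a hypothetical driver helper; dev, buf and len belong to the imaginary caller, and the header set is approximate for this era:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only, not part of this patch: a receive-path buffer
 * that the device writes into, with a CPU peek mid-flight. Each call
 * below lands in the corresponding swiotlb function added above.
 */
static int demo_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = swiotlb_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (swiotlb_dma_mapping_error(handle))
		return -ENOMEM;	/* we were handed the overflow buffer */

	/* ... device DMAs into buf (bounced or not) ... */

	/* Look at the data without tearing the mapping down: */
	swiotlb_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect buf ... */
	swiotlb_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* Copies the bounce data back (DMA_FROM_DEVICE) and frees the slots. */
	swiotlb_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}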
diff --git a/lib/textsearch.c b/lib/textsearch.c index 1e934c196f0f..6f3093efbd7b 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c | |||
@@ -254,7 +254,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf, | |||
254 | * parameters or an ERR_PTR(). | 254 | * parameters or an ERR_PTR(). |
255 | */ | 255 | */ |
256 | struct ts_config *textsearch_prepare(const char *algo, const void *pattern, | 256 | struct ts_config *textsearch_prepare(const char *algo, const void *pattern, |
257 | unsigned int len, int gfp_mask, int flags) | 257 | unsigned int len, gfp_t gfp_mask, int flags) |
258 | { | 258 | { |
259 | int err = -ENOENT; | 259 | int err = -ENOENT; |
260 | struct ts_config *conf; | 260 | struct ts_config *conf; |
diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 2cc79112ecc3..8a8b3a16133e 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c | |||
@@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, | |||
127 | } | 127 | } |
128 | 128 | ||
129 | static struct ts_config *bm_init(const void *pattern, unsigned int len, | 129 | static struct ts_config *bm_init(const void *pattern, unsigned int len, |
130 | int gfp_mask) | 130 | gfp_t gfp_mask) |
131 | { | 131 | { |
132 | struct ts_config *conf; | 132 | struct ts_config *conf; |
133 | struct ts_bm *bm; | 133 | struct ts_bm *bm; |
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index d27c0a072940..ca3211206eef 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c | |||
@@ -258,7 +258,7 @@ found_match: | |||
258 | } | 258 | } |
259 | 259 | ||
260 | static struct ts_config *fsm_init(const void *pattern, unsigned int len, | 260 | static struct ts_config *fsm_init(const void *pattern, unsigned int len, |
261 | int gfp_mask) | 261 | gfp_t gfp_mask) |
262 | { | 262 | { |
263 | int i, err = -EINVAL; | 263 | int i, err = -EINVAL; |
264 | struct ts_config *conf; | 264 | struct ts_config *conf; |
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 73266b975585..7fd45451b44a 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c | |||
@@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, | |||
87 | } | 87 | } |
88 | 88 | ||
89 | static struct ts_config *kmp_init(const void *pattern, unsigned int len, | 89 | static struct ts_config *kmp_init(const void *pattern, unsigned int len, |
90 | int gfp_mask) | 90 | gfp_t gfp_mask) |
91 | { | 91 | { |
92 | struct ts_config *conf; | 92 | struct ts_config *conf; |
93 | struct ts_kmp *kmp; | 93 | struct ts_kmp *kmp; |
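
The four textsearch hunks above make the same repair: the allocation mask travels as the typed gfp_t instead of a bare int, so sparse's __bitwise checking catches callers mixing up masks and ordinary integers. A sketch of a caller on the fixed signature; find_foo() and its buffer are made up, but the textsearch calls are the real interface:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/textsearch.h>

/* Hypothetical caller: locate "foo" with the Boyer-Moore backend.
 * GFP_KERNEL is now type-checked as gfp_t rather than sliding
 * through an int parameter.
 */
static int find_foo(const char *haystack, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("bm", "foo", 3, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, haystack, len);
	textsearch_destroy(conf);

	return pos == UINT_MAX ? -ENOENT : (int)pos;
}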
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e4e9031dd9c3..b07db5ca3f66 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/ctype.h> | 23 | #include <linux/ctype.h> |
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | 25 | ||
26 | #include <asm/page.h> /* for PAGE_SIZE */ | ||
26 | #include <asm/div64.h> | 27 | #include <asm/div64.h> |
27 | 28 | ||
28 | /** | 29 | /** |
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index 3d94cb90c1d3..31b9e9054bf7 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c | |||
@@ -3,7 +3,6 @@ | |||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | 3 | * For conditions of distribution and use, see copyright notice in zlib.h |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/zutil.h> | 6 | #include <linux/zutil.h> |
8 | #include "infblock.h" | 7 | #include "infblock.h" |
9 | #include "infutil.h" | 8 | #include "infutil.h" |