Diffstat (limited to 'lib')

 lib/Kconfig.debug      |  60
 lib/Makefile           |   6
 lib/atomic64.c         |  66
 lib/bitmap.c           |  12
 lib/dma-debug.c        |  69
 lib/dynamic_debug.c    | 174
 lib/fault-inject.c     |  25
 lib/genalloc.c         | 300
 lib/hexdump.c          |  15
 lib/idr.c              |  76
 lib/kobject_uevent.c   |   2
 lib/kstrtox.c          |  75
 lib/kstrtox.h          |   8
 lib/llist.c            |  89
 lib/md5.c              |  95
 lib/nlattr.c           |   1
 lib/percpu_counter.c   |  20
 lib/proportions.c      |  12
 lib/radix-tree.c       | 131
 lib/raid6/algos.c      |   1
 lib/raid6/int.uc       |   2
 lib/raid6/mktables.c   |   1
 lib/raid6/recov.c      |   1
 lib/ratelimit.c        |   4
 lib/rwsem-spinlock.c   |  38
 lib/rwsem.c            |  14
 lib/sha1.c             | 211
 lib/smp_processor_id.c |   2
 lib/spinlock_debug.c   |  19
 lib/string.c           |  57
 lib/vsprintf.c         |  47
 lib/xz/xz_dec_bcj.c    |  27

 32 files changed, 1264 insertions, 396 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c0cb9c4bc46d..82928f5ea049 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -117,31 +117,31 @@ config DEBUG_SECTION_MISMATCH
 	help
 	  The section mismatch analysis checks if there are illegal
 	  references from one section to another section.
-	  Linux will during link or during runtime drop some sections
-	  and any use of code/data previously in these sections will
+	  During linktime or runtime, some sections are dropped;
+	  any use of code/data previously in these sections would
 	  most likely result in an oops.
-	  In the code functions and variables are annotated with
-	  __init, __devinit etc. (see full list in include/linux/init.h)
+	  In the code, functions and variables are annotated with
+	  __init, __devinit, etc. (see the full list in include/linux/init.h),
 	  which results in the code/data being placed in specific sections.
-	  The section mismatch analysis is always done after a full
-	  kernel build but enabling this option will in addition
-	  do the following:
-	  - Add the option -fno-inline-functions-called-once to gcc
-	    When inlining a function annotated __init in a non-init
-	    function we would lose the section information and thus
+	  The section mismatch analysis is always performed after a full
+	  kernel build, and enabling this option causes the following
+	  additional steps to occur:
+	  - Add the option -fno-inline-functions-called-once to gcc commands.
+	    When inlining a function annotated with __init in a non-init
+	    function, we would lose the section information and thus
 	    the analysis would not catch the illegal reference.
-	    This option tells gcc to inline less but will also
-	    result in a larger kernel.
-	  - Run the section mismatch analysis for each module/built-in.o
-	    When we run the section mismatch analysis on vmlinux.o we
+	    This option tells gcc to inline less (but it does result in
+	    a larger kernel).
+	  - Run the section mismatch analysis for each module/built-in.o file.
+	    When we run the section mismatch analysis on vmlinux.o, we
 	    lose valuable information about where the mismatch was
 	    introduced.
 	    Running the analysis for each module/built-in.o file
-	    will tell where the mismatch happens much closer to the
-	    source. The drawback is that we will report the same
-	    mismatch at least twice.
-	  - Enable verbose reporting from modpost to help solving
-	    the section mismatches reported.
+	    tells where the mismatch happens much closer to the
+	    source. The drawback is that the same mismatch is
+	    reported at least twice.
+	  - Enable verbose reporting from modpost in order to help resolve
+	    the section mismatches that are reported.
 
 config DEBUG_KERNEL
 	bool "Kernel debugging"
@@ -248,8 +248,9 @@ config DEFAULT_HUNG_TASK_TIMEOUT
 	  to determine when a task has become non-responsive and should
 	  be considered hung.
 
-	  It can be adjusted at runtime via the kernel.hung_task_timeout
-	  sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout.
+	  It can be adjusted at runtime via the kernel.hung_task_timeout_secs
+	  sysctl or by writing a value to
+	  /proc/sys/kernel/hung_task_timeout_secs.
 
 	  A timeout of 0 disables the check. The default is two minutes.
 	  Keeping the default should be fine in most cases.
@@ -835,7 +836,7 @@ config DEBUG_CREDENTIALS
 
 #
 # Select this config option from the architecture Kconfig, if it
-# it is preferred to always offer frame pointers as a config
+# is preferred to always offer frame pointers as a config
 # option on the architecture (regardless of KERNEL_DEBUG):
 #
 config ARCH_WANT_FRAME_POINTERS
@@ -1070,6 +1071,17 @@ config FAIL_IO_TIMEOUT
 	  Only works with drivers that use the generic timeout handling,
 	  for others it won't do anything.
 
+config FAIL_MMC_REQUEST
+	bool "Fault-injection capability for MMC IO"
+	select DEBUG_FS
+	depends on FAULT_INJECTION && MMC
+	help
+	  Provide fault-injection capability for MMC IO.
+	  This will make the mmc core return data errors. This is
+	  useful to test the error handling in the mmc block device
+	  and to test how the mmc host driver handles retries from
+	  the block device.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1081,7 +1093,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1091,7 +1103,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
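
A note on the DEBUG_SECTION_MISMATCH help text above: the "illegal reference" it describes looks like the following minimal sketch (the function names here are hypothetical, for illustration only).

/* probe_hw() is placed in .init.text and discarded after boot. */
static int __init probe_hw(void)
{
	return 0;
}

/* Regular .text code may run at any time, so this call could jump
 * into freed init memory; modpost reports it as a section mismatch. */
static int reopen_device(void)
{
	return probe_hw();
}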
diff --git a/lib/Makefile b/lib/Makefile
index 892f4e282ea1..a4da283f5dc0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,9 +10,9 @@ endif
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
+	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o find_next_bit.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-	 bsearch.o find_last_bit.o
+	 bsearch.o find_last_bit.o find_next_bit.o llist.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e12ae0dd08a8..3975470caf4f 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,11 +29,11 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
 
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
 void atomic64_sub(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
 	int i;
 
 	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
+		raw_spin_lock_init(&atomic64_lock[i].lock);
 	return 0;
 }
 
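
The spinlock_t to raw_spinlock_t conversion above matters on PREEMPT_RT, where a plain spinlock_t can become a sleeping lock; the atomic64_* emulation must work from any context, so it needs a lock that always spins. A minimal sketch of the hashed-lock pattern this file relies on (lock count and hash simplified here; the real lock_addr() mixes more address bits):

#define MY_NR_LOCKS	16	/* illustration only; the file uses NR_LOCKS */

static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];	/* keep each lock on its own cacheline */
} my_locks[MY_NR_LOCKS] __cacheline_aligned_in_smp;

static raw_spinlock_t *my_lock_addr(const void *v)
{
	/* Drop the cacheline-offset bits, then pick one of the locks,
	 * so nearby variables spread across different locks. */
	unsigned long addr = (unsigned long)v >> L1_CACHE_SHIFT;

	return &my_locks[addr & (MY_NR_LOCKS - 1)].lock;
}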
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 37ef4b048795..0d4a127dd9b3 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
 
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
 void bitmap_set(unsigned long *map, int start, int nr)
 {
 	unsigned long *p = map + BIT_WORD(start);
@@ -421,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 {
 	int c, old_c, totaldigits, ndigits, nchunks, nbits;
 	u32 chunk;
-	const char __user *ubuf = buf;
+	const char __user __force *ubuf = (const char __user __force *)buf;
 
 	bitmap_zero(maskp, nmaskbits);
 
@@ -506,7 +504,9 @@ int bitmap_parse_user(const char __user *ubuf,
 {
 	if (!access_ok(VERIFY_READ, ubuf, ulen))
 		return -EFAULT;
-	return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
+	return __bitmap_parse((const char __force *)ubuf,
+				ulen, 1, maskp, nmaskbits);
+
 }
 EXPORT_SYMBOL(bitmap_parse_user);
 
@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 {
 	unsigned a, b;
 	int c, old_c, totaldigits;
-	const char __user *ubuf = buf;
+	const char __user __force *ubuf = (const char __user __force *)buf;
 	int exp_digit, in_range;
 
 	totaldigits = c = 0;
@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
 {
 	if (!access_ok(VERIFY_READ, ubuf, ulen))
 		return -EFAULT;
-	return __bitmap_parselist((const char __force *)ubuf,
 					ulen, 1, maskp, nmaskbits);
 }
 EXPORT_SYMBOL(bitmap_parselist_user);
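
The __force casts above are sparse annotations: __bitmap_parse() and __bitmap_parselist() accept either a kernel buffer or (when their is_user flag is set) a user buffer through the same const char * parameter, and __force tells sparse the address-space conversion is intentional rather than a bug. A hedged sketch of the dual-access pattern (helper name hypothetical):

static int get_char_at(const char *buf, const char __user __force *ubuf,
		       int is_user, int pos, char *out)
{
	if (is_user)			/* user pointer: checked access */
		return __get_user(*out, ubuf + pos) ? -EFAULT : 0;
	*out = buf[pos];		/* kernel pointer: plain dereference */
	return 0;
}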
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index db07bfd9298e..74c6c7fce749 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
@@ -62,6 +63,8 @@ struct dma_debug_entry {
 #endif
 };
 
+typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
+
 struct hash_bucket {
 	struct list_head list;
 	spinlock_t lock;
@@ -240,18 +243,37 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 	spin_unlock_irqrestore(&bucket->lock, __flags);
 }
 
+static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
+{
+	return ((a->dev_addr == b->dev_addr) &&
+		(a->dev == b->dev)) ? true : false;
+}
+
+static bool containing_match(struct dma_debug_entry *a,
+			     struct dma_debug_entry *b)
+{
+	if (a->dev != b->dev)
+		return false;
+
+	if ((b->dev_addr <= a->dev_addr) &&
+	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
+		return true;
+
+	return false;
+}
+
 /*
  * Search a given entry in the hash bucket list
  */
-static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
-						struct dma_debug_entry *ref)
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+						  struct dma_debug_entry *ref,
+						  match_fn match)
 {
 	struct dma_debug_entry *entry, *ret = NULL;
 	int matches = 0, match_lvl, last_lvl = 0;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr != ref->dev_addr) ||
-		    (entry->dev != ref->dev))
+		if (!match(ref, entry))
 			continue;
 
 		/*
@@ -293,6 +315,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 	return ret;
 }
 
+static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
+						 struct dma_debug_entry *ref)
+{
+	return __hash_bucket_find(bucket, ref, exact_match);
+}
+
+static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
+						   struct dma_debug_entry *ref,
+						   unsigned long *flags)
+{
+
+	unsigned int max_range = dma_get_max_seg_size(ref->dev);
+	struct dma_debug_entry *entry, index = *ref;
+	unsigned int range = 0;
+
+	while (range <= max_range) {
+		entry = __hash_bucket_find(*bucket, &index, containing_match);
+
+		if (entry)
+			return entry;
+
+		/*
+		 * Nothing found, go back a hash bucket
+		 */
+		put_hash_bucket(*bucket, flags);
+		range          += (1 << HASH_FN_SHIFT);
+		index.dev_addr -= (1 << HASH_FN_SHIFT);
+		*bucket = get_hash_bucket(&index, flags);
+	}
+
+	return NULL;
+}
+
 /*
  * Add an entry to a hash bucket
  */
@@ -802,7 +857,7 @@ static void check_unmap(struct dma_debug_entry *ref)
 	}
 
 	bucket = get_hash_bucket(ref, &flags);
-	entry = hash_bucket_find(bucket, ref);
+	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
 		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
@@ -902,7 +957,7 @@ static void check_sync(struct device *dev,
 
 	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, ref);
+	entry = bucket_find_contain(&bucket, ref, &flags);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
@@ -1060,7 +1115,7 @@ static int get_nr_mapped_entries(struct device *dev,
 	int mapped_ents;
 
 	bucket = get_hash_bucket(ref, &flags);
-	entry = hash_bucket_find(bucket, ref);
+	entry = bucket_find_exact(bucket, ref);
 	mapped_ents = 0;
 
 	if (entry)
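
To see why bucket_find_contain() steps backwards: entries are hashed by device address, so a sync on a sub-range of a mapping can hash to a later bucket than the entry describing the whole mapping. The loop therefore retries earlier buckets, moving index.dev_addr back one bucket width at a time, until a containing entry is found or the device's maximum segment size is exceeded. The containment test itself reduces to a plain interval check, shown standalone here as a sketch:

/* Mirrors containing_match(): does the stored mapping
 * [b_addr, b_addr + b_size) fully contain the referenced
 * range [a_addr, a_addr + a_size)? */
static bool contains(u64 b_addr, u64 b_size, u64 a_addr, u64 a_size)
{
	return b_addr <= a_addr && b_addr + b_size >= a_addr + a_size;
}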
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 75ca78f3a8c9..dcdade39e47f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -10,11 +10,12 @@
  * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kallsyms.h>
-#include <linux/version.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/proc_fs.h>
@@ -30,6 +31,8 @@
 #include <linux/jump_label.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
@@ -38,7 +41,6 @@ struct ddebug_table {
 	struct list_head link;
 	char *mod_name;
 	unsigned int num_ddebugs;
-	unsigned int num_enabled;
 	struct _ddebug *ddebugs;
 };
 
@@ -148,19 +150,13 @@ static void ddebug_change(const struct ddebug_query *query,
 		newflags = (dp->flags & mask) | flags;
 		if (newflags == dp->flags)
 			continue;
-
-		if (!newflags)
-			dt->num_enabled--;
-		else if (!dp->flags)
-			dt->num_enabled++;
 		dp->flags = newflags;
 		if (newflags)
 			dp->enabled = 1;
 		else
 			dp->enabled = 0;
 		if (verbose)
-			printk(KERN_INFO
-				"ddebug: changed %s:%d [%s]%s %s\n",
+			pr_info("changed %s:%d [%s]%s %s\n",
 				dp->filename, dp->lineno,
 				dt->mod_name, dp->function,
 				ddebug_describe_flags(dp, flagbuf,
@@ -170,7 +166,7 @@ static void ddebug_change(const struct ddebug_query *query,
 	mutex_unlock(&ddebug_lock);
 
 	if (!nfound && verbose)
-		printk(KERN_INFO "ddebug: no matches for query\n");
+		pr_info("no matches for query\n");
 }
 
 /*
@@ -215,10 +211,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 
 	if (verbose) {
 		int i;
-		printk(KERN_INFO "%s: split into words:", __func__);
+		pr_info("split into words:");
 		for (i = 0 ; i < nwords ; i++)
-			printk(" \"%s\"", words[i]);
-		printk("\n");
+			pr_cont(" \"%s\"", words[i]);
+		pr_cont("\n");
 	}
 
 	return nwords;
@@ -330,16 +326,15 @@ static int ddebug_parse_query(char *words[], int nwords,
 		}
 	} else {
 		if (verbose)
-			printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
-				__func__, words[i]);
+			pr_err("unknown keyword \"%s\"\n", words[i]);
 			return -EINVAL;
 		}
 	}
 
 	if (verbose)
-		printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
+		pr_info("q->function=\"%s\" q->filename=\"%s\" "
 			"q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
-			__func__, query->function, query->filename,
+			query->function, query->filename,
 			query->module, query->format, query->first_lineno,
 			query->last_lineno);
 
@@ -368,7 +363,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 		return -EINVAL;
 	}
 	if (verbose)
-		printk(KERN_INFO "%s: op='%c'\n", __func__, op);
+		pr_info("op='%c'\n", op);
 
 	for ( ; *str ; ++str) {
 		for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@@ -383,7 +378,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 	if (flags == 0)
 		return -EINVAL;
 	if (verbose)
-		printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
+		pr_info("flags=0x%x\n", flags);
 
 	/* calculate final *flagsp, *maskp according to mask and op */
 	switch (op) {
@@ -401,8 +396,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 		break;
 	}
 	if (verbose)
-		printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
-			__func__, *flagsp, *maskp);
+		pr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
 	return 0;
 }
 
@@ -427,40 +421,117 @@ static int ddebug_exec_query(char *query_string)
 	return 0;
 }
 
+#define PREFIX_SIZE 64
+
+static int remaining(int wrote)
+{
+	if (PREFIX_SIZE - wrote > 0)
+		return PREFIX_SIZE - wrote;
+	return 0;
+}
+
+static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+{
+	int pos_after_tid;
+	int pos = 0;
+
+	pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+	if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
+		if (in_interrupt())
+			pos += snprintf(buf + pos, remaining(pos), "%s ",
+						"<intr>");
+		else
+			pos += snprintf(buf + pos, remaining(pos), "[%d] ",
+						task_pid_vnr(current));
+	}
+	pos_after_tid = pos;
+	if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+		pos += snprintf(buf + pos, remaining(pos), "%s:",
+					desc->modname);
+	if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+		pos += snprintf(buf + pos, remaining(pos), "%s:",
+					desc->function);
+	if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
+		pos += snprintf(buf + pos, remaining(pos), "%d:", desc->lineno);
+	if (pos - pos_after_tid)
+		pos += snprintf(buf + pos, remaining(pos), " ");
+	if (pos >= PREFIX_SIZE)
+		buf[PREFIX_SIZE - 1] = '\0';
+
+	return buf;
+}
+
 int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
 {
 	va_list args;
 	int res;
+	struct va_format vaf;
+	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
-	res = printk(KERN_DEBUG);
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
-		if (in_interrupt())
-			res += printk(KERN_CONT "<intr> ");
-		else
-			res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
-	}
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
-		res += printk(KERN_CONT "%s:", descriptor->modname);
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
-		res += printk(KERN_CONT "%s:", descriptor->function);
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
-		res += printk(KERN_CONT "%d ", descriptor->lineno);
-	res += vprintk(fmt, args);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
 	va_end(args);
 
 	return res;
 }
 EXPORT_SYMBOL(__dynamic_pr_debug);
 
+int __dynamic_dev_dbg(struct _ddebug *descriptor,
+		      const struct device *dev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int res;
+	char buf[PREFIX_SIZE];
+
+	BUG_ON(!descriptor);
+	BUG_ON(!fmt);
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+	va_end(args);
+
+	return res;
+}
+EXPORT_SYMBOL(__dynamic_dev_dbg);
+
+#ifdef CONFIG_NET
+
+int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+			 const struct net_device *dev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int res;
+	char buf[PREFIX_SIZE];
+
+	BUG_ON(!descriptor);
+	BUG_ON(!fmt);
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+	va_end(args);
+
+	return res;
+}
+EXPORT_SYMBOL(__dynamic_netdev_dbg);
+
+#endif
+
 static __initdata char ddebug_setup_string[1024];
 static __init int ddebug_setup_query(char *str)
 {
 	if (strlen(str) >= 1024) {
-		pr_warning("ddebug boot param string too large\n");
+		pr_warn("ddebug boot param string too large\n");
 		return 0;
 	}
 	strcpy(ddebug_setup_string, str);
@@ -488,8 +559,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 	tmpbuf[len] = '\0';
 	if (verbose)
-		printk(KERN_INFO "%s: read %d bytes from userspace\n",
-			__func__, (int)len);
+		pr_info("read %d bytes from userspace\n", (int)len);
 
 	ret = ddebug_exec_query(tmpbuf);
 	if (ret)
@@ -552,8 +622,7 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
 	int n = *pos;
 
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
-			__func__, m, (unsigned long long)*pos);
+		pr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
 
 	mutex_lock(&ddebug_lock);
 
@@ -578,8 +647,8 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
 	struct _ddebug *dp;
 
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
-			__func__, m, p, (unsigned long long)*pos);
+		pr_info("called m=%p p=%p *pos=%lld\n",
+			m, p, (unsigned long long)*pos);
 
 	if (p == SEQ_START_TOKEN)
 		dp = ddebug_iter_first(iter);
@@ -602,8 +671,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
 	char flagsbuf[8];
 
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p p=%p\n",
-			__func__, m, p);
+		pr_info("called m=%p p=%p\n", m, p);
 
 	if (p == SEQ_START_TOKEN) {
 		seq_puts(m,
@@ -628,8 +696,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
 static void ddebug_proc_stop(struct seq_file *m, void *p)
 {
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p p=%p\n",
-			__func__, m, p);
+		pr_info("called m=%p p=%p\n", m, p);
 	mutex_unlock(&ddebug_lock);
 }
 
@@ -652,7 +719,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
 	int err;
 
 	if (verbose)
-		printk(KERN_INFO "%s: called\n", __func__);
+		pr_info("called\n");
 
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (iter == NULL)
@@ -696,7 +763,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 	}
 	dt->mod_name = new_name;
 	dt->num_ddebugs = n;
-	dt->num_enabled = 0;
 	dt->ddebugs = tab;
 
 	mutex_lock(&ddebug_lock);
@@ -704,8 +770,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 	mutex_unlock(&ddebug_lock);
 
 	if (verbose)
-		printk(KERN_INFO "%u debug prints in module %s\n",
-			n, dt->mod_name);
+		pr_info("%u debug prints in module %s\n", n, dt->mod_name);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ddebug_add_module);
@@ -727,8 +792,7 @@ int ddebug_remove_module(const char *mod_name)
 	int ret = -ENOENT;
 
 	if (verbose)
-		printk(KERN_INFO "%s: removing module \"%s\"\n",
-			__func__, mod_name);
+		pr_info("removing module \"%s\"\n", mod_name);
 
 	mutex_lock(&ddebug_lock);
 	list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@ -804,8 +868,8 @@ static int __init dynamic_debug_init(void)
 	if (ddebug_setup_string[0] != '\0') {
 		ret = ddebug_exec_query(ddebug_setup_string);
 		if (ret)
-			pr_warning("Invalid ddebug boot param %s",
-				   ddebug_setup_string);
+			pr_warn("Invalid ddebug boot param %s",
+				ddebug_setup_string);
 		else
 			pr_info("ddebug initialized with string %s",
 				ddebug_setup_string);
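
The rewrite above assembles the whole prefix into one PREFIX_SIZE buffer and emits the message with a single printk(), using the kernel's %pV specifier with struct va_format to splice the caller's format and arguments in; this avoids the interleaved output the old KERN_CONT sequence could produce under concurrent logging. A minimal sketch of the %pV idiom (wrapper name hypothetical):

static int my_prefixed_printk(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int res;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* One printk() call: prefix and message cannot interleave. */
	res = printk("%s%pV", prefix, &vaf);
	va_end(args);

	return res;
}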
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 2577b121c7c1..4f7554025e30 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -14,7 +14,7 @@
  * setup_fault_attr() is a helper function for various __setup handlers, so it
  * returns 0 on error, because that is what __setup handlers do.
  */
-int __init setup_fault_attr(struct fault_attr *attr, char *str)
+int setup_fault_attr(struct fault_attr *attr, char *str)
 {
 	unsigned long probability;
 	unsigned long interval;
@@ -36,6 +36,7 @@ int __init setup_fault_attr(struct fault_attr *attr, char *str)
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
@@ -130,6 +131,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(should_fail);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 
@@ -197,21 +199,15 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
 	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
 }
 
-void cleanup_fault_attr_dentries(struct fault_attr *attr)
-{
-	debugfs_remove_recursive(attr->dir);
-}
-
-int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
+struct dentry *fault_create_debugfs_attr(const char *name,
+			struct dentry *parent, struct fault_attr *attr)
 {
 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 
-	dir = debugfs_create_dir(name, NULL);
+	dir = debugfs_create_dir(name, parent);
 	if (!dir)
-		return -ENOMEM;
-
-	attr->dir = dir;
+		return ERR_PTR(-ENOMEM);
 
 	if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
 		goto fail;
@@ -243,11 +239,12 @@ int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
 
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
-	return 0;
+	return dir;
 fail:
-	debugfs_remove_recursive(attr->dir);
+	debugfs_remove_recursive(dir);
 
-	return -ENOMEM;
+	return ERR_PTR(-ENOMEM);
 }
+EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
 
 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
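
Since fault_create_debugfs_attr() replaces the init_fault_attr_dentries()/cleanup_fault_attr_dentries() pair, callers now receive the directory dentry and are responsible for removing it themselves. A hedged usage sketch (module and attribute names hypothetical):

static DECLARE_FAULT_ATTR(fail_myio);
static struct dentry *fail_myio_dir;

static int __init myio_fault_init(void)
{
	fail_myio_dir = fault_create_debugfs_attr("fail_myio", NULL,
						  &fail_myio);
	return IS_ERR(fail_myio_dir) ? PTR_ERR(fail_myio_dir) : 0;
}

static void __exit myio_fault_exit(void)
{
	debugfs_remove_recursive(fail_myio_dir);	/* caller cleans up */
}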
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf805975..f352cc42f4f8 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,26 @@
 /*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this include on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool a lock still has to
+ * be taken. So any user relying on locklessness has to ensure that
+ * sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg
+ * implementation, the allocator can NOT be used in NMI handlers. So
+ * code that uses the allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
  *
@@ -13,8 +31,109 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
 #include <linux/genalloc.h>
 
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if (val & mask_to_set)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+	return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if ((val & mask_to_clear) != mask_to_clear)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+	return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits starting from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without a lock. If two
+ * users set the same bit, one user will return the remaining bits,
+ * otherwise return 0.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_set >= 0) {
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+		nr -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+	}
+
+	return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to clear
+ *
+ * Clear @nr bits starting from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without a lock. If two
+ * users clear the same bit, one user will return the remaining bits,
+ * otherwise return 0.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_clear >= 0) {
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+		nr -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+	}
+
+	return 0;
+}
 
 /**
  * gen_pool_create - create a new special memory pool
@@ -30,7 +149,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 
 	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
 	if (pool != NULL) {
-		rwlock_init(&pool->lock);
+		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
 	}
@@ -63,14 +182,14 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	if (unlikely(chunk == NULL))
 		return -ENOMEM;
 
-	spin_lock_init(&chunk->lock);
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size;
+	atomic_set(&chunk->avail, size);
 
-	write_lock(&pool->lock);
-	list_add(&chunk->next_chunk, &pool->chunks);
-	write_unlock(&pool->lock);
+	spin_lock(&pool->lock);
+	list_add_rcu(&chunk->next_chunk, &pool->chunks);
+	spin_unlock(&pool->lock);
 
 	return 0;
 }
@@ -85,19 +204,19 @@ EXPORT_SYMBOL(gen_pool_add_virt);
  */
 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
+	phys_addr_t paddr = -1;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
-		if (addr >= chunk->start_addr && addr < chunk->end_addr)
-			return chunk->phys_addr + addr - chunk->start_addr;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			paddr = chunk->phys_addr + (addr - chunk->start_addr);
+			break;
+		}
 	}
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
 
-	return -1;
+	return paddr;
 }
 EXPORT_SYMBOL(gen_pool_virt_to_phys);
 
@@ -115,7 +234,6 @@ void gen_pool_destroy(struct gen_pool *pool)
 	int order = pool->min_alloc_order;
 	int bit, end_bit;
 
-
 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
@@ -137,44 +255,50 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
  *
 * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+ * Uses a first-fit algorithm. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
+	unsigned long addr = 0;
 	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
+	int nbits, start_bit = 0, end_bit, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
 	if (size == 0)
 		return 0;
 
 	nbits = (size + (1UL << order) - 1) >> order;
-
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (size > atomic_read(&chunk->avail))
+			continue;
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-
-		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
-			spin_unlock_irqrestore(&chunk->lock, flags);
+retry:
+		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
+						       start_bit, nbits, 0);
+		if (start_bit >= end_bit)
 			continue;
+		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+		if (remain) {
+			remain = bitmap_clear_ll(chunk->bits, start_bit,
+						 nbits - remain);
+			BUG_ON(remain);
+			goto retry;
 		}
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
-		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		size = nbits << order;
+		atomic_sub(size, &chunk->avail);
+		break;
 	}
-	read_unlock(&pool->lock);
-	return 0;
+	rcu_read_unlock();
+	return addr;
 }
 EXPORT_SYMBOL(gen_pool_alloc);
 
@@ -184,33 +308,95 @@ EXPORT_SYMBOL(gen_pool_alloc); | |||
184 | * @addr: starting address of memory to free back to pool | 308 | * @addr: starting address of memory to free back to pool |
185 | * @size: size in bytes of memory to free | 309 | * @size: size in bytes of memory to free |
186 | * | 310 | * |
187 | * Free previously allocated special memory back to the specified pool. | 311 | * Free previously allocated special memory back to the specified |
312 | * pool. Cannot be used in an NMI handler on architectures without | ||
313 | * an NMI-safe cmpxchg implementation. | ||
188 | */ | 314 | */ |
189 | void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) | 315 | void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) |
190 | { | 316 | { |
191 | struct list_head *_chunk; | ||
192 | struct gen_pool_chunk *chunk; | 317 | struct gen_pool_chunk *chunk; |
193 | unsigned long flags; | ||
194 | int order = pool->min_alloc_order; | 318 | int order = pool->min_alloc_order; |
195 | int bit, nbits; | 319 | int start_bit, nbits, remain; |
196 | 320 | ||
197 | nbits = (size + (1UL << order) - 1) >> order; | 321 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG |
198 | 322 | BUG_ON(in_nmi()); | |
199 | read_lock(&pool->lock); | 323 | #endif |
200 | list_for_each(_chunk, &pool->chunks) { | ||
201 | chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); | ||
202 | 324 | ||
325 | nbits = (size + (1UL << order) - 1) >> order; | ||
326 | rcu_read_lock(); | ||
327 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { | ||
203 | if (addr >= chunk->start_addr && addr < chunk->end_addr) { | 328 | if (addr >= chunk->start_addr && addr < chunk->end_addr) { |
204 | BUG_ON(addr + size > chunk->end_addr); | 329 | BUG_ON(addr + size > chunk->end_addr); |
205 | spin_lock_irqsave(&chunk->lock, flags); | 330 | start_bit = (addr - chunk->start_addr) >> order; |
206 | bit = (addr - chunk->start_addr) >> order; | 331 | remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); |
207 | while (nbits--) | 332 | BUG_ON(remain); |
208 | __clear_bit(bit++, chunk->bits); | 333 | size = nbits << order; |
209 | spin_unlock_irqrestore(&chunk->lock, flags); | 334 | atomic_add(size, &chunk->avail); |
210 | break; | 335 | rcu_read_unlock(); |
336 | return; | ||
211 | } | 337 | } |
212 | } | 338 | } |
213 | BUG_ON(nbits > 0); | 339 | rcu_read_unlock(); |
214 | read_unlock(&pool->lock); | 340 | BUG(); |
215 | } | 341 | } |
216 | EXPORT_SYMBOL(gen_pool_free); | 342 | EXPORT_SYMBOL(gen_pool_free); |
343 | |||
344 | /** | ||
345 | * gen_pool_for_each_chunk - call func for every chunk of generic memory pool | ||
346 | * @pool: the generic memory pool | ||
347 | * @func: func to call | ||
348 | * @data: additional data used by @func | ||
349 | * | ||
350 | * Call @func for every chunk of the generic memory pool. @func is | ||
351 | * called with rcu_read_lock held. | ||
352 | */ | ||
353 | void gen_pool_for_each_chunk(struct gen_pool *pool, | ||
354 | void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), | ||
355 | void *data) | ||
356 | { | ||
357 | struct gen_pool_chunk *chunk; | ||
358 | |||
359 | rcu_read_lock(); | ||
360 | list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) | ||
361 | func(pool, chunk, data); | ||
362 | rcu_read_unlock(); | ||
363 | } | ||
364 | EXPORT_SYMBOL(gen_pool_for_each_chunk); | ||
365 | |||
366 | /** | ||
367 | * gen_pool_avail - get available free space of the pool | ||
368 | * @pool: pool to get available free space | ||
369 | * | ||
370 | * Return available free space of the specified pool. | ||
371 | */ | ||
372 | size_t gen_pool_avail(struct gen_pool *pool) | ||
373 | { | ||
374 | struct gen_pool_chunk *chunk; | ||
375 | size_t avail = 0; | ||
376 | |||
377 | rcu_read_lock(); | ||
378 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) | ||
379 | avail += atomic_read(&chunk->avail); | ||
380 | rcu_read_unlock(); | ||
381 | return avail; | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(gen_pool_avail); | ||
384 | |||
385 | /** | ||
386 | * gen_pool_size - get size in bytes of memory managed by the pool | ||
387 | * @pool: pool to get size | ||
388 | * | ||
389 | * Return size in bytes of memory managed by the pool. | ||
390 | */ | ||
391 | size_t gen_pool_size(struct gen_pool *pool) | ||
392 | { | ||
393 | struct gen_pool_chunk *chunk; | ||
394 | size_t size = 0; | ||
395 | |||
396 | rcu_read_lock(); | ||
397 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) | ||
398 | size += chunk->end_addr - chunk->start_addr; | ||
399 | rcu_read_unlock(); | ||
400 | return size; | ||
401 | } | ||
402 | EXPORT_SYMBOL_GPL(gen_pool_size); | ||
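With the rwlock and per-chunk spinlock gone, allocation and free now run under rcu_read_lock() plus lockless bitmap cmpxchg, which is what makes the NMI caveat in the comments necessary. A minimal usage sketch of the resulting API follows; the SRAM pool, addresses, and sizes are illustrative assumptions, not part of the patch:

#include <linux/kernel.h>
#include <linux/genalloc.h>

static struct gen_pool *sram_pool;	/* hypothetical on-chip SRAM pool */

static int sram_pool_init(unsigned long virt, phys_addr_t phys)
{
	int ret;

	/* min_alloc_order 5: allocations come in multiples of 32 bytes */
	sram_pool = gen_pool_create(5, -1);
	if (!sram_pool)
		return -ENOMEM;

	ret = gen_pool_add_virt(sram_pool, virt, phys, 4096, -1);
	if (ret)
		gen_pool_destroy(sram_pool);
	return ret;
}

static void sram_pool_demo(void)
{
	unsigned long vaddr = gen_pool_alloc(sram_pool, 128);
	phys_addr_t paddr;

	if (!vaddr)
		return;	/* no chunk had 128 contiguous free bytes */
	paddr = gen_pool_virt_to_phys(sram_pool, vaddr);
	pr_info("128 bytes at %#lx (phys %#llx), %zu of %zu bytes free\n",
		vaddr, (unsigned long long)paddr,
		gen_pool_avail(sram_pool), gen_pool_size(sram_pool));
	gen_pool_free(sram_pool, vaddr, 128);
}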
diff --git a/lib/hexdump.c b/lib/hexdump.c index f5fe6ba7a3ab..51d5ae210244 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c | |||
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin); | |||
38 | * @dst: binary result | 38 | * @dst: binary result |
39 | * @src: ascii hexadecimal string | 39 | * @src: ascii hexadecimal string |
40 | * @count: result length | 40 | * @count: result length |
41 | * | ||
42 | * Return 0 on success, -1 in case of bad input. | ||
41 | */ | 43 | */ |
42 | void hex2bin(u8 *dst, const char *src, size_t count) | 44 | int hex2bin(u8 *dst, const char *src, size_t count) |
43 | { | 45 | { |
44 | while (count--) { | 46 | while (count--) { |
45 | *dst = hex_to_bin(*src++) << 4; | 47 | int hi = hex_to_bin(*src++); |
46 | *dst += hex_to_bin(*src++); | 48 | int lo = hex_to_bin(*src++); |
47 | dst++; | 49 | |
50 | if ((hi < 0) || (lo < 0)) | ||
51 | return -1; | ||
52 | |||
53 | *dst++ = (hi << 4) | lo; | ||
48 | } | 54 | } |
55 | return 0; | ||
49 | } | 56 | } |
50 | EXPORT_SYMBOL(hex2bin); | 57 | EXPORT_SYMBOL(hex2bin); |
51 | 58 | ||
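Since hex2bin() now reports bad input instead of silently storing whatever hex_to_bin() returned, callers can validate untrusted strings. A hedged sketch; the MAC-address framing is an illustrative assumption:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Parse a 12-digit hex string such as "a1b2c3d4e5f6" into a MAC address. */
static int parse_mac(const char *str, u8 mac[6])
{
	if (strlen(str) != 12)
		return -EINVAL;
	if (hex2bin(mac, str, 6) < 0)	/* new return value: -1 on bad hex */
		return -EINVAL;
	return 0;
}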
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
@@ -34,8 +34,10 @@ | |||
34 | #include <linux/err.h> | 34 | #include <linux/err.h> |
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
36 | #include <linux/idr.h> | 36 | #include <linux/idr.h> |
37 | #include <linux/spinlock.h> | ||
37 | 38 | ||
38 | static struct kmem_cache *idr_layer_cache; | 39 | static struct kmem_cache *idr_layer_cache; |
40 | static DEFINE_SPINLOCK(simple_ida_lock); | ||
39 | 41 | ||
40 | static struct idr_layer *get_from_free_list(struct idr *idp) | 42 | static struct idr_layer *get_from_free_list(struct idr *idp) |
41 | { | 43 | { |
@@ -765,8 +767,8 @@ EXPORT_SYMBOL(ida_pre_get); | |||
765 | * @starting_id: id to start search at | 767 | * @starting_id: id to start search at |
766 | * @p_id: pointer to the allocated handle | 768 | * @p_id: pointer to the allocated handle |
767 | * | 769 | * |
768 | * Allocate new ID above or equal to @ida. It should be called with | 770 | * Allocate new ID above or equal to @starting_id. It should be called |
769 | * any required locks. | 771 | * with any required locks. |
770 | * | 772 | * |
771 | * If memory is required, it will return %-EAGAIN, you should unlock | 773 | * If memory is required, it will return %-EAGAIN, you should unlock |
772 | * and go back to the ida_pre_get() call. If the ida is full, it will | 774 | * and go back to the ida_pre_get() call. If the ida is full, it will |
@@ -858,7 +860,7 @@ EXPORT_SYMBOL(ida_get_new_above); | |||
858 | * and go back to the idr_pre_get() call. If the idr is full, it will | 860 | * and go back to the idr_pre_get() call. If the idr is full, it will |
859 | * return %-ENOSPC. | 861 | * return %-ENOSPC. |
860 | * | 862 | * |
861 | * @id returns a value in the range %0 ... %0x7fffffff. | 863 | * @p_id returns a value in the range %0 ... %0x7fffffff. |
862 | */ | 864 | */ |
863 | int ida_get_new(struct ida *ida, int *p_id) | 865 | int ida_get_new(struct ida *ida, int *p_id) |
864 | { | 866 | { |
@@ -926,6 +928,74 @@ void ida_destroy(struct ida *ida) | |||
926 | EXPORT_SYMBOL(ida_destroy); | 928 | EXPORT_SYMBOL(ida_destroy); |
927 | 929 | ||
928 | /** | 930 | /** |
931 | * ida_simple_get - get a new id. | ||
932 | * @ida: the (initialized) ida. | ||
933 | * @start: the minimum id (inclusive, < 0x80000000) | ||
934 | * @end: the maximum id (exclusive, < 0x80000000 or 0 for unbounded) | ||
935 | * @gfp_mask: memory allocation flags | ||
936 | * | ||
937 | * Allocates an id in the range start <= id < end, or returns -ENOSPC. | ||
938 | * On memory allocation failure, returns -ENOMEM. | ||
939 | * | ||
940 | * Use ida_simple_remove() to get rid of an id. | ||
941 | */ | ||
942 | int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, | ||
943 | gfp_t gfp_mask) | ||
944 | { | ||
945 | int ret, id; | ||
946 | unsigned int max; | ||
947 | unsigned long flags; | ||
948 | |||
949 | BUG_ON((int)start < 0); | ||
950 | BUG_ON((int)end < 0); | ||
951 | |||
952 | if (end == 0) | ||
953 | max = 0x80000000; | ||
954 | else { | ||
955 | BUG_ON(end < start); | ||
956 | max = end - 1; | ||
957 | } | ||
958 | |||
959 | again: | ||
960 | if (!ida_pre_get(ida, gfp_mask)) | ||
961 | return -ENOMEM; | ||
962 | |||
963 | spin_lock_irqsave(&simple_ida_lock, flags); | ||
964 | ret = ida_get_new_above(ida, start, &id); | ||
965 | if (!ret) { | ||
966 | if (id > max) { | ||
967 | ida_remove(ida, id); | ||
968 | ret = -ENOSPC; | ||
969 | } else { | ||
970 | ret = id; | ||
971 | } | ||
972 | } | ||
973 | spin_unlock_irqrestore(&simple_ida_lock, flags); | ||
974 | |||
975 | if (unlikely(ret == -EAGAIN)) | ||
976 | goto again; | ||
977 | |||
978 | return ret; | ||
979 | } | ||
980 | EXPORT_SYMBOL(ida_simple_get); | ||
981 | |||
982 | /** | ||
983 | * ida_simple_remove - remove an allocated id. | ||
984 | * @ida: the (initialized) ida. | ||
985 | * @id: the id returned by ida_simple_get. | ||
986 | */ | ||
987 | void ida_simple_remove(struct ida *ida, unsigned int id) | ||
988 | { | ||
989 | unsigned long flags; | ||
990 | |||
991 | BUG_ON((int)id < 0); | ||
992 | spin_lock_irqsave(&simple_ida_lock, flags); | ||
993 | ida_remove(ida, id); | ||
994 | spin_unlock_irqrestore(&simple_ida_lock, flags); | ||
995 | } | ||
996 | EXPORT_SYMBOL(ida_simple_remove); | ||
997 | |||
998 | /** | ||
929 | * ida_init - initialize ida handle | 999 | * ida_init - initialize ida handle |
930 | * @ida: ida handle | 1000 | * @ida: ida handle |
931 | * | 1001 | * |
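The new ida_simple_* pair wraps the usual ida_pre_get()/ida_get_new_above()/ida_remove() retry dance, including its locking, behind two calls. A sketch of the intended pattern; the instance-numbering use case and the range are assumptions:

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(instance_ida);

static int demo_probe(void)
{
	/* any id in [0, 64); locking and the -EAGAIN retry are internal */
	int id = ida_simple_get(&instance_ida, 0, 64, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	pr_info("bound instance %d\n", id);
	return 0;
}

static void demo_remove(int id)
{
	ida_simple_remove(&instance_ida, id);
}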
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 70af0a7f97c0..ad72a03ce5e9 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
282 | kobj_bcast_filter, | 282 | kobj_bcast_filter, |
283 | kobj); | 283 | kobj); |
284 | /* ENOBUFS should be handled in userspace */ | 284 | /* ENOBUFS should be handled in userspace */ |
285 | if (retval == -ENOBUFS) | 285 | if (retval == -ENOBUFS || retval == -ESRCH) |
286 | retval = 0; | 286 | retval = 0; |
287 | } else | 287 | } else |
288 | retval = -ENOMEM; | 288 | retval = -ENOMEM; |
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 5e066759f551..7a94c8f14e29 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
@@ -18,26 +18,40 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | #include "kstrtox.h" | ||
21 | 22 | ||
22 | static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | 23 | const char *_parse_integer_fixup_radix(const char *s, unsigned int *base) |
23 | { | 24 | { |
24 | unsigned long long acc; | 25 | if (*base == 0) { |
25 | int ok; | ||
26 | |||
27 | if (base == 0) { | ||
28 | if (s[0] == '0') { | 26 | if (s[0] == '0') { |
29 | if (_tolower(s[1]) == 'x' && isxdigit(s[2])) | 27 | if (_tolower(s[1]) == 'x' && isxdigit(s[2])) |
30 | base = 16; | 28 | *base = 16; |
31 | else | 29 | else |
32 | base = 8; | 30 | *base = 8; |
33 | } else | 31 | } else |
34 | base = 10; | 32 | *base = 10; |
35 | } | 33 | } |
36 | if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') | 34 | if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') |
37 | s += 2; | 35 | s += 2; |
36 | return s; | ||
37 | } | ||
38 | 38 | ||
39 | acc = 0; | 39 | /* |
40 | ok = 0; | 40 | * Convert a non-negative integer string representation in an explicitly |
41 | * given radix to an integer. | ||
42 | * Return the number of characters consumed, possibly OR-ed with the overflow bit. | ||
43 | * If overflow occurs, the (incorrect) result is still written to *res. | ||
44 | * | ||
45 | * Don't you dare use this function. | ||
46 | */ | ||
47 | unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res) | ||
48 | { | ||
49 | unsigned int rv; | ||
50 | int overflow; | ||
51 | |||
52 | *res = 0; | ||
53 | rv = 0; | ||
54 | overflow = 0; | ||
41 | while (*s) { | 55 | while (*s) { |
42 | unsigned int val; | 56 | unsigned int val; |
43 | 57 | ||
@@ -45,23 +59,40 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
45 | val = *s - '0'; | 59 | val = *s - '0'; |
46 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') | 60 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') |
47 | val = _tolower(*s) - 'a' + 10; | 61 | val = _tolower(*s) - 'a' + 10; |
48 | else if (*s == '\n' && *(s + 1) == '\0') | ||
49 | break; | ||
50 | else | 62 | else |
51 | return -EINVAL; | 63 | break; |
52 | 64 | ||
53 | if (val >= base) | 65 | if (val >= base) |
54 | return -EINVAL; | 66 | break; |
55 | if (acc > div_u64(ULLONG_MAX - val, base)) | 67 | if (*res > div_u64(ULLONG_MAX - val, base)) |
56 | return -ERANGE; | 68 | overflow = 1; |
57 | acc = acc * base + val; | 69 | *res = *res * base + val; |
58 | ok = 1; | 70 | rv++; |
59 | |||
60 | s++; | 71 | s++; |
61 | } | 72 | } |
62 | if (!ok) | 73 | if (overflow) |
74 | rv |= KSTRTOX_OVERFLOW; | ||
75 | return rv; | ||
76 | } | ||
77 | |||
78 | static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | ||
79 | { | ||
80 | unsigned long long _res; | ||
81 | unsigned int rv; | ||
82 | |||
83 | s = _parse_integer_fixup_radix(s, &base); | ||
84 | rv = _parse_integer(s, base, &_res); | ||
85 | if (rv & KSTRTOX_OVERFLOW) | ||
86 | return -ERANGE; | ||
87 | rv &= ~KSTRTOX_OVERFLOW; | ||
88 | if (rv == 0) | ||
89 | return -EINVAL; | ||
90 | s += rv; | ||
91 | if (*s == '\n') | ||
92 | s++; | ||
93 | if (*s) | ||
63 | return -EINVAL; | 94 | return -EINVAL; |
64 | *res = acc; | 95 | *res = _res; |
65 | return 0; | 96 | return 0; |
66 | } | 97 | } |
67 | 98 | ||
diff --git a/lib/kstrtox.h b/lib/kstrtox.h new file mode 100644 index 000000000000..f13eeeaf441d --- /dev/null +++ b/lib/kstrtox.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _LIB_KSTRTOX_H | ||
2 | #define _LIB_KSTRTOX_H | ||
3 | |||
4 | #define KSTRTOX_OVERFLOW (1U << 31) | ||
5 | const char *_parse_integer_fixup_radix(const char *s, unsigned int *base); | ||
6 | unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res); | ||
7 | |||
8 | #endif | ||
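The split into _parse_integer_fixup_radix() and _parse_integer() leaves _kstrtoull() as a thin wrapper and lets other parsers reuse the core loop. A sketch of the user-visible behavior through kstrtoull(); the inputs are illustrative:

#include <linux/kernel.h>

static void kstrtox_demo(void)
{
	unsigned long long v;

	/* base 0: a leading "0x" selects 16, a bare leading 0 selects 8 */
	if (kstrtoull("0x1f\n", 0, &v) == 0)
		pr_info("v = %llu\n", v);	/* 31; one trailing '\n' is ok */

	/* _parse_integer() stops at 'a'; leftover characters mean -EINVAL */
	if (kstrtoull("123abc", 10, &v) == -EINVAL)
		pr_info("rejected as expected\n");
}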
diff --git a/lib/llist.c b/lib/llist.c new file mode 100644 index 000000000000..700cff77a387 --- /dev/null +++ b/lib/llist.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Lock-less NULL terminated single linked list | ||
3 | * | ||
4 | * The basic atomic operation of this list is cmpxchg on long. On | ||
5 | * architectures that don't have NMI-safe cmpxchg implementation, the | ||
6 | * list can NOT be used in NMI handlers. So code that uses the list in | ||
7 | * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. | ||
8 | * | ||
9 | * Copyright 2010,2011 Intel Corp. | ||
10 | * Author: Huang Ying <ying.huang@intel.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License version | ||
14 | * 2 as published by the Free Software Foundation; | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/llist.h> | ||
29 | |||
30 | #include <asm/system.h> | ||
31 | |||
32 | /** | ||
33 | * llist_add_batch - add several linked entries in batch | ||
34 | * @new_first: first entry in batch to be added | ||
35 | * @new_last: last entry in batch to be added | ||
36 | * @head: the head for your lock-less list | ||
37 | * | ||
38 | * Return whether the list was empty before adding. | ||
39 | */ | ||
40 | bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | ||
41 | struct llist_head *head) | ||
42 | { | ||
43 | struct llist_node *entry, *old_entry; | ||
44 | |||
45 | entry = head->first; | ||
46 | for (;;) { | ||
47 | old_entry = entry; | ||
48 | new_last->next = entry; | ||
49 | entry = cmpxchg(&head->first, old_entry, new_first); | ||
50 | if (entry == old_entry) | ||
51 | break; | ||
52 | } | ||
53 | |||
54 | return old_entry == NULL; | ||
55 | } | ||
56 | EXPORT_SYMBOL_GPL(llist_add_batch); | ||
57 | |||
58 | /** | ||
59 | * llist_del_first - delete the first entry of lock-less list | ||
60 | * @head: the head for your lock-less list | ||
61 | * | ||
62 | * If the list is empty, return NULL; otherwise, return the first entry | ||
63 | * deleted, which is the most recently added one. | ||
64 | * | ||
65 | * Only one llist_del_first consumer may run concurrently with | ||
66 | * multiple llist_add producers without a lock; otherwise an | ||
67 | * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add, | ||
68 | * llist_add) sequence in another consumer may change @head->first->next | ||
69 | * while keeping @head->first unchanged. If multiple consumers are | ||
70 | * needed, use llist_del_all or a lock between consumers. | ||
71 | */ | ||
72 | struct llist_node *llist_del_first(struct llist_head *head) | ||
73 | { | ||
74 | struct llist_node *entry, *old_entry, *next; | ||
75 | |||
76 | entry = head->first; | ||
77 | for (;;) { | ||
78 | if (entry == NULL) | ||
79 | return NULL; | ||
80 | old_entry = entry; | ||
81 | next = entry->next; | ||
82 | entry = cmpxchg(&head->first, old_entry, next); | ||
83 | if (entry == old_entry) | ||
84 | break; | ||
85 | } | ||
86 | |||
87 | return entry; | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(llist_del_first); | ||
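A sketch of the single-consumer pattern the llist_del_first() comment describes: any number of producers push entries, one consumer pops them newest-first. The work-item type is an illustrative assumption, and llist_add()/llist_entry() are assumed available as helpers from <linux/llist.h>:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/llist.h>

struct demo_work {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(demo_list);

/* Producer: may run in IRQ (and, given NMI-safe cmpxchg, NMI) context. */
static void demo_push(struct demo_work *w)
{
	llist_add(&w->node, &demo_list);
}

/* The single consumer: entries come back newest-first. */
static void demo_drain(void)
{
	struct llist_node *n;

	while ((n = llist_del_first(&demo_list)) != NULL) {
		struct demo_work *w = llist_entry(n, struct demo_work, node);

		pr_info("payload %d\n", w->payload);
		kfree(w);
	}
}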
diff --git a/lib/md5.c b/lib/md5.c new file mode 100644 index 000000000000..c777180e1f2f --- /dev/null +++ b/lib/md5.c | |||
@@ -0,0 +1,95 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/cryptohash.h> | ||
4 | |||
5 | #define F1(x, y, z) (z ^ (x & (y ^ z))) | ||
6 | #define F2(x, y, z) F1(z, x, y) | ||
7 | #define F3(x, y, z) (x ^ y ^ z) | ||
8 | #define F4(x, y, z) (y ^ (x | ~z)) | ||
9 | |||
10 | #define MD5STEP(f, w, x, y, z, in, s) \ | ||
11 | (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) | ||
12 | |||
13 | void md5_transform(__u32 *hash, __u32 const *in) | ||
14 | { | ||
15 | u32 a, b, c, d; | ||
16 | |||
17 | a = hash[0]; | ||
18 | b = hash[1]; | ||
19 | c = hash[2]; | ||
20 | d = hash[3]; | ||
21 | |||
22 | MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); | ||
23 | MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); | ||
24 | MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); | ||
25 | MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); | ||
26 | MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); | ||
27 | MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); | ||
28 | MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); | ||
29 | MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); | ||
30 | MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); | ||
31 | MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); | ||
32 | MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); | ||
33 | MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); | ||
34 | MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); | ||
35 | MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); | ||
36 | MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); | ||
37 | MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); | ||
38 | |||
39 | MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); | ||
40 | MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); | ||
41 | MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); | ||
42 | MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); | ||
43 | MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); | ||
44 | MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); | ||
45 | MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); | ||
46 | MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); | ||
47 | MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); | ||
48 | MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); | ||
49 | MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); | ||
50 | MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); | ||
51 | MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); | ||
52 | MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); | ||
53 | MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); | ||
54 | MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); | ||
55 | |||
56 | MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); | ||
57 | MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); | ||
58 | MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); | ||
59 | MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); | ||
60 | MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); | ||
61 | MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); | ||
62 | MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); | ||
63 | MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); | ||
64 | MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); | ||
65 | MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); | ||
66 | MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); | ||
67 | MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); | ||
68 | MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); | ||
69 | MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); | ||
70 | MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); | ||
71 | MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); | ||
72 | |||
73 | MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); | ||
74 | MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); | ||
75 | MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); | ||
76 | MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); | ||
77 | MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); | ||
78 | MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); | ||
79 | MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); | ||
80 | MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); | ||
81 | MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); | ||
82 | MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); | ||
83 | MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); | ||
84 | MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); | ||
85 | MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); | ||
86 | MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); | ||
87 | MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); | ||
88 | MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); | ||
89 | |||
90 | hash[0] += a; | ||
91 | hash[1] += b; | ||
92 | hash[2] += c; | ||
93 | hash[3] += d; | ||
94 | } | ||
95 | EXPORT_SYMBOL(md5_transform); | ||
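md5_transform() is only the raw compression function (no padding or length finalization), carried in lib/ for non-cryptographic users such as the random driver. A sketch of mixing one 16-word block into the standard MD5 initial state; MD5_DIGEST_WORDS is assumed to come from <linux/cryptohash.h> alongside the prototype:

#include <linux/kernel.h>
#include <linux/cryptohash.h>

static void md5_demo(const __u32 block[16])
{
	/* the standard MD5 initial state */
	__u32 hash[MD5_DIGEST_WORDS] = {
		0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476
	};

	/* one 64-byte block; a real digest would need padding first */
	md5_transform(hash, block);
	pr_info("%08x %08x %08x %08x\n", hash[0], hash[1], hash[2], hash[3]);
}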
diff --git a/lib/nlattr.c b/lib/nlattr.c index ac09f2226dc7..a8408b6cacdf 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
@@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = { | |||
20 | [NLA_U16] = sizeof(u16), | 20 | [NLA_U16] = sizeof(u16), |
21 | [NLA_U32] = sizeof(u32), | 21 | [NLA_U32] = sizeof(u32), |
22 | [NLA_U64] = sizeof(u64), | 22 | [NLA_U64] = sizeof(u64), |
23 | [NLA_MSECS] = sizeof(u64), | ||
23 | [NLA_NESTED] = NLA_HDRLEN, | 24 | [NLA_NESTED] = NLA_HDRLEN, |
24 | }; | 25 | }; |
25 | 26 | ||
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 28f2c33c6b53..f8a3f1a829b8 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -10,8 +10,10 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/debugobjects.h> | 11 | #include <linux/debugobjects.h> |
12 | 12 | ||
13 | #ifdef CONFIG_HOTPLUG_CPU | ||
13 | static LIST_HEAD(percpu_counters); | 14 | static LIST_HEAD(percpu_counters); |
14 | static DEFINE_MUTEX(percpu_counters_lock); | 15 | static DEFINE_MUTEX(percpu_counters_lock); |
16 | #endif | ||
15 | 17 | ||
16 | #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER | 18 | #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER |
17 | 19 | ||
@@ -59,13 +61,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | |||
59 | { | 61 | { |
60 | int cpu; | 62 | int cpu; |
61 | 63 | ||
62 | spin_lock(&fbc->lock); | 64 | raw_spin_lock(&fbc->lock); |
63 | for_each_possible_cpu(cpu) { | 65 | for_each_possible_cpu(cpu) { |
64 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 66 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
65 | *pcount = 0; | 67 | *pcount = 0; |
66 | } | 68 | } |
67 | fbc->count = amount; | 69 | fbc->count = amount; |
68 | spin_unlock(&fbc->lock); | 70 | raw_spin_unlock(&fbc->lock); |
69 | } | 71 | } |
70 | EXPORT_SYMBOL(percpu_counter_set); | 72 | EXPORT_SYMBOL(percpu_counter_set); |
71 | 73 | ||
@@ -76,10 +78,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
76 | preempt_disable(); | 78 | preempt_disable(); |
77 | count = __this_cpu_read(*fbc->counters) + amount; | 79 | count = __this_cpu_read(*fbc->counters) + amount; |
78 | if (count >= batch || count <= -batch) { | 80 | if (count >= batch || count <= -batch) { |
79 | spin_lock(&fbc->lock); | 81 | raw_spin_lock(&fbc->lock); |
80 | fbc->count += count; | 82 | fbc->count += count; |
81 | __this_cpu_write(*fbc->counters, 0); | 83 | __this_cpu_write(*fbc->counters, 0); |
82 | spin_unlock(&fbc->lock); | 84 | raw_spin_unlock(&fbc->lock); |
83 | } else { | 85 | } else { |
84 | __this_cpu_write(*fbc->counters, count); | 86 | __this_cpu_write(*fbc->counters, count); |
85 | } | 87 | } |
@@ -96,13 +98,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) | |||
96 | s64 ret; | 98 | s64 ret; |
97 | int cpu; | 99 | int cpu; |
98 | 100 | ||
99 | spin_lock(&fbc->lock); | 101 | raw_spin_lock(&fbc->lock); |
100 | ret = fbc->count; | 102 | ret = fbc->count; |
101 | for_each_online_cpu(cpu) { | 103 | for_each_online_cpu(cpu) { |
102 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 104 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
103 | ret += *pcount; | 105 | ret += *pcount; |
104 | } | 106 | } |
105 | spin_unlock(&fbc->lock); | 107 | raw_spin_unlock(&fbc->lock); |
106 | return ret; | 108 | return ret; |
107 | } | 109 | } |
108 | EXPORT_SYMBOL(__percpu_counter_sum); | 110 | EXPORT_SYMBOL(__percpu_counter_sum); |
@@ -110,7 +112,7 @@ EXPORT_SYMBOL(__percpu_counter_sum); | |||
110 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | 112 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, |
111 | struct lock_class_key *key) | 113 | struct lock_class_key *key) |
112 | { | 114 | { |
113 | spin_lock_init(&fbc->lock); | 115 | raw_spin_lock_init(&fbc->lock); |
114 | lockdep_set_class(&fbc->lock, key); | 116 | lockdep_set_class(&fbc->lock, key); |
115 | fbc->count = amount; | 117 | fbc->count = amount; |
116 | fbc->counters = alloc_percpu(s32); | 118 | fbc->counters = alloc_percpu(s32); |
@@ -173,11 +175,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
173 | s32 *pcount; | 175 | s32 *pcount; |
174 | unsigned long flags; | 176 | unsigned long flags; |
175 | 177 | ||
176 | spin_lock_irqsave(&fbc->lock, flags); | 178 | raw_spin_lock_irqsave(&fbc->lock, flags); |
177 | pcount = per_cpu_ptr(fbc->counters, cpu); | 179 | pcount = per_cpu_ptr(fbc->counters, cpu); |
178 | fbc->count += *pcount; | 180 | fbc->count += *pcount; |
179 | *pcount = 0; | 181 | *pcount = 0; |
180 | spin_unlock_irqrestore(&fbc->lock, flags); | 182 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
181 | } | 183 | } |
182 | mutex_unlock(&percpu_counters_lock); | 184 | mutex_unlock(&percpu_counters_lock); |
183 | #endif | 185 | #endif |
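The conversion above is mechanical (spinlock_t to raw_spinlock_t), but the batching scheme that lock protects is worth seeing in use. A hedged sketch; the counter name and call sites are illustrative:

#include <linux/percpu_counter.h>

static struct percpu_counter nr_demo_objects;

static int demo_counter_init(void)
{
	return percpu_counter_init(&nr_demo_objects, 0);
}

static void demo_account(long delta)
{
	/*
	 * Cheap per-cpu add; only when a cpu's local delta reaches the
	 * batch does it fold into the raw_spinlock-protected global count.
	 */
	percpu_counter_add(&nr_demo_objects, delta);
}

static s64 demo_read_exact(void)
{
	return percpu_counter_sum(&nr_demo_objects);	/* exact but slower */
}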
diff --git a/lib/proportions.c b/lib/proportions.c index d50746a79de2..05df84801b56 100644 --- a/lib/proportions.c +++ b/lib/proportions.c | |||
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift) | |||
190 | 190 | ||
191 | int prop_local_init_percpu(struct prop_local_percpu *pl) | 191 | int prop_local_init_percpu(struct prop_local_percpu *pl) |
192 | { | 192 | { |
193 | spin_lock_init(&pl->lock); | 193 | raw_spin_lock_init(&pl->lock); |
194 | pl->shift = 0; | 194 | pl->shift = 0; |
195 | pl->period = 0; | 195 | pl->period = 0; |
196 | return percpu_counter_init(&pl->events, 0); | 196 | return percpu_counter_init(&pl->events, 0); |
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
226 | if (pl->period == global_period) | 226 | if (pl->period == global_period) |
227 | return; | 227 | return; |
228 | 228 | ||
229 | spin_lock_irqsave(&pl->lock, flags); | 229 | raw_spin_lock_irqsave(&pl->lock, flags); |
230 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 230 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
231 | 231 | ||
232 | /* | 232 | /* |
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
247 | percpu_counter_set(&pl->events, 0); | 247 | percpu_counter_set(&pl->events, 0); |
248 | 248 | ||
249 | pl->period = global_period; | 249 | pl->period = global_period; |
250 | spin_unlock_irqrestore(&pl->lock, flags); | 250 | raw_spin_unlock_irqrestore(&pl->lock, flags); |
251 | } | 251 | } |
252 | 252 | ||
253 | /* | 253 | /* |
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd, | |||
324 | 324 | ||
325 | int prop_local_init_single(struct prop_local_single *pl) | 325 | int prop_local_init_single(struct prop_local_single *pl) |
326 | { | 326 | { |
327 | spin_lock_init(&pl->lock); | 327 | raw_spin_lock_init(&pl->lock); |
328 | pl->shift = 0; | 328 | pl->shift = 0; |
329 | pl->period = 0; | 329 | pl->period = 0; |
330 | pl->events = 0; | 330 | pl->events = 0; |
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) | |||
356 | if (pl->period == global_period) | 356 | if (pl->period == global_period) |
357 | return; | 357 | return; |
358 | 358 | ||
359 | spin_lock_irqsave(&pl->lock, flags); | 359 | raw_spin_lock_irqsave(&pl->lock, flags); |
360 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 360 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
361 | /* | 361 | /* |
362 | * For each missed period, we half the local counter. | 362 | * For each missed period, we half the local counter. |
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) | |||
367 | else | 367 | else |
368 | pl->events = 0; | 368 | pl->events = 0; |
369 | pl->period = global_period; | 369 | pl->period = global_period; |
370 | spin_unlock_irqrestore(&pl->lock, flags); | 370 | raw_spin_unlock_irqrestore(&pl->lock, flags); |
371 | } | 371 | } |
372 | 372 | ||
373 | /* | 373 | /* |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7ea2e033d715..d9df7454519c 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -576,7 +576,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
576 | { | 576 | { |
577 | unsigned int height, shift; | 577 | unsigned int height, shift; |
578 | struct radix_tree_node *node; | 578 | struct radix_tree_node *node; |
579 | int saw_unset_tag = 0; | ||
580 | 579 | ||
581 | /* check the root's tag bit */ | 580 | /* check the root's tag bit */ |
582 | if (!root_tag_get(root, tag)) | 581 | if (!root_tag_get(root, tag)) |
@@ -603,15 +602,10 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
603 | return 0; | 602 | return 0; |
604 | 603 | ||
605 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | 604 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
606 | |||
607 | /* | ||
608 | * This is just a debug check. Later, we can bale as soon as | ||
609 | * we see an unset tag. | ||
610 | */ | ||
611 | if (!tag_get(node, tag, offset)) | 605 | if (!tag_get(node, tag, offset)) |
612 | saw_unset_tag = 1; | 606 | return 0; |
613 | if (height == 1) | 607 | if (height == 1) |
614 | return !!tag_get(node, tag, offset); | 608 | return 1; |
615 | node = rcu_dereference_raw(node->slots[offset]); | 609 | node = rcu_dereference_raw(node->slots[offset]); |
616 | shift -= RADIX_TREE_MAP_SHIFT; | 610 | shift -= RADIX_TREE_MAP_SHIFT; |
617 | height--; | 611 | height--; |
@@ -823,8 +817,8 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | |||
823 | EXPORT_SYMBOL(radix_tree_prev_hole); | 817 | EXPORT_SYMBOL(radix_tree_prev_hole); |
824 | 818 | ||
825 | static unsigned int | 819 | static unsigned int |
826 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | 820 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long *indices, |
827 | unsigned int max_items, unsigned long *next_index) | 821 | unsigned long index, unsigned int max_items, unsigned long *next_index) |
828 | { | 822 | { |
829 | unsigned int nr_found = 0; | 823 | unsigned int nr_found = 0; |
830 | unsigned int shift, height; | 824 | unsigned int shift, height; |
@@ -857,12 +851,16 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
857 | 851 | ||
858 | /* Bottom level: grab some items */ | 852 | /* Bottom level: grab some items */ |
859 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { | 853 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { |
860 | index++; | ||
861 | if (slot->slots[i]) { | 854 | if (slot->slots[i]) { |
862 | results[nr_found++] = &(slot->slots[i]); | 855 | results[nr_found] = &(slot->slots[i]); |
863 | if (nr_found == max_items) | 856 | if (indices) |
857 | indices[nr_found] = index; | ||
858 | if (++nr_found == max_items) { | ||
859 | index++; | ||
864 | goto out; | 860 | goto out; |
861 | } | ||
865 | } | 862 | } |
863 | index++; | ||
866 | } | 864 | } |
867 | out: | 865 | out: |
868 | *next_index = index; | 866 | *next_index = index; |
@@ -918,8 +916,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
918 | 916 | ||
919 | if (cur_index > max_index) | 917 | if (cur_index > max_index) |
920 | break; | 918 | break; |
921 | slots_found = __lookup(node, (void ***)results + ret, cur_index, | 919 | slots_found = __lookup(node, (void ***)results + ret, NULL, |
922 | max_items - ret, &next_index); | 920 | cur_index, max_items - ret, &next_index); |
923 | nr_found = 0; | 921 | nr_found = 0; |
924 | for (i = 0; i < slots_found; i++) { | 922 | for (i = 0; i < slots_found; i++) { |
925 | struct radix_tree_node *slot; | 923 | struct radix_tree_node *slot; |
@@ -944,6 +942,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); | |||
944 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree | 942 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree |
945 | * @root: radix tree root | 943 | * @root: radix tree root |
946 | * @results: where the results of the lookup are placed | 944 | * @results: where the results of the lookup are placed |
945 | * @indices: where their indices should be placed (but usually NULL) | ||
947 | * @first_index: start the lookup from this key | 946 | * @first_index: start the lookup from this key |
948 | * @max_items: place up to this many items at *results | 947 | * @max_items: place up to this many items at *results |
949 | * | 948 | * |
@@ -958,7 +957,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); | |||
958 | * protection, radix_tree_deref_slot may fail requiring a retry. | 957 | * protection, radix_tree_deref_slot may fail requiring a retry. |
959 | */ | 958 | */ |
960 | unsigned int | 959 | unsigned int |
961 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | 960 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, |
961 | void ***results, unsigned long *indices, | ||
962 | unsigned long first_index, unsigned int max_items) | 962 | unsigned long first_index, unsigned int max_items) |
963 | { | 963 | { |
964 | unsigned long max_index; | 964 | unsigned long max_index; |
@@ -974,6 +974,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
974 | if (first_index > 0) | 974 | if (first_index > 0) |
975 | return 0; | 975 | return 0; |
976 | results[0] = (void **)&root->rnode; | 976 | results[0] = (void **)&root->rnode; |
977 | if (indices) | ||
978 | indices[0] = 0; | ||
977 | return 1; | 979 | return 1; |
978 | } | 980 | } |
979 | node = indirect_to_ptr(node); | 981 | node = indirect_to_ptr(node); |
@@ -987,8 +989,9 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
987 | 989 | ||
988 | if (cur_index > max_index) | 990 | if (cur_index > max_index) |
989 | break; | 991 | break; |
990 | slots_found = __lookup(node, results + ret, cur_index, | 992 | slots_found = __lookup(node, results + ret, |
991 | max_items - ret, &next_index); | 993 | indices ? indices + ret : NULL, |
994 | cur_index, max_items - ret, &next_index); | ||
992 | ret += slots_found; | 995 | ret += slots_found; |
993 | if (next_index == 0) | 996 | if (next_index == 0) |
994 | break; | 997 | break; |
@@ -1194,6 +1197,98 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
1194 | } | 1197 | } |
1195 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | 1198 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); |
1196 | 1199 | ||
1200 | #if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP) | ||
1201 | #include <linux/sched.h> /* for cond_resched() */ | ||
1202 | |||
1203 | /* | ||
1204 | * This linear search is at present only useful to shmem_unuse_inode(). | ||
1205 | */ | ||
1206 | static unsigned long __locate(struct radix_tree_node *slot, void *item, | ||
1207 | unsigned long index, unsigned long *found_index) | ||
1208 | { | ||
1209 | unsigned int shift, height; | ||
1210 | unsigned long i; | ||
1211 | |||
1212 | height = slot->height; | ||
1213 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
1214 | |||
1215 | for ( ; height > 1; height--) { | ||
1216 | i = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
1217 | for (;;) { | ||
1218 | if (slot->slots[i] != NULL) | ||
1219 | break; | ||
1220 | index &= ~((1UL << shift) - 1); | ||
1221 | index += 1UL << shift; | ||
1222 | if (index == 0) | ||
1223 | goto out; /* 32-bit wraparound */ | ||
1224 | i++; | ||
1225 | if (i == RADIX_TREE_MAP_SIZE) | ||
1226 | goto out; | ||
1227 | } | ||
1228 | |||
1229 | shift -= RADIX_TREE_MAP_SHIFT; | ||
1230 | slot = rcu_dereference_raw(slot->slots[i]); | ||
1231 | if (slot == NULL) | ||
1232 | goto out; | ||
1233 | } | ||
1234 | |||
1235 | /* Bottom level: check items */ | ||
1236 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { | ||
1237 | if (slot->slots[i] == item) { | ||
1238 | *found_index = index + i; | ||
1239 | index = 0; | ||
1240 | goto out; | ||
1241 | } | ||
1242 | } | ||
1243 | index += RADIX_TREE_MAP_SIZE; | ||
1244 | out: | ||
1245 | return index; | ||
1246 | } | ||
1247 | |||
1248 | /** | ||
1249 | * radix_tree_locate_item - search through radix tree for item | ||
1250 | * @root: radix tree root | ||
1251 | * @item: item to be found | ||
1252 | * | ||
1253 | * Returns the index where the item was found, or -1 if not found. | ||
1254 | * The caller must hold no lock (since this time-consuming function needs | ||
1255 | * to be preemptible), and must check afterwards whether the item is still there. | ||
1256 | */ | ||
1257 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | ||
1258 | { | ||
1259 | struct radix_tree_node *node; | ||
1260 | unsigned long max_index; | ||
1261 | unsigned long cur_index = 0; | ||
1262 | unsigned long found_index = -1; | ||
1263 | |||
1264 | do { | ||
1265 | rcu_read_lock(); | ||
1266 | node = rcu_dereference_raw(root->rnode); | ||
1267 | if (!radix_tree_is_indirect_ptr(node)) { | ||
1268 | rcu_read_unlock(); | ||
1269 | if (node == item) | ||
1270 | found_index = 0; | ||
1271 | break; | ||
1272 | } | ||
1273 | |||
1274 | node = indirect_to_ptr(node); | ||
1275 | max_index = radix_tree_maxindex(node->height); | ||
1276 | if (cur_index > max_index) | ||
1277 | break; | ||
1278 | |||
1279 | cur_index = __locate(node, item, cur_index, &found_index); | ||
1280 | rcu_read_unlock(); | ||
1281 | cond_resched(); | ||
1282 | } while (cur_index != 0 && cur_index <= max_index); | ||
1283 | |||
1284 | return found_index; | ||
1285 | } | ||
1286 | #else | ||
1287 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | ||
1288 | { | ||
1289 | return -1; | ||
1290 | } | ||
1291 | #endif /* CONFIG_SHMEM && CONFIG_SWAP */ | ||
1197 | 1292 | ||
1198 | /** | 1293 | /** |
1199 | * radix_tree_shrink - shrink height of a radix tree to minimal | 1294 | * radix_tree_shrink - shrink height of a radix tree to minimal |
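radix_tree_locate_item() deliberately walks the whole tree in preemptible RCU chunks, so its result can be stale by the time it returns and must be re-validated, as the comment says. A sketch of that lookup-then-recheck pattern; the lock and tree names are illustrative, and the function's declaration is assumed to be exposed via <linux/radix-tree.h>:

#include <linux/kernel.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_tree_lock);
static RADIX_TREE(demo_tree, GFP_ATOMIC);

static long demo_find_index(void *item)
{
	unsigned long index;

	index = radix_tree_locate_item(&demo_tree, item);  /* preemptible */
	if (index == (unsigned long)-1)
		return -1;

	spin_lock(&demo_tree_lock);
	if (radix_tree_lookup(&demo_tree, index) != item)
		index = -1;	/* raced with a concurrent update */
	spin_unlock(&demo_tree_lock);

	return index;
}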
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index b595f560bee7..8b02f60ffc86 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/raid/pq.h> | 19 | #include <linux/raid/pq.h> |
20 | #include <linux/module.h> | ||
20 | #ifndef __KERNEL__ | 21 | #ifndef __KERNEL__ |
21 | #include <sys/mman.h> | 22 | #include <sys/mman.h> |
22 | #include <stdio.h> | 23 | #include <stdio.h> |
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc index d1e276a14fab..5b50f8dfc5d2 100644 --- a/lib/raid6/int.uc +++ b/lib/raid6/int.uc | |||
@@ -11,7 +11,7 @@ | |||
11 | * ----------------------------------------------------------------------- */ | 11 | * ----------------------------------------------------------------------- */ |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * raid6int$#.c | 14 | * int$#.c |
15 | * | 15 | * |
16 | * $#-way unrolled portable integer math RAID-6 instruction set | 16 | * $#-way unrolled portable integer math RAID-6 instruction set |
17 | * | 17 | * |
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c index 3b1500843bba..8a3780902cec 100644 --- a/lib/raid6/mktables.c +++ b/lib/raid6/mktables.c | |||
@@ -60,6 +60,7 @@ int main(int argc, char *argv[]) | |||
60 | uint8_t exptbl[256], invtbl[256]; | 60 | uint8_t exptbl[256], invtbl[256]; |
61 | 61 | ||
62 | printf("#include <linux/raid/pq.h>\n"); | 62 | printf("#include <linux/raid/pq.h>\n"); |
63 | printf("#include <linux/export.h>\n"); | ||
63 | 64 | ||
64 | /* Compute multiplication table */ | 65 | /* Compute multiplication table */ |
65 | printf("\nconst u8 __attribute__((aligned(256)))\n" | 66 | printf("\nconst u8 __attribute__((aligned(256)))\n" |
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c index 8590d19cf522..fe275d7b6b36 100644 --- a/lib/raid6/recov.c +++ b/lib/raid6/recov.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * the syndrome.) | 18 | * the syndrome.) |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/export.h> | ||
21 | #include <linux/raid/pq.h> | 22 | #include <linux/raid/pq.h> |
22 | 23 | ||
23 | /* Recover two failed data blocks. */ | 24 | /* Recover two failed data blocks. */ |
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 027a03f4c56d..c96d500577de 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
39 | * in addition to the one that will be printed by | 39 | * in addition to the one that will be printed by |
40 | * the entity that is holding the lock already: | 40 | * the entity that is holding the lock already: |
41 | */ | 41 | */ |
42 | if (!spin_trylock_irqsave(&rs->lock, flags)) | 42 | if (!raw_spin_trylock_irqsave(&rs->lock, flags)) |
43 | return 0; | 43 | return 0; |
44 | 44 | ||
45 | if (!rs->begin) | 45 | if (!rs->begin) |
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
60 | rs->missed++; | 60 | rs->missed++; |
61 | ret = 0; | 61 | ret = 0; |
62 | } | 62 | } |
63 | spin_unlock_irqrestore(&rs->lock, flags); | 63 | raw_spin_unlock_irqrestore(&rs->lock, flags); |
64 | 64 | ||
65 | return ret; | 65 | return ret; |
66 | } | 66 | } |
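___ratelimit() is normally reached through the __ratelimit() wrapper; note that with the raw_spin_trylock_irqsave() above, a contended caller simply suppresses its message. A sketch of typical use, with illustrative names:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* defaults: at most 10 messages per 5 seconds */
static DEFINE_RATELIMIT_STATE(demo_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void demo_warn(int err)
{
	if (__ratelimit(&demo_rs))
		pr_warn("demo: transient error %d\n", err);
}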
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index ffc9fc7f3b05..f2393c21fe85 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem) | |||
22 | int ret = 1; | 22 | int ret = 1; |
23 | unsigned long flags; | 23 | unsigned long flags; |
24 | 24 | ||
25 | if (spin_trylock_irqsave(&sem->wait_lock, flags)) { | 25 | if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
26 | ret = (sem->activity != 0); | 26 | ret = (sem->activity != 0); |
27 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 27 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
28 | } | 28 | } |
29 | return ret; | 29 | return ret; |
30 | } | 30 | } |
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
44 | lockdep_init_map(&sem->dep_map, name, key, 0); | 44 | lockdep_init_map(&sem->dep_map, name, key, 0); |
45 | #endif | 45 | #endif |
46 | sem->activity = 0; | 46 | sem->activity = 0; |
47 | spin_lock_init(&sem->wait_lock); | 47 | raw_spin_lock_init(&sem->wait_lock); |
48 | INIT_LIST_HEAD(&sem->wait_list); | 48 | INIT_LIST_HEAD(&sem->wait_list); |
49 | } | 49 | } |
50 | EXPORT_SYMBOL(__init_rwsem); | 50 | EXPORT_SYMBOL(__init_rwsem); |
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
145 | struct task_struct *tsk; | 145 | struct task_struct *tsk; |
146 | unsigned long flags; | 146 | unsigned long flags; |
147 | 147 | ||
148 | spin_lock_irqsave(&sem->wait_lock, flags); | 148 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
149 | 149 | ||
150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
151 | /* granted */ | 151 | /* granted */ |
152 | sem->activity++; | 152 | sem->activity++; |
153 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 153 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
154 | goto out; | 154 | goto out; |
155 | } | 155 | } |
156 | 156 | ||
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
165 | list_add_tail(&waiter.list, &sem->wait_list); | 165 | list_add_tail(&waiter.list, &sem->wait_list); |
166 | 166 | ||
167 | /* we don't need to touch the semaphore struct anymore */ | 167 | /* we don't need to touch the semaphore struct anymore */ |
168 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 168 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
169 | 169 | ||
170 | /* wait to be given the lock */ | 170 | /* wait to be given the lock */ |
171 | for (;;) { | 171 | for (;;) { |
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
189 | int ret = 0; | 189 | int ret = 0; |
190 | 190 | ||
191 | 191 | ||
192 | spin_lock_irqsave(&sem->wait_lock, flags); | 192 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
193 | 193 | ||
194 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 194 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
195 | /* granted */ | 195 | /* granted */ |
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
197 | ret = 1; | 197 | ret = 1; |
198 | } | 198 | } |
199 | 199 | ||
200 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 200 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
201 | 201 | ||
202 | return ret; | 202 | return ret; |
203 | } | 203 | } |
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
212 | struct task_struct *tsk; | 212 | struct task_struct *tsk; |
213 | unsigned long flags; | 213 | unsigned long flags; |
214 | 214 | ||
215 | spin_lock_irqsave(&sem->wait_lock, flags); | 215 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
216 | 216 | ||
217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
218 | /* granted */ | 218 | /* granted */ |
219 | sem->activity = -1; | 219 | sem->activity = -1; |
220 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 220 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
221 | goto out; | 221 | goto out; |
222 | } | 222 | } |
223 | 223 | ||
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
232 | list_add_tail(&waiter.list, &sem->wait_list); | 232 | list_add_tail(&waiter.list, &sem->wait_list); |
233 | 233 | ||
234 | /* we don't need to touch the semaphore struct anymore */ | 234 | /* we don't need to touch the semaphore struct anymore */ |
235 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 235 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
236 | 236 | ||
237 | /* wait to be given the lock */ | 237 | /* wait to be given the lock */ |
238 | for (;;) { | 238 | for (;;) { |
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
260 | unsigned long flags; | 260 | unsigned long flags; |
261 | int ret = 0; | 261 | int ret = 0; |
262 | 262 | ||
263 | spin_lock_irqsave(&sem->wait_lock, flags); | 263 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
264 | 264 | ||
265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
266 | /* granted */ | 266 | /* granted */ |
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
268 | ret = 1; | 268 | ret = 1; |
269 | } | 269 | } |
270 | 270 | ||
271 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 271 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
272 | 272 | ||
273 | return ret; | 273 | return ret; |
274 | } | 274 | } |
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem) | |||
280 | { | 280 | { |
281 | unsigned long flags; | 281 | unsigned long flags; |
282 | 282 | ||
283 | spin_lock_irqsave(&sem->wait_lock, flags); | 283 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
284 | 284 | ||
285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) | 285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) |
286 | sem = __rwsem_wake_one_writer(sem); | 286 | sem = __rwsem_wake_one_writer(sem); |
287 | 287 | ||
288 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 288 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
289 | } | 289 | } |
290 | 290 | ||
291 | /* | 291 | /* |
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem) | |||
295 | { | 295 | { |
296 | unsigned long flags; | 296 | unsigned long flags; |
297 | 297 | ||
298 | spin_lock_irqsave(&sem->wait_lock, flags); | 298 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
299 | 299 | ||
300 | sem->activity = 0; | 300 | sem->activity = 0; |
301 | if (!list_empty(&sem->wait_list)) | 301 | if (!list_empty(&sem->wait_list)) |
302 | sem = __rwsem_do_wake(sem, 1); | 302 | sem = __rwsem_do_wake(sem, 1); |
303 | 303 | ||
304 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 304 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
305 | } | 305 | } |
306 | 306 | ||
307 | /* | 307 | /* |
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem) | |||
312 | { | 312 | { |
313 | unsigned long flags; | 313 | unsigned long flags; |
314 | 314 | ||
315 | spin_lock_irqsave(&sem->wait_lock, flags); | 315 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
316 | 316 | ||
317 | sem->activity = 1; | 317 | sem->activity = 1; |
318 | if (!list_empty(&sem->wait_list)) | 318 | if (!list_empty(&sem->wait_list)) |
319 | sem = __rwsem_do_wake(sem, 0); | 319 | sem = __rwsem_do_wake(sem, 0); |
320 | 320 | ||
321 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 321 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
322 | } | 322 | } |
323 | 323 | ||
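The rwsem, ratelimit, percpu_counter, and proportions conversions in this series all follow one rule: primitives that must keep spinning even where ordinary spinlocks can sleep (as on preempt-rt trees) move to raw_spinlock_t. A minimal sketch of the distinction; the lock names are illustrative:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hw_lock);	/* always a spinning lock */
static DEFINE_SPINLOCK(data_lock);	/* may become a sleeping lock on -rt */

static void demo_locks(void)
{
	unsigned long flags;

	/* short, non-preemptible critical section, e.g. register access */
	raw_spin_lock_irqsave(&hw_lock, flags);
	raw_spin_unlock_irqrestore(&hw_lock, flags);

	/* ordinary data-structure protection */
	spin_lock(&data_lock);
	spin_unlock(&data_lock);
}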
diff --git a/lib/rwsem.c b/lib/rwsem.c index aa7c3052261f..410aa1189b13 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
22 | lockdep_init_map(&sem->dep_map, name, key, 0); | 22 | lockdep_init_map(&sem->dep_map, name, key, 0); |
23 | #endif | 23 | #endif |
24 | sem->count = RWSEM_UNLOCKED_VALUE; | 24 | sem->count = RWSEM_UNLOCKED_VALUE; |
25 | spin_lock_init(&sem->wait_lock); | 25 | raw_spin_lock_init(&sem->wait_lock); |
26 | INIT_LIST_HEAD(&sem->wait_list); | 26 | INIT_LIST_HEAD(&sem->wait_list); |
27 | } | 27 | } |
28 | 28 | ||
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
181 | 181 | ||
182 | /* set up my own style of waitqueue */ | 182 | /* set up my own style of waitqueue */ |
183 | spin_lock_irq(&sem->wait_lock); | 183 | raw_spin_lock_irq(&sem->wait_lock); |
184 | waiter.task = tsk; | 184 | waiter.task = tsk; |
185 | waiter.flags = flags; | 185 | waiter.flags = flags; |
186 | get_task_struct(tsk); | 186 | get_task_struct(tsk); |
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) | 204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) |
205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | 205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
206 | 206 | ||
207 | spin_unlock_irq(&sem->wait_lock); | 207 | raw_spin_unlock_irq(&sem->wait_lock); |
208 | 208 | ||
209 | /* wait to be given the lock */ | 209 | /* wait to be given the lock */ |
210 | for (;;) { | 210 | for (;;) { |
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
245 | { | 245 | { |
246 | unsigned long flags; | 246 | unsigned long flags; |
247 | 247 | ||
248 | spin_lock_irqsave(&sem->wait_lock, flags); | 248 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
249 | 249 | ||
250 | /* do nothing if list empty */ | 250 | /* do nothing if list empty */ |
251 | if (!list_empty(&sem->wait_list)) | 251 | if (!list_empty(&sem->wait_list)) |
252 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); | 252 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); |
253 | 253 | ||
254 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 254 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
255 | 255 | ||
256 | return sem; | 256 | return sem; |
257 | } | 257 | } |
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
265 | { | 265 | { |
266 | unsigned long flags; | 266 | unsigned long flags; |
267 | 267 | ||
268 | spin_lock_irqsave(&sem->wait_lock, flags); | 268 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
269 | 269 | ||
270 | /* do nothing if list empty */ | 270 | /* do nothing if list empty */ |
271 | if (!list_empty(&sem->wait_list)) | 271 | if (!list_empty(&sem->wait_list)) |
272 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | 272 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
273 | 273 | ||
274 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 274 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
275 | 275 | ||
276 | return sem; | 276 | return sem; |
277 | } | 277 | } |
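The slow path above follows the kernel's publish-and-sleep idiom: the loser enqueues a waiter record under wait_lock, then sleeps until the waker clears waiter.task. A condensed sketch of just that idiom (the count bookkeeping and wake decisions of the real rwsem_down_failed_common() are omitted, and the names are simplified):

	struct my_waiter {
		struct list_head list;
		struct task_struct *task;
	};

	static void sleep_until_granted(struct rw_semaphore *sem,
					struct my_waiter *w)
	{
		set_task_state(current, TASK_UNINTERRUPTIBLE);

		raw_spin_lock_irq(&sem->wait_lock);
		w->task = current;
		get_task_struct(current);	/* reference dropped by the waker */
		list_add_tail(&w->list, &sem->wait_list);
		raw_spin_unlock_irq(&sem->wait_lock);

		/* wait to be given the lock */
		for (;;) {
			if (!w->task)	/* cleared by __rwsem_do_wake() */
				break;
			schedule();
			set_task_state(current, TASK_UNINTERRUPTIBLE);
		}
		__set_task_state(current, TASK_RUNNING);
	}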
diff --git a/lib/sha1.c b/lib/sha1.c index 4c45fd50e913..1de509a159c8 100644 --- a/lib/sha1.c +++ b/lib/sha1.c | |||
@@ -1,31 +1,73 @@ | |||
1 | /* | 1 | /* |
2 | * SHA transform algorithm, originally taken from code written by | 2 | * SHA1 routine optimized to do word accesses rather than byte accesses, |
3 | * Peter Gutmann, and placed in the public domain. | 3 | * and to avoid unnecessary copies into the context array. |
4 | * | ||
5 | * This was based on the git SHA1 implementation. | ||
4 | */ | 6 | */ |
5 | 7 | ||
6 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/bitops.h> | ||
8 | #include <linux/cryptohash.h> | 11 | #include <linux/cryptohash.h> |
12 | #include <asm/unaligned.h> | ||
9 | 13 | ||
10 | /* The SHA f()-functions. */ | 14 | /* |
15 | * If you have 32 registers or more, the compiler can (and should) | ||
16 | * try to change the array[] accesses into registers. However, on | ||
17 | * machines with fewer than ~25 registers, that won't really work, | ||
18 | * and at least gcc will make an unholy mess of it. | ||
19 | * | ||
20 | * So to avoid that mess which just slows things down, we force | ||
21 | * the stores to memory to actually happen (we might be better off | ||
22 | * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as | ||
23 | * suggested by Artur Skawina - that will also make gcc unable to | ||
24 | * try to do the silly "optimize away loads" part because it won't | ||
25 | * see what the value will be). | ||
26 | * | ||
27 | * Ben Herrenschmidt reports that on PPC, the C version comes close | ||
28 | * to the optimized asm with this (i.e. on PPC you don't want that | ||
29 | * 'volatile', since there are lots of registers). | ||
30 | * | ||
31 | * On ARM we get the best code generation by forcing a full memory barrier | ||
32 | * between each SHA_ROUND; otherwise gcc happily gets wild with spilling, | ||
33 | * the stack frame size simply explodes, and performance goes down the drain. | ||
34 | */ | ||
11 | 35 | ||
12 | #define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ | 36 | #ifdef CONFIG_X86 |
13 | #define f2(x,y,z) (x ^ y ^ z) /* XOR */ | 37 | #define setW(x, val) (*(volatile __u32 *)&W(x) = (val)) |
14 | #define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ | 38 | #elif defined(CONFIG_ARM) |
39 | #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0) | ||
40 | #else | ||
41 | #define setW(x, val) (W(x) = (val)) | ||
42 | #endif | ||
15 | 43 | ||
16 | /* The SHA Mysterious Constants */ | 44 | /* This "rolls" over the 512-bit array */ |
45 | #define W(x) (array[(x)&15]) | ||
17 | 46 | ||
18 | #define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ | 47 | /* |
19 | #define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ | 48 | * Where do we get the source from? The first 16 iterations get it from |
20 | #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ | 49 | * the input data, the rest mix it from the 512-bit array. |
21 | #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ | 50 | */ |
51 | #define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t) | ||
52 | #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1) | ||
53 | |||
54 | #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ | ||
55 | __u32 TEMP = input(t); setW(t, TEMP); \ | ||
56 | E += TEMP + rol32(A,5) + (fn) + (constant); \ | ||
57 | B = ror32(B, 2); } while (0) | ||
58 | |||
59 | #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) | ||
60 | #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) | ||
61 | #define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E ) | ||
62 | #define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E ) | ||
63 | #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) | ||
22 | 64 | ||
23 | /** | 65 | /** |
24 | * sha_transform - single block SHA1 transform | 66 | * sha_transform - single block SHA1 transform |
25 | * | 67 | * |
26 | * @digest: 160 bit digest to update | 68 | * @digest: 160 bit digest to update |
27 | * @data: 512 bits of data to hash | 69 | * @data: 512 bits of data to hash |
28 | * @W: 80 words of workspace (see note) | 70 | * @array: 16 words of workspace (see note) |
29 | * | 71 | * |
30 | * This function generates a SHA1 digest for a single 512-bit block. | 72 | * This function generates a SHA1 digest for a single 512-bit block. |
31 | * Be warned, it does not handle padding and message digest, do not | 73 | * Be warned, it does not handle padding and message digest, do not |
@@ -36,47 +78,111 @@ | |||
36 | * to clear the workspace. This is left to the caller to avoid | 78 | * to clear the workspace. This is left to the caller to avoid |
37 | * unnecessary clears between chained hashing operations. | 79 | * unnecessary clears between chained hashing operations. |
38 | */ | 80 | */ |
39 | void sha_transform(__u32 *digest, const char *in, __u32 *W) | 81 | void sha_transform(__u32 *digest, const char *data, __u32 *array) |
40 | { | 82 | { |
41 | __u32 a, b, c, d, e, t, i; | 83 | __u32 A, B, C, D, E; |
42 | 84 | ||
43 | for (i = 0; i < 16; i++) | 85 | A = digest[0]; |
44 | W[i] = be32_to_cpu(((const __be32 *)in)[i]); | 86 | B = digest[1]; |
45 | 87 | C = digest[2]; | |
46 | for (i = 0; i < 64; i++) | 88 | D = digest[3]; |
47 | W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); | 89 | E = digest[4]; |
48 | 90 | ||
49 | a = digest[0]; | 91 | /* Round 1 - iterations 0-15 take their input from 'data' */ |
50 | b = digest[1]; | 92 | T_0_15( 0, A, B, C, D, E); |
51 | c = digest[2]; | 93 | T_0_15( 1, E, A, B, C, D); |
52 | d = digest[3]; | 94 | T_0_15( 2, D, E, A, B, C); |
53 | e = digest[4]; | 95 | T_0_15( 3, C, D, E, A, B); |
54 | 96 | T_0_15( 4, B, C, D, E, A); | |
55 | for (i = 0; i < 20; i++) { | 97 | T_0_15( 5, A, B, C, D, E); |
56 | t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; | 98 | T_0_15( 6, E, A, B, C, D); |
57 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 99 | T_0_15( 7, D, E, A, B, C); |
58 | } | 100 | T_0_15( 8, C, D, E, A, B); |
59 | 101 | T_0_15( 9, B, C, D, E, A); | |
60 | for (; i < 40; i ++) { | 102 | T_0_15(10, A, B, C, D, E); |
61 | t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; | 103 | T_0_15(11, E, A, B, C, D); |
62 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 104 | T_0_15(12, D, E, A, B, C); |
63 | } | 105 | T_0_15(13, C, D, E, A, B); |
64 | 106 | T_0_15(14, B, C, D, E, A); | |
65 | for (; i < 60; i ++) { | 107 | T_0_15(15, A, B, C, D, E); |
66 | t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; | 108 | |
67 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 109 | /* Round 1 - tail. Input from 512-bit mixing array */ |
68 | } | 110 | T_16_19(16, E, A, B, C, D); |
69 | 111 | T_16_19(17, D, E, A, B, C); | |
70 | for (; i < 80; i ++) { | 112 | T_16_19(18, C, D, E, A, B); |
71 | t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; | 113 | T_16_19(19, B, C, D, E, A); |
72 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 114 | |
73 | } | 115 | /* Round 2 */ |
74 | 116 | T_20_39(20, A, B, C, D, E); | |
75 | digest[0] += a; | 117 | T_20_39(21, E, A, B, C, D); |
76 | digest[1] += b; | 118 | T_20_39(22, D, E, A, B, C); |
77 | digest[2] += c; | 119 | T_20_39(23, C, D, E, A, B); |
78 | digest[3] += d; | 120 | T_20_39(24, B, C, D, E, A); |
79 | digest[4] += e; | 121 | T_20_39(25, A, B, C, D, E); |
122 | T_20_39(26, E, A, B, C, D); | ||
123 | T_20_39(27, D, E, A, B, C); | ||
124 | T_20_39(28, C, D, E, A, B); | ||
125 | T_20_39(29, B, C, D, E, A); | ||
126 | T_20_39(30, A, B, C, D, E); | ||
127 | T_20_39(31, E, A, B, C, D); | ||
128 | T_20_39(32, D, E, A, B, C); | ||
129 | T_20_39(33, C, D, E, A, B); | ||
130 | T_20_39(34, B, C, D, E, A); | ||
131 | T_20_39(35, A, B, C, D, E); | ||
132 | T_20_39(36, E, A, B, C, D); | ||
133 | T_20_39(37, D, E, A, B, C); | ||
134 | T_20_39(38, C, D, E, A, B); | ||
135 | T_20_39(39, B, C, D, E, A); | ||
136 | |||
137 | /* Round 3 */ | ||
138 | T_40_59(40, A, B, C, D, E); | ||
139 | T_40_59(41, E, A, B, C, D); | ||
140 | T_40_59(42, D, E, A, B, C); | ||
141 | T_40_59(43, C, D, E, A, B); | ||
142 | T_40_59(44, B, C, D, E, A); | ||
143 | T_40_59(45, A, B, C, D, E); | ||
144 | T_40_59(46, E, A, B, C, D); | ||
145 | T_40_59(47, D, E, A, B, C); | ||
146 | T_40_59(48, C, D, E, A, B); | ||
147 | T_40_59(49, B, C, D, E, A); | ||
148 | T_40_59(50, A, B, C, D, E); | ||
149 | T_40_59(51, E, A, B, C, D); | ||
150 | T_40_59(52, D, E, A, B, C); | ||
151 | T_40_59(53, C, D, E, A, B); | ||
152 | T_40_59(54, B, C, D, E, A); | ||
153 | T_40_59(55, A, B, C, D, E); | ||
154 | T_40_59(56, E, A, B, C, D); | ||
155 | T_40_59(57, D, E, A, B, C); | ||
156 | T_40_59(58, C, D, E, A, B); | ||
157 | T_40_59(59, B, C, D, E, A); | ||
158 | |||
159 | /* Round 4 */ | ||
160 | T_60_79(60, A, B, C, D, E); | ||
161 | T_60_79(61, E, A, B, C, D); | ||
162 | T_60_79(62, D, E, A, B, C); | ||
163 | T_60_79(63, C, D, E, A, B); | ||
164 | T_60_79(64, B, C, D, E, A); | ||
165 | T_60_79(65, A, B, C, D, E); | ||
166 | T_60_79(66, E, A, B, C, D); | ||
167 | T_60_79(67, D, E, A, B, C); | ||
168 | T_60_79(68, C, D, E, A, B); | ||
169 | T_60_79(69, B, C, D, E, A); | ||
170 | T_60_79(70, A, B, C, D, E); | ||
171 | T_60_79(71, E, A, B, C, D); | ||
172 | T_60_79(72, D, E, A, B, C); | ||
173 | T_60_79(73, C, D, E, A, B); | ||
174 | T_60_79(74, B, C, D, E, A); | ||
175 | T_60_79(75, A, B, C, D, E); | ||
176 | T_60_79(76, E, A, B, C, D); | ||
177 | T_60_79(77, D, E, A, B, C); | ||
178 | T_60_79(78, C, D, E, A, B); | ||
179 | T_60_79(79, B, C, D, E, A); | ||
180 | |||
181 | digest[0] += A; | ||
182 | digest[1] += B; | ||
183 | digest[2] += C; | ||
184 | digest[3] += D; | ||
185 | digest[4] += E; | ||
80 | } | 186 | } |
81 | EXPORT_SYMBOL(sha_transform); | 187 | EXPORT_SYMBOL(sha_transform); |
82 | 188 | ||
@@ -92,4 +198,3 @@ void sha_init(__u32 *buf) | |||
92 | buf[3] = 0x10325476; | 198 | buf[3] = 0x10325476; |
93 | buf[4] = 0xc3d2e1f0; | 199 | buf[4] = 0xc3d2e1f0; |
94 | } | 200 | } |
95 | |||
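As the comment warns, sha_transform() hashes exactly one 64-byte block and leaves padding to the caller. A sketch of the standard SHA-1 finalization a caller performs (the helper name is hypothetical; the 0x80 byte, zero fill, and trailing big-endian bit count are fixed by the SHA-1 specification):

	/* Finalize a message: 'tail' holds the 0..63 leftover bytes,
	 * 'total_len' is the full message length in bytes.
	 */
	static void sha1_final_block(__u32 *digest, const u8 *tail,
				     size_t tail_len, u64 total_len)
	{
		__u32 workspace[SHA_WORKSPACE_WORDS];
		u8 block[SHA_MESSAGE_BYTES];	/* 64 bytes */

		memset(block, 0, sizeof(block));
		memcpy(block, tail, tail_len);
		block[tail_len] = 0x80;		/* mandatory '1' padding bit */

		if (tail_len >= 56) {		/* no room left for the length */
			sha_transform(digest, (const char *)block, workspace);
			memset(block, 0, sizeof(block));
		}
		/* bit count, big-endian, in the last 8 bytes */
		put_unaligned_be64(total_len << 3, block + 56);
		sha_transform(digest, (const char *)block, workspace);
	}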
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 4689cb073da4..503f087382a4 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) | |||
22 | * Kernel threads bound to a single CPU can safely use | 22 | * Kernel threads bound to a single CPU can safely use |
23 | * smp_processor_id(): | 23 | * smp_processor_id(): |
24 | */ | 24 | */ |
25 | if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) | 25 | if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) |
26 | goto out; | 26 | goto out; |
27 | 27 | ||
28 | /* | 28 | /* |
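tsk_cpus_allowed() is just the accessor form of the old direct field reference; the test itself is unchanged. In condensed form, what this check establishes (a sketch, not the full function):

	/* A task that can only ever run on one CPU cannot migrate between
	 * reading the CPU number and using it, so smp_processor_id() is safe.
	 */
	static bool bound_to_one_cpu(int this_cpu)
	{
		return cpumask_equal(tsk_cpus_allowed(current),
				     cpumask_of(this_cpu));
	}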
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 4755b98b6dfb..5f3eacdd6178 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
@@ -49,13 +49,10 @@ void __rwlock_init(rwlock_t *lock, const char *name, | |||
49 | 49 | ||
50 | EXPORT_SYMBOL(__rwlock_init); | 50 | EXPORT_SYMBOL(__rwlock_init); |
51 | 51 | ||
52 | static void spin_bug(raw_spinlock_t *lock, const char *msg) | 52 | static void spin_dump(raw_spinlock_t *lock, const char *msg) |
53 | { | 53 | { |
54 | struct task_struct *owner = NULL; | 54 | struct task_struct *owner = NULL; |
55 | 55 | ||
56 | if (!debug_locks_off()) | ||
57 | return; | ||
58 | |||
59 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | 56 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) |
60 | owner = lock->owner; | 57 | owner = lock->owner; |
61 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", | 58 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", |
@@ -70,6 +67,14 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg) | |||
70 | dump_stack(); | 67 | dump_stack(); |
71 | } | 68 | } |
72 | 69 | ||
70 | static void spin_bug(raw_spinlock_t *lock, const char *msg) | ||
71 | { | ||
72 | if (!debug_locks_off()) | ||
73 | return; | ||
74 | |||
75 | spin_dump(lock, msg); | ||
76 | } | ||
77 | |||
73 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | 78 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) |
74 | 79 | ||
75 | static inline void | 80 | static inline void |
@@ -113,11 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock) | |||
113 | /* lockup suspected: */ | 118 | /* lockup suspected: */ |
114 | if (print_once) { | 119 | if (print_once) { |
115 | print_once = 0; | 120 | print_once = 0; |
116 | printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, " | 121 | spin_dump(lock, "lockup"); |
117 | "%s/%d, %p\n", | ||
118 | raw_smp_processor_id(), current->comm, | ||
119 | task_pid_nr(current), lock); | ||
120 | dump_stack(); | ||
121 | #ifdef CONFIG_SMP | 122 | #ifdef CONFIG_SMP |
122 | trigger_all_cpu_backtrace(); | 123 | trigger_all_cpu_backtrace(); |
123 | #endif | 124 | #endif |
diff --git a/lib/string.c b/lib/string.c index 01fad9b203e1..dc4a86341f91 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -360,7 +360,6 @@ char *strim(char *s) | |||
360 | size_t size; | 360 | size_t size; |
361 | char *end; | 361 | char *end; |
362 | 362 | ||
363 | s = skip_spaces(s); | ||
364 | size = strlen(s); | 363 | size = strlen(s); |
365 | if (!size) | 364 | if (!size) |
366 | return s; | 365 | return s; |
@@ -370,7 +369,7 @@ char *strim(char *s) | |||
370 | end--; | 369 | end--; |
371 | *(end + 1) = '\0'; | 370 | *(end + 1) = '\0'; |
372 | 371 | ||
373 | return s; | 372 | return skip_spaces(s); |
374 | } | 373 | } |
375 | EXPORT_SYMBOL(strim); | 374 | EXPORT_SYMBOL(strim); |
376 | 375 | ||
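The fix changes which pointer the leading-whitespace skip applies to: trailing blanks are now cut in place first, and the skip shows up only in the return value, so the buffer passed in keeps its original start. A quick usage sketch:

	char buf[] = "  hello  ";
	char *t = strim(buf);
	/* buf is now "  hello" - trailing blanks replaced with '\0' in
	 * place; t points at "hello" inside buf (leading blanks skipped
	 * on return).
	 */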
@@ -756,3 +755,57 @@ void *memchr(const void *s, int c, size_t n) | |||
756 | } | 755 | } |
757 | EXPORT_SYMBOL(memchr); | 756 | EXPORT_SYMBOL(memchr); |
758 | #endif | 757 | #endif |
758 | |||
759 | static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes) | ||
760 | { | ||
761 | while (bytes) { | ||
762 | if (*start != value) | ||
763 | return (void *)start; | ||
764 | start++; | ||
765 | bytes--; | ||
766 | } | ||
767 | return NULL; | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * memchr_inv - Find a character other than @c in an area of memory. | ||
772 | * @start: The memory area | ||
773 | * @c: Find a character other than c | ||
774 | * @bytes: The size of the area. | ||
775 | * | ||
776 | * Returns the address of the first character other than @c, or %NULL | ||
777 | * if the whole buffer contains just @c. | ||
778 | */ | ||
779 | void *memchr_inv(const void *start, int c, size_t bytes) | ||
780 | { | ||
781 | u8 value = c; | ||
782 | u64 value64; | ||
783 | unsigned int words, prefix; | ||
784 | |||
785 | if (bytes <= 16) | ||
786 | return check_bytes8(start, value, bytes); | ||
787 | |||
788 | value64 = value | value << 8 | value << 16 | value << 24; | ||
789 | value64 = (value64 & 0xffffffff) | value64 << 32; | ||
790 | prefix = 8 - ((unsigned long)start) % 8; | ||
791 | |||
792 | if (prefix) { | ||
793 | u8 *r = check_bytes8(start, value, prefix); | ||
794 | if (r) | ||
795 | return r; | ||
796 | start += prefix; | ||
797 | bytes -= prefix; | ||
798 | } | ||
799 | |||
800 | words = bytes / 8; | ||
801 | |||
802 | while (words) { | ||
803 | if (*(u64 *)start != value64) | ||
804 | return check_bytes8(start, value, 8); | ||
805 | start += 8; | ||
806 | words--; | ||
807 | } | ||
808 | |||
809 | return check_bytes8(start, value, bytes % 8); | ||
810 | } | ||
811 | EXPORT_SYMBOL(memchr_inv); | ||
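A typical caller uses memchr_inv() to verify that a region is uniformly filled, e.g. zero or poison checking (obj and size stand in for the caller's buffer):

	/* NULL means the whole object is still zero-filled */
	u8 *bad = memchr_inv(obj, 0, size);

	if (bad)
		pr_err("corruption at offset %td\n", bad - (u8 *)obj);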
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d7222a9c8267..993599e66e5a 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -31,17 +31,7 @@ | |||
31 | #include <asm/div64.h> | 31 | #include <asm/div64.h> |
32 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | 32 | #include <asm/sections.h> /* for dereference_function_descriptor() */ |
33 | 33 | ||
34 | static unsigned int simple_guess_base(const char *cp) | 34 | #include "kstrtox.h" |
35 | { | ||
36 | if (cp[0] == '0') { | ||
37 | if (_tolower(cp[1]) == 'x' && isxdigit(cp[2])) | ||
38 | return 16; | ||
39 | else | ||
40 | return 8; | ||
41 | } else { | ||
42 | return 10; | ||
43 | } | ||
44 | } | ||
45 | 35 | ||
46 | /** | 36 | /** |
47 | * simple_strtoull - convert a string to an unsigned long long | 37 | * simple_strtoull - convert a string to an unsigned long long |
@@ -51,23 +41,14 @@ static unsigned int simple_guess_base(const char *cp) | |||
51 | */ | 41 | */ |
52 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) | 42 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) |
53 | { | 43 | { |
54 | unsigned long long result = 0; | 44 | unsigned long long result; |
45 | unsigned int rv; | ||
55 | 46 | ||
56 | if (!base) | 47 | cp = _parse_integer_fixup_radix(cp, &base); |
57 | base = simple_guess_base(cp); | 48 | rv = _parse_integer(cp, base, &result); |
49 | /* FIXME */ | ||
50 | cp += (rv & ~KSTRTOX_OVERFLOW); | ||
58 | 51 | ||
59 | if (base == 16 && cp[0] == '0' && _tolower(cp[1]) == 'x') | ||
60 | cp += 2; | ||
61 | |||
62 | while (isxdigit(*cp)) { | ||
63 | unsigned int value; | ||
64 | |||
65 | value = isdigit(*cp) ? *cp - '0' : _tolower(*cp) - 'a' + 10; | ||
66 | if (value >= base) | ||
67 | break; | ||
68 | result = result * base + value; | ||
69 | cp++; | ||
70 | } | ||
71 | if (endp) | 52 | if (endp) |
72 | *endp = (char *)cp; | 53 | *endp = (char *)cp; |
73 | 54 | ||
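Caller-visible behavior is meant to stay the same: with base 0 the radix is still auto-detected from the usual prefixes, now inside _parse_integer_fixup_radix(). For example:

	char *end;
	unsigned long long v;

	v = simple_strtoull("0x1f", &end, 0);	/* hex: v == 31 */
	v = simple_strtoull("0755", &end, 0);	/* octal: v == 493 */
	v = simple_strtoull("42k", &end, 10);	/* v == 42, end points at "k" */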
@@ -566,7 +547,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr, | |||
566 | } | 547 | } |
567 | 548 | ||
568 | for (i = 0; i < 6; i++) { | 549 | for (i = 0; i < 6; i++) { |
569 | p = pack_hex_byte(p, addr[i]); | 550 | p = hex_byte_pack(p, addr[i]); |
570 | if (fmt[0] == 'M' && i != 5) | 551 | if (fmt[0] == 'M' && i != 5) |
571 | *p++ = separator; | 552 | *p++ = separator; |
572 | } | 553 | } |
@@ -686,13 +667,13 @@ char *ip6_compressed_string(char *p, const char *addr) | |||
686 | lo = word & 0xff; | 667 | lo = word & 0xff; |
687 | if (hi) { | 668 | if (hi) { |
688 | if (hi > 0x0f) | 669 | if (hi > 0x0f) |
689 | p = pack_hex_byte(p, hi); | 670 | p = hex_byte_pack(p, hi); |
690 | else | 671 | else |
691 | *p++ = hex_asc_lo(hi); | 672 | *p++ = hex_asc_lo(hi); |
692 | p = pack_hex_byte(p, lo); | 673 | p = hex_byte_pack(p, lo); |
693 | } | 674 | } |
694 | else if (lo > 0x0f) | 675 | else if (lo > 0x0f) |
695 | p = pack_hex_byte(p, lo); | 676 | p = hex_byte_pack(p, lo); |
696 | else | 677 | else |
697 | *p++ = hex_asc_lo(lo); | 678 | *p++ = hex_asc_lo(lo); |
698 | needcolon = true; | 679 | needcolon = true; |
@@ -714,8 +695,8 @@ char *ip6_string(char *p, const char *addr, const char *fmt) | |||
714 | int i; | 695 | int i; |
715 | 696 | ||
716 | for (i = 0; i < 8; i++) { | 697 | for (i = 0; i < 8; i++) { |
717 | p = pack_hex_byte(p, *addr++); | 698 | p = hex_byte_pack(p, *addr++); |
718 | p = pack_hex_byte(p, *addr++); | 699 | p = hex_byte_pack(p, *addr++); |
719 | if (fmt[0] == 'I' && i != 7) | 700 | if (fmt[0] == 'I' && i != 7) |
720 | *p++ = ':'; | 701 | *p++ = ':'; |
721 | } | 702 | } |
@@ -773,7 +754,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
773 | } | 754 | } |
774 | 755 | ||
775 | for (i = 0; i < 16; i++) { | 756 | for (i = 0; i < 16; i++) { |
776 | p = pack_hex_byte(p, addr[index[i]]); | 757 | p = hex_byte_pack(p, addr[index[i]]); |
777 | switch (i) { | 758 | switch (i) { |
778 | case 3: | 759 | case 3: |
779 | case 5: | 760 | case 5: |
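hex_byte_pack() is the new name for pack_hex_byte(); it writes a byte's two hex digits and returns the advanced pointer, which is what lets these formatting loops chain without separate index bookkeeping:

	char out[13], *p = out;
	static const u8 mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	int i;

	for (i = 0; i < 6; i++)
		p = hex_byte_pack(p, mac[i]);
	*p = '\0';	/* out == "001a2b3c4d5e" */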
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c index e51e2558ca9d..a768e6d28bbb 100644 --- a/lib/xz/xz_dec_bcj.c +++ b/lib/xz/xz_dec_bcj.c | |||
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, | |||
441 | * next filter in the chain. Apply the BCJ filter on the new data | 441 | * next filter in the chain. Apply the BCJ filter on the new data |
442 | * in the output buffer. If not all of it can be filtered, copy it | 442 | * in the output buffer. If not all of it can be filtered, copy it |
443 | * to temp and rewind the output buffer position accordingly. | 443 | * to temp and rewind the output buffer position accordingly. |
444 | * | ||
445 | * This always needs to run when temp.size == 0 to handle a special | ||
446 | * case where the output buffer is full and the next filter has no | ||
447 | * more output coming but hasn't returned XZ_STREAM_END yet. | ||
444 | */ | 448 | */ |
445 | if (s->temp.size < b->out_size - b->out_pos) { | 449 | if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { |
446 | out_start = b->out_pos; | 450 | out_start = b->out_pos; |
447 | memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); | 451 | memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); |
448 | b->out_pos += s->temp.size; | 452 | b->out_pos += s->temp.size; |
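temp is a small carry-over buffer: filtered bytes that no longer fit in the caller's output are parked there and replayed at the start of the next call. In outline, the general carry-over pattern looks like this (a generic sketch with hypothetical names, not the exact xz_dec_bcj code, which moves temp wholesale when it fits):

	/* Drain the carry-over into 'out' first; keep whatever does not
	 * fit for the next invocation. Returns the new output position.
	 */
	static size_t drain_carry(u8 *out, size_t out_pos, size_t out_size,
				  u8 *carry, size_t *carry_len)
	{
		size_t n = min(*carry_len, out_size - out_pos);

		memcpy(out + out_pos, carry, n);
		memmove(carry, carry + n, *carry_len - n);
		*carry_len -= n;
		return out_pos + n;
	}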
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, | |||
465 | s->temp.size = b->out_pos - out_start; | 469 | s->temp.size = b->out_pos - out_start; |
466 | b->out_pos -= s->temp.size; | 470 | b->out_pos -= s->temp.size; |
467 | memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); | 471 | memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); |
472 | |||
473 | /* | ||
474 | * If there wasn't enough input to the next filter to fill | ||
475 | * the output buffer with unfiltered data, there's no point | ||
476 | * in trying to decode more data into temp. | ||
477 | */ | ||
478 | if (b->out_pos + s->temp.size < b->out_size) | ||
479 | return XZ_OK; | ||
468 | } | 480 | } |
469 | 481 | ||
470 | /* | 482 | /* |
471 | * If we have unfiltered data in temp, try to fill by decoding more | 483 | * We have unfiltered data in temp. If the output buffer isn't full |
472 | * data from the next filter. Apply the BCJ filter on temp. Then we | 484 | * yet, try to fill the temp buffer by decoding more data from the |
473 | * hopefully can fill the actual output buffer by copying filtered | 485 | * next filter. Apply the BCJ filter on temp. Then we hopefully can |
474 | * data from temp. A mix of filtered and unfiltered data may be left | 486 | * fill the actual output buffer by copying filtered data from temp. |
475 | * in temp; it will be taken care of on the next call to this function. | 487 | * A mix of filtered and unfiltered data may be left in temp; it will |
476 | */ | 488 | * be taken care of on the next call to this function. |
476 | */ | 489 | */ |
477 | if (s->temp.size > 0) { | 490 | if (b->out_pos < b->out_size) { |
478 | /* Make b->out{,_pos,_size} temporarily point to s->temp. */ | 491 | /* Make b->out{,_pos,_size} temporarily point to s->temp. */ |
479 | s->out = b->out; | 492 | s->out = b->out; |
480 | s->out_pos = b->out_pos; | 493 | s->out_pos = b->out_pos; |