author		Jiri Kosina <jkosina@suse.cz>	2011-09-15 09:08:05 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2011-09-15 09:08:18 -0400
commit		e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree		407361230bf6733f63d8e788e4b5e6566ee04818 /lib
parent		10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent		cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus' tree to be able to merge patches based on a more recent version of the tree.
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig		3
-rw-r--r--	lib/Makefile		8
-rw-r--r--	lib/atomic64.c		2
-rw-r--r--	lib/atomic64_test.c	2
-rw-r--r--	lib/bitmap.c		4
-rw-r--r--	lib/cpumask.c		4
-rw-r--r--	lib/crc32.c		2
-rw-r--r--	lib/dec_and_lock.c	2
-rw-r--r--	lib/fault-inject.c	156
-rw-r--r--	lib/genalloc.c		300
-rw-r--r--	lib/idr.c		67
-rw-r--r--	lib/llist.c		129
-rw-r--r--	lib/md5.c		95
-rw-r--r--	lib/radix-tree.c	121
-rw-r--r--	lib/sha1.c		211
15 files changed, 862 insertions(+), 244 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 32f3e5ae2be..6c695ff9cab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,4 +276,7 @@ config CORDIC
 	  so its calculations are in fixed point. Modules can select this
 	  when they require this function. Module will be called cordic.
 
+config LLIST
+	bool
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 892f4e282ea..3f5bc6d903e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,9 +10,9 @@ endif
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
+	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o find_next_bit.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-	 bsearch.o find_last_bit.o
+	 bsearch.o find_last_bit.o find_next_bit.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
@@ -115,6 +115,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
+obj-$(CONFIG_LLIST) += llist.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index a21c12bc727..e12ae0dd08a 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -14,7 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * We use a hashed array of spinlocks to provide exclusive access
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 44524cc8c32..0c33cde2a1e 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -10,7 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
 static __init int test_atomic64(void)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 3f3b68199d7..2f4412e4d07 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
 
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
 void bitmap_set(unsigned long *map, int start, int nr)
 {
 	unsigned long *p = map + BIT_WORD(start);
@@ -756,7 +754,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
  *
  * The bit positions 0 through @bits are valid positions in @buf.
  */
-static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 {
 	int pos = 0;
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 05d6aca7fc1..af3e5817de9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -30,7 +30,7 @@ int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
 		if (cpu_online(cpu))
 			break;
 	}
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var_node);
  */
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 {
-	return alloc_cpumask_var_node(mask, flags, numa_node_id());
+	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(alloc_cpumask_var);
 
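The two changes above switch cpumask.c to the pointer-based iterator and to NUMA_NO_NODE. A small, hypothetical caller using the same for_each_cpu() pattern might look like this (the helper name and purpose are illustrative only, not from this patch):

#include <linux/cpumask.h>

/* Illustrative helper: count how many CPUs in @mask are currently online. */
static int count_online_cpus_in(const struct cpumask *mask)
{
	int cpu, n = 0;

	for_each_cpu(cpu, mask) {
		if (cpu_online(cpu))
			n++;
	}
	return n;
}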
diff --git a/lib/crc32.c b/lib/crc32.c
index 4855995fcde..a6e633a48ce 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -26,7 +26,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/init.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include "crc32defs.h"
 #if CRC_LE_BITS == 8
 # define tole(x) __constant_cpu_to_le32(x)
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index e73822aa6e9..b5257725daa 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * This is an implementation of the notion of "decrement a
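For reference, atomic_dec_and_lock(), which this file implements, is typically used to take a lock only when a reference count actually drops to zero. A minimal, hypothetical caller could look like the following sketch (the foo object and list are made up, not part of this patch):

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(foo_list_lock);		/* protects foo_list */
static LIST_HEAD(foo_list);

struct foo {
	atomic_t refcount;
	struct list_head node;
};

static void foo_put(struct foo *f)
{
	/* Take foo_list_lock only when the count really reaches zero. */
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		list_del(&f->node);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}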
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 7e65af70635..f193b779644 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -8,7 +8,6 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/stacktrace.h>
-#include <linux/kallsyms.h>
 #include <linux/fault-inject.h>
 
 /*
@@ -140,16 +139,6 @@ static int debugfs_ul_set(void *data, u64 val)
 	return 0;
 }
 
-#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-static int debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
-{
-	*(unsigned long *)data =
-		val < MAX_STACK_TRACE_DEPTH ?
-		val : MAX_STACK_TRACE_DEPTH;
-	return 0;
-}
-#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
-
 static int debugfs_ul_get(void *data, u64 *val)
 {
 	*val = *(unsigned long *)data;
@@ -165,16 +154,26 @@ static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
 }
 
 #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
-			debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
 
-static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
+static int debugfs_stacktrace_depth_set(void *data, u64 val)
+{
+	*(unsigned long *)data =
+		min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
+			debugfs_stacktrace_depth_set, "%llu\n");
+
+static struct dentry *debugfs_create_stacktrace_depth(
 	const char *name, mode_t mode,
 	struct dentry *parent, unsigned long *value)
 {
 	return debugfs_create_file(name, mode, parent, value,
-			&fops_ul_MAX_STACK_TRACE_DEPTH);
+			&fops_stacktrace_depth);
 }
+
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
 static int debugfs_atomic_t_set(void *data, u64 val)
@@ -198,118 +197,51 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
 	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
 }
 
-void cleanup_fault_attr_dentries(struct fault_attr *attr)
-{
-	debugfs_remove(attr->dentries.probability_file);
-	attr->dentries.probability_file = NULL;
-
-	debugfs_remove(attr->dentries.interval_file);
-	attr->dentries.interval_file = NULL;
-
-	debugfs_remove(attr->dentries.times_file);
-	attr->dentries.times_file = NULL;
-
-	debugfs_remove(attr->dentries.space_file);
-	attr->dentries.space_file = NULL;
-
-	debugfs_remove(attr->dentries.verbose_file);
-	attr->dentries.verbose_file = NULL;
-
-	debugfs_remove(attr->dentries.task_filter_file);
-	attr->dentries.task_filter_file = NULL;
-
-#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
-	debugfs_remove(attr->dentries.stacktrace_depth_file);
-	attr->dentries.stacktrace_depth_file = NULL;
-
-	debugfs_remove(attr->dentries.require_start_file);
-	attr->dentries.require_start_file = NULL;
-
-	debugfs_remove(attr->dentries.require_end_file);
-	attr->dentries.require_end_file = NULL;
-
-	debugfs_remove(attr->dentries.reject_start_file);
-	attr->dentries.reject_start_file = NULL;
-
-	debugfs_remove(attr->dentries.reject_end_file);
-	attr->dentries.reject_end_file = NULL;
-
-#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
-
-	if (attr->dentries.dir)
-		WARN_ON(!simple_empty(attr->dentries.dir));
-
-	debugfs_remove(attr->dentries.dir);
-	attr->dentries.dir = NULL;
-}
-
-int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
+struct dentry *fault_create_debugfs_attr(const char *name,
+			struct dentry *parent, struct fault_attr *attr)
 {
 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 
-	memset(&attr->dentries, 0, sizeof(attr->dentries));
-
-	dir = debugfs_create_dir(name, NULL);
+	dir = debugfs_create_dir(name, parent);
 	if (!dir)
-		goto fail;
-	attr->dentries.dir = dir;
-
-	attr->dentries.probability_file =
-		debugfs_create_ul("probability", mode, dir, &attr->probability);
+		return ERR_PTR(-ENOMEM);
 
-	attr->dentries.interval_file =
-		debugfs_create_ul("interval", mode, dir, &attr->interval);
-
-	attr->dentries.times_file =
-		debugfs_create_atomic_t("times", mode, dir, &attr->times);
-
-	attr->dentries.space_file =
-		debugfs_create_atomic_t("space", mode, dir, &attr->space);
-
-	attr->dentries.verbose_file =
-		debugfs_create_ul("verbose", mode, dir, &attr->verbose);
-
-	attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
-						mode, dir, &attr->task_filter);
-
-	if (!attr->dentries.probability_file || !attr->dentries.interval_file ||
-	    !attr->dentries.times_file || !attr->dentries.space_file ||
-	    !attr->dentries.verbose_file || !attr->dentries.task_filter_file)
+	if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
+		goto fail;
+	if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
+		goto fail;
+	if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
+		goto fail;
+	if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
+		goto fail;
+	if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
+		goto fail;
+	if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
 		goto fail;
 
 #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
 
-	attr->dentries.stacktrace_depth_file =
-		debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
-			"stacktrace-depth", mode, dir, &attr->stacktrace_depth);
-
-	attr->dentries.require_start_file =
-		debugfs_create_ul("require-start", mode, dir, &attr->require_start);
-
-	attr->dentries.require_end_file =
-		debugfs_create_ul("require-end", mode, dir, &attr->require_end);
-
-	attr->dentries.reject_start_file =
-		debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
-
-	attr->dentries.reject_end_file =
-		debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
-
-	if (!attr->dentries.stacktrace_depth_file ||
-	    !attr->dentries.require_start_file ||
-	    !attr->dentries.require_end_file ||
-	    !attr->dentries.reject_start_file ||
-	    !attr->dentries.reject_end_file)
+	if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+				&attr->stacktrace_depth))
+		goto fail;
+	if (!debugfs_create_ul("require-start", mode, dir,
+				&attr->require_start))
+		goto fail;
+	if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
+		goto fail;
+	if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
+		goto fail;
+	if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
 		goto fail;
 
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
-	return 0;
+	return dir;
 fail:
-	cleanup_fault_attr_dentries(attr);
-	return -ENOMEM;
+	debugfs_remove_recursive(dir);
+
+	return ERR_PTR(-ENOMEM);
 }
 
 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
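The net effect of this hunk is that fault_create_debugfs_attr() replaces the init_fault_attr_dentries()/cleanup_fault_attr_dentries() pair: it returns the directory dentry (or ERR_PTR(-ENOMEM)), and callers tear everything down with debugfs_remove_recursive(). A hypothetical caller sketch (the fail_frobnicate names are made up and would be wired to module_init()/module_exit() in a real module):

#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/err.h>

static DECLARE_FAULT_ATTR(fail_frobnicate);	/* hypothetical fault attr */
static struct dentry *fail_frobnicate_dir;

static int __init fail_frobnicate_debugfs(void)
{
	struct dentry *dir;

	/* One call now creates the directory and every attribute file. */
	dir = fault_create_debugfs_attr("fail_frobnicate", NULL,
					&fail_frobnicate);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	fail_frobnicate_dir = dir;
	return 0;
}

static void __exit fail_frobnicate_cleanup(void)
{
	/* Tear-down is a plain recursive remove instead of per-file cleanup. */
	debugfs_remove_recursive(fail_frobnicate_dir);
}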
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf80597..f352cc42f4f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,26 @@
 /*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool a lock has to be
+ * still taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handler. So code uses the
+ * allocator in NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
  *
@@ -13,8 +31,109 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
 #include <linux/genalloc.h>
 
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if (val & mask_to_set)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+	return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if ((val & mask_to_clear) != mask_to_clear)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+	return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users set the same bit, one user will return remain bits, otherwise
+ * return 0.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_set >= 0) {
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+		nr -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+	}
+
+	return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Clear @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users clear the same bit, one user will return remain bits,
+ * otherwise return 0.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_clear >= 0) {
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+		nr -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+	}
+
+	return 0;
+}
 
 /**
  * gen_pool_create - create a new special memory pool
@@ -30,7 +149,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 
 	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
 	if (pool != NULL) {
-		rwlock_init(&pool->lock);
+		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
 	}
@@ -63,14 +182,14 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	if (unlikely(chunk == NULL))
 		return -ENOMEM;
 
-	spin_lock_init(&chunk->lock);
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size;
+	atomic_set(&chunk->avail, size);
 
-	write_lock(&pool->lock);
-	list_add(&chunk->next_chunk, &pool->chunks);
-	write_unlock(&pool->lock);
+	spin_lock(&pool->lock);
+	list_add_rcu(&chunk->next_chunk, &pool->chunks);
+	spin_unlock(&pool->lock);
 
 	return 0;
 }
@@ -85,19 +204,19 @@ EXPORT_SYMBOL(gen_pool_add_virt);
  */
 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
+	phys_addr_t paddr = -1;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
-		if (addr >= chunk->start_addr && addr < chunk->end_addr)
-			return chunk->phys_addr + addr - chunk->start_addr;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			paddr = chunk->phys_addr + (addr - chunk->start_addr);
+			break;
+		}
 	}
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
 
-	return -1;
+	return paddr;
 }
 EXPORT_SYMBOL(gen_pool_virt_to_phys);
 
@@ -115,7 +234,6 @@ void gen_pool_destroy(struct gen_pool *pool)
 	int order = pool->min_alloc_order;
 	int bit, end_bit;
 
-
 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
@@ -137,44 +255,50 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
  *
  * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+ * Uses a first-fit algorithm. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
+	unsigned long addr = 0;
 	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
+	int nbits, start_bit = 0, end_bit, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
 	if (size == 0)
 		return 0;
 
 	nbits = (size + (1UL << order) - 1) >> order;
-
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (size > atomic_read(&chunk->avail))
+			continue;
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-
-		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
-			spin_unlock_irqrestore(&chunk->lock, flags);
+retry:
+		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
+						       start_bit, nbits, 0);
+		if (start_bit >= end_bit)
 			continue;
+		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+		if (remain) {
+			remain = bitmap_clear_ll(chunk->bits, start_bit,
+						 nbits - remain);
+			BUG_ON(remain);
+			goto retry;
 		}
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
-		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		size = nbits << order;
+		atomic_sub(size, &chunk->avail);
+		break;
 	}
-	read_unlock(&pool->lock);
-	return 0;
+	rcu_read_unlock();
+	return addr;
 }
 EXPORT_SYMBOL(gen_pool_alloc);
 
@@ -184,33 +308,95 @@ EXPORT_SYMBOL(gen_pool_alloc);
  * @addr: starting address of memory to free back to pool
  * @size: size in bytes of memory to free
  *
- * Free previously allocated special memory back to the specified pool.
+ * Free previously allocated special memory back to the specified
+ * pool. Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
  */
 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
-	unsigned long flags;
 	int order = pool->min_alloc_order;
-	int bit, nbits;
+	int start_bit, nbits, remain;
 
-	nbits = (size + (1UL << order) - 1) >> order;
-
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
+	nbits = (size + (1UL << order) - 1) >> order;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
 		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
 			BUG_ON(addr + size > chunk->end_addr);
-			spin_lock_irqsave(&chunk->lock, flags);
-			bit = (addr - chunk->start_addr) >> order;
-			while (nbits--)
-				__clear_bit(bit++, chunk->bits);
-			spin_unlock_irqrestore(&chunk->lock, flags);
-			break;
+			start_bit = (addr - chunk->start_addr) >> order;
+			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+			BUG_ON(remain);
+			size = nbits << order;
+			atomic_add(size, &chunk->avail);
+			rcu_read_unlock();
+			return;
 		}
 	}
-	BUG_ON(nbits > 0);
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
+	BUG();
 }
 EXPORT_SYMBOL(gen_pool_free);
+
+/**
+ * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
+ * @pool: the generic memory pool
+ * @func: func to call
+ * @data: additional data used by @func
+ *
+ * Call @func for every chunk of generic memory pool. The @func is
+ * called with rcu_read_lock held.
+ */
+void gen_pool_for_each_chunk(struct gen_pool *pool,
+	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
+	void *data)
+{
+	struct gen_pool_chunk *chunk;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
+		func(pool, chunk, data);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_for_each_chunk);
+
+/**
+ * gen_pool_avail - get available free space of the pool
+ * @pool: pool to get available free space
+ *
+ * Return available free space of the specified pool.
+ */
+size_t gen_pool_avail(struct gen_pool *pool)
+{
+	struct gen_pool_chunk *chunk;
+	size_t avail = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+		avail += atomic_read(&chunk->avail);
+	rcu_read_unlock();
+	return avail;
+}
+EXPORT_SYMBOL_GPL(gen_pool_avail);
+
+/**
+ * gen_pool_size - get size in bytes of memory managed by the pool
+ * @pool: pool to get size
+ *
+ * Return size in bytes of memory managed by the pool.
+ */
+size_t gen_pool_size(struct gen_pool *pool)
+{
+	struct gen_pool_chunk *chunk;
+	size_t size = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+		size += chunk->end_addr - chunk->start_addr;
+	rcu_read_unlock();
+	return size;
+}
+EXPORT_SYMBOL_GPL(gen_pool_size);
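Putting the reworked genalloc pieces together, a caller that owns a special memory region might use the pool roughly as in the sketch below. This is illustrative only (the 4 KiB region and 32-byte granularity are arbitrary); per the comments above, creating the pool and adding memory still take the pool lock, while alloc/free are the lockless paths.

#include <linux/kernel.h>
#include <linux/genalloc.h>

static int demo_genalloc(unsigned long region_virt, phys_addr_t region_phys)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(5, -1);		/* 2^5 = 32-byte granularity, any node */
	if (!pool)
		return -ENOMEM;

	/* Hand a 4 KiB region (obtained elsewhere) to the pool. */
	if (gen_pool_add_virt(pool, region_virt, region_phys, 4096, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, 128);	/* lockless allocation path */
	if (addr) {
		pr_info("allocated 128 bytes at 0x%lx, %zu of %zu bytes still free\n",
			addr, gen_pool_avail(pool), gen_pool_size(pool));
		gen_pool_free(pool, addr, 128);	/* lockless free path */
	}

	gen_pool_destroy(pool);
	return 0;
}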
diff --git a/lib/idr.c b/lib/idr.c
index e0abbc177e3..5acf9bb1096 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -34,8 +34,10 @@
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/idr.h>
+#include <linux/spinlock.h>
 
 static struct kmem_cache *idr_layer_cache;
+static DEFINE_SPINLOCK(simple_ida_lock);
 
 static struct idr_layer *get_from_free_list(struct idr *idp)
 {
@@ -926,6 +928,71 @@ void ida_destroy(struct ida *ida)
 EXPORT_SYMBOL(ida_destroy);
 
 /**
+ * ida_simple_get - get a new id.
+ * @ida: the (initialized) ida.
+ * @start: the minimum id (inclusive, < 0x8000000)
+ * @end: the maximum id (exclusive, < 0x8000000 or 0)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocates an id in the range start <= id < end, or returns -ENOSPC.
+ * On memory allocation failure, returns -ENOMEM.
+ *
+ * Use ida_simple_remove() to get rid of an id.
+ */
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+		   gfp_t gfp_mask)
+{
+	int ret, id;
+	unsigned int max;
+
+	BUG_ON((int)start < 0);
+	BUG_ON((int)end < 0);
+
+	if (end == 0)
+		max = 0x80000000;
+	else {
+		BUG_ON(end < start);
+		max = end - 1;
+	}
+
+again:
+	if (!ida_pre_get(ida, gfp_mask))
+		return -ENOMEM;
+
+	spin_lock(&simple_ida_lock);
+	ret = ida_get_new_above(ida, start, &id);
+	if (!ret) {
+		if (id > max) {
+			ida_remove(ida, id);
+			ret = -ENOSPC;
+		} else {
+			ret = id;
+		}
+	}
+	spin_unlock(&simple_ida_lock);
+
+	if (unlikely(ret == -EAGAIN))
+		goto again;
+
+	return ret;
+}
+EXPORT_SYMBOL(ida_simple_get);
+
+/**
+ * ida_simple_remove - remove an allocated id.
+ * @ida: the (initialized) ida.
+ * @id: the id returned by ida_simple_get.
+ */
+void ida_simple_remove(struct ida *ida, unsigned int id)
+{
+	BUG_ON((int)id < 0);
+	spin_lock(&simple_ida_lock);
+	ida_remove(ida, id);
+	spin_unlock(&simple_ida_lock);
+}
+EXPORT_SYMBOL(ida_simple_remove);
+
+/**
  * ida_init - initialize ida handle
  * @ida: ida handle
  *
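A typical use of the new ida_simple_get()/ida_simple_remove() pair, for example handing out minor numbers, might look like this sketch (the frob_* names are hypothetical):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(frob_ida);

/* Allocate an id in [0, 256); returns the id or -ENOSPC/-ENOMEM. */
static int frob_get_minor(void)
{
	return ida_simple_get(&frob_ida, 0, 256, GFP_KERNEL);
}

static void frob_put_minor(int minor)
{
	ida_simple_remove(&frob_ida, minor);
}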
diff --git a/lib/llist.c b/lib/llist.c
new file mode 100644
index 00000000000..da445724fa1
--- /dev/null
+++ b/lib/llist.c
@@ -0,0 +1,129 @@
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have NMI-safe cmpxchg implementation, the
+ * list can NOT be used in NMI handler. So code uses the list in NMI
+ * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/llist.h>
+
+#include <asm/system.h>
+
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add(struct llist_node *new, struct llist_head *head)
+{
+	struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	entry = head->first;
+	do {
+		old_entry = entry;
+		new->next = entry;
+		cpu_relax();
+	} while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
+}
+EXPORT_SYMBOL_GPL(llist_add);
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+		     struct llist_head *head)
+{
+	struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	entry = head->first;
+	do {
+		old_entry = entry;
+		new_last->next = entry;
+		cpu_relax();
+	} while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+}
+EXPORT_SYMBOL_GPL(llist_add_batch);
+
+/**
+ * llist_del_first - delete the first entry of lock-less list
+ * @head: the head for your lock-less list
+ *
+ * If list is empty, return NULL, otherwise, return the first entry
+ * deleted, this is the newest added one.
+ *
+ * Only one llist_del_first user can be used simultaneously with
+ * multiple llist_add users without lock. Because otherwise
+ * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
+ * llist_add) sequence in another user may change @head->first->next,
+ * but keep @head->first. If multiple consumers are needed, please
+ * use llist_del_all or use lock between consumers.
+ */
+struct llist_node *llist_del_first(struct llist_head *head)
+{
+	struct llist_node *entry, *old_entry, *next;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	entry = head->first;
+	do {
+		if (entry == NULL)
+			return NULL;
+		old_entry = entry;
+		next = entry->next;
+		cpu_relax();
+	} while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+
+	return entry;
+}
+EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If list is empty, return NULL, otherwise, delete all entries and
+ * return the pointer to the first entry. The order of entries
+ * deleted is from the newest to the oldest added one.
+ */
+struct llist_node *llist_del_all(struct llist_head *head)
+{
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	return xchg(&head->first, NULL);
+}
+EXPORT_SYMBOL_GPL(llist_del_all);
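A hypothetical producer/consumer pair built on this list could look like the sketch below; per the comment on llist_del_first(), a single consumer using llist_del_all() needs no lock against any number of llist_add() producers (struct pending_event is illustrative, not from this patch):

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>

struct pending_event {
	int what;
	struct llist_node node;
};

static LLIST_HEAD(event_list);

/* Producer: may run in IRQ context, or NMI with NMI-safe cmpxchg. */
static void queue_event(struct pending_event *ev)
{
	llist_add(&ev->node, &event_list);
}

/* Single consumer: grab everything queued so far in one xchg. */
static void drain_events(void)
{
	struct llist_node *n = llist_del_all(&event_list);

	while (n) {
		struct pending_event *ev =
			container_of(n, struct pending_event, node);

		n = n->next;		/* save the next pointer before freeing */
		kfree(ev);
	}
}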
diff --git a/lib/md5.c b/lib/md5.c
new file mode 100644
index 00000000000..c777180e1f2
--- /dev/null
+++ b/lib/md5.c
@@ -0,0 +1,95 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cryptohash.h>
+
+#define F1(x, y, z)	(z ^ (x & (y ^ z)))
+#define F2(x, y, z)	F1(z, x, y)
+#define F3(x, y, z)	(x ^ y ^ z)
+#define F4(x, y, z)	(y ^ (x | ~z))
+
+#define MD5STEP(f, w, x, y, z, in, s) \
+	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
+
+void md5_transform(__u32 *hash, __u32 const *in)
+{
+	u32 a, b, c, d;
+
+	a = hash[0];
+	b = hash[1];
+	c = hash[2];
+	d = hash[3];
+
+	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+	hash[0] += a;
+	hash[1] += b;
+	hash[2] += c;
+	hash[3] += d;
+}
+EXPORT_SYMBOL(md5_transform);
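md5_transform() only mixes one 64-byte block into a caller-owned 4-word state; padding and length encoding stay with the caller. A minimal illustrative wrapper (the initial values are the standard MD5 constants, which this file does not export):

#include <linux/cryptohash.h>

static void md5_one_block(__u32 hash[4], const __u32 block[16])
{
	/* Standard MD5 initial state (RFC 1321). */
	hash[0] = 0x67452301;
	hash[1] = 0xefcdab89;
	hash[2] = 0x98badcfe;
	hash[3] = 0x10325476;

	/* Fold one already-padded 512-bit block into the state. */
	md5_transform(hash, block);
}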
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7ea2e033d71..a2f9da59c19 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -823,8 +823,8 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
 EXPORT_SYMBOL(radix_tree_prev_hole);
 
 static unsigned int
-__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
-	unsigned int max_items, unsigned long *next_index)
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long *indices,
+	unsigned long index, unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift, height;
@@ -857,12 +857,16 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
 
 	/* Bottom level: grab some items */
 	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
-		index++;
 		if (slot->slots[i]) {
-			results[nr_found++] = &(slot->slots[i]);
-			if (nr_found == max_items)
+			results[nr_found] = &(slot->slots[i]);
+			if (indices)
+				indices[nr_found] = index;
+			if (++nr_found == max_items) {
+				index++;
 				goto out;
+			}
 		}
+		index++;
 	}
 out:
 	*next_index = index;
@@ -918,8 +922,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 
 		if (cur_index > max_index)
 			break;
-		slots_found = __lookup(node, (void ***)results + ret, cur_index,
-				max_items - ret, &next_index);
+		slots_found = __lookup(node, (void ***)results + ret, NULL,
+				cur_index, max_items - ret, &next_index);
 		nr_found = 0;
 		for (i = 0; i < slots_found; i++) {
 			struct radix_tree_node *slot;
@@ -944,6 +948,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
  * @root: radix tree root
  * @results: where the results of the lookup are placed
+ * @indices: where their indices should be placed (but usually NULL)
  * @first_index: start the lookup from this key
  * @max_items: place up to this many items at *results
  *
@@ -958,7 +963,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  * protection, radix_tree_deref_slot may fail requiring a retry.
  */
 unsigned int
-radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+			void ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items)
 {
 	unsigned long max_index;
@@ -974,6 +980,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
 		if (first_index > 0)
 			return 0;
 		results[0] = (void **)&root->rnode;
+		if (indices)
+			indices[0] = 0;
 		return 1;
 	}
 	node = indirect_to_ptr(node);
@@ -987,8 +995,9 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
 
 		if (cur_index > max_index)
 			break;
-		slots_found = __lookup(node, results + ret, cur_index,
-				max_items - ret, &next_index);
+		slots_found = __lookup(node, results + ret,
+				indices ? indices + ret : NULL,
+				cur_index, max_items - ret, &next_index);
 		ret += slots_found;
 		if (next_index == 0)
 			break;
@@ -1194,6 +1203,98 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
 
+#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
+#include <linux/sched.h> /* for cond_resched() */
+
+/*
+ * This linear search is at present only useful to shmem_unuse_inode().
+ */
+static unsigned long __locate(struct radix_tree_node *slot, void *item,
+			      unsigned long index, unsigned long *found_index)
+{
+	unsigned int shift, height;
+	unsigned long i;
+
+	height = slot->height;
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	for ( ; height > 1; height--) {
+		i = (index >> shift) & RADIX_TREE_MAP_MASK;
+		for (;;) {
+			if (slot->slots[i] != NULL)
+				break;
+			index &= ~((1UL << shift) - 1);
+			index += 1UL << shift;
+			if (index == 0)
+				goto out;	/* 32-bit wraparound */
+			i++;
+			if (i == RADIX_TREE_MAP_SIZE)
+				goto out;
+		}
+
+		shift -= RADIX_TREE_MAP_SHIFT;
+		slot = rcu_dereference_raw(slot->slots[i]);
+		if (slot == NULL)
+			goto out;
+	}
+
+	/* Bottom level: check items */
+	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
+		if (slot->slots[i] == item) {
+			*found_index = index + i;
+			index = 0;
+			goto out;
+		}
+	}
+	index += RADIX_TREE_MAP_SIZE;
+out:
+	return index;
+}
+
+/**
+ * radix_tree_locate_item - search through radix tree for item
+ * @root: radix tree root
+ * @item: item to be found
+ *
+ * Returns index where item was found, or -1 if not found.
+ * Caller must hold no lock (since this time-consuming function needs
+ * to be preemptible), and must check afterwards if item is still there.
+ */
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+{
+	struct radix_tree_node *node;
+	unsigned long max_index;
+	unsigned long cur_index = 0;
+	unsigned long found_index = -1;
+
+	do {
+		rcu_read_lock();
+		node = rcu_dereference_raw(root->rnode);
+		if (!radix_tree_is_indirect_ptr(node)) {
+			rcu_read_unlock();
+			if (node == item)
+				found_index = 0;
+			break;
+		}
+
+		node = indirect_to_ptr(node);
+		max_index = radix_tree_maxindex(node->height);
+		if (cur_index > max_index)
+			break;
+
+		cur_index = __locate(node, item, cur_index, &found_index);
+		rcu_read_unlock();
+		cond_resched();
+	} while (cur_index != 0 && cur_index <= max_index);
+
+	return found_index;
+}
+#else
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+{
+	return -1;
+}
+#endif /* CONFIG_SHMEM && CONFIG_SWAP */
 
 /**
  * radix_tree_shrink - shrink height of a radix tree to minimal
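The extra @indices argument lets gang slot lookups report where each populated slot lives. An illustrative, hypothetical helper using it, assuming the caller already provides the usual RCU or tree-lock protection required by radix_tree_gang_lookup_slot():

#include <linux/kernel.h>
#include <linux/radix-tree.h>

static unsigned int dump_slots(struct radix_tree_root *root,
			       unsigned long first)
{
	void **slots[16];
	unsigned long indices[16];
	unsigned int i, n;

	/* Fetch up to 16 populated slots starting at index @first. */
	n = radix_tree_gang_lookup_slot(root, slots, indices, first, 16);
	for (i = 0; i < n; i++)
		pr_info("index %lu -> %p\n",
			indices[i], radix_tree_deref_slot(slots[i]));

	return n;
}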
diff --git a/lib/sha1.c b/lib/sha1.c
index 4c45fd50e91..1de509a159c 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -1,31 +1,73 @@
 /*
- * SHA transform algorithm, originally taken from code written by
- * Peter Gutmann, and placed in the public domain.
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was based on the git SHA1 implementation.
  */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
 #include <linux/cryptohash.h>
+#include <asm/unaligned.h>
 
-/* The SHA f()-functions. */
+/*
+ * If you have 32 registers or more, the compiler can (and should)
+ * try to change the array[] accesses into registers. However, on
+ * machines with less than ~25 registers, that won't really work,
+ * and at least gcc will make an unholy mess of it.
+ *
+ * So to avoid that mess which just slows things down, we force
+ * the stores to memory to actually happen (we might be better off
+ * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
+ * suggested by Artur Skawina - that will also make gcc unable to
+ * try to do the silly "optimize away loads" part because it won't
+ * see what the value will be).
+ *
+ * Ben Herrenschmidt reports that on PPC, the C version comes close
+ * to the optimized asm with this (ie on PPC you don't want that
+ * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
+ * the stack frame size simply explode and performance goes down the drain.
+ */
 
-#define f1(x,y,z)   (z ^ (x & (y ^ z)))		/* x ? y : z */
-#define f2(x,y,z)   (x ^ y ^ z)			/* XOR */
-#define f3(x,y,z)   ((x & y) + (z & (x ^ y)))	/* majority */
+#ifdef CONFIG_X86
+  #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
+#elif defined(CONFIG_ARM)
+  #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#else
+  #define setW(x, val) (W(x) = (val))
+#endif
 
-/* The SHA Mysterious Constants */
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
 
-#define K1  0x5A827999L		/* Rounds  0-19: sqrt(2) * 2^30 */
-#define K2  0x6ED9EBA1L		/* Rounds 20-39: sqrt(3) * 2^30 */
-#define K3  0x8F1BBCDCL		/* Rounds 40-59: sqrt(5) * 2^30 */
-#define K4  0xCA62C1D6L		/* Rounds 60-79: sqrt(10) * 2^30 */
+/*
+ * Where do we get the source from? The first 16 iterations get it from
+ * the input data, the next mix it from the 512-bit array.
+ */
+#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
+#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
+
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
+	__u32 TEMP = input(t); setW(t, TEMP); \
+	E += TEMP + rol32(A,5) + (fn) + (constant); \
+	B = ror32(B, 2); } while (0)
+
+#define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
+#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
+#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) ,  0xca62c1d6, A, B, C, D, E )
 
 /**
  * sha_transform - single block SHA1 transform
  *
  * @digest: 160 bit digest to update
  * @data:   512 bits of data to hash
- * @W:      80 words of workspace (see note)
+ * @array:  16 words of workspace (see note)
  *
  * This function generates a SHA1 digest for a single 512-bit block.
  * Be warned, it does not handle padding and message digest, do not
@@ -36,47 +78,111 @@
  * to clear the workspace. This is left to the caller to avoid
  * unnecessary clears between chained hashing operations.
  */
-void sha_transform(__u32 *digest, const char *in, __u32 *W)
+void sha_transform(__u32 *digest, const char *data, __u32 *array)
 {
-	__u32 a, b, c, d, e, t, i;
+	__u32 A, B, C, D, E;
 
-	for (i = 0; i < 16; i++)
-		W[i] = be32_to_cpu(((const __be32 *)in)[i]);
-
-	for (i = 0; i < 64; i++)
-		W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);
-
-	a = digest[0];
-	b = digest[1];
-	c = digest[2];
-	d = digest[3];
-	e = digest[4];
-
-	for (i = 0; i < 20; i++) {
-		t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i];
-		e = d; d = c; c = rol32(b, 30); b = a; a = t;
-	}
-
-	for (; i < 40; i ++) {
-		t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i];
-		e = d; d = c; c = rol32(b, 30); b = a; a = t;
-	}
-
-	for (; i < 60; i ++) {
-		t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i];
-		e = d; d = c; c = rol32(b, 30); b = a; a = t;
-	}
-
-	for (; i < 80; i ++) {
-		t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i];
-		e = d; d = c; c = rol32(b, 30); b = a; a = t;
-	}
-
-	digest[0] += a;
-	digest[1] += b;
-	digest[2] += c;
-	digest[3] += d;
-	digest[4] += e;
+	A = digest[0];
+	B = digest[1];
+	C = digest[2];
+	D = digest[3];
+	E = digest[4];
+
+	/* Round 1 - iterations 0-16 take their input from 'data' */
+	T_0_15( 0, A, B, C, D, E);
+	T_0_15( 1, E, A, B, C, D);
+	T_0_15( 2, D, E, A, B, C);
+	T_0_15( 3, C, D, E, A, B);
+	T_0_15( 4, B, C, D, E, A);
+	T_0_15( 5, A, B, C, D, E);
+	T_0_15( 6, E, A, B, C, D);
+	T_0_15( 7, D, E, A, B, C);
+	T_0_15( 8, C, D, E, A, B);
+	T_0_15( 9, B, C, D, E, A);
+	T_0_15(10, A, B, C, D, E);
+	T_0_15(11, E, A, B, C, D);
+	T_0_15(12, D, E, A, B, C);
+	T_0_15(13, C, D, E, A, B);
+	T_0_15(14, B, C, D, E, A);
+	T_0_15(15, A, B, C, D, E);
+
+	/* Round 1 - tail. Input from 512-bit mixing array */
+	T_16_19(16, E, A, B, C, D);
+	T_16_19(17, D, E, A, B, C);
+	T_16_19(18, C, D, E, A, B);
+	T_16_19(19, B, C, D, E, A);
+
+	/* Round 2 */
+	T_20_39(20, A, B, C, D, E);
+	T_20_39(21, E, A, B, C, D);
+	T_20_39(22, D, E, A, B, C);
+	T_20_39(23, C, D, E, A, B);
+	T_20_39(24, B, C, D, E, A);
+	T_20_39(25, A, B, C, D, E);
+	T_20_39(26, E, A, B, C, D);
+	T_20_39(27, D, E, A, B, C);
+	T_20_39(28, C, D, E, A, B);
+	T_20_39(29, B, C, D, E, A);
+	T_20_39(30, A, B, C, D, E);
+	T_20_39(31, E, A, B, C, D);
+	T_20_39(32, D, E, A, B, C);
+	T_20_39(33, C, D, E, A, B);
+	T_20_39(34, B, C, D, E, A);
+	T_20_39(35, A, B, C, D, E);
+	T_20_39(36, E, A, B, C, D);
+	T_20_39(37, D, E, A, B, C);
+	T_20_39(38, C, D, E, A, B);
+	T_20_39(39, B, C, D, E, A);
+
+	/* Round 3 */
+	T_40_59(40, A, B, C, D, E);
+	T_40_59(41, E, A, B, C, D);
+	T_40_59(42, D, E, A, B, C);
+	T_40_59(43, C, D, E, A, B);
+	T_40_59(44, B, C, D, E, A);
+	T_40_59(45, A, B, C, D, E);
+	T_40_59(46, E, A, B, C, D);
+	T_40_59(47, D, E, A, B, C);
+	T_40_59(48, C, D, E, A, B);
+	T_40_59(49, B, C, D, E, A);
+	T_40_59(50, A, B, C, D, E);
+	T_40_59(51, E, A, B, C, D);
+	T_40_59(52, D, E, A, B, C);
+	T_40_59(53, C, D, E, A, B);
+	T_40_59(54, B, C, D, E, A);
+	T_40_59(55, A, B, C, D, E);
+	T_40_59(56, E, A, B, C, D);
+	T_40_59(57, D, E, A, B, C);
+	T_40_59(58, C, D, E, A, B);
+	T_40_59(59, B, C, D, E, A);
+
+	/* Round 4 */
+	T_60_79(60, A, B, C, D, E);
+	T_60_79(61, E, A, B, C, D);
+	T_60_79(62, D, E, A, B, C);
+	T_60_79(63, C, D, E, A, B);
+	T_60_79(64, B, C, D, E, A);
+	T_60_79(65, A, B, C, D, E);
+	T_60_79(66, E, A, B, C, D);
+	T_60_79(67, D, E, A, B, C);
+	T_60_79(68, C, D, E, A, B);
+	T_60_79(69, B, C, D, E, A);
+	T_60_79(70, A, B, C, D, E);
+	T_60_79(71, E, A, B, C, D);
+	T_60_79(72, D, E, A, B, C);
+	T_60_79(73, C, D, E, A, B);
+	T_60_79(74, B, C, D, E, A);
+	T_60_79(75, A, B, C, D, E);
+	T_60_79(76, E, A, B, C, D);
+	T_60_79(77, D, E, A, B, C);
+	T_60_79(78, C, D, E, A, B);
+	T_60_79(79, B, C, D, E, A);
+
+	digest[0] += A;
+	digest[1] += B;
+	digest[2] += C;
+	digest[3] += D;
+	digest[4] += E;
 }
 EXPORT_SYMBOL(sha_transform);
 
@@ -92,4 +198,3 @@ void sha_init(__u32 *buf)
 	buf[3] = 0x10325476;
 	buf[4] = 0xc3d2e1f0;
 }
-
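With this rewrite the workspace passed to sha_transform() shrinks from 80 to 16 words. A minimal illustrative caller (SHA_WORKSPACE_WORDS comes from linux/cryptohash.h; padding and length encoding remain the caller's responsibility):

#include <linux/cryptohash.h>
#include <linux/string.h>

static void sha1_one_block(__u32 digest[5], const char data[64])
{
	__u32 workspace[SHA_WORKSPACE_WORDS];

	sha_init(digest);			/* standard SHA-1 initial state */
	sha_transform(digest, data, workspace);	/* fold one 512-bit block */
	memset(workspace, 0, sizeof(workspace)); /* caller clears the workspace */
}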