Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             8
-rw-r--r--  lib/Kconfig.debug      19
-rw-r--r--  lib/Makefile            4
-rw-r--r--  lib/crc16.c            67
-rw-r--r--  lib/dec_and_lock.c      3
-rw-r--r--  lib/kernel_lock.c       3
-rw-r--r--  lib/klist.c            18
-rw-r--r--  lib/radix-tree.c      178
-rw-r--r--  lib/sort.c              5
-rw-r--r--  lib/spinlock_debug.c  257
10 files changed, 467 insertions(+), 95 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index e43197efeb9c..3de93357f5ab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -12,6 +12,14 @@ config CRC_CCITT
 	  the kernel tree does. Such modules that use library CRC-CCITT
 	  functions require M here.
 
+config CRC16
+	tristate "CRC16 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC16 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC16
+	  functions require M here.
+
 config CRC32
 	tristate "CRC32 functions"
 	default y
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 299f7f3b5b08..3754c9a8f5c8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -46,6 +46,25 @@ config LOG_BUF_SHIFT
 	  13 => 8 KB
 	  12 => 4 KB
 
+config DETECT_SOFTLOCKUP
+	bool "Detect Soft Lockups"
+	depends on DEBUG_KERNEL
+	default y
+	help
+	  Say Y here to enable the kernel to detect "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 10 seconds, without giving other tasks a
+	  chance to run.
+
+	  When a soft-lockup is detected, the kernel will print the
+	  current stack trace (which you should report), but the
+	  system will stay locked up. This feature has negligible
+	  overhead.
+
+	  (Note that "hard lockups" are a separate type of bug that
+	  can be detected via the NMI-watchdog, on platforms that
+	  support it.)
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
diff --git a/lib/Makefile b/lib/Makefile
index 3e2bd0df23bb..44a46750690a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -16,6 +16,7 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
@@ -23,11 +24,12 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
 endif
 
 obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
+obj-$(CONFIG_CRC16) += crc16.o
 obj-$(CONFIG_CRC32) += crc32.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
diff --git a/lib/crc16.c b/lib/crc16.c
new file mode 100644
index 000000000000..011fe573c666
--- /dev/null
+++ b/lib/crc16.c
@@ -0,0 +1,67 @@
+/*
+ * crc16.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc16.h>
+
+/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
+u16 const crc16_table[256] = {
+	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+EXPORT_SYMBOL(crc16_table);
+
+/**
+ * crc16 - compute the CRC-16 for the data buffer
+ * @crc:	previous CRC value
+ * @buffer:	data pointer
+ * @len:	number of bytes in the buffer
+ *
+ * Returns the updated CRC value.
+ */
+u16 crc16(u16 crc, u8 const *buffer, size_t len)
+{
+	while (len--)
+		crc = crc16_byte(crc, *buffer++);
+	return crc;
+}
+EXPORT_SYMBOL(crc16);
+
+MODULE_DESCRIPTION("CRC16 calculations");
+MODULE_LICENSE("GPL");
+
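
The new crc16() helper folds a buffer into a running CRC one byte at a time through the table-driven crc16_byte() from <linux/crc16.h>. A minimal caller sketch, assuming the conventional seed of 0 (my_checksum and its arguments are hypothetical, not part of this patch):

#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc16.h>

/* Hypothetical helper: CRC a whole buffer from the conventional seed 0. */
static u16 my_checksum(const u8 *data, size_t len)
{
	return crc16(0, data, len);
}

Because crc16() takes the previous CRC as its first argument, a buffer can also be folded in several pieces, feeding each intermediate result back in, with the same final value.
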
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 6658d81e1836..2377af057d09 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -25,8 +25,6 @@
  * this is trivially done efficiently using a load-locked
  * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	spin_lock(lock);
@@ -37,4 +35,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
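
With the #ifndef ATOMIC_DEC_AND_LOCK guard gone, the generic _atomic_dec_and_lock() is always compiled here; whether it is linked at all is decided purely by the CONFIG_HAVE_DEC_LOCK test in the Makefile. Its classic use is the final-reference drop, sketched below under hypothetical names (struct my_obj, obj_list_lock):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */

struct my_obj {						/* hypothetical */
	atomic_t refcnt;
	struct list_head entry;
};

static void my_obj_put(struct my_obj *obj)
{
	/*
	 * Fast path: just decrement. Only when the count would hit zero
	 * is obj_list_lock taken, atomically with that final decrement,
	 * so the object can be unlinked and freed without races.
	 */
	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
		list_del(&obj->entry);
		spin_unlock(&obj_list_lock);
		kfree(obj);
	}
}
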
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index bd2bc5d887b8..cb5490ec00f2 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,8 +177,7 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
+	spin_unlock(&kernel_flag);
 }
 
 /*
diff --git a/lib/klist.c b/lib/klist.c
index a70c836c5c4c..bb2f3551d50a 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -42,12 +42,23 @@
 /**
  * klist_init - Initialize a klist structure.
  * @k: The klist we're initializing.
+ * @get: The get function for the embedding object (NULL if none)
+ * @put: The put function for the embedding object (NULL if none)
+ *
+ * Initialises the klist structure. If the klist_node structures are
+ * going to be embedded in refcounted objects (necessary for safe
+ * deletion) then the get/put arguments are used to initialise
+ * functions that take and release references on the embedding
+ * objects.
  */
 
-void klist_init(struct klist * k)
+void klist_init(struct klist * k, void (*get)(struct klist_node *),
+		void (*put)(struct klist_node *))
 {
 	INIT_LIST_HEAD(&k->k_list);
 	spin_lock_init(&k->k_lock);
+	k->get = get;
+	k->put = put;
 }
 
 EXPORT_SYMBOL_GPL(klist_init);
@@ -74,6 +85,8 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
 	init_completion(&n->n_removed);
 	kref_init(&n->n_ref);
 	n->n_klist = k;
+	if (k->get)
+		k->get(n);
 }
 
 
@@ -110,9 +123,12 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
 static void klist_release(struct kref * kref)
 {
 	struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+	void (*put)(struct klist_node *) = n->n_klist->put;
 	list_del(&n->n_node);
 	complete(&n->n_removed);
 	n->n_klist = NULL;
+	if (put)
+		put(n);
 }
 
 static int klist_dec_and_del(struct klist_node * n)
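
The get/put hooks let a klist hold nodes embedded in refcounted objects: klist_node_init() now takes a reference through k->get when a node is added, and klist_release() drops it through k->put once the node is off the list, which is what makes deletion safe. A sketch under assumed names (struct my_dev and its helpers are illustrative only):

#include <linux/kernel.h>
#include <linux/klist.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_dev {				/* hypothetical embedding object */
	struct kref ref;
	struct klist_node node;
};

static void my_dev_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_dev, ref));
}

/* Called by the klist core when a node is added / finally released. */
static void my_dev_get(struct klist_node *n)
{
	kref_get(&container_of(n, struct my_dev, node)->ref);
}

static void my_dev_put(struct klist_node *n)
{
	kref_put(&container_of(n, struct my_dev, node)->ref, my_dev_release);
}

static struct klist my_devs;

static void my_devs_setup(void)
{
	klist_init(&my_devs, my_dev_get, my_dev_put);
}
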
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 10bed1c8c3c3..6a8bc6e06431 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -51,7 +52,7 @@ struct radix_tree_node {
 };
 
 struct radix_tree_path {
-	struct radix_tree_node *node, **slot;
+	struct radix_tree_node *node;
 	int offset;
 };
 
@@ -109,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * success, return zero, with preemption disabled. On error, return -ENOMEM
  * with preemption not disabled.
  */
-int radix_tree_preload(int gfp_mask)
+int radix_tree_preload(unsigned int __nocast gfp_mask)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -227,7 +228,7 @@ out:
 int radix_tree_insert(struct radix_tree_root *root,
 			unsigned long index, void *item)
 {
-	struct radix_tree_node *node = NULL, *tmp, **slot;
+	struct radix_tree_node *node = NULL, *slot;
 	unsigned int height, shift;
 	int offset;
 	int error;
@@ -240,38 +241,42 @@ int radix_tree_insert(struct radix_tree_root *root,
 		return error;
 	}
 
-	slot = &root->rnode;
+	slot = root->rnode;
 	height = root->height;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	offset = 0;			/* uninitialised var warning */
 	while (height > 0) {
-		if (*slot == NULL) {
+		if (slot == NULL) {
 			/* Have to add a child node. */
-			if (!(tmp = radix_tree_node_alloc(root)))
+			if (!(slot = radix_tree_node_alloc(root)))
 				return -ENOMEM;
-			*slot = tmp;
-			if (node)
+			if (node) {
+				node->slots[offset] = slot;
 				node->count++;
+			} else
+				root->rnode = slot;
 		}
 
 		/* Go a level down */
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		node = *slot;
-		slot = (struct radix_tree_node **)(node->slots + offset);
+		node = slot;
+		slot = node->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	if (*slot != NULL)
+	if (slot != NULL)
 		return -EEXIST;
+
 	if (node) {
 		node->count++;
+		node->slots[offset] = item;
 		BUG_ON(tag_get(node, 0, offset));
 		BUG_ON(tag_get(node, 1, offset));
-	}
+	} else
+		root->rnode = item;
 
-	*slot = item;
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_insert);
@@ -286,27 +291,25 @@ EXPORT_SYMBOL(radix_tree_insert);
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
 	unsigned int height, shift;
-	struct radix_tree_node **slot;
+	struct radix_tree_node *slot;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
 		return NULL;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
+	slot = root->rnode;
 
 	while (height > 0) {
-		if (*slot == NULL)
+		if (slot == NULL)
 			return NULL;
 
-		slot = (struct radix_tree_node **)
-			((*slot)->slots +
-				((index >> shift) & RADIX_TREE_MAP_MASK));
+		slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	return *slot;
+	return slot;
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -326,27 +329,27 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, int tag)
 {
 	unsigned int height, shift;
-	struct radix_tree_node **slot;
+	struct radix_tree_node *slot;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
 		return NULL;
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
+	slot = root->rnode;
 
 	while (height > 0) {
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		tag_set(*slot, tag, offset);
-		slot = (struct radix_tree_node **)((*slot)->slots + offset);
-		BUG_ON(*slot == NULL);
+		tag_set(slot, tag, offset);
+		slot = slot->slots[offset];
+		BUG_ON(slot == NULL);
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	return *slot;
+	return slot;
 }
 EXPORT_SYMBOL(radix_tree_tag_set);
 
@@ -367,6 +370,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 			unsigned long index, int tag)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
+	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
 
@@ -376,38 +380,37 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	pathp->slot = &root->rnode;
+	slot = root->rnode;
 
 	while (height > 0) {
 		int offset;
 
-		if (*pathp->slot == NULL)
+		if (slot == NULL)
 			goto out;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
 		pathp[1].offset = offset;
-		pathp[1].node = *pathp[0].slot;
-		pathp[1].slot = (struct radix_tree_node **)
-				(pathp[1].node->slots + offset);
+		pathp[1].node = slot;
+		slot = slot->slots[offset];
 		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
 
-	ret = *pathp[0].slot;
+	ret = slot;
 	if (ret == NULL)
 		goto out;
 
 	do {
 		int idx;
 
-		tag_clear(pathp[0].node, tag, pathp[0].offset);
+		tag_clear(pathp->node, tag, pathp->offset);
 		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp[0].node->tags[tag][idx])
+			if (pathp->node->tags[tag][idx])
 				goto out;
 		}
 		pathp--;
-	} while (pathp[0].node);
+	} while (pathp->node);
 out:
 	return ret;
 }
@@ -415,21 +418,22 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
 
 #ifndef __KERNEL__	/* Only the test harness uses this at present */
 /**
  * radix_tree_tag_get - get a tag on a radix tree node
  * @root:	radix tree root
  * @index:	index key
  * @tag:	tag index
  *
- * Return the search tag corresponging to @index in the radix tree.
+ * Return values:
  *
- * Returns zero if the tag is unset, or if there is no corresponding item
- * in the tree.
+ *  0: tag not present
+ *  1: tag present, set
+ * -1: tag present, unset
  */
 int radix_tree_tag_get(struct radix_tree_root *root,
 			unsigned long index, int tag)
 {
 	unsigned int height, shift;
-	struct radix_tree_node **slot;
+	struct radix_tree_node *slot;
 	int saw_unset_tag = 0;
 
 	height = root->height;
@@ -437,12 +441,12 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 		return 0;
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
+	slot = root->rnode;
 
 	for ( ; ; ) {
 		int offset;
 
-		if (*slot == NULL)
+		if (slot == NULL)
 			return 0;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -451,15 +455,15 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 		 * This is just a debug check. Later, we can bale as soon as
 		 * we see an unset tag.
 		 */
-		if (!tag_get(*slot, tag, offset))
+		if (!tag_get(slot, tag, offset))
 			saw_unset_tag = 1;
 		if (height == 1) {
-			int ret = tag_get(*slot, tag, offset);
+			int ret = tag_get(slot, tag, offset);
 
 			BUG_ON(ret && saw_unset_tag);
-			return ret;
+			return ret ? 1 : -1;
 		}
-		slot = (struct radix_tree_node **)((*slot)->slots + offset);
+		slot = slot->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
@@ -472,17 +476,21 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
 		unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
-	unsigned int shift;
-	unsigned int height = root->height;
+	unsigned int shift, height;
 	struct radix_tree_node *slot;
+	unsigned long i;
+
+	height = root->height;
+	if (height == 0)
+		goto out;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 	slot = root->rnode;
 
-	while (height > 0) {
-		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
+	for ( ; height > 1; height--) {
 
-		for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
+		for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
+				i < RADIX_TREE_MAP_SIZE; i++) {
 			if (slot->slots[i] != NULL)
 				break;
 			index &= ~((1UL << shift) - 1);
@@ -492,22 +500,20 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
 		}
 		if (i == RADIX_TREE_MAP_SIZE)
 			goto out;
-		height--;
-		if (height == 0) {	/* Bottom level: grab some items */
-			unsigned long j = index & RADIX_TREE_MAP_MASK;
 
-			for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
-				index++;
-				if (slot->slots[j]) {
-					results[nr_found++] = slot->slots[j];
-					if (nr_found == max_items)
-						goto out;
-				}
-			}
-		}
 		shift -= RADIX_TREE_MAP_SHIFT;
 		slot = slot->slots[i];
 	}
+
+	/* Bottom level: grab some items */
+	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
+		index++;
+		if (slot->slots[i]) {
+			results[nr_found++] = slot->slots[i];
+			if (nr_found == max_items)
+				goto out;
+		}
+	}
 out:
 	*next_index = index;
 	return nr_found;
@@ -655,6 +661,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_path *orig_pathp;
+	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
 	char tags[RADIX_TREE_TAGS];
@@ -666,25 +673,23 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	pathp->slot = &root->rnode;
+	slot = root->rnode;
 
-	while (height > 0) {
+	for ( ; height > 0; height--) {
 		int offset;
 
-		if (*pathp->slot == NULL)
+		if (slot == NULL)
 			goto out;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
 		pathp[1].offset = offset;
-		pathp[1].node = *pathp[0].slot;
-		pathp[1].slot = (struct radix_tree_node **)
-				(pathp[1].node->slots + offset);
+		pathp[1].node = slot;
+		slot = slot->slots[offset];
 		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
 	}
 
-	ret = *pathp[0].slot;
+	ret = slot;
 	if (ret == NULL)
 		goto out;
 
@@ -704,10 +709,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 		if (tags[tag])
 			continue;
 
-		tag_clear(pathp[0].node, tag, pathp[0].offset);
+		tag_clear(pathp->node, tag, pathp->offset);
 
 		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp[0].node->tags[tag][idx]) {
+			if (pathp->node->tags[tag][idx]) {
 				tags[tag] = 1;
 				nr_cleared_tags--;
 				break;
@@ -715,18 +720,19 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 			}
 		}
 		pathp--;
-	} while (pathp[0].node && nr_cleared_tags);
+	} while (pathp->node && nr_cleared_tags);
 
-	pathp = orig_pathp;
-	*pathp[0].slot = NULL;
-	while (pathp[0].node && --pathp[0].node->count == 0) {
-		pathp--;
-		BUG_ON(*pathp[0].slot == NULL);
-		*pathp[0].slot = NULL;
-		radix_tree_node_free(pathp[1].node);
+	/* Now free the nodes we do not need anymore */
+	for (pathp = orig_pathp; pathp->node; pathp--) {
+		pathp->node->slots[pathp->offset] = NULL;
+		if (--pathp->node->count)
+			goto out;
+
+		/* Node with zero slots in use so free it */
+		radix_tree_node_free(pathp->node);
 	}
-	if (root->rnode == NULL)
-		root->height = 0;
+	root->rnode = NULL;
+	root->height = 0;
 out:
 	return ret;
 }
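
The radix-tree changes above swap the indirect **slot walk for direct node pointers; the exported API and its locking rules are unchanged. For context, a typical insertion pairs radix_tree_preload() with radix_tree_insert() under a spinlock, as the preload comment above describes. A hedged sketch with hypothetical names (my_tree, my_tree_lock, my_store):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);			/* hypothetical */
static spinlock_t my_tree_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */

static int my_store(unsigned long index, void *item)
{
	/* Preallocate nodes while sleeping is still allowed ... */
	int error = radix_tree_preload(GFP_KERNEL);

	if (error)
		return error;
	/* ... then insert atomically under the lock. */
	spin_lock(&my_tree_lock);
	error = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);
	radix_tree_preload_end();
	return error;
}

radix_tree_preload() returns with preemption disabled on success, so radix_tree_preload_end() must be called on every path after a successful preload, even if the insert itself fails with -EEXIST.
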
diff --git a/lib/sort.c b/lib/sort.c
index b73dbb0e7c83..ddc4d35df289 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -6,15 +6,16 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
-void u32_swap(void *a, void *b, int size)
+static void u32_swap(void *a, void *b, int size)
 {
 	u32 t = *(u32 *)a;
 	*(u32 *)a = *(u32 *)b;
 	*(u32 *)b = t;
 }
 
-void generic_swap(void *a, void *b, int size)
+static void generic_swap(void *a, void *b, int size)
 {
 	char t;
 
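
u32_swap() and generic_swap() can become static because callers only ever reach them through sort(): passing a NULL swap callback lets sort() pick u32_swap() for 4-byte elements and fall back to generic_swap() otherwise. A usage sketch (cmp_u32 and sort_keys are hypothetical):

#include <linux/types.h>
#include <linux/sort.h>

/* Hypothetical three-way comparison for an array of u32 keys. */
static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : (x > y ? 1 : 0);
}

static void sort_keys(u32 *keys, size_t n)
{
	/* NULL swap: sort() chooses the element-swap helper itself. */
	sort(keys, n, sizeof(u32), cmp_u32, NULL);
}
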
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
new file mode 100644
index 000000000000..906ad101eab3
--- /dev/null
+++ b/lib/spinlock_debug.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the spinlock/rwlock implementations for
+ * DEBUG_SPINLOCK.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+static void spin_bug(spinlock_t *lock, const char *msg)
+{
+	static long print_once = 1;
+	struct task_struct *owner = NULL;
+
+	if (xchg(&print_once, 0)) {
+		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+			owner = lock->owner;
+		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+			msg, smp_processor_id(), current->comm, current->pid);
+		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+			lock, lock->magic,
+			owner ? owner->comm : "<none>",
+			owner ? owner->pid : -1,
+			lock->owner_cpu);
+		dump_stack();
+#ifdef CONFIG_SMP
+		/*
+		 * We cannot continue on SMP:
+		 */
+//		panic("bad locking");
+#endif
+	}
+}
+
+#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
+
+static inline void debug_spin_lock_before(spinlock_t *lock)
+{
+	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
+	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+							lock, "cpu recursion");
+}
+
+static inline void debug_spin_lock_after(spinlock_t *lock)
+{
+	lock->owner_cpu = raw_smp_processor_id();
+	lock->owner = current;
+}
+
+static inline void debug_spin_unlock(spinlock_t *lock)
+{
+	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
+	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+							lock, "wrong CPU");
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+static void __spin_lock_debug(spinlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_spin_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_spin_lock(spinlock_t *lock)
+{
+	debug_spin_lock_before(lock);
+	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+		__spin_lock_debug(lock);
+	debug_spin_lock_after(lock);
+}
+
+int _raw_spin_trylock(spinlock_t *lock)
+{
+	int ret = __raw_spin_trylock(&lock->raw_lock);
+
+	if (ret)
+		debug_spin_lock_after(lock);
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_spin_unlock(spinlock_t *lock)
+{
+	debug_spin_unlock(lock);
+	__raw_spin_unlock(&lock->raw_lock);
+}
+
+static void rwlock_bug(rwlock_t *lock, const char *msg)
+{
+	static long print_once = 1;
+
+	if (xchg(&print_once, 0)) {
+		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
+			smp_processor_id(), current->comm, current->pid, lock);
+		dump_stack();
+#ifdef CONFIG_SMP
+		/*
+		 * We cannot continue on SMP:
+		 */
+		panic("bad locking");
+#endif
+	}
+}
+
+#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+
+static void __read_lock_debug(rwlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_read_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_read_lock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
+		__read_lock_debug(lock);
+}
+
+int _raw_read_trylock(rwlock_t *lock)
+{
+	int ret = __raw_read_trylock(&lock->raw_lock);
+
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_read_unlock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	__raw_read_unlock(&lock->raw_lock);
+}
+
+static inline void debug_write_lock_before(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
+	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+							lock, "cpu recursion");
+}
+
+static inline void debug_write_lock_after(rwlock_t *lock)
+{
+	lock->owner_cpu = raw_smp_processor_id();
+	lock->owner = current;
+}
+
+static inline void debug_write_unlock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
+	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+							lock, "wrong CPU");
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+static void __write_lock_debug(rwlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_write_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_write_lock(rwlock_t *lock)
+{
+	debug_write_lock_before(lock);
+	if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
+		__write_lock_debug(lock);
+	debug_write_lock_after(lock);
+}
+
+int _raw_write_trylock(rwlock_t *lock)
+{
+	int ret = __raw_write_trylock(&lock->raw_lock);
+
+	if (ret)
+		debug_write_lock_after(lock);
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_write_unlock(rwlock_t *lock)
+{
+	debug_write_unlock(lock);
+	__raw_write_unlock(&lock->raw_lock);
+}
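
One concrete payoff of these checks: a double acquisition by the same task, which a non-debug spinlock turns into a silent deadlock, is now reported. A hypothetical reproducer (my_lock and broken() are illustrative only):

#include <linux/spinlock.h>

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */

static void broken(void)
{
	spin_lock(&my_lock);
	/*
	 * Second acquisition by the owner: debug_spin_lock_before()
	 * trips SPIN_BUG_ON(lock->owner == current, ...) and prints a
	 * "BUG: spinlock recursion" report with the owner task and
	 * CPU; __spin_lock_debug() then keeps spinning and flags a
	 * suspected lockup after loops_per_jiffy * HZ trylock
	 * attempts, on the order of a second.
	 */
	spin_lock(&my_lock);
}

Note that spin_bug() prints only once per boot, via the xchg() on its static print_once flag, so a single bad lock cannot flood the log with identical stack traces.
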