Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      9
-rw-r--r--  lib/Makefile           2
-rw-r--r--  lib/ioremap.c         92
-rw-r--r--  lib/list_debug.c      76
-rw-r--r--  lib/rbtree.c           6
-rw-r--r--  lib/rwsem.c            2
-rw-r--r--  lib/spinlock_debug.c  15
-rw-r--r--  lib/ts_fsm.c          10
8 files changed, 200 insertions(+), 12 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f5ca72599f..f9ae75cc0145 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -320,6 +320,15 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_LIST
+	bool "Debug linked list manipulation"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on extended checks in the linked-list
+	  walking routines.
+
+	  If unsure, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH)
diff --git a/lib/Makefile b/lib/Makefile
index ef1d37afbbb6..ddf3e676e1f4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -7,6 +7,7 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
 	 sha1.o
 
+lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
@@ -28,6 +29,7 @@ lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
diff --git a/lib/ioremap.c b/lib/ioremap.c
new file mode 100644
index 000000000000..99fa277f9f7b
--- /dev/null
+++ b/lib/ioremap.c
@@ -0,0 +1,92 @@
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
+		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+	pte_t *pte;
+	unsigned long pfn;
+
+	pfn = phys_addr >> PAGE_SHIFT;
+	pte = pte_alloc_kernel(pmd, addr);
+	if (!pte)
+		return -ENOMEM;
+	do {
+		BUG_ON(!pte_none(*pte));
+		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	return 0;
+}
+
+static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	phys_addr -= addr;
+	pmd = pmd_alloc(&init_mm, pud, addr);
+	if (!pmd)
+		return -ENOMEM;
+	do {
+		next = pmd_addr_end(addr, end);
+		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
+			return -ENOMEM;
+	} while (pmd++, addr = next, addr != end);
+	return 0;
+}
+
+static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	phys_addr -= addr;
+	pud = pud_alloc(&init_mm, pgd, addr);
+	if (!pud)
+		return -ENOMEM;
+	do {
+		next = pud_addr_end(addr, end);
+		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
+			return -ENOMEM;
+	} while (pud++, addr = next, addr != end);
+	return 0;
+}
+
+int ioremap_page_range(unsigned long addr,
+		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+	pgd_t *pgd;
+	unsigned long start;
+	unsigned long next;
+	int err;
+
+	BUG_ON(addr >= end);
+
+	start = addr;
+	phys_addr -= addr;
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
+		if (err)
+			break;
+	} while (pgd++, addr = next, addr != end);
+
+	flush_cache_vmap(start, end);
+
+	return err;
+}
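
Note on usage (not part of the patch): ioremap_page_range() only populates the kernel page tables for a virtual range the caller has already reserved. A minimal sketch of how an architecture's ioremap() might drive it follows; the function name my_arch_ioremap(), the choice of PAGE_KERNEL_NOCACHE, and the omission of alignment and resource checks are assumptions for illustration.

/* Hypothetical caller of ioremap_page_range(); illustrative only. */
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/pgtable.h>

void __iomem *my_arch_ioremap(unsigned long phys_addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long vaddr;

	/* Reserve a virtual window in the vmalloc/ioremap address space. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* Walk pgd/pud/pmd/pte and install the mapping (assumes page-aligned arguments). */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       PAGE_KERNEL_NOCACHE)) {
		vunmap((void *)vaddr);
		return NULL;
	}

	return (void __iomem *)vaddr;
}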
diff --git a/lib/list_debug.c b/lib/list_debug.c
new file mode 100644
index 000000000000..7ba9d823d388
--- /dev/null
+++ b/lib/list_debug.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2006, Red Hat, Inc., Dave Jones
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the linked list implementations for
+ * DEBUG_LIST.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+
+void __list_add(struct list_head *new,
+		struct list_head *prev,
+		struct list_head *next)
+{
+	if (unlikely(next->prev != prev)) {
+		printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
+			prev, next->prev);
+		BUG();
+	}
+	if (unlikely(prev->next != next)) {
+		printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
+			next, prev->next);
+		BUG();
+	}
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+EXPORT_SYMBOL(__list_add);
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+void list_add(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head, head->next);
+}
+EXPORT_SYMBOL(list_add);
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+void list_del(struct list_head *entry)
+{
+	if (unlikely(entry->prev->next != entry)) {
+		printk(KERN_ERR "list_del corruption. prev->next should be %p, "
+				"but was %p\n", entry, entry->prev->next);
+		BUG();
+	}
+	if (unlikely(entry->next->prev != entry)) {
+		printk(KERN_ERR "list_del corruption. next->prev should be %p, "
+				"but was %p\n", entry, entry->next->prev);
+		BUG();
+	}
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+}
+EXPORT_SYMBOL(list_del);
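
To make the effect of CONFIG_DEBUG_LIST concrete, here is a hedged illustration (not part of the patch): if a stray write clobbers a neighbouring node's back-pointer, the next list_add() on that list trips the next->prev check above, prints the corruption message, and calls BUG() instead of silently relinking the damaged list. The structure and function names below are made up for the example.

/* Illustrative only; demo_item and demo_corruption() are not kernel code. */
#include <linux/list.h>

struct demo_item {
	struct list_head node;
	int value;
};

static LIST_HEAD(demo_list);

static void demo_corruption(struct demo_item *a, struct demo_item *b)
{
	list_add(&a->node, &demo_list);

	/* Simulate a use-after-free or buffer overrun hitting a->node.prev. */
	a->node.prev = NULL;

	/*
	 * The checked __list_add() sees next->prev (NULL) != head and
	 * reports "list_add corruption. next->prev should be ..." before
	 * calling BUG(), rather than quietly corrupting the list further.
	 */
	list_add(&b->node, &demo_list);
}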
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1e55ba1c2edf..48499c2d88cc 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -322,6 +322,9 @@ struct rb_node *rb_next(struct rb_node *node)
 {
 	struct rb_node *parent;
 
+	if (rb_parent(node) == node)
+		return NULL;
+
 	/* If we have a right-hand child, go down and then left as far
 	   as we can. */
 	if (node->rb_right) {
@@ -348,6 +351,9 @@ struct rb_node *rb_prev(struct rb_node *node)
 {
 	struct rb_node *parent;
 
+	if (rb_parent(node) == node)
+		return NULL;
+
 	/* If we have a left-hand child, go down and then right as far
 	   as we can. */
 	if (node->rb_left) {
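
The two hunks above guard rb_next()/rb_prev() against being called on a node that is not currently linked into a tree; by convention such a node is marked as its own parent (as RB_CLEAR_NODE() does). A hedged sketch of the caller pattern this protects, with made-up names, is:

/* Illustrative only; demo_entry and demo_next() are not kernel code. */
#include <linux/rbtree.h>

struct demo_entry {
	struct rb_node node;
	unsigned long key;
};

static struct demo_entry *demo_next(struct demo_entry *cur)
{
	struct rb_node *next;

	/*
	 * If cur was erased earlier and marked unlinked with
	 * RB_CLEAR_NODE(&cur->node), rb_parent(node) == node holds and the
	 * patched rb_next() returns NULL instead of following stale
	 * parent/child pointers.
	 */
	next = rb_next(&cur->node);
	if (!next)
		return NULL;

	return rb_entry(next, struct demo_entry, node);
}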
diff --git a/lib/rwsem.c b/lib/rwsem.c
index b322421c2969..901d0e7da892 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 /*
  * wait for a lock to be granted
  */
-static inline struct rw_semaphore *
+static struct rw_semaphore *
 rwsem_down_failed_common(struct rw_semaphore *sem,
 			struct rwsem_waiter *waiter, signed long adjustment)
 {
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 58c577dd82e5..dafaf1de2491 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -99,11 +99,12 @@ static inline void debug_spin_unlock(spinlock_t *lock)
 
 static void __spin_lock_debug(spinlock_t *lock)
 {
-	int print_once = 1;
 	u64 i;
+	u64 loops = loops_per_jiffy * HZ;
+	int print_once = 1;
 
 	for (;;) {
-		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+		for (i = 0; i < loops; i++) {
 			if (__raw_spin_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
@@ -165,11 +166,12 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 #if 0		/* __write_lock_debug() can lock up - maybe this can too? */
 static void __read_lock_debug(rwlock_t *lock)
 {
-	int print_once = 1;
 	u64 i;
+	u64 loops = loops_per_jiffy * HZ;
+	int print_once = 1;
 
 	for (;;) {
-		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+		for (i = 0; i < loops; i++) {
 			if (__raw_read_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
@@ -239,11 +241,12 @@ static inline void debug_write_unlock(rwlock_t *lock)
 #if 0		/* This can cause lockups */
 static void __write_lock_debug(rwlock_t *lock)
 {
-	int print_once = 1;
 	u64 i;
+	u64 loops = loops_per_jiffy * HZ;
+	int print_once = 1;
 
 	for (;;) {
-		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+		for (i = 0; i < loops; i++) {
 			if (__raw_write_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 87847c2ae9e2..af575b61526b 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -12,13 +12,13 @@
  *
  * A finite state machine consists of n states (struct ts_fsm_token)
  * representing the pattern as a finite automation. The data is read
- * sequentially on a octet basis. Every state token specifies the number
+ * sequentially on an octet basis. Every state token specifies the number
  * of recurrences and the type of value accepted which can be either a
  * specific character or ctype based set of characters. The available
  * type of recurrences include 1, (0|1), [0 n], and [1 n].
  *
- * The algorithm differs between strict/non-strict mode specyfing
- * whether the pattern has to start at the first octect. Strict mode
+ * The algorithm differs between strict/non-strict mode specifying
+ * whether the pattern has to start at the first octet. Strict mode
  * is enabled by default and can be disabled by inserting
  * TS_FSM_HEAD_IGNORE as the first token in the chain.
  *
@@ -44,7 +44,7 @@ struct ts_fsm
 #define _W	0x200 /* wildcard */
 
 /* Map to _ctype flags and some magic numbers */
-static u16 token_map[TS_FSM_TYPE_MAX+1] = {
+static const u16 token_map[TS_FSM_TYPE_MAX+1] = {
 	[TS_FSM_SPECIFIC] = 0,
 	[TS_FSM_WILDCARD] = _W,
 	[TS_FSM_CNTRL] = _C,
@@ -61,7 +61,7 @@ static u16 token_map[TS_FSM_TYPE_MAX+1] = {
 	[TS_FSM_ASCII] = _A,
 };
 
-static u16 token_lookup_tbl[256] = {
+static const u16 token_lookup_tbl[256] = {
 _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C,		/* 0- 3 */
 _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C,		/* 4- 7 */
 _W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S,	/* 8- 11 */
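
The comment block patched above describes the FSM matcher in terms of tokens with a type and a recurrence. As a rough usage sketch (not part of this patch), such a pattern can be handed to the generic textsearch API as an array of struct ts_fsm_token; the pattern below ("id" followed by one or more digits, anchored at the start of the buffer since strict mode is the default) and the function name demo_fsm_match() are illustrative assumptions.

/* Illustrative only; demo_fsm_match() is not kernel code. */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/textsearch.h>
#include <linux/textsearch_fsm.h>

static int demo_fsm_match(const void *data, unsigned int len)
{
	/* "id" followed by one or more digits, matched in strict mode. */
	static const struct ts_fsm_token pattern[] = {
		{ .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'i' },
		{ .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'd' },
		{ .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },
	};
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("fsm", pattern, sizeof(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* Returns the match offset, or UINT_MAX if the pattern is not found. */
	pos = textsearch_find_continuous(conf, &state, data, len);
	textsearch_destroy(conf);

	return pos != UINT_MAX ? (int)pos : -1;
}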