author    David Woodhouse <David.Woodhouse@intel.com>  2008-07-11 09:36:25 -0400
committer David Woodhouse <David.Woodhouse@intel.com>  2008-07-11 09:36:25 -0400
commit    a8931ef380c92d121ae74ecfb03b2d63f72eea6f
tree      980fb6b019e11e6cb1ece55b7faff184721a8053 /lib
parent    90574d0a4d4b73308ae54a2a57a4f3f1fa98e984
parent    e5a5816f7875207cb0a0a7032e39a4686c5e10a4
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'lib')
 lib/Kconfig.kgdb   |  16
 lib/bitmap.c       |  16
 lib/bitrev.c       |   3
 lib/bug.c          |   2
 lib/debugobjects.c |  15
 lib/devres.c       |   2
 lib/div64.c        |  10
 lib/hexdump.c      |   7
 lib/kernel_lock.c  | 120
 lib/lmb.c          |  46
 lib/parser.c       |  32
 lib/radix-tree.c   | 122
 lib/ts_bm.c        |   2
 lib/vsprintf.c     | 128
 14 files changed, 324 insertions(+), 197 deletions(-)
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab09..a5d4b1dac2a5 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,10 @@
 
+config HAVE_ARCH_KGDB_SHADOW_INFO
+        bool
+
+config HAVE_ARCH_KGDB
+        bool
+
 menuconfig KGDB
         bool "KGDB: kernel debugging with remote gdb"
         select FRAME_POINTER
@@ -10,15 +16,10 @@ menuconfig KGDB
           at http://kgdb.sourceforge.net as well as in DocBook form
           in Documentation/DocBook/. If unsure, say N.
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-        bool
-
-config HAVE_ARCH_KGDB
-        bool
+if KGDB
 
 config KGDB_SERIAL_CONSOLE
         tristate "KGDB: use kgdb over the serial console"
-        depends on KGDB
         select CONSOLE_POLL
         select MAGIC_SYSRQ
         default y
@@ -28,7 +29,6 @@ config KGDB_SERIAL_CONSOLE
 
 config KGDB_TESTS
         bool "KGDB: internal test suite"
-        depends on KGDB
         default n
         help
           This is a kgdb I/O module specifically designed to test
@@ -56,3 +56,5 @@ config KGDB_TESTS_BOOT_STRING
           boot. See the drivers/misc/kgdbts.c for detailed
           information about other strings you could use beyond the
           default of V1F100.
+
+endif # KGDB
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c4cb48f77f0c..482df94ea21e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,22 +316,6 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
- * bitmap_scnprintf_len - return buffer length needed to convert
- * bitmap to an ASCII hex string.
- * @len: number of bits to be converted
- */
-int bitmap_scnprintf_len(unsigned int len)
-{
-        /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
-        int bitslen = ALIGN(len, CHUNKSZ);
-        int wordlen = CHUNKSZ / 4;
-        int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
-
-        return buflen;
-}
-EXPORT_SYMBOL(bitmap_scnprintf_len);
-
-/**
  * __bitmap_parse - convert an ASCII hex string into a bitmap.
  * @buf: pointer to buffer containing string.
  * @buflen: buffer size in bytes. If string is smaller than this
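With bitmap_scnprintf_len() removed, callers size the output buffer themselves. A standalone userspace sketch of the arithmetic the deleted helper encoded; CHUNKSZ, ALIGN_UP and the function name are illustrative stand-ins, not kernel API:

#include <stdio.h>

#define CHUNKSZ 32
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* 32-bit chunks print as 8 hex digits plus a separator or NUL */
static int bitmap_hex_buflen(unsigned int nbits)
{
        int chunks = ALIGN_UP(nbits, CHUNKSZ) / CHUNKSZ;
        return chunks * (CHUNKSZ / 4 + 1);
}

int main(void)
{
        /* 64 bits -> 18: "ffffffff,ffffffff" plus the NUL */
        printf("%d\n", bitmap_hex_buflen(64));
        return 0;
}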
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f881..3956203456d4 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
 };
 EXPORT_SYMBOL_GPL(byte_rev_table);
 
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
 {
         return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
 }
+EXPORT_SYMBOL(bitrev16);
 
 /**
  * bitrev32 - reverse the order of bits in a u32 value
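The hunk un-inlines bitrev16() so modules can link against it. A userspace sketch of the composition it implements, with bitrev8() computed bit by bit here instead of through the kernel's byte_rev_table:

#include <stdio.h>
#include <stdint.h>

static uint8_t bitrev8(uint8_t x)
{
        uint8_t r = 0;
        for (int i = 0; i < 8; i++)
                r = (r << 1) | ((x >> i) & 1);  /* mirror bit i into place */
        return r;
}

/* swap the bit-reversed halves, exactly as the kernel function does */
static uint16_t bitrev16(uint16_t x)
{
        return (uint16_t)((bitrev8(x & 0xff) << 8) | bitrev8(x >> 8));
}

int main(void)
{
        printf("%04x\n", bitrev16(0x8001));  /* palindrome: stays 0x8001 */
        printf("%04x\n", bitrev16(0x0001));  /* lowest bit -> 0x8000 */
        return 0;
}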
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f55787..bfeafd60ee9f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
  */
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/sched.h>
 
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
                         (void *)bugaddr);
 
                 show_regs(regs);
+                add_taint(TAINT_WARN);
                 return BUG_TRAP_TYPE_WARN;
         }
 
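add_taint(TAINT_WARN) records that a WARN fired before execution continues, so later oops reports show the kernel as tainted. A minimal model of the mechanism this hooks into; the bit numbers below are illustrative, not the kernel's definitions:

#include <stdio.h>

enum { TAINT_DIE = 7, TAINT_WARN = 9 };  /* illustrative bit positions */
static unsigned long tainted_mask;

/* each taint reason is one sticky bit in a global mask */
static void add_taint(unsigned flag)
{
        tainted_mask |= 1UL << flag;
}

int main(void)
{
        add_taint(TAINT_WARN);  /* what report_bug() now does on WARN */
        printf("tainted: %#lx (W=%d)\n", tainted_mask,
               !!(tainted_mask & (1UL << TAINT_WARN)));
        return 0;
}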
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..85b18d79be89 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
         gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
         struct debug_obj *new;
+        unsigned long flags;
 
         if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
                 return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
                 if (!new)
                         return obj_pool_free;
 
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 hlist_add_head(&new->node, &obj_pool);
                 obj_pool_free++;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
         }
         return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
         struct debug_obj *obj = NULL;
-        int retry = 0;
 
-repeat:
         spin_lock(&pool_lock);
         if (obj_pool.first) {
                 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
         }
         spin_unlock(&pool_lock);
 
-        if (fill_pool() && !obj && !retry++)
-                goto repeat;
-
         return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
         struct debug_obj *obj;
         unsigned long flags;
 
+        fill_pool();
+
         db = get_bucket((unsigned long) addr);
 
         spin_lock_irqsave(&db->lock, flags);
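The net effect of these hunks is a reordered allocation path: the pool is topped up once on entry to __debug_object_init(), and alloc_object() no longer loops with a retry label. A simplified userspace model of that flow, with malloc and a plain array standing in for the kernel's pool, list and locking:

#include <stdio.h>
#include <stdlib.h>

#define POOL_MIN 3
static void *pool[16];
static int pool_free;

static void fill_pool(void)            /* refill happens up front ... */
{
        while (pool_free < POOL_MIN) {
                void *p = malloc(32);  /* may fail; the pool just stays low */
                if (!p)
                        return;
                pool[pool_free++] = p;
        }
}

static void *alloc_object(void)        /* ... so no repeat/retry here */
{
        return pool_free ? pool[--pool_free] : NULL;
}

int main(void)
{
        fill_pool();                    /* mirrors __debug_object_init() */
        void *obj = alloc_object();
        printf("got %p, %d left\n", obj, pool_free);
        return 0;
}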
diff --git a/lib/devres.c b/lib/devres.c
index 26c87c49d776..72c8909006da 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -2,7 +2,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 
-static void devm_ioremap_release(struct device *dev, void *res)
+void devm_ioremap_release(struct device *dev, void *res)
 {
         iounmap(*(void __iomem **)res);
 }
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+        return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
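iter_div_u64_rem() itself is a trivial wrapper; the interesting part is the contract of the inline helper it exports. A userspace sketch of that iterative division, assuming the quotient stays small (the typical use is timespec-style normalization):

#include <stdio.h>
#include <stdint.h>

static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
        uint32_t quot = 0;

        /* repeated subtraction: cheap when only a few iterations run */
        while (dividend >= divisor) {
                dividend -= divisor;
                quot++;
        }
        *remainder = dividend;
        return quot;
}

int main(void)
{
        uint64_t rem;
        uint32_t q = iter_div_u64_rem(2500000000ULL, 1000000000U, &rem);
        printf("q=%u rem=%llu\n", q, (unsigned long long)rem);  /* q=2 rem=500000000 */
        return 0;
}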
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 343546550dc9..f07c0db81d26 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -12,6 +12,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 
+const char hex_asc[] = "0123456789abcdef";
+EXPORT_SYMBOL(hex_asc);
+
 /**
  * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
  * @buf: data blob to dump
@@ -93,8 +96,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
                 for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
                      j++) {
                         ch = ptr[j];
-                        linebuf[lx++] = hex_asc(ch >> 4);
-                        linebuf[lx++] = hex_asc(ch & 0x0f);
+                        linebuf[lx++] = hex_asc_hi(ch);
+                        linebuf[lx++] = hex_asc_lo(ch);
                         linebuf[lx++] = ' ';
                 }
                 ascii_column = 3 * rowsize + 2;
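hex_asc_hi()/hex_asc_lo() index the newly exported hex_asc[] table by high and low nibble; in the kernel they live in <linux/kernel.h> as macros. A self-contained sketch of the pair:

#include <stdio.h>

const char hex_asc[] = "0123456789abcdef";
#define hex_asc_lo(x) hex_asc[(x) & 0x0f]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]

int main(void)
{
        unsigned char ch = 0x3e;
        /* prints "3e": high nibble first, then low nibble */
        printf("%c%c\n", hex_asc_hi(ch), hex_asc_lo(ch));
        return 0;
}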
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cd3e82530b03..01a3c22c1b5a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -11,79 +11,121 @@
 #include <linux/semaphore.h>
 
 /*
- * The 'big kernel semaphore'
+ * The 'big kernel lock'
  *
- * This mutex is taken and released recursively by lock_kernel()
+ * This spinlock is taken and released recursively by lock_kernel()
  * and unlock_kernel(). It is transparently dropped and reacquired
  * over schedule(). It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
- * Note: code locked by this semaphore will only be serialized against
- * other code using the same locking facility. The code guarantees that
- * the task remains on the same CPU.
- *
 * Don't use in new code.
  */
-static DECLARE_MUTEX(kernel_sem);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
 
 /*
- * Re-acquire the kernel semaphore.
+ * Acquire/release the underlying lock from the scheduler.
  *
- * This function is called with preemption off.
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
  *
- * We are executing in schedule() so the code must be extremely careful
- * about recursion, both due to the down() and due to the enabling of
- * preemption. schedule() will re-check the preemption flag after
- * reacquiring the semaphore.
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-        struct task_struct *task = current;
-        int saved_lock_depth = task->lock_depth;
-
-        BUG_ON(saved_lock_depth < 0);
-
-        task->lock_depth = -1;
-        preempt_enable_no_resched();
-
-        down(&kernel_sem);
-
+        while (!_raw_spin_trylock(&kernel_flag)) {
+                if (test_thread_flag(TIF_NEED_RESCHED))
+                        return -EAGAIN;
+                cpu_relax();
+        }
         preempt_disable();
-        task->lock_depth = saved_lock_depth;
-
         return 0;
 }
 
 void __lockfunc __release_kernel_lock(void)
 {
-        up(&kernel_sem);
+        _raw_spin_unlock(&kernel_flag);
+        preempt_enable_no_resched();
 }
 
 /*
- * Getting the big kernel semaphore.
+ * These are the BKL spinlocks - we try to be polite about preemption.
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
  */
-void __lockfunc lock_kernel(void)
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
 {
-        struct task_struct *task = current;
-        int depth = task->lock_depth + 1;
+        preempt_disable();
+        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+                /*
+                 * If preemption was disabled even before this
+                 * was called, there's nothing we can be polite
+                 * about - just spin.
+                 */
+                if (preempt_count() > 1) {
+                        _raw_spin_lock(&kernel_flag);
+                        return;
+                }
 
-        if (likely(!depth))
                 /*
-                 * No recursion worries - we set up lock_depth _after_
+                 * Otherwise, let's wait for the kernel lock
+                 * with preemption enabled..
                 */
-                down(&kernel_sem);
+                do {
+                        preempt_enable();
+                        while (spin_is_locked(&kernel_flag))
+                                cpu_relax();
+                        preempt_disable();
+                } while (!_raw_spin_trylock(&kernel_flag));
+        }
+}
 
-        task->lock_depth = depth;
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+        _raw_spin_lock(&kernel_flag);
 }
+#endif
 
-void __lockfunc unlock_kernel(void)
+static inline void __unlock_kernel(void)
 {
-        struct task_struct *task = current;
+        /*
+         * the BKL is not covered by lockdep, so we open-code the
+         * unlocking sequence (and thus avoid the dep-chain ops):
+         */
+        _raw_spin_unlock(&kernel_flag);
+        preempt_enable();
+}
 
-        BUG_ON(task->lock_depth < 0);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPU's.
+ */
+void __lockfunc lock_kernel(void)
+{
+        int depth = current->lock_depth+1;
+        if (likely(!depth))
+                __lock_kernel();
+        current->lock_depth = depth;
+}
 
-        if (likely(--task->lock_depth < 0))
-                up(&kernel_sem);
+void __lockfunc unlock_kernel(void)
+{
+        BUG_ON(current->lock_depth < 0);
+        if (likely(--current->lock_depth < 0))
+                __unlock_kernel();
 }
 
 EXPORT_SYMBOL(lock_kernel);
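The recursion accounting survives the semaphore-to-spinlock swap unchanged: lock_depth starts at -1 and only the outermost lock_kernel()/unlock_kernel() pair touches the real lock. A userspace model of that depth counting, with a pthread mutex and a thread-local standing in for the spinlock and task->lock_depth:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;

static void lock_kernel(void)
{
        int depth = lock_depth + 1;
        if (depth == 0)                 /* outermost acquire only */
                pthread_mutex_lock(&kernel_flag);
        lock_depth = depth;
}

static void unlock_kernel(void)
{
        if (--lock_depth < 0)           /* matching outermost release */
                pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
        lock_kernel();
        lock_kernel();                          /* recursive: no second lock */
        printf("depth=%d\n", lock_depth);       /* 1 */
        unlock_kernel();
        unlock_kernel();
        printf("depth=%d\n", lock_depth);       /* -1, lock released */
        return 0;
}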
diff --git a/lib/lmb.c b/lib/lmb.c
index 83287d3869a3..5d7b9286503e 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,31 +19,43 @@
 
 struct lmb lmb;
 
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+        if (p && strstr(p, "debug"))
+                lmb_debug = 1;
+        return 0;
+}
+early_param("lmb", early_lmb);
+
 void lmb_dump_all(void)
 {
-#ifdef DEBUG
         unsigned long i;
 
-        pr_debug("lmb_dump_all:\n");
-        pr_debug("    memory.cnt = 0x%lx\n", lmb.memory.cnt);
-        pr_debug("    memory.size = 0x%llx\n",
+        if (!lmb_debug)
+                return;
+
+        pr_info("lmb_dump_all:\n");
+        pr_info("    memory.cnt = 0x%lx\n", lmb.memory.cnt);
+        pr_info("    memory.size = 0x%llx\n",
             (unsigned long long)lmb.memory.size);
         for (i=0; i < lmb.memory.cnt ;i++) {
-                pr_debug("    memory.region[0x%x].base = 0x%llx\n",
+                pr_info("    memory.region[0x%lx].base = 0x%llx\n",
                     i, (unsigned long long)lmb.memory.region[i].base);
-                pr_debug("                  .size = 0x%llx\n",
+                pr_info("                  .size = 0x%llx\n",
                     (unsigned long long)lmb.memory.region[i].size);
         }
 
-        pr_debug("    reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
-        pr_debug("    reserved.size = 0x%lx\n", lmb.reserved.size);
+        pr_info("    reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+        pr_info("    reserved.size = 0x%llx\n",
+                (unsigned long long)lmb.memory.size);
         for (i=0; i < lmb.reserved.cnt ;i++) {
-                pr_debug("    reserved.region[0x%x].base = 0x%llx\n",
+                pr_info("    reserved.region[0x%lx].base = 0x%llx\n",
                     i, (unsigned long long)lmb.reserved.region[i].base);
-                pr_debug("                  .size = 0x%llx\n",
+                pr_info("                  .size = 0x%llx\n",
                     (unsigned long long)lmb.reserved.region[i].size);
         }
-#endif /* DEBUG */
 }
 
 static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -286,8 +298,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
                 j = lmb_overlaps_region(&lmb.reserved, base, size);
                 if (j < 0) {
                         /* this area isn't reserved, take it */
-                        if (lmb_add_region(&lmb.reserved, base,
-                                           lmb_align_up(size, align)) < 0)
+                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                 base = ~(u64)0;
                         return base;
                 }
@@ -333,6 +344,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
         struct lmb_region *mem = &lmb.memory;
         int i;
 
+        BUG_ON(0 == size);
+
+        size = lmb_align_up(size, align);
+
         for (i = 0; i < mem->cnt; i++) {
                 u64 ret = lmb_alloc_nid_region(&mem->region[i],
                                                nid_range,
@@ -370,6 +385,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
         BUG_ON(0 == size);
 
+        size = lmb_align_up(size, align);
+
         /* On some platforms, make sure we allocate lowmem */
         /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
         if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +410,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
                 j = lmb_overlaps_region(&lmb.reserved, base, size);
                 if (j < 0) {
                         /* this area isn't reserved, take it */
-                        if (lmb_add_region(&lmb.reserved, base,
-                                           lmb_align_up(size, align)) < 0)
+                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                 return 0;
                         return base;
                 }
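Both allocators now round the size up once on entry instead of at each reservation, which keeps the reserved region consistent with the area that was searched. A sketch of the power-of-two round-up that lmb_align_up() performs, reimplemented here for illustration:

#include <stdio.h>
#include <stdint.h>

/* round addr up to the next multiple of align; align must be a power of two */
static uint64_t lmb_align_up(uint64_t addr, uint64_t align)
{
        return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
        printf("%llu\n", (unsigned long long)lmb_align_up(100, 16)); /* 112 */
        printf("%llu\n", (unsigned long long)lmb_align_up(96, 16));  /* 96 */
        return 0;
}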
diff --git a/lib/parser.c b/lib/parser.c
index 703c8c13b346..4f0cbc03e0e8 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -182,18 +182,25 @@ int match_hex(substring_t *s, int *result)
 }
 
 /**
- * match_strcpy: - copies the characters from a substring_t to a string
- * @to: string to copy characters to.
- * @s: &substring_t to copy
+ * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * @dest: where to copy to
+ * @src: &substring_t to copy
+ * @size: size of destination buffer
  *
- * Description: Copies the set of characters represented by the given
- * &substring_t @s to the c-style string @to. Caller guarantees that @to is
- * large enough to hold the characters of @s.
+ * Description: Copy the characters in &substring_t @src to the
+ * c-style string @dest. Copy no more than @size - 1 characters, plus
+ * the terminating NUL. Return length of @src.
  */
-void match_strcpy(char *to, const substring_t *s)
+size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
 {
-        memcpy(to, s->from, s->to - s->from);
-        to[s->to - s->from] = '\0';
+        size_t ret = src->to - src->from;
+
+        if (size) {
+                size_t len = ret >= size ? size - 1 : ret;
+                memcpy(dest, src->from, len);
+                dest[len] = '\0';
+        }
+        return ret;
 }
 
 /**
@@ -206,9 +213,10 @@ void match_strcpy(char *to, const substring_t *s)
  */
 char *match_strdup(const substring_t *s)
 {
-        char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+        size_t sz = s->to - s->from + 1;
+        char *p = kmalloc(sz, GFP_KERNEL);
         if (p)
-                match_strcpy(p, s);
+                match_strlcpy(p, s, sz);
         return p;
 }
 
@@ -216,5 +224,5 @@ EXPORT_SYMBOL(match_token);
 EXPORT_SYMBOL(match_int);
 EXPORT_SYMBOL(match_octal);
 EXPORT_SYMBOL(match_hex);
-EXPORT_SYMBOL(match_strcpy);
+EXPORT_SYMBOL(match_strlcpy);
 EXPORT_SYMBOL(match_strdup);
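match_strlcpy() adopts the strlcpy() contract: copy at most size - 1 bytes plus the NUL, and return the full source length so callers can detect truncation. A userspace demo; substring_t is re-declared locally just for the example:

#include <stdio.h>
#include <string.h>

typedef struct { const char *from, *to; } substring_t;

static size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
        size_t ret = src->to - src->from;       /* full source length */

        if (size) {
                size_t len = ret >= size ? size - 1 : ret;
                memcpy(dest, src->from, len);
                dest[len] = '\0';               /* always NUL-terminated */
        }
        return ret;
}

int main(void)
{
        const char *opt = "user=verylongname";
        substring_t s = { opt + 5, opt + strlen(opt) };
        char buf[8];

        size_t need = match_strlcpy(buf, &s, sizeof(buf));
        /* prints: "verylon" truncated=1 */
        printf("\"%s\" truncated=%d\n", buf, need >= sizeof(buf));
        return 0;
}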
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1a..56ec21a7f73d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
  * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
         return root->gfp_mask & __GFP_BITS_MASK;
 }
 
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+                int offset)
+{
+        __set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+                int offset)
+{
+        __clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+                int offset)
+{
+        return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+        root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+        root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+        root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+        return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+        int idx;
+        for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+                if (node->tags[tag][idx])
+                        return 1;
+        }
+        return 0;
+}
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
         struct radix_tree_node *node =
                         container_of(head, struct radix_tree_node, rcu_head);
+
+        /*
+         * must only free zeroed nodes into the slab. radix_tree_shrink
+         * can leave us with a non-NULL entry in the first slot, so clear
+         * that here to make sure.
+         */
+        tag_clear(node, 0, 0);
+        tag_clear(node, 1, 0);
+        node->slots[0] = NULL;
+        node->count = 0;
+
         kmem_cache_free(radix_tree_node_cachep, node);
 }
 
@@ -165,59 +227,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_preload);
 
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
-                int offset)
-{
-        __set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
-                int offset)
-{
-        __clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
-                int offset)
-{
-        return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
-        root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
-        root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
-        root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
-        return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
-        int idx;
-        for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-                if (node->tags[tag][idx])
-                        return 1;
-        }
-        return 0;
-}
-
 /*
  * Return the maximum key which can be store into a
  * radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
                         newptr = radix_tree_ptr_to_indirect(newptr);
                 root->rnode = newptr;
                 root->height--;
-                /* must only free zeroed nodes into the slab */
-                tag_clear(to_free, 0, 0);
-                tag_clear(to_free, 1, 0);
-                to_free->slots[0] = NULL;
-                to_free->count = 0;
                 radix_tree_node_free(to_free);
         }
 }
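The moved helpers rely on one trick worth spelling out: the tree root stores its per-tree tag bits in the otherwise unused high bits of its gfp_mask, above __GFP_BITS_SHIFT, so setting a tag never disturbs the allocation flags. A sketch with a simplified shift constant (the real value is arch/config dependent):

#include <stdio.h>

#define __GFP_BITS_SHIFT 21                     /* illustrative */
#define __GFP_BITS_MASK ((1u << __GFP_BITS_SHIFT) - 1)

struct radix_tree_root { unsigned gfp_mask; };

static void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
        root->gfp_mask |= 1u << (tag + __GFP_BITS_SHIFT);
}

static int root_tag_get(struct radix_tree_root *root, unsigned tag)
{
        return !!(root->gfp_mask & (1u << (tag + __GFP_BITS_SHIFT)));
}

int main(void)
{
        struct radix_tree_root root = { .gfp_mask = 0x20 /* some GFP flags */ };

        root_tag_set(&root, 1);
        printf("tag0=%d tag1=%d gfp=%#x\n",
               root_tag_get(&root, 0), root_tag_get(&root, 1),
               root.gfp_mask & __GFP_BITS_MASK);  /* GFP bits unchanged */
        return 0;
}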
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..4a7fce72898e 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
         struct ts_bm *bm = ts_config_priv(conf);
         unsigned int i, text_len, consumed = state->offset;
         const u8 *text;
-        int shift = bm->patlen, bs;
+        int shift = bm->patlen - 1, bs;
 
         for (;;) {
                 text_len = conf->get_next_block(consumed, &text, conf, state);
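The fix is an off-by-one: the first search window's last character sits at index patlen - 1, so starting the scan at patlen silently skipped a match at offset zero. A naive right-to-left matcher, without the shift table, showing the indexing:

#include <stdio.h>
#include <string.h>

static int bm_find(const char *text, const char *pat)
{
        size_t n = strlen(text), m = strlen(pat);
        size_t pos = 0;

        while (pos + m <= n) {
                size_t i = m;           /* compare right to left, so the */
                                        /* last char checked is pos+m-1  */
                while (i && text[pos + i - 1] == pat[i - 1])
                        i--;
                if (!i)
                        return (int)pos;
                pos++;                  /* real code shifts by the table */
        }
        return -1;
}

int main(void)
{
        printf("%d\n", bm_find("cdefg", "cde"));  /* 0: match at offset zero */
        return 0;
}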
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a4496..1dc2d1d18fa8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,6 +22,8 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h> /* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
         return buf;
 }
 
+static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
+{
+        int len, i;
+
+        if ((unsigned long)s < PAGE_SIZE)
+                s = "<NULL>";
+
+        len = strnlen(s, precision);
+
+        if (!(flags & LEFT)) {
+                while (len < field_width--) {
+                        if (buf < end)
+                                *buf = ' ';
+                        ++buf;
+                }
+        }
+        for (i = 0; i < len; ++i) {
+                if (buf < end)
+                        *buf = *s;
+                ++buf; ++s;
+        }
+        while (len < field_width--) {
+                if (buf < end)
+                        *buf = ' ';
+                ++buf;
+        }
+        return buf;
+}
+
+static inline void *dereference_function_descriptor(void *ptr)
+{
+#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+        void *p;
+        if (!probe_kernel_address(ptr, p))
+                ptr = p;
+#endif
+        return ptr;
+}
+
+static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+        unsigned long value = (unsigned long) ptr;
+#ifdef CONFIG_KALLSYMS
+        char sym[KSYM_SYMBOL_LEN];
+        sprint_symbol(sym, value);
+        return string(buf, end, sym, field_width, precision, flags);
+#else
+        field_width = 2*sizeof(void *);
+        flags |= SPECIAL | SMALL | ZEROPAD;
+        return number(buf, end, value, 16, field_width, precision, flags);
+#endif
+}
+
+/*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+ * by an extra set of alphanumeric characters that are extended format
+ * specifiers.
+ *
+ * Right now we just handle 'F' (for symbolic Function descriptor pointers)
+ * and 'S' (for Symbolic direct pointers), but this can easily be
+ * extended in the future (network address types etc).
+ *
+ * The difference between 'S' and 'F' is that on ia64 and ppc64 function
+ * pointers are really function descriptors, which contain a pointer the
+ * real address.
+ */
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+        switch (*fmt) {
+        case 'F':
+                ptr = dereference_function_descriptor(ptr);
+                /* Fallthrough */
+        case 'S':
+                return symbol_string(buf, end, ptr, field_width, precision, flags);
+        }
+        flags |= SMALL;
+        if (field_width == -1) {
+                field_width = 2*sizeof(void *);
+                flags |= ZEROPAD;
+        }
+        return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+}
+
 /**
  * vsnprintf - Format a string and place it in a buffer
  * @buf: The buffer to place the result into
@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
  */
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
-        int len;
         unsigned long long num;
-        int i, base;
+        int base;
         char *str, *end, c;
-        const char *s;
 
         int flags; /* flags to number() */
 
@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
                         continue;
 
                 case 's':
-                        s = va_arg(args, char *);
-                        if ((unsigned long)s < PAGE_SIZE)
-                                s = "<NULL>";
-
-                        len = strnlen(s, precision);
-
-                        if (!(flags & LEFT)) {
-                                while (len < field_width--) {
-                                        if (str < end)
-                                                *str = ' ';
-                                        ++str;
-                                }
-                        }
-                        for (i = 0; i < len; ++i) {
-                                if (str < end)
-                                        *str = *s;
-                                ++str; ++s;
-                        }
-                        while (len < field_width--) {
-                                if (str < end)
-                                        *str = ' ';
-                                ++str;
-                        }
+                        str = string(str, end, va_arg(args, char *), field_width, precision, flags);
                         continue;
 
                 case 'p':
-                        flags |= SMALL;
-                        if (field_width == -1) {
-                                field_width = 2*sizeof(void *);
-                                flags |= ZEROPAD;
-                        }
-                        str = number(str, end,
-                                (unsigned long) va_arg(args, void *),
-                                16, field_width, precision, flags);
+                        str = pointer(fmt+1, str, end,
+                                        va_arg(args, void *),
+                                        field_width, precision, flags);
+                        /* Skip all alphanumeric pointer suffixes */
+                        while (isalnum(fmt[1]))
+                                fmt++;
                        continue;
 
-
                 case 'n':
                         /* FIXME:
                          * What does C99 say about the overflow case here? */
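A userspace model of the pointer() dispatch added above: "%pS" formats a pointer symbolically while plain "%p" falls back to zero-padded hex. The stub symbol table is invented for the demo; the kernel resolves names through sprint_symbol()/kallsyms instead:

#include <stdio.h>

static void demo_symbol(void) { }

static const char *sprint_symbol_stub(const void *ptr)
{
        /* stand-in for the kallsyms name+offset lookup */
        return ptr == (void *)demo_symbol ? "demo_symbol+0x0/0x8" : "unknown";
}

static void show_pointer(const char *fmt, const void *ptr)
{
        if (fmt[0] == 'p' && fmt[1] == 'S')     /* symbolic pointer */
                printf("%s\n", sprint_symbol_stub(ptr));
        else                                    /* plain %p: padded hex */
                printf("%0*lx\n", (int)(2 * sizeof(void *)),
                       (unsigned long)ptr);
}

int main(void)
{
        show_pointer("pS", (void *)demo_symbol);
        show_pointer("p", (void *)demo_symbol);
        return 0;
}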