author    David S. Miller <davem@davemloft.net>  2008-05-15 03:34:44 -0400
committer David S. Miller <davem@davemloft.net>  2008-05-15 03:34:44 -0400
commit    63fe46da9c380b3f2bbdf3765044649517cc717c (patch)
tree      9478c1aca1d692b408955aea20c9cd9a37e589c0 /lib
parent    99dd1a2b8347ac2ae802300b7862f6f7bcf17139 (diff)
parent    066b2118976e6e7cc50eed39e2747c75343a23c4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/wireless/iwlwifi/iwl-4965-rs.c
	drivers/net/wireless/rt2x00/rt61pci.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.kgdb   |  16
-rw-r--r--  lib/bitmap.c       |  16
-rw-r--r--  lib/devres.c       |   2
-rw-r--r--  lib/hexdump.c      |   7
-rw-r--r--  lib/kernel_lock.c  | 120
-rw-r--r--  lib/lmb.c          |  45
-rw-r--r--  lib/parser.c       |  32
7 files changed, 146 insertions(+), 92 deletions(-)
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab09..a5d4b1dac2a5 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,10 @@
 
+config HAVE_ARCH_KGDB_SHADOW_INFO
+	bool
+
+config HAVE_ARCH_KGDB
+	bool
+
 menuconfig KGDB
 	bool "KGDB: kernel debugging with remote gdb"
 	select FRAME_POINTER
@@ -10,15 +16,10 @@ menuconfig KGDB
 	  at http://kgdb.sourceforge.net as well as in DocBook form
 	  in Documentation/DocBook/.  If unsure, say N.
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
-config HAVE_ARCH_KGDB
-	bool
+if KGDB
 
 config KGDB_SERIAL_CONSOLE
 	tristate "KGDB: use kgdb over the serial console"
-	depends on KGDB
 	select CONSOLE_POLL
 	select MAGIC_SYSRQ
 	default y
@@ -28,7 +29,6 @@ config KGDB_SERIAL_CONSOLE
 
 config KGDB_TESTS
 	bool "KGDB: internal test suite"
-	depends on KGDB
 	default n
 	help
 	  This is a kgdb I/O module specifically designed to test
@@ -56,3 +56,5 @@ config KGDB_TESTS_BOOT_STRING
 	  boot.  See the drivers/misc/kgdbts.c for detailed
 	  information about other strings you could use beyond the
 	  default of V1F100.
+
+endif # KGDB
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c4cb48f77f0c..482df94ea21e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,22 +316,6 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
- * bitmap_scnprintf_len - return buffer length needed to convert
- * bitmap to an ASCII hex string.
- * @len: number of bits to be converted
- */
-int bitmap_scnprintf_len(unsigned int len)
-{
-	/* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
-	int bitslen = ALIGN(len, CHUNKSZ);
-	int wordlen = CHUNKSZ / 4;
-	int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
-
-	return buflen;
-}
-EXPORT_SYMBOL(bitmap_scnprintf_len);
-
-/**
  * __bitmap_parse - convert an ASCII hex string into a bitmap.
  * @buf: pointer to buffer containing string.
  * @buflen: buffer size in bytes.  If string is smaller than this
diff --git a/lib/devres.c b/lib/devres.c
index 26c87c49d776..72c8909006da 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -2,7 +2,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 
-static void devm_ioremap_release(struct device *dev, void *res)
+void devm_ioremap_release(struct device *dev, void *res)
 {
 	iounmap(*(void __iomem **)res);
 }
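
Dropping the static here exposes devm_ioremap_release() so that other devres-managed mapping helpers can hand it to devres_alloc(). A sketch of such a caller, modelled on devm_ioremap() itself; devm_ioremap_variant is a hypothetical name, while devres_alloc()/devres_add()/devres_free() are the core devres primitives:

/* Sketch only: a devres-backed ioremap wrapper reusing the now-visible
 * devm_ioremap_release() as its release callback. */
void __iomem *devm_ioremap_variant(struct device *dev,
				   resource_size_t offset, unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);	/* or any ioremap_*() flavour */
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);	/* unmapped automatically on detach */
	} else
		devres_free(ptr);

	return addr;
}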
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 343546550dc9..f07c0db81d26 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -12,6 +12,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 
+const char hex_asc[] = "0123456789abcdef";
+EXPORT_SYMBOL(hex_asc);
+
 /**
  * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
  * @buf: data blob to dump
@@ -93,8 +96,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 	for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
 	     j++) {
 		ch = ptr[j];
-		linebuf[lx++] = hex_asc(ch >> 4);
-		linebuf[lx++] = hex_asc(ch & 0x0f);
+		linebuf[lx++] = hex_asc_hi(ch);
+		linebuf[lx++] = hex_asc_lo(ch);
 		linebuf[lx++] = ' ';
 	}
 	ascii_column = 3 * rowsize + 2;
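
With the table exported, hex_asc_hi() and hex_asc_lo() are plain lookups; in the <linux/kernel.h> of this era they expand, roughly, to hex_asc[(ch & 0xf0) >> 4] and hex_asc[ch & 0x0f]. A standalone sketch of the technique:

#include <stdio.h>

/* Userspace model of the kernel's hex_asc table and its accessors. */
static const char hex_asc[] = "0123456789abcdef";
#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]
#define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]

int main(void)
{
	unsigned char ch = 0x5a;

	printf("%c%c\n", hex_asc_hi(ch), hex_asc_lo(ch));	/* prints "5a" */
	return 0;
}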
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cd3e82530b03..01a3c22c1b5a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -11,79 +11,121 @@
 #include <linux/semaphore.h>
 
 /*
- * The 'big kernel semaphore'
+ * The 'big kernel lock'
  *
- * This mutex is taken and released recursively by lock_kernel()
+ * This spinlock is taken and released recursively by lock_kernel()
  * and unlock_kernel().  It is transparently dropped and reacquired
  * over schedule().  It is used to protect legacy code that hasn't
  * been migrated to a proper locking design yet.
  *
- * Note: code locked by this semaphore will only be serialized against
- * other code using the same locking facility. The code guarantees that
- * the task remains on the same CPU.
- *
  * Don't use in new code.
  */
-static DECLARE_MUTEX(kernel_sem);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
 
 /*
- * Re-acquire the kernel semaphore.
+ * Acquire/release the underlying lock from the scheduler.
  *
- * This function is called with preemption off.
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
  *
- * We are executing in schedule() so the code must be extremely careful
- * about recursion, both due to the down() and due to the enabling of
- * preemption. schedule() will re-check the preemption flag after
- * reacquiring the semaphore.
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	struct task_struct *task = current;
-	int saved_lock_depth = task->lock_depth;
-
-	BUG_ON(saved_lock_depth < 0);
-
-	task->lock_depth = -1;
-	preempt_enable_no_resched();
-
-	down(&kernel_sem);
-
+	while (!_raw_spin_trylock(&kernel_flag)) {
+		if (test_thread_flag(TIF_NEED_RESCHED))
+			return -EAGAIN;
+		cpu_relax();
+	}
 	preempt_disable();
-	task->lock_depth = saved_lock_depth;
-
 	return 0;
 }
 
 void __lockfunc __release_kernel_lock(void)
 {
-	up(&kernel_sem);
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable_no_resched();
 }
 
 /*
- * Getting the big kernel semaphore.
+ * These are the BKL spinlocks - we try to be polite about preemption.
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
  */
-void __lockfunc lock_kernel(void)
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
 {
-	struct task_struct *task = current;
-	int depth = task->lock_depth + 1;
+	preempt_disable();
+	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+		/*
+		 * If preemption was disabled even before this
+		 * was called, there's nothing we can be polite
+		 * about - just spin.
+		 */
+		if (preempt_count() > 1) {
+			_raw_spin_lock(&kernel_flag);
+			return;
+		}
 
-	if (likely(!depth))
 		/*
-		 * No recursion worries - we set up lock_depth _after_
+		 * Otherwise, let's wait for the kernel lock
+		 * with preemption enabled..
 		 */
-		down(&kernel_sem);
+		do {
+			preempt_enable();
+			while (spin_is_locked(&kernel_flag))
+				cpu_relax();
+			preempt_disable();
+		} while (!_raw_spin_trylock(&kernel_flag));
+	}
+}
 
-	task->lock_depth = depth;
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+	_raw_spin_lock(&kernel_flag);
 }
+#endif
 
-void __lockfunc unlock_kernel(void)
+static inline void __unlock_kernel(void)
 {
-	struct task_struct *task = current;
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
+}
 
-	BUG_ON(task->lock_depth < 0);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPU's.
+ */
+void __lockfunc lock_kernel(void)
+{
+	int depth = current->lock_depth+1;
+	if (likely(!depth))
+		__lock_kernel();
+	current->lock_depth = depth;
+}
 
-	if (likely(--task->lock_depth < 0))
-		up(&kernel_sem);
+void __lockfunc unlock_kernel(void)
+{
+	BUG_ON(current->lock_depth < 0);
+	if (likely(--current->lock_depth < 0))
+		__unlock_kernel();
 }
 
 EXPORT_SYMBOL(lock_kernel);
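
The CONFIG_PREEMPT path of __lock_kernel() above is a test-and-test-and-set loop: trylock with preemption disabled, and on contention spin on plain reads (with preemption enabled) until the lock looks free before retrying. A standalone C11 sketch of the same pattern; the names are hypothetical, and the preempt_disable()/preempt_enable() pairing has no userspace analogue, so it is omitted:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } sketch_lock_t;

static bool sketch_trylock(sketch_lock_t *l)
{
	bool expected = false;
	return atomic_compare_exchange_strong(&l->locked, &expected, true);
}

static void sketch_lock(sketch_lock_t *l)
{
	do {
		/* analogue of the spin_is_locked()/cpu_relax() wait:
		 * poll with cheap loads instead of hammering the CAS */
		while (atomic_load_explicit(&l->locked, memory_order_relaxed))
			;	/* cpu_relax() would go here */
	} while (!sketch_trylock(l));
}

static void sketch_unlock(sketch_lock_t *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}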
diff --git a/lib/lmb.c b/lib/lmb.c
index 83287d3869a3..867f7b5a8231 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,31 +19,42 @@
 
 struct lmb lmb;
 
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+	if (p && strstr(p, "debug"))
+		lmb_debug = 1;
+	return 0;
+}
+early_param("lmb", early_lmb);
+
 void lmb_dump_all(void)
 {
-#ifdef DEBUG
 	unsigned long i;
 
-	pr_debug("lmb_dump_all:\n");
-	pr_debug("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
-	pr_debug("    memory.size = 0x%llx\n",
+	if (!lmb_debug)
+		return;
+
+	pr_info("lmb_dump_all:\n");
+	pr_info("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
+	pr_info("    memory.size = 0x%llx\n",
 	    (unsigned long long)lmb.memory.size);
 	for (i=0; i < lmb.memory.cnt ;i++) {
-		pr_debug("    memory.region[0x%x].base = 0x%llx\n",
+		pr_info("    memory.region[0x%lx].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.memory.region[i].base);
-		pr_debug("                  .size = 0x%llx\n",
+		pr_info("                  .size = 0x%llx\n",
 		    (unsigned long long)lmb.memory.region[i].size);
 	}
 
-	pr_debug("    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
-	pr_debug("    reserved.size = 0x%lx\n", lmb.reserved.size);
+	pr_info("    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
+	pr_info("    reserved.size = 0x%lx\n", lmb.reserved.size);
 	for (i=0; i < lmb.reserved.cnt ;i++) {
-		pr_debug("    reserved.region[0x%x].base = 0x%llx\n",
+		pr_info("    reserved.region[0x%lx].base = 0x%llx\n",
 		    i, (unsigned long long)lmb.reserved.region[i].base);
-		pr_debug("                  .size = 0x%llx\n",
+		pr_info("                  .size = 0x%llx\n",
 		    (unsigned long long)lmb.reserved.region[i].size);
 	}
-#endif /* DEBUG */
 }
 
 static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -286,8 +297,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				base = ~(u64)0;
 			return base;
 		}
@@ -333,6 +343,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
 	struct lmb_region *mem = &lmb.memory;
 	int i;
 
+	BUG_ON(0 == size);
+
+	size = lmb_align_up(size, align);
+
 	for (i = 0; i < mem->cnt; i++) {
 		u64 ret = lmb_alloc_nid_region(&mem->region[i],
 					       nid_range,
@@ -370,6 +384,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	BUG_ON(0 == size);
 
+	size = lmb_align_up(size, align);
+
 	/* On some platforms, make sure we allocate lowmem */
 	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +409,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		j = lmb_overlaps_region(&lmb.reserved, base, size);
 		if (j < 0) {
 			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base,
-					   lmb_align_up(size, align)) < 0)
+			if (lmb_add_region(&lmb.reserved, base, size) < 0)
 				return 0;
 			return base;
 		}
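
Both allocators now align size once, up front, so the overlap search and the eventual lmb_add_region() reservation work with the same value; before this, the reserved map recorded the aligned size while the availability check used the raw one. The align-up step itself is the usual power-of-two mask trick, sketched standalone below (lmb_align_up() is assumed to take this form):

#include <assert.h>
#include <stdint.h>

static uint64_t align_up(uint64_t x, uint64_t a)	/* a: power of two */
{
	return (x + (a - 1)) & ~(a - 1);
}

int main(void)
{
	assert(align_up(0x1001, 0x1000) == 0x2000);
	assert(align_up(0x1000, 0x1000) == 0x1000);	/* already aligned */
	assert(align_up(1, 16) == 16);
	return 0;
}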
diff --git a/lib/parser.c b/lib/parser.c
index 703c8c13b346..4f0cbc03e0e8 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -182,18 +182,25 @@ int match_hex(substring_t *s, int *result)
 }
 
 /**
- * match_strcpy: - copies the characters from a substring_t to a string
- * @to: string to copy characters to.
- * @s: &substring_t to copy
+ * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * @dest: where to copy to
+ * @src: &substring_t to copy
+ * @size: size of destination buffer
  *
- * Description: Copies the set of characters represented by the given
- * &substring_t @s to the c-style string @to. Caller guarantees that @to is
- * large enough to hold the characters of @s.
+ * Description: Copy the characters in &substring_t @src to the
+ * c-style string @dest.  Copy no more than @size - 1 characters, plus
+ * the terminating NUL.  Return length of @src.
  */
-void match_strcpy(char *to, const substring_t *s)
+size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
 {
-	memcpy(to, s->from, s->to - s->from);
-	to[s->to - s->from] = '\0';
+	size_t ret = src->to - src->from;
+
+	if (size) {
+		size_t len = ret >= size ? size - 1 : ret;
+		memcpy(dest, src->from, len);
+		dest[len] = '\0';
+	}
+	return ret;
 }
 
 /**
@@ -206,9 +213,10 @@ void match_strcpy(char *to, const substring_t *s)
  */
 char *match_strdup(const substring_t *s)
 {
-	char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	size_t sz = s->to - s->from + 1;
+	char *p = kmalloc(sz, GFP_KERNEL);
 	if (p)
-		match_strcpy(p, s);
+		match_strlcpy(p, s, sz);
 	return p;
 }
 
@@ -216,5 +224,5 @@ EXPORT_SYMBOL(match_token);
 EXPORT_SYMBOL(match_int);
 EXPORT_SYMBOL(match_octal);
 EXPORT_SYMBOL(match_hex);
-EXPORT_SYMBOL(match_strcpy);
+EXPORT_SYMBOL(match_strlcpy);
 EXPORT_SYMBOL(match_strdup);
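
match_strlcpy() follows strlcpy() conventions: with a non-zero size it always NUL-terminates, and it returns the full source length so a caller can detect truncation by comparing the return value against the buffer size. A standalone model of those semantics (substring_t is redeclared locally for the sketch):

#include <stdio.h>
#include <string.h>

typedef struct { const char *from; const char *to; } substring_t;

static size_t model_strlcpy(char *dest, const substring_t *src, size_t size)
{
	size_t ret = src->to - src->from;	/* full source length */

	if (size) {
		size_t len = ret >= size ? size - 1 : ret;
		memcpy(dest, src->from, len);
		dest[len] = '\0';
	}
	return ret;
}

int main(void)
{
	const char *opt = "username=verylongname";
	substring_t s = { opt + 9, opt + strlen(opt) };
	char buf[8];

	if (model_strlcpy(buf, &s, sizeof(buf)) >= sizeof(buf))
		printf("truncated: %s\n", buf);	/* prints "truncated: verylon" */
	return 0;
}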