Diffstat (limited to 'lib')
 lib/Kconfig.debug        |  27 +++++++--------
 lib/Kconfig.kgdb         |  58 ++++++++++
 lib/Makefile             |   1 -
 lib/kernel_lock.c        |   1 +
 lib/scatterlist.c        | 102 ++++++++++++++
 lib/semaphore-sleepers.c | 176 ---------------------
 6 files changed, 176 insertions(+), 189 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..95de3102bc87 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
@@ -265,16 +265,6 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config DEBUG_SEMAPHORE
-	bool "Semaphore debugging"
-	depends on DEBUG_KERNEL
-	depends on ALPHA || FRV
-	default n
-	help
-	  If you say Y here then semaphore processing will issue lots of
-	  verbose debugging messages. If you suspect a semaphore problem or a
-	  kernel hacker asks for this option then say Y. Otherwise say N.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -593,7 +583,7 @@ config LATENCYTOP
 	  to find out which userspace is blocking on what kernel operations.
 
 config PROVIDE_OHCI1394_DMA_INIT
-	bool "Provide code for enabling DMA over FireWire early on boot"
+	bool "Remote debugging over FireWire early on boot"
 	depends on PCI && X86
 	help
 	  If you want to debug problems which hang or crash the kernel early
@@ -621,4 +611,17 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
+config FIREWIRE_OHCI_REMOTE_DMA
+	bool "Remote debugging over FireWire with firewire-ohci"
+	depends on FIREWIRE_OHCI
+	help
+	  This option lets you use the FireWire bus for remote debugging
+	  with help of the firewire-ohci driver. It enables unfiltered
+	  remote DMA in firewire-ohci.
+	  See Documentation/debugging-via-ohci1394.txt for more information.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000000..f2e01ac5ab09
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+	bool "KGDB: kernel debugging with remote gdb"
+	select FRAME_POINTER
+	depends on HAVE_ARCH_KGDB
+	depends on DEBUG_KERNEL && EXPERIMENTAL
+	help
+	  If you say Y here, it will be possible to remotely debug the
+	  kernel using gdb. Documentation of kernel debugger is available
+	  at http://kgdb.sourceforge.net as well as in DocBook form
+	  in Documentation/DocBook/. If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+	bool
+
+config HAVE_ARCH_KGDB
+	bool
+
+config KGDB_SERIAL_CONSOLE
+	tristate "KGDB: use kgdb over the serial console"
+	depends on KGDB
+	select CONSOLE_POLL
+	select MAGIC_SYSRQ
+	default y
+	help
+	  Share a serial console with kgdb. Sysrq-g must be used
+	  to break in initially.
+
+config KGDB_TESTS
+	bool "KGDB: internal test suite"
+	depends on KGDB
+	default n
+	help
+	  This is a kgdb I/O module specifically designed to test
+	  kgdb's internal functions. This kgdb I/O module is
+	  intended to for the development of new kgdb stubs
+	  as well as regression testing the kgdb internals.
+	  See the drivers/misc/kgdbts.c for the details about
+	  the tests. The most basic of this I/O module is to boot
+	  a kernel boot arguments "kgdbwait kgdbts=V1F100"
+
+config KGDB_TESTS_ON_BOOT
+	bool "KGDB: Run tests on boot"
+	depends on KGDB_TESTS
+	default n
+	help
+	  Run the kgdb tests on boot up automatically without the need
+	  to pass in a kernel parameter
+
+config KGDB_TESTS_BOOT_STRING
+	string "KGDB: which internal kgdb tests to run"
+	depends on KGDB_TESTS_ON_BOOT
+	default "V1F100"
+	help
+	  This is the command string to send the kgdb test suite on
+	  boot. See the drivers/misc/kgdbts.c for detailed
+	  information about other strings you could use beyond the
+	  default of V1F100.
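
The KGDB_TESTS help text above calls kgdbts a "kgdb I/O module". For orientation, here is a minimal sketch of what such a module looks like, assuming the struct kgdb_io and kgdb_register_io_module() interface that the kgdb core in this series is expected to export through <linux/kgdb.h>; the field names and registration calls are assumptions about that interface, and the module itself is invented for illustration.

/*
 * Hypothetical kgdb I/O module sketch, not part of this patch.
 * Assumes struct kgdb_io / kgdb_register_io_module() as exported
 * by the kgdb core of this era; treat the field names as assumptions.
 */
#include <linux/kgdb.h>
#include <linux/module.h>
#include <linux/types.h>

static int demo_read_char(void)
{
	/* Would block until the remote gdb sends a character. */
	return -1;	/* stub: no real transport behind this sketch */
}

static void demo_write_char(u8 chr)
{
	/* Would push one character toward the remote gdb. */
}

static struct kgdb_io demo_io_ops = {
	.name		= "demo_kgdbio",
	.read_char	= demo_read_char,
	.write_char	= demo_write_char,
};

static int __init demo_init(void)
{
	/* Hand the transport to the kgdb core, as kgdbts does. */
	return kgdb_register_io_module(&demo_io_ops);
}

static void __exit demo_exit(void)
{
	kgdb_unregister_io_module(&demo_io_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");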
diff --git a/lib/Makefile b/lib/Makefile
index 4d059d469554..4d7649c326f6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..fbc11a336bc5 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
 #include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <asm/semaphore.h>
 
 /*
  * The 'big kernel semaphore'
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901046c..b80c21100d78 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 	return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @to_buffer: transfer direction (non zero == from an sg list to a
+ *	       buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+			     void *buf, size_t buflen, int to_buffer)
+{
+	struct scatterlist *sg;
+	size_t buf_off = 0;
+	int i;
+
+	WARN_ON(!irqs_disabled());
+
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page;
+		int n = 0;
+		unsigned int sg_off = sg->offset;
+		unsigned int sg_copy = sg->length;
+
+		if (sg_copy > buflen)
+			sg_copy = buflen;
+		buflen -= sg_copy;
+
+		while (sg_copy > 0) {
+			unsigned int page_copy;
+			void *p;
+
+			page_copy = PAGE_SIZE - sg_off;
+			if (page_copy > sg_copy)
+				page_copy = sg_copy;
+
+			page = nth_page(sg_page(sg), n);
+			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+			if (to_buffer)
+				memcpy(buf + buf_off, p + sg_off, page_copy);
+			else {
+				memcpy(p + sg_off, buf + buf_off, page_copy);
+				flush_kernel_dcache_page(page);
+			}
+
+			kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+			buf_off += page_copy;
+			sg_off += page_copy;
+			if (sg_off == PAGE_SIZE) {
+				sg_off = 0;
+				n++;
+			}
+			sg_copy -= page_copy;
+		}
+
+		if (!buflen)
+			break;
+	}
+
+	return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
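
The two exported helpers above give drivers a ready-made replacement for open-coded scatterlist copy loops. A hypothetical caller might look like the sketch below; because sg_copy_buffer() maps pages with kmap_atomic(..., KM_BIO_SRC_IRQ) and WARNs when interrupts are enabled, the copies are wrapped in local_irq_save()/local_irq_restore(). Everything outside <linux/scatterlist.h> here is invented for illustration.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/irqflags.h>
#include <linux/errno.h>

/* Hypothetical round-trip through a two-entry SG list. */
static int demo_roundtrip(void *linear, size_t len)
{
	struct scatterlist sgl[2];
	unsigned long flags;
	size_t half = len / 2;
	size_t copied;
	void *a, *b;

	a = kmalloc(half, GFP_KERNEL);
	b = kmalloc(len - half, GFP_KERNEL);
	if (!a || !b) {
		kfree(a);
		kfree(b);
		return -ENOMEM;
	}

	sg_init_table(sgl, 2);		/* initialize and mark the end entry */
	sg_set_buf(&sgl[0], a, half);
	sg_set_buf(&sgl[1], b, len - half);

	/* sg_copy_buffer() WARNs unless IRQs are off (KM_BIO_SRC_IRQ). */
	local_irq_save(flags);
	copied = sg_copy_from_buffer(sgl, 2, linear, len);	/* scatter */
	copied = sg_copy_to_buffer(sgl, 2, linear, len);	/* gather  */
	local_irq_restore(flags);

	kfree(a);
	kfree(b);
	return copied == len ? 0 : -EIO;
}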
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
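
With semaphore-sleepers.c gone (and its CONFIG_SEMAPHORE_SLEEPERS line dropped from the Makefile above), architectures fall back on a shared generic semaphore implementation while callers keep the familiar API. A minimal usage sketch against that API follows; the semaphore primitives are the long-standing kernel interface, but the surrounding functions are invented for illustration.

#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore demo_sem;

static void demo_setup(void)
{
	/* A count of 1 gives mutex-like behaviour. */
	sema_init(&demo_sem, 1);
}

static int demo_do_work(void)
{
	/* Sleeps until the semaphore is available or a signal arrives. */
	if (down_interruptible(&demo_sem))
		return -EINTR;

	/* ... critical section, may sleep ... */

	up(&demo_sem);
	return 0;
}

static int demo_try_work(void)
{
	/* Non-zero return means the semaphore was contended; no sleep. */
	if (down_trylock(&demo_sem))
		return -EBUSY;

	/* ... critical section ... */

	up(&demo_sem);
	return 0;
}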