Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   6
-rw-r--r--  lib/Kconfig.debug                |  48
-rw-r--r--  lib/Kconfig.kgdb                 |  58
-rw-r--r--  lib/Makefile                     |   3
-rw-r--r--  lib/bitmap.c                     | 174
-rw-r--r--  lib/find_next_bit.c              |  77
-rw-r--r--  lib/kernel_lock.c                |   1
-rw-r--r--  lib/kobject.c                    |  19
-rw-r--r--  lib/kobject_uevent.c             |   6
-rw-r--r--  lib/lmb.c                        | 101
-rw-r--r--  lib/pcounter.c                   |  58
-rw-r--r--  lib/radix-tree.c                 |   9
-rw-r--r--  lib/reed_solomon/reed_solomon.c  |   1
-rw-r--r--  lib/scatterlist.c                | 102
-rw-r--r--  lib/semaphore-sleepers.c         | 176
15 files changed, 552 insertions, 287 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d53dc092e8b..8cc8e8722a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
 config BITREVERSE
 	tristate
 
+config GENERIC_FIND_FIRST_BIT
+	def_bool n
+
+config GENERIC_FIND_NEXT_BIT
+	def_bool n
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..754cc0027f2a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -25,6 +25,17 @@ config ENABLE_MUST_CHECK
 	  suppress the "warning: ignoring return value of 'foo', declared with
 	  attribute warn_unused_result" messages.
 
+config FRAME_WARN
+	int "Warn for stack frames larger than (needs gcc 4.4)"
+	range 0 8192
+	default 1024 if !64BIT
+	default 2048 if 64BIT
+	help
+	  Tell gcc to warn at build time for stack frames larger than this.
+	  Setting this too low will cause a lot of warnings.
+	  Setting it to 0 disables the warning.
+	  Requires gcc 4.4.
+
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
 	depends on !UML
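A quick illustration of what FRAME_WARN catches. Kbuild passes the configured value to the compiler as gcc's -Wframe-larger-than= option, so the effect can be reproduced on any host with gcc 4.4 or later; the function below is an invented example, not kernel code:

    /* sketch.c: gcc -c -Wframe-larger-than=1024 sketch.c */
    #include <string.h>

    int parse_request(const char *src)  /* invented function */
    {
        char scratch[4096];     /* a 4 KiB stack buffer trips the warning */

        strncpy(scratch, src, sizeof(scratch) - 1);
        scratch[sizeof(scratch) - 1] = '\0';
        return scratch[0];
    }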
@@ -211,7 +222,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order to find ways to optimize the allocator. This should never be
@@ -265,16 +276,6 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config DEBUG_SEMAPHORE
-	bool "Semaphore debugging"
-	depends on DEBUG_KERNEL
-	depends on ALPHA || FRV
-	default n
-	help
-	  If you say Y here then semaphore processing will issue lots of
-	  verbose debugging messages. If you suspect a semaphore problem or a
-	  kernel hacker asks for this option then say Y. Otherwise say N.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -437,6 +438,16 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_WRITECOUNT
+	bool "Debug filesystem writers count"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to catch wrong use of the writers count in struct
+	  vfsmount. This will increase the size of each file struct by
+	  32 bits.
+
+	  If unsure, say N.
+
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
@@ -593,7 +604,7 @@ config LATENCYTOP
 	  to find out which userspace is blocking on what kernel operations.
 
 config PROVIDE_OHCI1394_DMA_INIT
-	bool "Provide code for enabling DMA over FireWire early on boot"
+	bool "Remote debugging over FireWire early on boot"
 	depends on PCI && X86
 	help
 	  If you want to debug problems which hang or crash the kernel early
@@ -621,4 +632,17 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
+config FIREWIRE_OHCI_REMOTE_DMA
+	bool "Remote debugging over FireWire with firewire-ohci"
+	depends on FIREWIRE_OHCI
+	help
+	  This option lets you use the FireWire bus for remote debugging
+	  with the help of the firewire-ohci driver. It enables unfiltered
+	  remote DMA in firewire-ohci.
+	  See Documentation/debugging-via-ohci1394.txt for more information.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000000..f2e01ac5ab09
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+	bool "KGDB: kernel debugging with remote gdb"
+	select FRAME_POINTER
+	depends on HAVE_ARCH_KGDB
+	depends on DEBUG_KERNEL && EXPERIMENTAL
+	help
+	  If you say Y here, it will be possible to remotely debug the
+	  kernel using gdb. Documentation of the kernel debugger is available
+	  at http://kgdb.sourceforge.net as well as in DocBook form
+	  in Documentation/DocBook/. If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+	bool
+
+config HAVE_ARCH_KGDB
+	bool
+
+config KGDB_SERIAL_CONSOLE
+	tristate "KGDB: use kgdb over the serial console"
+	depends on KGDB
+	select CONSOLE_POLL
+	select MAGIC_SYSRQ
+	default y
+	help
+	  Share a serial console with kgdb. Sysrq-g must be used
+	  to break in initially.
+
+config KGDB_TESTS
+	bool "KGDB: internal test suite"
+	depends on KGDB
+	default n
+	help
+	  This is a kgdb I/O module specifically designed to test
+	  kgdb's internal functions. This kgdb I/O module is
+	  intended for the development of new kgdb stubs
+	  as well as regression testing the kgdb internals.
+	  See drivers/misc/kgdbts.c for the details about
+	  the tests. The most basic use of this I/O module is to boot
+	  a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+	bool "KGDB: Run tests on boot"
+	depends on KGDB_TESTS
+	default n
+	help
+	  Run the kgdb tests on boot up automatically without the need
+	  to pass in a kernel parameter.
+
+config KGDB_TESTS_BOOT_STRING
+	string "KGDB: which internal kgdb tests to run"
+	depends on KGDB_TESTS_ON_BOOT
+	default "V1F100"
+	help
+	  This is the command string to send to the kgdb test suite on
+	  boot. See drivers/misc/kgdbts.c for detailed
+	  information about other strings you could use beyond the
+	  default of V1F100.
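Putting the options above together: on a machine with a serial port wired up for kgdb, a boot command line along the lines of

    console=ttyS0,115200 kgdboc=ttyS0,115200 kgdbwait kgdbts=V1F100

would share ttyS0 between the console and kgdb, halt early waiting for the debugger, and run the default test string. This is an illustrative sketch assembled from the help texts above, not a verified recipe for any particular board.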
diff --git a/lib/Makefile b/lib/Makefile
index 61bba16a0a2f..2d7001b7f5a4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
+lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
@@ -61,7 +61,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
-obj-$(CONFIG_SMP) += pcounter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2c9242e3fed0..c4cb48f77f0c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,22 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
+ * bitmap_scnprintf_len - return buffer length needed to convert
+ * bitmap to an ASCII hex string.
+ * @len: number of bits to be converted
+ */
+int bitmap_scnprintf_len(unsigned int len)
+{
+	/* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
+	int bitslen = ALIGN(len, CHUNKSZ);
+	int wordlen = CHUNKSZ / 4;
+	int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
+
+	return buflen;
+}
+EXPORT_SYMBOL(bitmap_scnprintf_len);
+
+/**
  * __bitmap_parse - convert an ASCII hex string into a bitmap.
  * @buf: pointer to buffer containing string.
  * @buflen: buffer size in bytes.  If string is smaller than this
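To make the sizing arithmetic above concrete, here is a userspace re-run of the same computation (CHUNKSZ is 32 in lib/bitmap.c; the names mirror the kernel's but the program is only a sketch). As written, the formula budgets nine output characters per eight bits of input rather than per 32-bit chunk, which appears to over-allocate; generous, but safe for sizing a buffer:

    #include <stdio.h>

    #define CHUNKSZ 32
    #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

    /* mirror of bitmap_scnprintf_len(): round len up to a whole
     * 32-bit chunk, then charge 9 output bytes per 8 bits of that */
    static int scnprintf_len(unsigned int len)
    {
        int bitslen = ALIGN(len, CHUNKSZ);
        int wordlen = CHUNKSZ / 4;      /* 8 hex digits per chunk */

        return (bitslen / wordlen) * (wordlen + 1);
    }

    int main(void)
    {
        printf("%u bits -> %d bytes\n", 128u, scnprintf_len(128));
        return 0;
    }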
@@ -698,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
+/**
+ * bitmap_onto - translate one bitmap relative to another
+ * @dst: resulting translated bitmap
+ * @orig: original untranslated bitmap
+ * @relmap: bitmap relative to which translated
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Set the n-th bit of @dst iff there exists some m such that the
+ * n-th bit of @relmap is set, the m-th bit of @orig is set, and
+ * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
+ * (If you understood the previous sentence the first time you
+ * read it, you're overqualified for your current job.)
+ *
+ * In other words, @orig is mapped onto (surjectively) @dst,
+ * using the map { <n, m> | the n-th bit of @relmap is the
+ * m-th set bit of @relmap }.
+ *
+ * Any set bits in @orig above bit number W, where W is the
+ * weight of (number of set bits in) @relmap, are mapped nowhere.
+ * In particular, if for all bits m set in @orig, m >= W, then
+ * @dst will end up empty. In situations where the possibility
+ * of such an empty result is not desired, one way to avoid it is
+ * to use the bitmap_fold() operator, below, to first fold the
+ * @orig bitmap over itself so that all its set bits x are in the
+ * range 0 <= x < W. The bitmap_fold() operator does this by
+ * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
+ *
+ * Example [1] for bitmap_onto():
+ *  Let's say @relmap has bits 30-39 set, and @orig has bits
+ *  1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
+ *  @dst will have bits 31, 33, 35, 37 and 39 set.
+ *
+ *  When bit 0 is set in @orig, it means turn on the bit in
+ *  @dst corresponding to whatever is the first bit (if any)
+ *  that is turned on in @relmap. Since bit 0 was off in the
+ *  above example, we leave off that bit (bit 30) in @dst.
+ *
+ *  When bit 1 is set in @orig (as in the above example), it
+ *  means turn on the bit in @dst corresponding to whatever
+ *  is the second bit that is turned on in @relmap. The second
+ *  bit in @relmap that was turned on in the above example was
+ *  bit 31, so we turned on bit 31 in @dst.
+ *
+ *  Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
+ *  because they were the 4th, 6th, 8th and 10th set bits
+ *  set in @relmap, and the 4th, 6th, 8th and 10th bits of
+ *  @orig (i.e. bits 3, 5, 7 and 9) were also set.
+ *
+ *  When bit 11 is set in @orig, it means turn on the bit in
+ *  @dst corresponding to whatever is the twelfth bit that is
+ *  turned on in @relmap. In the above example, there were
+ *  only ten bits turned on in @relmap (30..39), so the fact
+ *  that bit 11 was set in @orig had no effect on @dst.
+ *
+ * Example [2] for bitmap_fold() + bitmap_onto():
+ *  Let's say @relmap has these ten bits set:
+ *	40 41 42 43 45 48 53 61 74 95
+ *  (for the curious, that's 40 plus the first ten terms of the
+ *  Fibonacci sequence.)
+ *
+ *  Further, let's say we use the following code, invoking
+ *  bitmap_fold() then bitmap_onto(), as suggested above, to
+ *  avoid the possibility of an empty @dst result:
+ *
+ *	unsigned long *tmp;	// a temporary bitmap's bits
+ *
+ *	bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
+ *	bitmap_onto(dst, tmp, relmap, bits);
+ *
+ *  Then this table shows what various values of @dst would be, for
+ *  various @orig's. I list the zero-based positions of each set bit.
+ *  The tmp column shows the intermediate result, as computed by
+ *  using bitmap_fold() to fold the @orig bitmap modulo ten
+ *  (the weight of @relmap).
+ *
+ *	@orig		tmp		@dst
+ *	0		0		40
+ *	1		1		41
+ *	9		9		95
+ *	10		0		40 (*)
+ *	1 3 5 7		1 3 5 7		41 43 48 61
+ *	0 1 2 3 4	0 1 2 3 4	40 41 42 43 45
+ *	0 9 18 27	0 9 8 7		40 61 74 95
+ *	0 10 20 30	0		40
+ *	0 11 22 33	0 1 2 3		40 41 42 43
+ *	0 12 24 36	0 2 4 6		40 42 45 53
+ *	78 102 211	1 2 8		41 42 74 (*)
+ *
+ *  (*) For these marked lines, if we hadn't first done bitmap_fold()
+ *  into tmp, then the @dst result would have been empty.
+ *
+ * If either of @orig or @relmap is empty (no set bits), then @dst
+ * will be returned empty.
+ *
+ * If (as explained above) the only set bits in @orig are in positions
+ * m where m >= W (where W is the weight of @relmap), then @dst will
+ * once again be returned empty.
+ *
+ * All bits in @dst not set by the above rule are cleared.
+ */
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+			const unsigned long *relmap, int bits)
+{
+	int n, m;	/* same meaning as in above comment */
+
+	if (dst == orig)	/* following doesn't handle in-place mappings */
+		return;
+	bitmap_zero(dst, bits);
+
+	/*
+	 * The following code is a more efficient, but less
+	 * obvious, equivalent to the loop:
+	 *	for (m = 0; m < bitmap_weight(relmap, bits); m++) {
+	 *		n = bitmap_ord_to_pos(orig, m, bits);
+	 *		if (test_bit(m, orig))
+	 *			set_bit(n, dst);
+	 *	}
+	 */
+
+	m = 0;
+	for (n = find_first_bit(relmap, bits);
+	     n < bits;
+	     n = find_next_bit(relmap, bits, n + 1)) {
+		/* m == bitmap_pos_to_ord(relmap, n, bits) */
+		if (test_bit(m, orig))
+			set_bit(n, dst);
+		m++;
+	}
+}
+EXPORT_SYMBOL(bitmap_onto);
+
+/**
+ * bitmap_fold - fold larger bitmap into smaller, modulo specified size
+ * @dst: resulting smaller bitmap
+ * @orig: original larger bitmap
+ * @sz: specified size
+ * @bits: number of bits in each of these bitmaps
+ *
+ * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
+ * Clear all other bits in @dst. See further the comment and
+ * Example [2] for bitmap_onto() for why and how to use this.
+ */
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+			int sz, int bits)
+{
+	int oldbit;
+
+	if (dst == orig)	/* following doesn't handle in-place mappings */
+		return;
+	bitmap_zero(dst, bits);
+
+	for (oldbit = find_first_bit(orig, bits);
+	     oldbit < bits;
+	     oldbit = find_next_bit(orig, bits, oldbit + 1))
+		set_bit(oldbit % sz, dst);
+}
+EXPORT_SYMBOL(bitmap_fold);
+
 /*
  * Common code for bitmap_*_region() routines.
  *	bitmap: array of unsigned longs corresponding to the bitmap
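The kdoc's Example [1] above is easy to check mechanically. Here is a self-contained userspace model of bitmap_onto() for bitmaps that fit in one 64-bit word (the kernel version works on arbitrary-length arrays; onto() is an invented name for the sketch):

    #include <stdio.h>
    #include <stdint.h>

    /* onto(): bitmap_onto() restricted to single-word bitmaps */
    static uint64_t onto(uint64_t orig, uint64_t relmap)
    {
        uint64_t dst = 0;
        int n, m = 0;

        for (n = 0; n < 64; n++) {
            if (!(relmap >> n & 1))
                continue;           /* n is not set in relmap */
            if (orig >> m & 1)      /* n is the m-th set bit of relmap */
                dst |= 1ULL << n;
            m++;
        }
        return dst;
    }

    int main(void)
    {
        uint64_t relmap = 0x3ffULL << 30;   /* bits 30..39 */
        uint64_t orig = (1 << 1) | (1 << 3) | (1 << 5) |
                        (1 << 7) | (1 << 9) | (1 << 11);
        uint64_t dst = onto(orig, relmap);
        int n;

        /* expect: 31 33 35 37 39, matching Example [1] */
        for (n = 0; n < 64; n++)
            if (dst >> n & 1)
                printf("%d ", n);
        printf("\n");
        return 0;
    }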
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a8841..d3f5784807b4 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@
 
 #define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
 
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+/*
+ * Find the next set bit in a memory region.
  */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-			    unsigned long offset)
+unsigned long __find_next_bit(const unsigned long *addr,
+		unsigned long size, unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -60,15 +58,14 @@ found_first:
 found_middle:
 	return result + __ffs(tmp);
 }
-
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(__find_next_bit);
 
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
  */
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-				 unsigned long offset)
+unsigned long __find_next_zero_bit(const unsigned long *addr,
+		unsigned long size, unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -105,8 +102,64 @@ found_first:
 found_middle:
 	return result + ffz(tmp);
 }
+EXPORT_SYMBOL(__find_next_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long __find_first_bit(const unsigned long *addr,
+		unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
 
-EXPORT_SYMBOL(find_next_zero_bit);
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+
+	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found:
+	return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(__find_first_bit);
+
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long __find_first_zero_bit(const unsigned long *addr,
+		unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG-1)) {
+		if (~(tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+
+	tmp = (*p) | (~0UL << size);
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
+found:
+	return result + ffz(tmp);
+}
+EXPORT_SYMBOL(__find_first_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef __BIG_ENDIAN
 
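Caller-wise nothing changes: the bitops headers still expose find_first_bit()/find_next_bit(), now backed by these __-prefixed helpers when the GENERIC_FIND_* options are selected. The canonical scan loop, as a hedged kernel-style sketch (walk_mask is an invented name):

    #include <linux/bitops.h>
    #include <linux/kernel.h>

    /* invented example: visit every set bit in a bitmap */
    static void walk_mask(const unsigned long *mask, unsigned int nbits)
    {
        unsigned int bit;

        for (bit = find_first_bit(mask, nbits);
             bit < nbits;
             bit = find_next_bit(mask, nbits, bit + 1))
            pr_debug("bit %u is set\n", bit);
    }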
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..cd3e82530b03 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
 #include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/semaphore.h>
 
 /*
  * The 'big kernel semaphore'
diff --git a/lib/kobject.c b/lib/kobject.c
index 0d03252f87a8..2c6490370922 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -58,11 +58,6 @@ static int create_dir(struct kobject *kobj)
 	return error;
 }
 
-static inline struct kobject *to_kobj(struct list_head *entry)
-{
-	return container_of(entry, struct kobject, entry);
-}
-
 static int get_kobj_path_length(struct kobject *kobj)
 {
 	int length = 1;
@@ -592,8 +587,15 @@ static void kobject_release(struct kref *kref)
  */
 void kobject_put(struct kobject *kobj)
 {
-	if (kobj)
+	if (kobj) {
+		if (!kobj->state_initialized) {
+			printk(KERN_WARNING "kobject: '%s' (%p): is not "
+			       "initialized, yet kobject_put() is being "
+			       "called.\n", kobject_name(kobj), kobj);
+			WARN_ON(1);
+		}
 		kref_put(&kobj->kref, kobject_release);
+	}
 }
 
 static void dynamic_kobj_release(struct kobject *kobj)
@@ -745,12 +747,11 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
-	struct list_head *entry;
+	struct kobject *k;
 	struct kobject *ret = NULL;
 
 	spin_lock(&kset->list_lock);
-	list_for_each(entry, &kset->list) {
-		struct kobject *k = to_kobj(entry);
+	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
 			ret = kobject_get(k);
 			break;
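The kset_find_obj() rewrite is the stock list_for_each_entry() conversion: the macro folds in the container_of() step that the removed to_kobj() helper performed. A hedged sketch of the two equivalent loop shapes (locking elided, function names invented):

    #include <linux/kobject.h>
    #include <linux/list.h>

    /* open-coded form, as before the patch */
    static struct kobject *find_open_coded(struct kset *kset)
    {
        struct list_head *entry;

        list_for_each(entry, &kset->list) {
            /* what to_kobj() used to do */
            struct kobject *k = container_of(entry, struct kobject, entry);

            if (k->state_initialized)
                return k;
        }
        return NULL;
    }

    /* list_for_each_entry() form, as after the patch */
    static struct kobject *find_with_entry(struct kset *kset)
    {
        struct kobject *k;

        list_for_each_entry(k, &kset->list, entry)
            if (k->state_initialized)
                return k;
        return NULL;
    }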
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5b6d7f6956b9..9fb6b86cf6b1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -15,11 +15,13 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
-#include <linux/string.h>
-#include <linux/kobject.h>
 #include <net/sock.h>
 
 
diff --git a/lib/lmb.c b/lib/lmb.c
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -46,14 +46,13 @@ void lmb_dump_all(void)
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
-		u64 base2, u64 size2)
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+					u64 size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
-		u64 base2, u64 size2)
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -63,7 +62,7 @@ static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
 	return 0;
 }
 
-static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+static long lmb_regions_adjacent(struct lmb_region *rgn,
 				unsigned long r1, unsigned long r2)
 {
 	u64 base1 = rgn->region[r1].base;
@@ -74,7 +73,7 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
 	unsigned long i;
 
@@ -86,7 +85,7 @@ static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+static void lmb_coalesce_regions(struct lmb_region *rgn,
 				unsigned long r1, unsigned long r2)
 {
 	rgn->region[r1].size += rgn->region[r2].size;
@@ -118,7 +117,7 @@ void __init lmb_analyze(void)
 		lmb.memory.size += lmb.memory.region[i].size;
 }
 
-static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -182,7 +181,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 	return 0;
 }
 
-long __init lmb_add(u64 base, u64 size)
+long lmb_add(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &lmb.memory;
 
@@ -194,6 +193,55 @@ long __init lmb_add(u64 base, u64 size)
 
 }
 
+long lmb_remove(u64 base, u64 size)
+{
+	struct lmb_region *rgn = &(lmb.memory);
+	u64 rgnbegin, rgnend;
+	u64 end = base + size;
+	int i;
+
+	rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+	/* Find the region where (base, size) belongs to */
+	for (i = 0; i < rgn->cnt; i++) {
+		rgnbegin = rgn->region[i].base;
+		rgnend = rgnbegin + rgn->region[i].size;
+
+		if ((rgnbegin <= base) && (end <= rgnend))
+			break;
+	}
+
+	/* Didn't find the region */
+	if (i == rgn->cnt)
+		return -1;
+
+	/* Check to see if we are removing entire region */
+	if ((rgnbegin == base) && (rgnend == end)) {
+		lmb_remove_region(rgn, i);
+		return 0;
+	}
+
+	/* Check to see if region is matching at the front */
+	if (rgnbegin == base) {
+		rgn->region[i].base = end;
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/* Check to see if the region is matching at the end */
+	if (rgnend == end) {
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/*
+	 * We need to split the entry - adjust the current one to the
+	 * beginning of the hole and add the region after the hole.
+	 */
+	rgn->region[i].size = base - rgn->region[i].base;
+	return lmb_add_region(rgn, end, rgnend - end);
+}
+
 long __init lmb_reserve(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &lmb.reserved;
@@ -346,7 +394,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 		if (j < 0) {
 			/* this area isn't reserved, take it */
 			if (lmb_add_region(&lmb.reserved, base,
-					   size) < 0)
+					   lmb_align_up(size, align)) < 0)
 				return 0;
 			return base;
 		}
@@ -426,3 +474,36 @@ int __init lmb_is_reserved(u64 addr)
 	}
 	return 0;
 }
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+	int i;
+	u64 rstart, rend;
+
+	rstart = res->base;
+	rend = rstart + res->size - 1;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		u64 start = lmb.memory.region[i].base;
+		u64 end = start + lmb.memory.region[i].size - 1;
+
+		if (start > rend)
+			return -1;
+
+		if ((end >= rstart) && (start < rend)) {
+			/* adjust the request */
+			if (rstart < start)
+				rstart = start;
+			if (rend > end)
+				rend = end;
+			res->base = rstart;
+			res->size = rend - rstart + 1;
+			return 0;
+		}
+	}
+	return -1;
+}
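lmb_remove() above is plain interval subtraction with four outcomes: delete the whole region, trim its front, trim its tail, or split it around an interior hole. A self-contained userspace model of the case analysis (all names invented for the sketch):

    #include <stdio.h>
    #include <stdint.h>

    struct region { uint64_t base, size; };

    /* classify how removing [base, base+size) affects region r */
    static const char *remove_case(struct region r, uint64_t base,
                                   uint64_t size)
    {
        uint64_t end = base + size;
        uint64_t rend = r.base + r.size;

        if (base < r.base || end > rend)
            return "not contained in this region";
        if (r.base == base && rend == end)
            return "whole region: delete the entry";
        if (r.base == base)
            return "front match: advance base, shrink size";
        if (rend == end)
            return "tail match: shrink size";
        return "interior hole: split into two entries";
    }

    int main(void)
    {
        struct region r = { 0x1000, 0x4000 };   /* covers [0x1000, 0x5000) */

        printf("%s\n", remove_case(r, 0x1000, 0x4000));    /* whole */
        printf("%s\n", remove_case(r, 0x1000, 0x1000));    /* front */
        printf("%s\n", remove_case(r, 0x4000, 0x1000));    /* tail  */
        printf("%s\n", remove_case(r, 0x2000, 0x1000));    /* split */
        return 0;
    }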
diff --git a/lib/pcounter.c b/lib/pcounter.c
deleted file mode 100644
index 9b56807da93b..000000000000
--- a/lib/pcounter.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Define default pcounter functions
- * Note that often used pcounters use dedicated functions to get a speed increase.
- * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER)
- */
-
-#include <linux/module.h>
-#include <linux/pcounter.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-
-static void pcounter_dyn_add(struct pcounter *self, int inc)
-{
-	per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
-}
-
-static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
-{
-	return per_cpu_ptr(self->per_cpu_values, cpu)[0];
-}
-
-int pcounter_getval(const struct pcounter *self)
-{
-	int res = 0, cpu;
-
-	for_each_possible_cpu(cpu)
-		res += self->getval(self, cpu);
-
-	return res;
-}
-EXPORT_SYMBOL_GPL(pcounter_getval);
-
-int pcounter_alloc(struct pcounter *self)
-{
-	int rc = 0;
-	if (self->add == NULL) {
-		self->per_cpu_values = alloc_percpu(int);
-		if (self->per_cpu_values != NULL) {
-			self->add = pcounter_dyn_add;
-			self->getval = pcounter_dyn_getval;
-		} else
-			rc = 1;
-	}
-	return rc;
-}
-EXPORT_SYMBOL_GPL(pcounter_alloc);
-
-void pcounter_free(struct pcounter *self)
-{
-	if (self->per_cpu_values != NULL) {
-		free_percpu(self->per_cpu_values);
-		self->per_cpu_values = NULL;
-		self->getval = NULL;
-		self->add = NULL;
-	}
-}
-EXPORT_SYMBOL_GPL(pcounter_free);
-
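pcounter duplicated what lib/percpu_counter.c already provides; presumably its remaining users were converted, though the diff itself doesn't say. For reference, a hedged sketch of the surviving percpu_counter API (all names invented):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/percpu_counter.h>

    static struct percpu_counter nr_widgets;    /* invented counter */

    static int __init widgets_init(void)
    {
        int err = percpu_counter_init(&nr_widgets, 0);

        if (err)
            return err;
        percpu_counter_inc(&nr_widgets);    /* cheap per-CPU update */
        pr_info("widgets: %lld\n", percpu_counter_sum(&nr_widgets));
        percpu_counter_destroy(&nr_widgets);
        return 0;
    }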
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		}
 	}
 	if (ret == NULL)
-		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
 	rtp = &__get_cpu_var(radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
-		node = kmem_cache_alloc(radix_tree_node_cachep,
-				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
 {
 	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
 			sizeof(struct radix_tree_node), 0,
-			SLAB_PANIC, radix_tree_node_ctor);
+			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+			radix_tree_node_ctor);
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }
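The radix-tree change trades per-allocation __GFP_RECLAIMABLE tagging for marking the whole cache reclaimable once at creation time, so every node allocation is accounted the same way. The same pattern for a hypothetical driver cache (a sketch, all names invented):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct foo_node { unsigned long key; void *val; };  /* invented */

    static struct kmem_cache *foo_cachep;

    static int __init foo_cache_init(void)
    {
        /* one flag at creation replaces tagging every allocation */
        foo_cachep = kmem_cache_create("foo_node", sizeof(struct foo_node),
                                       0, SLAB_RECLAIM_ACCOUNT, NULL);
        return foo_cachep ? 0 : -ENOMEM;
    }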
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 3ea2db94d5b0..06d04cfa9339 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -45,7 +45,6 @@
 #include <linux/rslib.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
-#include <asm/semaphore.h>
 
 /* This list holds all currently allocated rs control structures */
 static LIST_HEAD (rslist);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901046c..b80c21100d78 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 	return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @to_buffer: transfer direction (non-zero == from an sg list to a
+ *	       buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+			     void *buf, size_t buflen, int to_buffer)
+{
+	struct scatterlist *sg;
+	size_t buf_off = 0;
+	int i;
+
+	WARN_ON(!irqs_disabled());
+
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page;
+		int n = 0;
+		unsigned int sg_off = sg->offset;
+		unsigned int sg_copy = sg->length;
+
+		if (sg_copy > buflen)
+			sg_copy = buflen;
+		buflen -= sg_copy;
+
+		while (sg_copy > 0) {
+			unsigned int page_copy;
+			void *p;
+
+			page_copy = PAGE_SIZE - sg_off;
+			if (page_copy > sg_copy)
+				page_copy = sg_copy;
+
+			page = nth_page(sg_page(sg), n);
+			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+			if (to_buffer)
+				memcpy(buf + buf_off, p + sg_off, page_copy);
+			else {
+				memcpy(p + sg_off, buf + buf_off, page_copy);
+				flush_kernel_dcache_page(page);
+			}
+
+			kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+			buf_off += page_copy;
+			sg_off += page_copy;
+			if (sg_off == PAGE_SIZE) {
+				sg_off = 0;
+				n++;
+			}
+			sg_copy -= page_copy;
+		}
+
+		if (!buflen)
+			break;
+	}
+
+	return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
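A hedged caller-side sketch of the new helpers (driver and function names invented). Note the WARN_ON(!irqs_disabled()) in sg_copy_buffer(): it maps pages through the KM_BIO_SRC_IRQ kmap_atomic slot, which is shared with interrupt context, so callers are expected to have interrupts off:

    #include <linux/irqflags.h>
    #include <linux/scatterlist.h>

    /* invented: stage a prebuilt command block into an sg list */
    static size_t stage_cmd(struct scatterlist *sgl, unsigned int nents,
                            void *cmd, size_t len)
    {
        unsigned long flags;
        size_t copied;

        local_irq_save(flags);      /* sg_copy_buffer() requires irqs off */
        copied = sg_copy_from_buffer(sgl, nents, cmd, len);
        local_irq_restore(flags);

        return copied;
    }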
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative, we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
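This deletion pairs with the new <linux/semaphore.h> include in lib/kernel_lock.c above: the per-arch sleeper code gives way to a single generic counting semaphore implementation (kernel/semaphore.c in this kernel generation; that location is inferred from the include, not stated in the diff). The API it provides, as a hedged sketch with invented names:

    #include <linux/errno.h>
    #include <linux/semaphore.h>

    static struct semaphore pool_sem;   /* invented example */

    static void pool_init(void)
    {
        sema_init(&pool_sem, 4);    /* allow four concurrent holders */
    }

    static int pool_take(void)
    {
        if (down_interruptible(&pool_sem))
            return -EINTR;          /* interrupted while sleeping */
        /* ... use one of the four pool slots ... */
        up(&pool_sem);
        return 0;
    }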