Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig | 9
-rw-r--r-- | lib/Kconfig.debug | 66
-rw-r--r-- | lib/Kconfig.kgdb | 58
-rw-r--r-- | lib/Makefile | 8
-rw-r--r-- | lib/bitmap.c | 174
-rw-r--r-- | lib/debugobjects.c | 890
-rw-r--r-- | lib/devres.c | 4
-rw-r--r-- | lib/div64.c | 35
-rw-r--r-- | lib/find_next_bit.c | 69
-rw-r--r-- | lib/idr.c | 12
-rw-r--r-- | lib/inflate.c | 3
-rw-r--r-- | lib/iomap.c | 2
-rw-r--r-- | lib/kernel_lock.c | 1
-rw-r--r-- | lib/klist.c | 235
-rw-r--r-- | lib/kobject.c | 63
-rw-r--r-- | lib/kobject_uevent.c | 16
-rw-r--r-- | lib/lmb.c | 509
-rw-r--r-- | lib/pcounter.c | 58
-rw-r--r-- | lib/percpu_counter.c | 1
-rw-r--r-- | lib/proportions.c | 38
-rw-r--r-- | lib/radix-tree.c | 9
-rw-r--r-- | lib/ratelimit.c | 51
-rw-r--r-- | lib/reed_solomon/reed_solomon.c | 1
-rw-r--r-- | lib/scatterlist.c | 102
-rw-r--r-- | lib/semaphore-sleepers.c | 176
-rw-r--r-- | lib/string.c | 27
-rw-r--r-- | lib/swiotlb.c | 149
27 files changed, 2253 insertions, 513 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index ba3d104994d9..8cc8e8722a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines" | |||
7 | config BITREVERSE | 7 | config BITREVERSE |
8 | tristate | 8 | tristate |
9 | 9 | ||
10 | config GENERIC_FIND_FIRST_BIT | ||
11 | def_bool n | ||
12 | |||
13 | config GENERIC_FIND_NEXT_BIT | ||
14 | def_bool n | ||
15 | |||
10 | config CRC_CCITT | 16 | config CRC_CCITT |
11 | tristate "CRC-CCITT functions" | 17 | tristate "CRC-CCITT functions" |
12 | help | 18 | help |
@@ -141,4 +147,7 @@ config HAS_DMA | |||
141 | config CHECK_SIGNATURE | 147 | config CHECK_SIGNATURE |
142 | bool | 148 | bool |
143 | 149 | ||
150 | config HAVE_LMB | ||
151 | boolean | ||
152 | |||
144 | endmenu | 153 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 78955eb6bd94..d2099f41aa1e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -25,6 +25,17 @@ config ENABLE_MUST_CHECK | |||
25 | suppress the "warning: ignoring return value of 'foo', declared with | 25 | suppress the "warning: ignoring return value of 'foo', declared with |
26 | attribute warn_unused_result" messages. | 26 | attribute warn_unused_result" messages. |
27 | 27 | ||
28 | config FRAME_WARN | ||
29 | int "Warn for stack frames larger than (needs gcc 4.4)" | ||
30 | range 0 8192 | ||
31 | default 1024 if !64BIT | ||
32 | default 2048 if 64BIT | ||
33 | help | ||
34 | Tell gcc to warn at build time for stack frames larger than this. | ||
35 | Setting this too low will cause a lot of warnings. | ||
36 | Setting it to 0 disables the warning. | ||
37 | Requires gcc 4.4 | ||
38 | |||
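As a rough illustration of what FRAME_WARN catches (hypothetical code, not part of this patch): with the default of 1024, gcc 4.4+ would warn about a frame like the one below, since the whole buffer lives on the stack.

#include <linux/string.h>

/* Illustration only: a 2 KB local buffer exceeds the 1024-byte default. */
static int fill_scratch_buffer(void)
{
        char scratch[2048];

        memset(scratch, 0, sizeof(scratch));
        return scratch[0];
}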
28 | config MAGIC_SYSRQ | 39 | config MAGIC_SYSRQ |
29 | bool "Magic SysRq key" | 40 | bool "Magic SysRq key" |
30 | depends on !UML | 41 | depends on !UML |
@@ -183,6 +194,37 @@ config TIMER_STATS | |||
183 | (it defaults to deactivated on bootup and will only be activated | 194 | (it defaults to deactivated on bootup and will only be activated |
184 | if some application like powertop activates it explicitly). | 195 | if some application like powertop activates it explicitly). |
185 | 196 | ||
197 | config DEBUG_OBJECTS | ||
198 | bool "Debug object operations" | ||
199 | depends on DEBUG_KERNEL | ||
200 | help | ||
201 | If you say Y here, additional code will be inserted into the | ||
202 | kernel to track the life time of various objects and validate | ||
203 | the operations on those objects. | ||
204 | |||
205 | config DEBUG_OBJECTS_SELFTEST | ||
206 | bool "Debug objects selftest" | ||
207 | depends on DEBUG_OBJECTS | ||
208 | help | ||
209 | This enables the selftest of the object debug code. | ||
210 | |||
211 | config DEBUG_OBJECTS_FREE | ||
212 | bool "Debug objects in freed memory" | ||
213 | depends on DEBUG_OBJECTS | ||
214 | help | ||
215 | This enables checks on whether a kmalloc/vmalloc free operation |||
216 | frees an area which contains an object that has not been |||
217 | deactivated properly. This can make kmalloc/kfree-intensive |||
218 | workloads much slower. |||
219 | |||
220 | config DEBUG_OBJECTS_TIMERS | ||
221 | bool "Debug timer objects" | ||
222 | depends on DEBUG_OBJECTS | ||
223 | help | ||
224 | If you say Y here, additional code will be inserted into the | ||
225 | timer routines to track the life time of timer objects and | ||
226 | validate the timer operations. | ||
227 | |||
186 | config DEBUG_SLAB | 228 | config DEBUG_SLAB |
187 | bool "Debug slab memory allocations" | 229 | bool "Debug slab memory allocations" |
188 | depends on DEBUG_KERNEL && SLAB | 230 | depends on DEBUG_KERNEL && SLAB |
@@ -211,7 +253,7 @@ config SLUB_DEBUG_ON | |||
211 | config SLUB_STATS | 253 | config SLUB_STATS |
212 | default n | 254 | default n |
213 | bool "Enable SLUB performance statistics" | 255 | bool "Enable SLUB performance statistics" |
214 | depends on SLUB | 256 | depends on SLUB && SLUB_DEBUG && SYSFS |
215 | help | 257 | help |
216 | SLUB statistics are useful to debug SLUBs allocation behavior in | 258 | SLUB statistics are useful to debug SLUBs allocation behavior in |
217 | order to find ways to optimize the allocator. This should never be | 259 | order to find ways to optimize the allocator. This should never be |
@@ -265,16 +307,6 @@ config DEBUG_MUTEXES | |||
265 | This feature allows mutex semantics violations to be detected and | 307 | This feature allows mutex semantics violations to be detected and |
266 | reported. | 308 | reported. |
267 | 309 | ||
268 | config DEBUG_SEMAPHORE | ||
269 | bool "Semaphore debugging" | ||
270 | depends on DEBUG_KERNEL | ||
271 | depends on ALPHA || FRV | ||
272 | default n | ||
273 | help | ||
274 | If you say Y here then semaphore processing will issue lots of | ||
275 | verbose debugging messages. If you suspect a semaphore problem or a | ||
276 | kernel hacker asks for this option then say Y. Otherwise say N. | ||
277 | |||
278 | config DEBUG_LOCK_ALLOC | 310 | config DEBUG_LOCK_ALLOC |
279 | bool "Lock debugging: detect incorrect freeing of live locks" | 311 | bool "Lock debugging: detect incorrect freeing of live locks" |
280 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 312 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
@@ -437,6 +469,16 @@ config DEBUG_VM | |||
437 | 469 | ||
438 | If unsure, say N. | 470 | If unsure, say N. |
439 | 471 | ||
472 | config DEBUG_WRITECOUNT | ||
473 | bool "Debug filesystem writers count" | ||
474 | depends on DEBUG_KERNEL | ||
475 | help | ||
476 | Enable this to catch wrong use of the writers count in struct | ||
477 | vfsmount. This will increase the size of each file struct by | ||
478 | 32 bits. | ||
479 | |||
480 | If unsure, say N. | ||
481 | |||
440 | config DEBUG_LIST | 482 | config DEBUG_LIST |
441 | bool "Debug linked list manipulation" | 483 | bool "Debug linked list manipulation" |
442 | depends on DEBUG_KERNEL | 484 | depends on DEBUG_KERNEL |
@@ -633,3 +675,5 @@ config FIREWIRE_OHCI_REMOTE_DMA | |||
633 | If unsure, say N. | 675 | If unsure, say N. |
634 | 676 | ||
635 | source "samples/Kconfig" | 677 | source "samples/Kconfig" |
678 | |||
679 | source "lib/Kconfig.kgdb" | ||
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000000..f2e01ac5ab09
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@ | |||
1 | |||
2 | menuconfig KGDB | ||
3 | bool "KGDB: kernel debugging with remote gdb" | ||
4 | select FRAME_POINTER | ||
5 | depends on HAVE_ARCH_KGDB | ||
6 | depends on DEBUG_KERNEL && EXPERIMENTAL | ||
7 | help | ||
8 | If you say Y here, it will be possible to remotely debug the | ||
9 | kernel using gdb. Documentation of the kernel debugger is available |||
10 | at http://kgdb.sourceforge.net as well as in DocBook form | ||
11 | in Documentation/DocBook/. If unsure, say N. | ||
12 | |||
13 | config HAVE_ARCH_KGDB_SHADOW_INFO | ||
14 | bool | ||
15 | |||
16 | config HAVE_ARCH_KGDB | ||
17 | bool | ||
18 | |||
19 | config KGDB_SERIAL_CONSOLE | ||
20 | tristate "KGDB: use kgdb over the serial console" | ||
21 | depends on KGDB | ||
22 | select CONSOLE_POLL | ||
23 | select MAGIC_SYSRQ | ||
24 | default y | ||
25 | help | ||
26 | Share a serial console with kgdb. Sysrq-g must be used | ||
27 | to break in initially. | ||
28 | |||
29 | config KGDB_TESTS | ||
30 | bool "KGDB: internal test suite" | ||
31 | depends on KGDB | ||
32 | default n | ||
33 | help | ||
34 | This is a kgdb I/O module specifically designed to test | ||
35 | kgdb's internal functions. This kgdb I/O module is | ||
36 | intended for the development of new kgdb stubs |||
37 | as well as regression testing the kgdb internals. | ||
38 | See drivers/misc/kgdbts.c for the details about |||
39 | the tests. The most basic use of this I/O module is to boot |||
40 | a kernel with the boot arguments "kgdbwait kgdbts=V1F100". |||
41 | |||
42 | config KGDB_TESTS_ON_BOOT | ||
43 | bool "KGDB: Run tests on boot" | ||
44 | depends on KGDB_TESTS | ||
45 | default n | ||
46 | help | ||
47 | Run the kgdb tests on boot up automatically without the need | ||
48 | to pass in a kernel parameter | ||
49 | |||
50 | config KGDB_TESTS_BOOT_STRING | ||
51 | string "KGDB: which internal kgdb tests to run" | ||
52 | depends on KGDB_TESTS_ON_BOOT | ||
53 | default "V1F100" | ||
54 | help | ||
55 | This is the command string to send to the kgdb test suite on |||
56 | boot. See drivers/misc/kgdbts.c for detailed |||
57 | information about other strings you could use beyond the | ||
58 | default of V1F100. | ||
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..74b0cfb1fcc3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
6 | rbtree.o radix-tree.o dump_stack.o \ | 6 | rbtree.o radix-tree.o dump_stack.o \ |
7 | idr.o int_sqrt.o extable.o prio_tree.o \ | 7 | idr.o int_sqrt.o extable.o prio_tree.o \ |
8 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 8 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
9 | proportions.o prio_heap.o | 9 | proportions.o prio_heap.o ratelimit.o |
10 | 10 | ||
11 | lib-$(CONFIG_MMU) += ioremap.o | 11 | lib-$(CONFIG_MMU) += ioremap.o |
12 | lib-$(CONFIG_SMP) += cpumask.o | 12 | lib-$(CONFIG_SMP) += cpumask.o |
@@ -29,13 +29,14 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | |||
29 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | 29 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o |
30 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 30 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
31 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 31 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
32 | lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o | 32 | lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o |
33 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | 33 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o |
34 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 34 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
35 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 35 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
36 | obj-$(CONFIG_PLIST) += plist.o | 36 | obj-$(CONFIG_PLIST) += plist.o |
37 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | 37 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o |
38 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o | 38 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o |
39 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o | ||
39 | 40 | ||
40 | ifneq ($(CONFIG_HAVE_DEC_LOCK),y) | 41 | ifneq ($(CONFIG_HAVE_DEC_LOCK),y) |
41 | lib-y += dec_and_lock.o | 42 | lib-y += dec_and_lock.o |
@@ -61,7 +62,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o | |||
61 | obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o | 62 | obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o |
62 | obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o | 63 | obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o |
63 | obj-$(CONFIG_SMP) += percpu_counter.o | 64 | obj-$(CONFIG_SMP) += percpu_counter.o |
64 | obj-$(CONFIG_SMP) += pcounter.o | ||
65 | obj-$(CONFIG_AUDIT_GENERIC) += audit.o | 65 | obj-$(CONFIG_AUDIT_GENERIC) += audit.o |
66 | 66 | ||
67 | obj-$(CONFIG_SWIOTLB) += swiotlb.o | 67 | obj-$(CONFIG_SWIOTLB) += swiotlb.o |
@@ -70,6 +70,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o | |||
70 | 70 | ||
71 | lib-$(CONFIG_GENERIC_BUG) += bug.o | 71 | lib-$(CONFIG_GENERIC_BUG) += bug.o |
72 | 72 | ||
73 | obj-$(CONFIG_HAVE_LMB) += lmb.o | ||
74 | |||
73 | hostprogs-y := gen_crc32table | 75 | hostprogs-y := gen_crc32table |
74 | clean-files := crc32table.h | 76 | clean-files := crc32table.h |
75 | 77 | ||
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2c9242e3fed0..c4cb48f77f0c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,22 @@ int bitmap_scnprintf(char *buf, unsigned int buflen, | |||
316 | EXPORT_SYMBOL(bitmap_scnprintf); | 316 | EXPORT_SYMBOL(bitmap_scnprintf); |
317 | 317 | ||
318 | /** | 318 | /** |
319 | * bitmap_scnprintf_len - return buffer length needed to convert | ||
320 | * bitmap to an ASCII hex string. | ||
321 | * @len: number of bits to be converted | ||
322 | */ | ||
323 | int bitmap_scnprintf_len(unsigned int len) | ||
324 | { | ||
325 | /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */ | ||
326 | int bitslen = ALIGN(len, CHUNKSZ); | ||
327 | int wordlen = CHUNKSZ / 4; | ||
328 | int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char); | ||
329 | |||
330 | return buflen; | ||
331 | } | ||
332 | EXPORT_SYMBOL(bitmap_scnprintf_len); | ||
333 | |||
334 | /** | ||
319 | * __bitmap_parse - convert an ASCII hex string into a bitmap. | 335 | * __bitmap_parse - convert an ASCII hex string into a bitmap. |
320 | * @buf: pointer to buffer containing string. | 336 | * @buf: pointer to buffer containing string. |
321 | * @buflen: buffer size in bytes. If string is smaller than this | 337 | * @buflen: buffer size in bytes. If string is smaller than this |
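As a usage sketch for the bitmap_scnprintf_len() helper added in the hunk above (the caller below is hypothetical, not from this patch), the returned length sizes the buffer that is then handed to bitmap_scnprintf():

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Hypothetical caller: render a 128-bit mask as an ASCII hex string. */
static char *mask_to_string(const unsigned long *mask)
{
        int len = bitmap_scnprintf_len(128);
        char *buf = kmalloc(len, GFP_KERNEL);

        if (buf)
                bitmap_scnprintf(buf, len, mask, 128);
        return buf;
}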
@@ -698,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old, | |||
698 | } | 714 | } |
699 | EXPORT_SYMBOL(bitmap_bitremap); | 715 | EXPORT_SYMBOL(bitmap_bitremap); |
700 | 716 | ||
717 | /** | ||
718 | * bitmap_onto - translate one bitmap relative to another | ||
719 | * @dst: resulting translated bitmap | ||
720 | * @orig: original untranslated bitmap | ||
721 | * @relmap: bitmap relative to which translated | ||
722 | * @bits: number of bits in each of these bitmaps | ||
723 | * | ||
724 | * Set the n-th bit of @dst iff there exists some m such that the | ||
725 | * n-th bit of @relmap is set, the m-th bit of @orig is set, and | ||
726 | * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. | ||
727 | * (If you understood the previous sentence the first time you |||
728 | * read it, you're overqualified for your current job.) | ||
729 | * | ||
730 | * In other words, @orig is mapped onto (surjectively) @dst, | ||
731 | * using the map { <n, m> | the n-th bit of @relmap is the |||
732 | * m-th set bit of @relmap }. | ||
733 | * | ||
734 | * Any set bits in @orig above bit number W, where W is the | ||
735 | * weight of (number of set bits in) @relmap, are mapped nowhere. |||
736 | * In particular, if for all bits m set in @orig, m >= W, then | ||
737 | * @dst will end up empty. In situations where the possibility | ||
738 | * of such an empty result is not desired, one way to avoid it is | ||
739 | * to use the bitmap_fold() operator, below, to first fold the | ||
740 | * @orig bitmap over itself so that all its set bits x are in the | ||
741 | * range 0 <= x < W. The bitmap_fold() operator does this by | ||
742 | * setting the bit (m % W) in @dst, for each bit (m) set in @orig. | ||
743 | * | ||
744 | * Example [1] for bitmap_onto(): | ||
745 | * Let's say @relmap has bits 30-39 set, and @orig has bits | ||
746 | * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, | ||
747 | * @dst will have bits 31, 33, 35, 37 and 39 set. | ||
748 | * | ||
749 | * When bit 0 is set in @orig, it means turn on the bit in | ||
750 | * @dst corresponding to whatever is the first bit (if any) | ||
751 | * that is turned on in @relmap. Since bit 0 was off in the | ||
752 | * above example, we leave off that bit (bit 30) in @dst. | ||
753 | * | ||
754 | * When bit 1 is set in @orig (as in the above example), it | ||
755 | * means turn on the bit in @dst corresponding to whatever | ||
756 | * is the second bit that is turned on in @relmap. The second | ||
757 | * bit in @relmap that was turned on in the above example was | ||
758 | * bit 31, so we turned on bit 31 in @dst. | ||
759 | * | ||
760 | * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, | ||
761 | * because they were the 4th, 6th, 8th and 10th set bits | ||
762 | * set in @relmap, and the 4th, 6th, 8th and 10th bits of | ||
763 | * @orig (i.e. bits 3, 5, 7 and 9) were also set. | ||
764 | * | ||
765 | * When bit 11 is set in @orig, it means turn on the bit in | ||
766 | * @dst corresponding to whatever is the twelfth bit that is |||
767 | * turned on in @relmap. In the above example, there were | ||
768 | * only ten bits turned on in @relmap (30..39), so the fact |||
769 | * that bit 11 was set in @orig had no effect on @dst. |||
770 | * | ||
771 | * Example [2] for bitmap_fold() + bitmap_onto(): | ||
772 | * Let's say @relmap has these ten bits set: | ||
773 | * 40 41 42 43 45 48 53 61 74 95 | ||
774 | * (for the curious, that's 40 plus the first ten terms of the | ||
775 | * Fibonacci sequence.) | ||
776 | * | ||
777 | * Further, let's say we use the following code, invoking |||
778 | * bitmap_fold() then bitmap_onto(), as suggested above, to |||
779 | * avoid the possibility of an empty @dst result: |||
780 | * | ||
781 | * unsigned long *tmp; // a temporary bitmap's bits | ||
782 | * | ||
783 | * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); | ||
784 | * bitmap_onto(dst, tmp, relmap, bits); | ||
785 | * | ||
786 | * Then this table shows what various values of @dst would be, for | ||
787 | * various @orig's. I list the zero-based positions of each set bit. | ||
788 | * The tmp column shows the intermediate result, as computed by | ||
789 | * using bitmap_fold() to fold the @orig bitmap modulo ten | ||
790 | * (the weight of @relmap). | ||
791 | * | ||
792 | * @orig tmp @dst | ||
793 | * 0 0 40 | ||
794 | * 1 1 41 | ||
795 | * 9 9 95 | ||
796 | * 10 0 40 (*) | ||
797 | * 1 3 5 7 1 3 5 7 41 43 48 61 | ||
798 | * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 | ||
799 | * 0 9 18 27 0 9 8 7 40 61 74 95 | ||
800 | * 0 10 20 30 0 40 | ||
801 | * 0 11 22 33 0 1 2 3 40 41 42 43 | ||
802 | * 0 12 24 36 0 2 4 6 40 42 45 53 | ||
803 | * 78 102 211 1 2 8 41 42 74 (*) | ||
804 | * | ||
805 | * (*) For these marked lines, if we hadn't first done bitmap_fold() | ||
806 | * into tmp, then the @dst result would have been empty. | ||
807 | * | ||
808 | * If either of @orig or @relmap is empty (no set bits), then @dst | ||
809 | * will be returned empty. | ||
810 | * | ||
811 | * If (as explained above) the only set bits in @orig are in positions | ||
812 | * m where m >= W, (where W is the weight of @relmap) then @dst will | ||
813 | * once again be returned empty. | ||
814 | * | ||
815 | * All bits in @dst not set by the above rule are cleared. | ||
816 | */ | ||
817 | void bitmap_onto(unsigned long *dst, const unsigned long *orig, | ||
818 | const unsigned long *relmap, int bits) | ||
819 | { | ||
820 | int n, m; /* same meaning as in above comment */ | ||
821 | |||
822 | if (dst == orig) /* following doesn't handle inplace mappings */ | ||
823 | return; | ||
824 | bitmap_zero(dst, bits); | ||
825 | |||
826 | /* | ||
827 | * The following code is a more efficient, but less | ||
828 | * obvious, equivalent to the loop: | ||
829 | * for (m = 0; m < bitmap_weight(relmap, bits); m++) { | ||
830 | * n = bitmap_ord_to_pos(orig, m, bits); | ||
831 | * if (test_bit(m, orig)) | ||
832 | * set_bit(n, dst); | ||
833 | * } | ||
834 | */ | ||
835 | |||
836 | m = 0; | ||
837 | for (n = find_first_bit(relmap, bits); | ||
838 | n < bits; | ||
839 | n = find_next_bit(relmap, bits, n + 1)) { | ||
840 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ | ||
841 | if (test_bit(m, orig)) | ||
842 | set_bit(n, dst); | ||
843 | m++; | ||
844 | } | ||
845 | } | ||
846 | EXPORT_SYMBOL(bitmap_onto); | ||
847 | |||
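A minimal sketch of Example [1] from the kernel-doc above (hypothetical function, with a bit width of 128 chosen arbitrarily):

#include <linux/bitmap.h>

static void bitmap_onto_example(void)
{
        DECLARE_BITMAP(orig, 128);
        DECLARE_BITMAP(relmap, 128);
        DECLARE_BITMAP(dst, 128);
        int i;

        bitmap_zero(orig, 128);
        bitmap_zero(relmap, 128);

        for (i = 30; i <= 39; i++)      /* @relmap: bits 30..39 */
                set_bit(i, relmap);
        for (i = 1; i <= 11; i += 2)    /* @orig: bits 1 3 5 7 9 11 */
                set_bit(i, orig);

        bitmap_onto(dst, orig, relmap, 128);
        /* @dst now has bits 31 33 35 37 39 set, as described above */
}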
848 | /** | ||
849 | * bitmap_fold - fold larger bitmap into smaller, modulo specified size | ||
850 | * @dst: resulting smaller bitmap | ||
851 | * @orig: original larger bitmap | ||
852 | * @sz: specified size | ||
853 | * @bits: number of bits in each of these bitmaps | ||
854 | * | ||
855 | * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. | ||
856 | * Clear all other bits in @dst. See further the comment and | ||
857 | * Example [2] for bitmap_onto() for why and how to use this. | ||
858 | */ | ||
859 | void bitmap_fold(unsigned long *dst, const unsigned long *orig, | ||
860 | int sz, int bits) | ||
861 | { | ||
862 | int oldbit; | ||
863 | |||
864 | if (dst == orig) /* following doesn't handle inplace mappings */ | ||
865 | return; | ||
866 | bitmap_zero(dst, bits); | ||
867 | |||
868 | for (oldbit = find_first_bit(orig, bits); | ||
869 | oldbit < bits; | ||
870 | oldbit = find_next_bit(orig, bits, oldbit + 1)) | ||
871 | set_bit(oldbit % sz, dst); | ||
872 | } | ||
873 | EXPORT_SYMBOL(bitmap_fold); | ||
874 | |||
701 | /* | 875 | /* |
702 | * Common code for bitmap_*_region() routines. | 876 | * Common code for bitmap_*_region() routines. |
703 | * bitmap: array of unsigned longs corresponding to the bitmap | 877 | * bitmap: array of unsigned longs corresponding to the bitmap |
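And a corresponding sketch for bitmap_fold(), reproducing the last row of the table in Example [2] above (again hypothetical, with an arbitrary 256-bit width):

#include <linux/bitmap.h>

static void bitmap_fold_example(void)
{
        DECLARE_BITMAP(orig, 256);
        DECLARE_BITMAP(tmp, 256);

        bitmap_zero(orig, 256);
        bitmap_zero(tmp, 256);

        set_bit(78, orig);
        set_bit(102, orig);
        set_bit(211, orig);

        /* fold modulo 10, the weight of the example @relmap */
        bitmap_fold(tmp, orig, 10, 256);
        /* tmp now has bits 1, 2 and 8 set (211, 102 and 78 mod 10) */
}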
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644
index 000000000000..a76a5e122ae1
--- /dev/null
+++ b/lib/debugobjects.c
@@ -0,0 +1,890 @@ | |||
1 | /* | ||
2 | * Generic infrastructure for lifetime debugging of objects. | ||
3 | * | ||
4 | * Started by Thomas Gleixner | ||
5 | * | ||
6 | * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> | ||
7 | * | ||
8 | * For licencing details see kernel-base/COPYING | ||
9 | */ | ||
10 | #include <linux/debugobjects.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/hash.h> | ||
15 | |||
16 | #define ODEBUG_HASH_BITS 14 | ||
17 | #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) | ||
18 | |||
19 | #define ODEBUG_POOL_SIZE 512 | ||
20 | #define ODEBUG_POOL_MIN_LEVEL 256 | ||
21 | |||
22 | #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT | ||
23 | #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) | ||
24 | #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) | ||
25 | |||
26 | struct debug_bucket { | ||
27 | struct hlist_head list; | ||
28 | spinlock_t lock; | ||
29 | }; | ||
30 | |||
31 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; | ||
32 | |||
33 | static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE]; | ||
34 | |||
35 | static DEFINE_SPINLOCK(pool_lock); | ||
36 | |||
37 | static HLIST_HEAD(obj_pool); | ||
38 | |||
39 | static int obj_pool_min_free = ODEBUG_POOL_SIZE; | ||
40 | static int obj_pool_free = ODEBUG_POOL_SIZE; | ||
41 | static int obj_pool_used; | ||
42 | static int obj_pool_max_used; | ||
43 | static struct kmem_cache *obj_cache; | ||
44 | |||
45 | static int debug_objects_maxchain __read_mostly; | ||
46 | static int debug_objects_fixups __read_mostly; | ||
47 | static int debug_objects_warnings __read_mostly; | ||
48 | static int debug_objects_enabled __read_mostly; | ||
49 | static struct debug_obj_descr *descr_test __read_mostly; | ||
50 | |||
51 | static int __init enable_object_debug(char *str) | ||
52 | { | ||
53 | debug_objects_enabled = 1; | ||
54 | return 0; | ||
55 | } | ||
56 | early_param("debug_objects", enable_object_debug); | ||
57 | |||
58 | static const char *obj_states[ODEBUG_STATE_MAX] = { | ||
59 | [ODEBUG_STATE_NONE] = "none", | ||
60 | [ODEBUG_STATE_INIT] = "initialized", | ||
61 | [ODEBUG_STATE_INACTIVE] = "inactive", | ||
62 | [ODEBUG_STATE_ACTIVE] = "active", | ||
63 | [ODEBUG_STATE_DESTROYED] = "destroyed", | ||
64 | [ODEBUG_STATE_NOTAVAILABLE] = "not available", | ||
65 | }; | ||
66 | |||
67 | static int fill_pool(void) | ||
68 | { | ||
69 | gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; | ||
70 | struct debug_obj *new; | ||
71 | |||
72 | if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) | ||
73 | return obj_pool_free; | ||
74 | |||
75 | if (unlikely(!obj_cache)) | ||
76 | return obj_pool_free; | ||
77 | |||
78 | while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { | ||
79 | |||
80 | new = kmem_cache_zalloc(obj_cache, gfp); | ||
81 | if (!new) | ||
82 | return obj_pool_free; | ||
83 | |||
84 | spin_lock(&pool_lock); | ||
85 | hlist_add_head(&new->node, &obj_pool); | ||
86 | obj_pool_free++; | ||
87 | spin_unlock(&pool_lock); | ||
88 | } | ||
89 | return obj_pool_free; | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Lookup an object in the hash bucket. | ||
94 | */ | ||
95 | static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) | ||
96 | { | ||
97 | struct hlist_node *node; | ||
98 | struct debug_obj *obj; | ||
99 | int cnt = 0; | ||
100 | |||
101 | hlist_for_each_entry(obj, node, &b->list, node) { | ||
102 | cnt++; | ||
103 | if (obj->object == addr) | ||
104 | return obj; | ||
105 | } | ||
106 | if (cnt > debug_objects_maxchain) | ||
107 | debug_objects_maxchain = cnt; | ||
108 | |||
109 | return NULL; | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Allocate a new object. If the pool is empty and no refill possible, | ||
114 | * switch off the debugger. | ||
115 | */ | ||
116 | static struct debug_obj * | ||
117 | alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | ||
118 | { | ||
119 | struct debug_obj *obj = NULL; | ||
120 | int retry = 0; | ||
121 | |||
122 | repeat: | ||
123 | spin_lock(&pool_lock); | ||
124 | if (obj_pool.first) { | ||
125 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | ||
126 | |||
127 | obj->object = addr; | ||
128 | obj->descr = descr; | ||
129 | obj->state = ODEBUG_STATE_NONE; | ||
130 | hlist_del(&obj->node); | ||
131 | |||
132 | hlist_add_head(&obj->node, &b->list); | ||
133 | |||
134 | obj_pool_used++; | ||
135 | if (obj_pool_used > obj_pool_max_used) | ||
136 | obj_pool_max_used = obj_pool_used; | ||
137 | |||
138 | obj_pool_free--; | ||
139 | if (obj_pool_free < obj_pool_min_free) | ||
140 | obj_pool_min_free = obj_pool_free; | ||
141 | } | ||
142 | spin_unlock(&pool_lock); | ||
143 | |||
144 | if (fill_pool() && !obj && !retry++) | ||
145 | goto repeat; | ||
146 | |||
147 | return obj; | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Put the object back into the pool or give it back to kmem_cache: | ||
152 | */ | ||
153 | static void free_object(struct debug_obj *obj) | ||
154 | { | ||
155 | unsigned long idx = (unsigned long)(obj - obj_static_pool); | ||
156 | |||
157 | if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { | ||
158 | spin_lock(&pool_lock); | ||
159 | hlist_add_head(&obj->node, &obj_pool); | ||
160 | obj_pool_free++; | ||
161 | obj_pool_used--; | ||
162 | spin_unlock(&pool_lock); | ||
163 | } else { | ||
164 | spin_lock(&pool_lock); | ||
165 | obj_pool_used--; | ||
166 | spin_unlock(&pool_lock); | ||
167 | kmem_cache_free(obj_cache, obj); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * We ran out of memory. That means we probably have tons of objects |||
173 | * allocated. | ||
174 | */ | ||
175 | static void debug_objects_oom(void) | ||
176 | { | ||
177 | struct debug_bucket *db = obj_hash; | ||
178 | struct hlist_node *node, *tmp; | ||
179 | struct debug_obj *obj; | ||
180 | unsigned long flags; | ||
181 | int i; | ||
182 | |||
183 | printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); | ||
184 | |||
185 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | ||
186 | spin_lock_irqsave(&db->lock, flags); | ||
187 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | ||
188 | hlist_del(&obj->node); | ||
189 | free_object(obj); | ||
190 | } | ||
191 | spin_unlock_irqrestore(&db->lock, flags); | ||
192 | } | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * We use the pfn of the address for the hash. That way we can check | ||
197 | * for freed objects simply by checking the affected bucket. | ||
198 | */ | ||
199 | static struct debug_bucket *get_bucket(unsigned long addr) | ||
200 | { | ||
201 | unsigned long hash; | ||
202 | |||
203 | hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); | ||
204 | return &obj_hash[hash]; | ||
205 | } | ||
206 | |||
207 | static void debug_print_object(struct debug_obj *obj, char *msg) | ||
208 | { | ||
209 | static int limit; | ||
210 | |||
211 | if (limit < 5 && obj->descr != descr_test) { | ||
212 | limit++; | ||
213 | printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | ||
214 | obj_states[obj->state], obj->descr->name); | ||
215 | WARN_ON(1); | ||
216 | } | ||
217 | debug_objects_warnings++; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * Try to repair the damage, so we have a better chance to get useful | ||
222 | * debug output. | ||
223 | */ | ||
224 | static void | ||
225 | debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), | ||
226 | void * addr, enum debug_obj_state state) | ||
227 | { | ||
228 | if (fixup) | ||
229 | debug_objects_fixups += fixup(addr, state); | ||
230 | } | ||
231 | |||
232 | static void debug_object_is_on_stack(void *addr, int onstack) | ||
233 | { | ||
234 | void *stack = current->stack; | ||
235 | int is_on_stack; | ||
236 | static int limit; | ||
237 | |||
238 | if (limit > 4) | ||
239 | return; | ||
240 | |||
241 | is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); | ||
242 | |||
243 | if (is_on_stack == onstack) | ||
244 | return; | ||
245 | |||
246 | limit++; | ||
247 | if (is_on_stack) | ||
248 | printk(KERN_WARNING | ||
249 | "ODEBUG: object is on stack, but not annotated\n"); | ||
250 | else | ||
251 | printk(KERN_WARNING | ||
252 | "ODEBUG: object is not on stack, but annotated\n"); | ||
253 | WARN_ON(1); | ||
254 | } | ||
255 | |||
256 | static void | ||
257 | __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | ||
258 | { | ||
259 | enum debug_obj_state state; | ||
260 | struct debug_bucket *db; | ||
261 | struct debug_obj *obj; | ||
262 | unsigned long flags; | ||
263 | |||
264 | db = get_bucket((unsigned long) addr); | ||
265 | |||
266 | spin_lock_irqsave(&db->lock, flags); | ||
267 | |||
268 | obj = lookup_object(addr, db); | ||
269 | if (!obj) { | ||
270 | obj = alloc_object(addr, db, descr); | ||
271 | if (!obj) { | ||
272 | debug_objects_enabled = 0; | ||
273 | spin_unlock_irqrestore(&db->lock, flags); | ||
274 | debug_objects_oom(); | ||
275 | return; | ||
276 | } | ||
277 | debug_object_is_on_stack(addr, onstack); | ||
278 | } | ||
279 | |||
280 | switch (obj->state) { | ||
281 | case ODEBUG_STATE_NONE: | ||
282 | case ODEBUG_STATE_INIT: | ||
283 | case ODEBUG_STATE_INACTIVE: | ||
284 | obj->state = ODEBUG_STATE_INIT; | ||
285 | break; | ||
286 | |||
287 | case ODEBUG_STATE_ACTIVE: | ||
288 | debug_print_object(obj, "init"); | ||
289 | state = obj->state; | ||
290 | spin_unlock_irqrestore(&db->lock, flags); | ||
291 | debug_object_fixup(descr->fixup_init, addr, state); | ||
292 | return; | ||
293 | |||
294 | case ODEBUG_STATE_DESTROYED: | ||
295 | debug_print_object(obj, "init"); | ||
296 | break; | ||
297 | default: | ||
298 | break; | ||
299 | } | ||
300 | |||
301 | spin_unlock_irqrestore(&db->lock, flags); | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * debug_object_init - debug checks when an object is initialized | ||
306 | * @addr: address of the object | ||
307 | * @descr: pointer to an object specific debug description structure | ||
308 | */ | ||
309 | void debug_object_init(void *addr, struct debug_obj_descr *descr) | ||
310 | { | ||
311 | if (!debug_objects_enabled) | ||
312 | return; | ||
313 | |||
314 | __debug_object_init(addr, descr, 0); | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * debug_object_init_on_stack - debug checks when an object on stack is | ||
319 | * initialized | ||
320 | * @addr: address of the object | ||
321 | * @descr: pointer to an object specific debug description structure | ||
322 | */ | ||
323 | void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) | ||
324 | { | ||
325 | if (!debug_objects_enabled) | ||
326 | return; | ||
327 | |||
328 | __debug_object_init(addr, descr, 1); | ||
329 | } | ||
330 | |||
331 | /** | ||
332 | * debug_object_activate - debug checks when an object is activated | ||
333 | * @addr: address of the object | ||
334 | * @descr: pointer to an object specific debug description structure | ||
335 | */ | ||
336 | void debug_object_activate(void *addr, struct debug_obj_descr *descr) | ||
337 | { | ||
338 | enum debug_obj_state state; | ||
339 | struct debug_bucket *db; | ||
340 | struct debug_obj *obj; | ||
341 | unsigned long flags; | ||
342 | |||
343 | if (!debug_objects_enabled) | ||
344 | return; | ||
345 | |||
346 | db = get_bucket((unsigned long) addr); | ||
347 | |||
348 | spin_lock_irqsave(&db->lock, flags); | ||
349 | |||
350 | obj = lookup_object(addr, db); | ||
351 | if (obj) { | ||
352 | switch (obj->state) { | ||
353 | case ODEBUG_STATE_INIT: | ||
354 | case ODEBUG_STATE_INACTIVE: | ||
355 | obj->state = ODEBUG_STATE_ACTIVE; | ||
356 | break; | ||
357 | |||
358 | case ODEBUG_STATE_ACTIVE: | ||
359 | debug_print_object(obj, "activate"); | ||
360 | state = obj->state; | ||
361 | spin_unlock_irqrestore(&db->lock, flags); | ||
362 | debug_object_fixup(descr->fixup_activate, addr, state); | ||
363 | return; | ||
364 | |||
365 | case ODEBUG_STATE_DESTROYED: | ||
366 | debug_print_object(obj, "activate"); | ||
367 | break; | ||
368 | default: | ||
369 | break; | ||
370 | } | ||
371 | spin_unlock_irqrestore(&db->lock, flags); | ||
372 | return; | ||
373 | } | ||
374 | |||
375 | spin_unlock_irqrestore(&db->lock, flags); | ||
376 | /* | ||
377 | * This happens when a static object is activated. We | ||
378 | * let the type specific code decide whether this is | ||
379 | * true or not. | ||
380 | */ | ||
381 | debug_object_fixup(descr->fixup_activate, addr, | ||
382 | ODEBUG_STATE_NOTAVAILABLE); | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * debug_object_deactivate - debug checks when an object is deactivated | ||
387 | * @addr: address of the object | ||
388 | * @descr: pointer to an object specific debug description structure | ||
389 | */ | ||
390 | void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | ||
391 | { | ||
392 | struct debug_bucket *db; | ||
393 | struct debug_obj *obj; | ||
394 | unsigned long flags; | ||
395 | |||
396 | if (!debug_objects_enabled) | ||
397 | return; | ||
398 | |||
399 | db = get_bucket((unsigned long) addr); | ||
400 | |||
401 | spin_lock_irqsave(&db->lock, flags); | ||
402 | |||
403 | obj = lookup_object(addr, db); | ||
404 | if (obj) { | ||
405 | switch (obj->state) { | ||
406 | case ODEBUG_STATE_INIT: | ||
407 | case ODEBUG_STATE_INACTIVE: | ||
408 | case ODEBUG_STATE_ACTIVE: | ||
409 | obj->state = ODEBUG_STATE_INACTIVE; | ||
410 | break; | ||
411 | |||
412 | case ODEBUG_STATE_DESTROYED: | ||
413 | debug_print_object(obj, "deactivate"); | ||
414 | break; | ||
415 | default: | ||
416 | break; | ||
417 | } | ||
418 | } else { | ||
419 | struct debug_obj o = { .object = addr, | ||
420 | .state = ODEBUG_STATE_NOTAVAILABLE, | ||
421 | .descr = descr }; | ||
422 | |||
423 | debug_print_object(&o, "deactivate"); | ||
424 | } | ||
425 | |||
426 | spin_unlock_irqrestore(&db->lock, flags); | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * debug_object_destroy - debug checks when an object is destroyed | ||
431 | * @addr: address of the object | ||
432 | * @descr: pointer to an object specific debug description structure | ||
433 | */ | ||
434 | void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | ||
435 | { | ||
436 | enum debug_obj_state state; | ||
437 | struct debug_bucket *db; | ||
438 | struct debug_obj *obj; | ||
439 | unsigned long flags; | ||
440 | |||
441 | if (!debug_objects_enabled) | ||
442 | return; | ||
443 | |||
444 | db = get_bucket((unsigned long) addr); | ||
445 | |||
446 | spin_lock_irqsave(&db->lock, flags); | ||
447 | |||
448 | obj = lookup_object(addr, db); | ||
449 | if (!obj) | ||
450 | goto out_unlock; | ||
451 | |||
452 | switch (obj->state) { | ||
453 | case ODEBUG_STATE_NONE: | ||
454 | case ODEBUG_STATE_INIT: | ||
455 | case ODEBUG_STATE_INACTIVE: | ||
456 | obj->state = ODEBUG_STATE_DESTROYED; | ||
457 | break; | ||
458 | case ODEBUG_STATE_ACTIVE: | ||
459 | debug_print_object(obj, "destroy"); | ||
460 | state = obj->state; | ||
461 | spin_unlock_irqrestore(&db->lock, flags); | ||
462 | debug_object_fixup(descr->fixup_destroy, addr, state); | ||
463 | return; | ||
464 | |||
465 | case ODEBUG_STATE_DESTROYED: | ||
466 | debug_print_object(obj, "destroy"); | ||
467 | break; | ||
468 | default: | ||
469 | break; | ||
470 | } | ||
471 | out_unlock: | ||
472 | spin_unlock_irqrestore(&db->lock, flags); | ||
473 | } | ||
474 | |||
475 | /** | ||
476 | * debug_object_free - debug checks when an object is freed | ||
477 | * @addr: address of the object | ||
478 | * @descr: pointer to an object specific debug description structure | ||
479 | */ | ||
480 | void debug_object_free(void *addr, struct debug_obj_descr *descr) | ||
481 | { | ||
482 | enum debug_obj_state state; | ||
483 | struct debug_bucket *db; | ||
484 | struct debug_obj *obj; | ||
485 | unsigned long flags; | ||
486 | |||
487 | if (!debug_objects_enabled) | ||
488 | return; | ||
489 | |||
490 | db = get_bucket((unsigned long) addr); | ||
491 | |||
492 | spin_lock_irqsave(&db->lock, flags); | ||
493 | |||
494 | obj = lookup_object(addr, db); | ||
495 | if (!obj) | ||
496 | goto out_unlock; | ||
497 | |||
498 | switch (obj->state) { | ||
499 | case ODEBUG_STATE_ACTIVE: | ||
500 | debug_print_object(obj, "free"); | ||
501 | state = obj->state; | ||
502 | spin_unlock_irqrestore(&db->lock, flags); | ||
503 | debug_object_fixup(descr->fixup_free, addr, state); | ||
504 | return; | ||
505 | default: | ||
506 | hlist_del(&obj->node); | ||
507 | free_object(obj); | ||
508 | break; | ||
509 | } | ||
510 | out_unlock: | ||
511 | spin_unlock_irqrestore(&db->lock, flags); | ||
512 | } | ||
513 | |||
514 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | ||
515 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) | ||
516 | { | ||
517 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; | ||
518 | struct hlist_node *node, *tmp; | ||
519 | struct debug_obj_descr *descr; | ||
520 | enum debug_obj_state state; | ||
521 | struct debug_bucket *db; | ||
522 | struct debug_obj *obj; | ||
523 | int cnt; | ||
524 | |||
525 | saddr = (unsigned long) address; | ||
526 | eaddr = saddr + size; | ||
527 | paddr = saddr & ODEBUG_CHUNK_MASK; | ||
528 | chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); | ||
529 | chunks >>= ODEBUG_CHUNK_SHIFT; | ||
530 | |||
531 | for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { | ||
532 | db = get_bucket(paddr); | ||
533 | |||
534 | repeat: | ||
535 | cnt = 0; | ||
536 | spin_lock_irqsave(&db->lock, flags); | ||
537 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | ||
538 | cnt++; | ||
539 | oaddr = (unsigned long) obj->object; | ||
540 | if (oaddr < saddr || oaddr >= eaddr) | ||
541 | continue; | ||
542 | |||
543 | switch (obj->state) { | ||
544 | case ODEBUG_STATE_ACTIVE: | ||
545 | debug_print_object(obj, "free"); | ||
546 | descr = obj->descr; | ||
547 | state = obj->state; | ||
548 | spin_unlock_irqrestore(&db->lock, flags); | ||
549 | debug_object_fixup(descr->fixup_free, | ||
550 | (void *) oaddr, state); | ||
551 | goto repeat; | ||
552 | default: | ||
553 | hlist_del(&obj->node); | ||
554 | free_object(obj); | ||
555 | break; | ||
556 | } | ||
557 | } | ||
558 | spin_unlock_irqrestore(&db->lock, flags); | ||
559 | if (cnt > debug_objects_maxchain) | ||
560 | debug_objects_maxchain = cnt; | ||
561 | } | ||
562 | } | ||
563 | |||
564 | void debug_check_no_obj_freed(const void *address, unsigned long size) | ||
565 | { | ||
566 | if (debug_objects_enabled) | ||
567 | __debug_check_no_obj_freed(address, size); | ||
568 | } | ||
569 | #endif | ||
570 | |||
571 | #ifdef CONFIG_DEBUG_FS | ||
572 | |||
573 | static int debug_stats_show(struct seq_file *m, void *v) | ||
574 | { | ||
575 | seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); | ||
576 | seq_printf(m, "warnings :%d\n", debug_objects_warnings); | ||
577 | seq_printf(m, "fixups :%d\n", debug_objects_fixups); | ||
578 | seq_printf(m, "pool_free :%d\n", obj_pool_free); | ||
579 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); | ||
580 | seq_printf(m, "pool_used :%d\n", obj_pool_used); | ||
581 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); | ||
582 | return 0; | ||
583 | } | ||
584 | |||
585 | static int debug_stats_open(struct inode *inode, struct file *filp) | ||
586 | { | ||
587 | return single_open(filp, debug_stats_show, NULL); | ||
588 | } | ||
589 | |||
590 | static const struct file_operations debug_stats_fops = { | ||
591 | .open = debug_stats_open, | ||
592 | .read = seq_read, | ||
593 | .llseek = seq_lseek, | ||
594 | .release = single_release, | ||
595 | }; | ||
596 | |||
597 | static int __init debug_objects_init_debugfs(void) | ||
598 | { | ||
599 | struct dentry *dbgdir, *dbgstats; | ||
600 | |||
601 | if (!debug_objects_enabled) | ||
602 | return 0; | ||
603 | |||
604 | dbgdir = debugfs_create_dir("debug_objects", NULL); | ||
605 | if (!dbgdir) | ||
606 | return -ENOMEM; | ||
607 | |||
608 | dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, | ||
609 | &debug_stats_fops); | ||
610 | if (!dbgstats) | ||
611 | goto err; | ||
612 | |||
613 | return 0; | ||
614 | |||
615 | err: | ||
616 | debugfs_remove(dbgdir); | ||
617 | |||
618 | return -ENOMEM; | ||
619 | } | ||
620 | __initcall(debug_objects_init_debugfs); | ||
621 | |||
622 | #else | ||
623 | static inline void debug_objects_init_debugfs(void) { } | ||
624 | #endif | ||
625 | |||
626 | #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST | ||
627 | |||
628 | /* Random data structure for the self test */ | ||
629 | struct self_test { | ||
630 | unsigned long dummy1[6]; | ||
631 | int static_init; | ||
632 | unsigned long dummy2[3]; | ||
633 | }; | ||
634 | |||
635 | static __initdata struct debug_obj_descr descr_type_test; | ||
636 | |||
637 | /* | ||
638 | * fixup_init is called when: | ||
639 | * - an active object is initialized | ||
640 | */ | ||
641 | static int __init fixup_init(void *addr, enum debug_obj_state state) | ||
642 | { | ||
643 | struct self_test *obj = addr; | ||
644 | |||
645 | switch (state) { | ||
646 | case ODEBUG_STATE_ACTIVE: | ||
647 | debug_object_deactivate(obj, &descr_type_test); | ||
648 | debug_object_init(obj, &descr_type_test); | ||
649 | return 1; | ||
650 | default: | ||
651 | return 0; | ||
652 | } | ||
653 | } | ||
654 | |||
655 | /* | ||
656 | * fixup_activate is called when: | ||
657 | * - an active object is activated | ||
658 | * - an unknown object is activated (might be a statically initialized object) | ||
659 | */ | ||
660 | static int __init fixup_activate(void *addr, enum debug_obj_state state) | ||
661 | { | ||
662 | struct self_test *obj = addr; | ||
663 | |||
664 | switch (state) { | ||
665 | case ODEBUG_STATE_NOTAVAILABLE: | ||
666 | if (obj->static_init == 1) { | ||
667 | debug_object_init(obj, &descr_type_test); | ||
668 | debug_object_activate(obj, &descr_type_test); | ||
669 | /* | ||
670 | * Real code should return 0 here! This is |||
671 | * not a fixup of some bad behaviour. We |||
672 | * merely call the debug_object_init() function to keep |||
673 | * track of the object. | ||
674 | */ | ||
675 | return 1; | ||
676 | } else { | ||
677 | /* Real code needs to emit a warning here */ | ||
678 | } | ||
679 | return 0; | ||
680 | |||
681 | case ODEBUG_STATE_ACTIVE: | ||
682 | debug_object_deactivate(obj, &descr_type_test); | ||
683 | debug_object_activate(obj, &descr_type_test); | ||
684 | return 1; | ||
685 | |||
686 | default: | ||
687 | return 0; | ||
688 | } | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * fixup_destroy is called when: | ||
693 | * - an active object is destroyed | ||
694 | */ | ||
695 | static int __init fixup_destroy(void *addr, enum debug_obj_state state) | ||
696 | { | ||
697 | struct self_test *obj = addr; | ||
698 | |||
699 | switch (state) { | ||
700 | case ODEBUG_STATE_ACTIVE: | ||
701 | debug_object_deactivate(obj, &descr_type_test); | ||
702 | debug_object_destroy(obj, &descr_type_test); | ||
703 | return 1; | ||
704 | default: | ||
705 | return 0; | ||
706 | } | ||
707 | } | ||
708 | |||
709 | /* | ||
710 | * fixup_free is called when: | ||
711 | * - an active object is freed | ||
712 | */ | ||
713 | static int __init fixup_free(void *addr, enum debug_obj_state state) | ||
714 | { | ||
715 | struct self_test *obj = addr; | ||
716 | |||
717 | switch (state) { | ||
718 | case ODEBUG_STATE_ACTIVE: | ||
719 | debug_object_deactivate(obj, &descr_type_test); | ||
720 | debug_object_free(obj, &descr_type_test); | ||
721 | return 1; | ||
722 | default: | ||
723 | return 0; | ||
724 | } | ||
725 | } | ||
726 | |||
727 | static int | ||
728 | check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | ||
729 | { | ||
730 | struct debug_bucket *db; | ||
731 | struct debug_obj *obj; | ||
732 | unsigned long flags; | ||
733 | int res = -EINVAL; | ||
734 | |||
735 | db = get_bucket((unsigned long) addr); | ||
736 | |||
737 | spin_lock_irqsave(&db->lock, flags); | ||
738 | |||
739 | obj = lookup_object(addr, db); | ||
740 | if (!obj && state != ODEBUG_STATE_NONE) { | ||
741 | printk(KERN_ERR "ODEBUG: selftest object not found\n"); | ||
742 | WARN_ON(1); | ||
743 | goto out; | ||
744 | } | ||
745 | if (obj && obj->state != state) { | ||
746 | printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", | ||
747 | obj->state, state); | ||
748 | WARN_ON(1); | ||
749 | goto out; | ||
750 | } | ||
751 | if (fixups != debug_objects_fixups) { | ||
752 | printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", | ||
753 | fixups, debug_objects_fixups); | ||
754 | WARN_ON(1); | ||
755 | goto out; | ||
756 | } | ||
757 | if (warnings != debug_objects_warnings) { | ||
758 | printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", | ||
759 | warnings, debug_objects_warnings); | ||
760 | WARN_ON(1); | ||
761 | goto out; | ||
762 | } | ||
763 | res = 0; | ||
764 | out: | ||
765 | spin_unlock_irqrestore(&db->lock, flags); | ||
766 | if (res) | ||
767 | debug_objects_enabled = 0; | ||
768 | return res; | ||
769 | } | ||
770 | |||
771 | static __initdata struct debug_obj_descr descr_type_test = { | ||
772 | .name = "selftest", | ||
773 | .fixup_init = fixup_init, | ||
774 | .fixup_activate = fixup_activate, | ||
775 | .fixup_destroy = fixup_destroy, | ||
776 | .fixup_free = fixup_free, | ||
777 | }; | ||
778 | |||
779 | static __initdata struct self_test obj = { .static_init = 0 }; | ||
780 | |||
781 | static void __init debug_objects_selftest(void) | ||
782 | { | ||
783 | int fixups, oldfixups, warnings, oldwarnings; | ||
784 | unsigned long flags; | ||
785 | |||
786 | local_irq_save(flags); | ||
787 | |||
788 | fixups = oldfixups = debug_objects_fixups; | ||
789 | warnings = oldwarnings = debug_objects_warnings; | ||
790 | descr_test = &descr_type_test; | ||
791 | |||
792 | debug_object_init(&obj, &descr_type_test); | ||
793 | if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) | ||
794 | goto out; | ||
795 | debug_object_activate(&obj, &descr_type_test); | ||
796 | if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) | ||
797 | goto out; | ||
798 | debug_object_activate(&obj, &descr_type_test); | ||
799 | if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings)) | ||
800 | goto out; | ||
801 | debug_object_deactivate(&obj, &descr_type_test); | ||
802 | if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings)) | ||
803 | goto out; | ||
804 | debug_object_destroy(&obj, &descr_type_test); | ||
805 | if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings)) | ||
806 | goto out; | ||
807 | debug_object_init(&obj, &descr_type_test); | ||
808 | if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) | ||
809 | goto out; | ||
810 | debug_object_activate(&obj, &descr_type_test); | ||
811 | if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) | ||
812 | goto out; | ||
813 | debug_object_deactivate(&obj, &descr_type_test); | ||
814 | if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) | ||
815 | goto out; | ||
816 | debug_object_free(&obj, &descr_type_test); | ||
817 | if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) | ||
818 | goto out; | ||
819 | |||
820 | obj.static_init = 1; | ||
821 | debug_object_activate(&obj, &descr_type_test); | ||
822 | if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings)) | ||
823 | goto out; | ||
824 | debug_object_init(&obj, &descr_type_test); | ||
825 | if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) | ||
826 | goto out; | ||
827 | debug_object_free(&obj, &descr_type_test); | ||
828 | if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) | ||
829 | goto out; | ||
830 | |||
831 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | ||
832 | debug_object_init(&obj, &descr_type_test); | ||
833 | if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) | ||
834 | goto out; | ||
835 | debug_object_activate(&obj, &descr_type_test); | ||
836 | if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) | ||
837 | goto out; | ||
838 | __debug_check_no_obj_freed(&obj, sizeof(obj)); | ||
839 | if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) | ||
840 | goto out; | ||
841 | #endif | ||
842 | printk(KERN_INFO "ODEBUG: selftest passed\n"); | ||
843 | |||
844 | out: | ||
845 | debug_objects_fixups = oldfixups; | ||
846 | debug_objects_warnings = oldwarnings; | ||
847 | descr_test = NULL; | ||
848 | |||
849 | local_irq_restore(flags); | ||
850 | } | ||
851 | #else | ||
852 | static inline void debug_objects_selftest(void) { } | ||
853 | #endif | ||
854 | |||
855 | /* | ||
856 | * Called during early boot to initialize the hash buckets and link | ||
857 | * the static object pool objects into the pool list. After this call |||
858 | * the object tracker is fully operational. | ||
859 | */ | ||
860 | void __init debug_objects_early_init(void) | ||
861 | { | ||
862 | int i; | ||
863 | |||
864 | for (i = 0; i < ODEBUG_HASH_SIZE; i++) | ||
865 | spin_lock_init(&obj_hash[i].lock); | ||
866 | |||
867 | for (i = 0; i < ODEBUG_POOL_SIZE; i++) | ||
868 | hlist_add_head(&obj_static_pool[i].node, &obj_pool); | ||
869 | } | ||
870 | |||
871 | /* | ||
872 | * Called after the kmem_caches are functional to setup a dedicated | ||
873 | * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag | ||
874 | * prevents the debug code from being called on kmem_cache_free() for |||
875 | * the debug tracker objects, avoiding recursive calls. |||
876 | */ | ||
877 | void __init debug_objects_mem_init(void) | ||
878 | { | ||
879 | if (!debug_objects_enabled) | ||
880 | return; | ||
881 | |||
882 | obj_cache = kmem_cache_create("debug_objects_cache", | ||
883 | sizeof (struct debug_obj), 0, | ||
884 | SLAB_DEBUG_OBJECTS, NULL); | ||
885 | |||
886 | if (!obj_cache) | ||
887 | debug_objects_enabled = 0; | ||
888 | else | ||
889 | debug_objects_selftest(); | ||
890 | } | ||
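A hypothetical consumer of the API added above might look like the sketch below; the object type, descriptor and wrapper names are made up, and a real user would also supply fixup_* callbacks along the lines of the selftest:

#include <linux/debugobjects.h>

struct my_work {
        int pending;
};

static struct debug_obj_descr my_work_debug_descr = {
        .name   = "my_work",
        /* .fixup_init, .fixup_activate, ... as in the selftest above */
};

static void my_work_init(struct my_work *w)
{
        w->pending = 0;
        debug_object_init(w, &my_work_debug_descr);
}

static void my_work_queue(struct my_work *w)
{
        debug_object_activate(w, &my_work_debug_descr);
        w->pending = 1;
}

static void my_work_complete(struct my_work *w)
{
        w->pending = 0;
        debug_object_deactivate(w, &my_work_debug_descr);
}

static void my_work_release(struct my_work *w)
{
        debug_object_free(w, &my_work_debug_descr);
}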
diff --git a/lib/devres.c b/lib/devres.c
index edc27a5d1b73..26c87c49d776 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data) | |||
20 | * | 20 | * |
21 | * Managed ioremap(). Map is automatically unmapped on driver detach. | 21 | * Managed ioremap(). Map is automatically unmapped on driver detach. |
22 | */ | 22 | */ |
23 | void __iomem *devm_ioremap(struct device *dev, unsigned long offset, | 23 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, |
24 | unsigned long size) | 24 | unsigned long size) |
25 | { | 25 | { |
26 | void __iomem **ptr, *addr; | 26 | void __iomem **ptr, *addr; |
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap); | |||
49 | * Managed ioremap_nocache(). Map is automatically unmapped on driver | 49 | * Managed ioremap_nocache(). Map is automatically unmapped on driver |
50 | * detach. | 50 | * detach. |
51 | */ | 51 | */ |
52 | void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset, | 52 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, |
53 | unsigned long size) | 53 | unsigned long size) |
54 | { | 54 | { |
55 | void __iomem **ptr, *addr; | 55 | void __iomem **ptr, *addr; |
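A hypothetical probe() routine using the managed mapping (sketch only; the driver name and resource handling are illustrative, not from this patch):

#include <linux/platform_device.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base;

        if (!res)
                return -ENODEV;

        base = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1);
        if (!base)
                return -ENOMEM;

        /* no matching iounmap(): the mapping is released on driver detach */
        return 0;
}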
diff --git a/lib/div64.c b/lib/div64.c
index b71cf93c529a..bb5bd0c0f030 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -16,9 +16,8 @@ | |||
16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. | 16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/module.h> | 19 | #include <linux/module.h> |
21 | #include <asm/div64.h> | 20 | #include <linux/math64.h> |
22 | 21 | ||
23 | /* Not needed on 64bit architectures */ | 22 | /* Not needed on 64bit architectures */ |
24 | #if BITS_PER_LONG == 32 | 23 | #if BITS_PER_LONG == 32 |
@@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) | |||
58 | 57 | ||
59 | EXPORT_SYMBOL(__div64_32); | 58 | EXPORT_SYMBOL(__div64_32); |
60 | 59 | ||
60 | #ifndef div_s64_rem | ||
61 | s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) | ||
62 | { | ||
63 | u64 quotient; | ||
64 | |||
65 | if (dividend < 0) { | ||
66 | quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder); | ||
67 | *remainder = -*remainder; | ||
68 | if (divisor > 0) | ||
69 | quotient = -quotient; | ||
70 | } else { | ||
71 | quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder); | ||
72 | if (divisor < 0) | ||
73 | quotient = -quotient; | ||
74 | } | ||
75 | return quotient; | ||
76 | } | ||
77 | EXPORT_SYMBOL(div_s64_rem); | ||
78 | #endif | ||
79 | |||
61 | /* 64bit divisor, dividend and result. dynamic precision */ | 80 | /* 64bit divisor, dividend and result. dynamic precision */ |
62 | uint64_t div64_64(uint64_t dividend, uint64_t divisor) | 81 | #ifndef div64_u64 |
82 | u64 div64_u64(u64 dividend, u64 divisor) | ||
63 | { | 83 | { |
64 | uint32_t high, d; | 84 | u32 high, d; |
65 | 85 | ||
66 | high = divisor >> 32; | 86 | high = divisor >> 32; |
67 | if (high) { | 87 | if (high) { |
@@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor) | |||
72 | } else | 92 | } else |
73 | d = divisor; | 93 | d = divisor; |
74 | 94 | ||
75 | do_div(dividend, d); | 95 | return div_u64(dividend, d); |
76 | |||
77 | return dividend; | ||
78 | } | 96 | } |
79 | EXPORT_SYMBOL(div64_64); | 97 | EXPORT_SYMBOL(div64_u64); |
98 | #endif | ||
80 | 99 | ||
81 | #endif /* BITS_PER_LONG == 32 */ | 100 | #endif /* BITS_PER_LONG == 32 */ |
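As a usage sketch for the signed helper added above (the caller is assumed, not part of the patch), div_s64_rem() splits a signed nanosecond count into seconds plus a remainder that keeps the sign of the dividend:

#include <linux/math64.h>

static s64 ns_to_sec(s64 nsec, s32 *rem_ns)
{
        /* quotient truncates toward zero; *rem_ns carries the sign of nsec */
        return div_s64_rem(nsec, 1000000000, rem_ns);
}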
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a8841..24c59ded47a0 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@ | |||
16 | 16 | ||
17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | 17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
18 | 18 | ||
19 | /** | 19 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT |
20 | * find_next_bit - find the next set bit in a memory region | 20 | /* |
21 | * @addr: The address to base the search on | 21 | * Find the next set bit in a memory region. |
22 | * @offset: The bitnumber to start searching at | ||
23 | * @size: The maximum size to search | ||
24 | */ | 22 | */ |
25 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | 23 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
26 | unsigned long offset) | 24 | unsigned long offset) |
27 | { | 25 | { |
28 | const unsigned long *p = addr + BITOP_WORD(offset); | 26 | const unsigned long *p = addr + BITOP_WORD(offset); |
29 | unsigned long result = offset & ~(BITS_PER_LONG-1); | 27 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
@@ -60,7 +58,6 @@ found_first: | |||
60 | found_middle: | 58 | found_middle: |
61 | return result + __ffs(tmp); | 59 | return result + __ffs(tmp); |
62 | } | 60 | } |
63 | |||
64 | EXPORT_SYMBOL(find_next_bit); | 61 | EXPORT_SYMBOL(find_next_bit); |
65 | 62 | ||
66 | /* | 63 | /* |
@@ -68,7 +65,7 @@ EXPORT_SYMBOL(find_next_bit); | |||
68 | * Linus' asm-alpha/bitops.h. | 65 | * Linus' asm-alpha/bitops.h. |
69 | */ | 66 | */ |
70 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | 67 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
71 | unsigned long offset) | 68 | unsigned long offset) |
72 | { | 69 | { |
73 | const unsigned long *p = addr + BITOP_WORD(offset); | 70 | const unsigned long *p = addr + BITOP_WORD(offset); |
74 | unsigned long result = offset & ~(BITS_PER_LONG-1); | 71 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
@@ -105,8 +102,62 @@ found_first: | |||
105 | found_middle: | 102 | found_middle: |
106 | return result + ffz(tmp); | 103 | return result + ffz(tmp); |
107 | } | 104 | } |
108 | |||
109 | EXPORT_SYMBOL(find_next_zero_bit); | 105 | EXPORT_SYMBOL(find_next_zero_bit); |
106 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | ||
107 | |||
108 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | ||
109 | /* | ||
110 | * Find the first set bit in a memory region. | ||
111 | */ | ||
112 | unsigned long find_first_bit(const unsigned long *addr, unsigned long size) | ||
113 | { | ||
114 | const unsigned long *p = addr; | ||
115 | unsigned long result = 0; | ||
116 | unsigned long tmp; | ||
117 | |||
118 | while (size & ~(BITS_PER_LONG-1)) { | ||
119 | if ((tmp = *(p++))) | ||
120 | goto found; | ||
121 | result += BITS_PER_LONG; | ||
122 | size -= BITS_PER_LONG; | ||
123 | } | ||
124 | if (!size) | ||
125 | return result; | ||
126 | |||
127 | tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); | ||
128 | if (tmp == 0UL) /* Are any bits set? */ | ||
129 | return result + size; /* Nope. */ | ||
130 | found: | ||
131 | return result + __ffs(tmp); | ||
132 | } | ||
133 | EXPORT_SYMBOL(find_first_bit); | ||
134 | |||
135 | /* | ||
136 | * Find the first cleared bit in a memory region. | ||
137 | */ | ||
138 | unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) | ||
139 | { | ||
140 | const unsigned long *p = addr; | ||
141 | unsigned long result = 0; | ||
142 | unsigned long tmp; | ||
143 | |||
144 | while (size & ~(BITS_PER_LONG-1)) { | ||
145 | if (~(tmp = *(p++))) | ||
146 | goto found; | ||
147 | result += BITS_PER_LONG; | ||
148 | size -= BITS_PER_LONG; | ||
149 | } | ||
150 | if (!size) | ||
151 | return result; | ||
152 | |||
153 | tmp = (*p) | (~0UL << size); | ||
154 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
155 | return result + size; /* Nope. */ | ||
156 | found: | ||
157 | return result + ffz(tmp); | ||
158 | } | ||
159 | EXPORT_SYMBOL(find_first_zero_bit); | ||
160 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | ||
110 | 161 | ||
111 | #ifdef __BIG_ENDIAN | 162 | #ifdef __BIG_ENDIAN |
112 | 163 | ||
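A short usage sketch for the bit-search helpers above; walk_set_bits(), NBITS and the mask parameter are invented for illustration, and the same loop works whether the generic or an architecture-specific implementation is compiled in:

	#include <linux/kernel.h>
	#include <linux/bitops.h>

	#define NBITS 128	/* hypothetical bitmap size, in bits */

	static void walk_set_bits(const unsigned long *mask)
	{
		unsigned long bit;

		/* find_first_bit()/find_next_bit() return NBITS when no
		 * further bits are set, which terminates the loop. */
		for (bit = find_first_bit(mask, NBITS);
		     bit < NBITS;
		     bit = find_next_bit(mask, NBITS, bit + 1))
			printk(KERN_DEBUG "bit %lu is set\n", bit);
	}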
@@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id) | |||
385 | while (idp->id_free_cnt >= IDR_FREE_MAX) { | 385 | while (idp->id_free_cnt >= IDR_FREE_MAX) { |
386 | p = alloc_layer(idp); | 386 | p = alloc_layer(idp); |
387 | kmem_cache_free(idr_layer_cache, p); | 387 | kmem_cache_free(idr_layer_cache, p); |
388 | return; | ||
389 | } | 388 | } |
389 | return; | ||
390 | } | 390 | } |
391 | EXPORT_SYMBOL(idr_remove); | 391 | EXPORT_SYMBOL(idr_remove); |
392 | 392 | ||
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) | |||
585 | memset(idr_layer, 0, sizeof(struct idr_layer)); | 585 | memset(idr_layer, 0, sizeof(struct idr_layer)); |
586 | } | 586 | } |
587 | 587 | ||
588 | static int init_id_cache(void) | 588 | void __init idr_init_cache(void) |
589 | { | 589 | { |
590 | if (!idr_layer_cache) | 590 | idr_layer_cache = kmem_cache_create("idr_layer_cache", |
591 | idr_layer_cache = kmem_cache_create("idr_layer_cache", | 591 | sizeof(struct idr_layer), 0, SLAB_PANIC, |
592 | sizeof(struct idr_layer), 0, 0, idr_cache_ctor); | 592 | idr_cache_ctor); |
593 | return 0; | ||
594 | } | 593 | } |
595 | 594 | ||
596 | /** | 595 | /** |
@@ -602,7 +601,6 @@ static int init_id_cache(void) | |||
602 | */ | 601 | */ |
603 | void idr_init(struct idr *idp) | 602 | void idr_init(struct idr *idp) |
604 | { | 603 | { |
605 | init_id_cache(); | ||
606 | memset(idp, 0, sizeof(struct idr)); | 604 | memset(idp, 0, sizeof(struct idr)); |
607 | spin_lock_init(&idp->lock); | 605 | spin_lock_init(&idp->lock); |
608 | } | 606 | } |
diff --git a/lib/inflate.c b/lib/inflate.c index 845f91d3ac12..9762294be062 100644 --- a/lib/inflate.c +++ b/lib/inflate.c | |||
@@ -811,6 +811,9 @@ DEBG("<dyn"); | |||
811 | ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ | 811 | ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ |
812 | #endif | 812 | #endif |
813 | 813 | ||
814 | if (ll == NULL) | ||
815 | return 1; | ||
816 | |||
814 | /* make local bit buffer */ | 817 | /* make local bit buffer */ |
815 | b = bb; | 818 | b = bb; |
816 | k = bk; | 819 | k = bk; |
diff --git a/lib/iomap.c b/lib/iomap.c index dd6ca48fe6b0..37a3ea4cac9f 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap); | |||
257 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 257 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
258 | { | 258 | { |
259 | resource_size_t start = pci_resource_start(dev, bar); | 259 | resource_size_t start = pci_resource_start(dev, bar); |
260 | unsigned long len = pci_resource_len(dev, bar); | 260 | resource_size_t len = pci_resource_len(dev, bar); |
261 | unsigned long flags = pci_resource_flags(dev, bar); | 261 | unsigned long flags = pci_resource_flags(dev, bar); |
262 | 262 | ||
263 | if (!len || !start) | 263 | if (!len || !start) |
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 812dbf00844b..cd3e82530b03 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/smp_lock.h> | 8 | #include <linux/smp_lock.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kallsyms.h> | 10 | #include <linux/kallsyms.h> |
11 | #include <linux/semaphore.h> | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * The 'big kernel semaphore' | 14 | * The 'big kernel semaphore' |
diff --git a/lib/klist.c b/lib/klist.c index 120bd175aa78..cca37f96faa2 100644 --- a/lib/klist.c +++ b/lib/klist.c | |||
@@ -1,38 +1,37 @@ | |||
1 | /* | 1 | /* |
2 | * klist.c - Routines for manipulating klists. | 2 | * klist.c - Routines for manipulating klists. |
3 | * | 3 | * |
4 | * Copyright (C) 2005 Patrick Mochel | ||
4 | * | 5 | * |
5 | * This klist interface provides a couple of structures that wrap around | 6 | * This file is released under the GPL v2. |
6 | * struct list_head to provide explicit list "head" (struct klist) and | ||
7 | * list "node" (struct klist_node) objects. For struct klist, a spinlock | ||
8 | * is included that protects access to the actual list itself. struct | ||
9 | * klist_node provides a pointer to the klist that owns it and a kref | ||
10 | * reference count that indicates the number of current users of that node | ||
11 | * in the list. | ||
12 | * | 7 | * |
13 | * The entire point is to provide an interface for iterating over a list | 8 | * This klist interface provides a couple of structures that wrap around |
14 | * that is safe and allows for modification of the list during the | 9 | * struct list_head to provide explicit list "head" (struct klist) and list |
15 | * iteration (e.g. insertion and removal), including modification of the | 10 | * "node" (struct klist_node) objects. For struct klist, a spinlock is |
16 | * current node on the list. | 11 | * included that protects access to the actual list itself. struct |
12 | * klist_node provides a pointer to the klist that owns it and a kref | ||
13 | * reference count that indicates the number of current users of that node | ||
14 | * in the list. | ||
17 | * | 15 | * |
18 | * It works using a 3rd object type - struct klist_iter - that is declared | 16 | * The entire point is to provide an interface for iterating over a list |
19 | * and initialized before an iteration. klist_next() is used to acquire the | 17 | * that is safe and allows for modification of the list during the |
20 | * next element in the list. It returns NULL if there are no more items. | 18 | * iteration (e.g. insertion and removal), including modification of the |
21 | * Internally, that routine takes the klist's lock, decrements the reference | 19 | * current node on the list. |
22 | * count of the previous klist_node and increments the count of the next | ||
23 | * klist_node. It then drops the lock and returns. | ||
24 | * | 20 | * |
25 | * There are primitives for adding and removing nodes to/from a klist. | 21 | * It works using a 3rd object type - struct klist_iter - that is declared |
26 | * When deleting, klist_del() will simply decrement the reference count. | 22 | * and initialized before an iteration. klist_next() is used to acquire the |
27 | * Only when the count goes to 0 is the node removed from the list. | 23 | * next element in the list. It returns NULL if there are no more items. |
28 | * klist_remove() will try to delete the node from the list and block | 24 | * Internally, that routine takes the klist's lock, decrements the |
29 | * until it is actually removed. This is useful for objects (like devices) | 25 | * reference count of the previous klist_node and increments the count of |
30 | * that have been removed from the system and must be freed (but must wait | 26 | * the next klist_node. It then drops the lock and returns. |
31 | * until all accessors have finished). | ||
32 | * | 27 | * |
33 | * Copyright (C) 2005 Patrick Mochel | 28 | * There are primitives for adding and removing nodes to/from a klist. |
34 | * | 29 | * When deleting, klist_del() will simply decrement the reference count. |
35 | * This file is released under the GPL v2. | 30 | * Only when the count goes to 0 is the node removed from the list. |
31 | * klist_remove() will try to delete the node from the list and block until | ||
32 | * it is actually removed. This is useful for objects (like devices) that | ||
33 | * have been removed from the system and must be freed (but must wait until | ||
34 | * all accessors have finished). | ||
36 | */ | 35 | */ |
37 | 36 | ||
38 | #include <linux/klist.h> | 37 | #include <linux/klist.h> |
@@ -40,10 +39,10 @@ | |||
40 | 39 | ||
41 | 40 | ||
42 | /** | 41 | /** |
43 | * klist_init - Initialize a klist structure. | 42 | * klist_init - Initialize a klist structure. |
44 | * @k: The klist we're initializing. | 43 | * @k: The klist we're initializing. |
45 | * @get: The get function for the embedding object (NULL if none) | 44 | * @get: The get function for the embedding object (NULL if none) |
46 | * @put: The put function for the embedding object (NULL if none) | 45 | * @put: The put function for the embedding object (NULL if none) |
47 | * | 46 | * |
48 | * Initialises the klist structure. If the klist_node structures are | 47 | * Initialises the klist structure. If the klist_node structures are |
49 | * going to be embedded in refcounted objects (necessary for safe | 48 | * going to be embedded in refcounted objects (necessary for safe |
@@ -51,8 +50,7 @@ | |||
51 | * functions that take and release references on the embedding | 50 | * functions that take and release references on the embedding |
52 | * objects. | 51 | * objects. |
53 | */ | 52 | */ |
54 | 53 | void klist_init(struct klist *k, void (*get)(struct klist_node *), | |
55 | void klist_init(struct klist * k, void (*get)(struct klist_node *), | ||
56 | void (*put)(struct klist_node *)) | 54 | void (*put)(struct klist_node *)) |
57 | { | 55 | { |
58 | INIT_LIST_HEAD(&k->k_list); | 56 | INIT_LIST_HEAD(&k->k_list); |
@@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *), | |||
60 | k->get = get; | 58 | k->get = get; |
61 | k->put = put; | 59 | k->put = put; |
62 | } | 60 | } |
63 | |||
64 | EXPORT_SYMBOL_GPL(klist_init); | 61 | EXPORT_SYMBOL_GPL(klist_init); |
65 | 62 | ||
66 | 63 | static void add_head(struct klist *k, struct klist_node *n) | |
67 | static void add_head(struct klist * k, struct klist_node * n) | ||
68 | { | 64 | { |
69 | spin_lock(&k->k_lock); | 65 | spin_lock(&k->k_lock); |
70 | list_add(&n->n_node, &k->k_list); | 66 | list_add(&n->n_node, &k->k_list); |
71 | spin_unlock(&k->k_lock); | 67 | spin_unlock(&k->k_lock); |
72 | } | 68 | } |
73 | 69 | ||
74 | static void add_tail(struct klist * k, struct klist_node * n) | 70 | static void add_tail(struct klist *k, struct klist_node *n) |
75 | { | 71 | { |
76 | spin_lock(&k->k_lock); | 72 | spin_lock(&k->k_lock); |
77 | list_add_tail(&n->n_node, &k->k_list); | 73 | list_add_tail(&n->n_node, &k->k_list); |
78 | spin_unlock(&k->k_lock); | 74 | spin_unlock(&k->k_lock); |
79 | } | 75 | } |
80 | 76 | ||
81 | 77 | static void klist_node_init(struct klist *k, struct klist_node *n) | |
82 | static void klist_node_init(struct klist * k, struct klist_node * n) | ||
83 | { | 78 | { |
84 | INIT_LIST_HEAD(&n->n_node); | 79 | INIT_LIST_HEAD(&n->n_node); |
85 | init_completion(&n->n_removed); | 80 | init_completion(&n->n_removed); |
@@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n) | |||
89 | k->get(n); | 84 | k->get(n); |
90 | } | 85 | } |
91 | 86 | ||
92 | |||
93 | /** | 87 | /** |
94 | * klist_add_head - Initialize a klist_node and add it to front. | 88 | * klist_add_head - Initialize a klist_node and add it to front. |
95 | * @n: node we're adding. | 89 | * @n: node we're adding. |
96 | * @k: klist it's going on. | 90 | * @k: klist it's going on. |
97 | */ | 91 | */ |
98 | 92 | void klist_add_head(struct klist_node *n, struct klist *k) | |
99 | void klist_add_head(struct klist_node * n, struct klist * k) | ||
100 | { | 93 | { |
101 | klist_node_init(k, n); | 94 | klist_node_init(k, n); |
102 | add_head(k, n); | 95 | add_head(k, n); |
103 | } | 96 | } |
104 | |||
105 | EXPORT_SYMBOL_GPL(klist_add_head); | 97 | EXPORT_SYMBOL_GPL(klist_add_head); |
106 | 98 | ||
107 | |||
108 | /** | 99 | /** |
109 | * klist_add_tail - Initialize a klist_node and add it to back. | 100 | * klist_add_tail - Initialize a klist_node and add it to back. |
110 | * @n: node we're adding. | 101 | * @n: node we're adding. |
111 | * @k: klist it's going on. | 102 | * @k: klist it's going on. |
112 | */ | 103 | */ |
113 | 104 | void klist_add_tail(struct klist_node *n, struct klist *k) | |
114 | void klist_add_tail(struct klist_node * n, struct klist * k) | ||
115 | { | 105 | { |
116 | klist_node_init(k, n); | 106 | klist_node_init(k, n); |
117 | add_tail(k, n); | 107 | add_tail(k, n); |
118 | } | 108 | } |
119 | |||
120 | EXPORT_SYMBOL_GPL(klist_add_tail); | 109 | EXPORT_SYMBOL_GPL(klist_add_tail); |
121 | 110 | ||
111 | /** | ||
112 | * klist_add_after - Init a klist_node and add it after an existing node | ||
113 | * @n: node we're adding. | ||
114 | * @pos: node to put @n after | ||
115 | */ | ||
116 | void klist_add_after(struct klist_node *n, struct klist_node *pos) | ||
117 | { | ||
118 | struct klist *k = pos->n_klist; | ||
119 | |||
120 | klist_node_init(k, n); | ||
121 | spin_lock(&k->k_lock); | ||
122 | list_add(&n->n_node, &pos->n_node); | ||
123 | spin_unlock(&k->k_lock); | ||
124 | } | ||
125 | EXPORT_SYMBOL_GPL(klist_add_after); | ||
126 | |||
127 | /** | ||
128 | * klist_add_before - Init a klist_node and add it before an existing node | ||
129 | * @n: node we're adding. | ||
130 | * @pos: node to put @n after | ||
131 | */ | ||
132 | void klist_add_before(struct klist_node *n, struct klist_node *pos) | ||
133 | { | ||
134 | struct klist *k = pos->n_klist; | ||
135 | |||
136 | klist_node_init(k, n); | ||
137 | spin_lock(&k->k_lock); | ||
138 | list_add_tail(&n->n_node, &pos->n_node); | ||
139 | spin_unlock(&k->k_lock); | ||
140 | } | ||
141 | EXPORT_SYMBOL_GPL(klist_add_before); | ||
122 | 142 | ||
123 | static void klist_release(struct kref * kref) | 143 | static void klist_release(struct kref *kref) |
124 | { | 144 | { |
125 | struct klist_node * n = container_of(kref, struct klist_node, n_ref); | 145 | struct klist_node *n = container_of(kref, struct klist_node, n_ref); |
126 | 146 | ||
127 | list_del(&n->n_node); | 147 | list_del(&n->n_node); |
128 | complete(&n->n_removed); | 148 | complete(&n->n_removed); |
129 | n->n_klist = NULL; | 149 | n->n_klist = NULL; |
130 | } | 150 | } |
131 | 151 | ||
132 | static int klist_dec_and_del(struct klist_node * n) | 152 | static int klist_dec_and_del(struct klist_node *n) |
133 | { | 153 | { |
134 | return kref_put(&n->n_ref, klist_release); | 154 | return kref_put(&n->n_ref, klist_release); |
135 | } | 155 | } |
136 | 156 | ||
137 | |||
138 | /** | 157 | /** |
139 | * klist_del - Decrement the reference count of node and try to remove. | 158 | * klist_del - Decrement the reference count of node and try to remove. |
140 | * @n: node we're deleting. | 159 | * @n: node we're deleting. |
141 | */ | 160 | */ |
142 | 161 | void klist_del(struct klist_node *n) | |
143 | void klist_del(struct klist_node * n) | ||
144 | { | 162 | { |
145 | struct klist * k = n->n_klist; | 163 | struct klist *k = n->n_klist; |
146 | void (*put)(struct klist_node *) = k->put; | 164 | void (*put)(struct klist_node *) = k->put; |
147 | 165 | ||
148 | spin_lock(&k->k_lock); | 166 | spin_lock(&k->k_lock); |
@@ -152,48 +170,40 @@ void klist_del(struct klist_node * n) | |||
152 | if (put) | 170 | if (put) |
153 | put(n); | 171 | put(n); |
154 | } | 172 | } |
155 | |||
156 | EXPORT_SYMBOL_GPL(klist_del); | 173 | EXPORT_SYMBOL_GPL(klist_del); |
157 | 174 | ||
158 | |||
159 | /** | 175 | /** |
160 | * klist_remove - Decrement the refcount of node and wait for it to go away. | 176 | * klist_remove - Decrement the refcount of node and wait for it to go away. |
161 | * @n: node we're removing. | 177 | * @n: node we're removing. |
162 | */ | 178 | */ |
163 | 179 | void klist_remove(struct klist_node *n) | |
164 | void klist_remove(struct klist_node * n) | ||
165 | { | 180 | { |
166 | klist_del(n); | 181 | klist_del(n); |
167 | wait_for_completion(&n->n_removed); | 182 | wait_for_completion(&n->n_removed); |
168 | } | 183 | } |
169 | |||
170 | EXPORT_SYMBOL_GPL(klist_remove); | 184 | EXPORT_SYMBOL_GPL(klist_remove); |
171 | 185 | ||
172 | |||
173 | /** | 186 | /** |
174 | * klist_node_attached - Say whether a node is bound to a list or not. | 187 | * klist_node_attached - Say whether a node is bound to a list or not. |
175 | * @n: Node that we're testing. | 188 | * @n: Node that we're testing. |
176 | */ | 189 | */ |
177 | 190 | int klist_node_attached(struct klist_node *n) | |
178 | int klist_node_attached(struct klist_node * n) | ||
179 | { | 191 | { |
180 | return (n->n_klist != NULL); | 192 | return (n->n_klist != NULL); |
181 | } | 193 | } |
182 | |||
183 | EXPORT_SYMBOL_GPL(klist_node_attached); | 194 | EXPORT_SYMBOL_GPL(klist_node_attached); |
184 | 195 | ||
185 | |||
186 | /** | 196 | /** |
187 | * klist_iter_init_node - Initialize a klist_iter structure. | 197 | * klist_iter_init_node - Initialize a klist_iter structure. |
188 | * @k: klist we're iterating. | 198 | * @k: klist we're iterating. |
189 | * @i: klist_iter we're filling. | 199 | * @i: klist_iter we're filling. |
190 | * @n: node to start with. | 200 | * @n: node to start with. |
191 | * | 201 | * |
192 | * Similar to klist_iter_init(), but starts the action off with @n, | 202 | * Similar to klist_iter_init(), but starts the action off with @n, |
193 | * instead of with the list head. | 203 | * instead of with the list head. |
194 | */ | 204 | */ |
195 | 205 | void klist_iter_init_node(struct klist *k, struct klist_iter *i, | |
196 | void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n) | 206 | struct klist_node *n) |
197 | { | 207 | { |
198 | i->i_klist = k; | 208 | i->i_klist = k; |
199 | i->i_head = &k->k_list; | 209 | i->i_head = &k->k_list; |
@@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_ | |||
201 | if (n) | 211 | if (n) |
202 | kref_get(&n->n_ref); | 212 | kref_get(&n->n_ref); |
203 | } | 213 | } |
204 | |||
205 | EXPORT_SYMBOL_GPL(klist_iter_init_node); | 214 | EXPORT_SYMBOL_GPL(klist_iter_init_node); |
206 | 215 | ||
207 | |||
208 | /** | 216 | /** |
209 | * klist_iter_init - Initialize a klist_iter structure. | 217 |
210 | * @k: klist we're iterating. | 218 | * @k: klist we're iterating. |
211 | * @i: klist_iter structure we're filling. | 219 | * @i: klist_iter structure we're filling. |
212 | * | 220 | * |
213 | * Similar to klist_iter_init_node(), but start with the list head. | 221 | * Similar to klist_iter_init_node(), but start with the list head. |
214 | */ | 222 | */ |
215 | 223 | void klist_iter_init(struct klist *k, struct klist_iter *i) | |
216 | void klist_iter_init(struct klist * k, struct klist_iter * i) | ||
217 | { | 224 | { |
218 | klist_iter_init_node(k, i, NULL); | 225 | klist_iter_init_node(k, i, NULL); |
219 | } | 226 | } |
220 | |||
221 | EXPORT_SYMBOL_GPL(klist_iter_init); | 227 | EXPORT_SYMBOL_GPL(klist_iter_init); |
222 | 228 | ||
223 | |||
224 | /** | 229 | /** |
225 | * klist_iter_exit - Finish a list iteration. | 230 | * klist_iter_exit - Finish a list iteration. |
226 | * @i: Iterator structure. | 231 | * @i: Iterator structure. |
227 | * | 232 | * |
228 | * Must be called when done iterating over list, as it decrements the | 233 | * Must be called when done iterating over list, as it decrements the |
229 | * refcount of the current node. Necessary in case iteration exited before | 234 | * refcount of the current node. Necessary in case iteration exited before |
230 | * the end of the list was reached, and always good form. | 235 | * the end of the list was reached, and always good form. |
231 | */ | 236 | */ |
232 | 237 | void klist_iter_exit(struct klist_iter *i) | |
233 | void klist_iter_exit(struct klist_iter * i) | ||
234 | { | 238 | { |
235 | if (i->i_cur) { | 239 | if (i->i_cur) { |
236 | klist_del(i->i_cur); | 240 | klist_del(i->i_cur); |
237 | i->i_cur = NULL; | 241 | i->i_cur = NULL; |
238 | } | 242 | } |
239 | } | 243 | } |
240 | |||
241 | EXPORT_SYMBOL_GPL(klist_iter_exit); | 244 | EXPORT_SYMBOL_GPL(klist_iter_exit); |
242 | 245 | ||
243 | 246 | static struct klist_node *to_klist_node(struct list_head *n) | |
244 | static struct klist_node * to_klist_node(struct list_head * n) | ||
245 | { | 247 | { |
246 | return container_of(n, struct klist_node, n_node); | 248 | return container_of(n, struct klist_node, n_node); |
247 | } | 249 | } |
248 | 250 | ||
249 | |||
250 | /** | 251 | /** |
251 | * klist_next - Ante up next node in list. | 252 | * klist_next - Ante up next node in list. |
252 | * @i: Iterator structure. | 253 | * @i: Iterator structure. |
253 | * | 254 | * |
254 | * First grab list lock. Decrement the reference count of the previous | 255 | * First grab list lock. Decrement the reference count of the previous |
255 | * node, if there was one. Grab the next node, increment its reference | 256 | * node, if there was one. Grab the next node, increment its reference |
256 | * count, drop the lock, and return that next node. | 257 | * count, drop the lock, and return that next node. |
257 | */ | 258 | */ |
258 | 259 | struct klist_node *klist_next(struct klist_iter *i) | |
259 | struct klist_node * klist_next(struct klist_iter * i) | ||
260 | { | 260 | { |
261 | struct list_head * next; | 261 | struct list_head *next; |
262 | struct klist_node * lnode = i->i_cur; | 262 | struct klist_node *lnode = i->i_cur; |
263 | struct klist_node * knode = NULL; | 263 | struct klist_node *knode = NULL; |
264 | void (*put)(struct klist_node *) = i->i_klist->put; | 264 | void (*put)(struct klist_node *) = i->i_klist->put; |
265 | 265 | ||
266 | spin_lock(&i->i_klist->k_lock); | 266 | spin_lock(&i->i_klist->k_lock); |
@@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i) | |||
281 | put(lnode); | 281 | put(lnode); |
282 | return knode; | 282 | return knode; |
283 | } | 283 | } |
284 | |||
285 | EXPORT_SYMBOL_GPL(klist_next); | 284 | EXPORT_SYMBOL_GPL(klist_next); |
286 | |||
287 | |||
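To make the reworked klist interface above concrete, a hedged sketch of a caller follows; struct item, demo() and my_list are invented, while every klist_* call is one defined in this file, including the new klist_add_after():

	#include <linux/kernel.h>
	#include <linux/klist.h>

	struct item {
		struct klist_node node;
		int payload;
	};

	static struct klist my_list;

	static void demo(struct item *a, struct item *b, struct item *c)
	{
		struct klist_iter iter;
		struct klist_node *kn;

		/* No get/put callbacks: the embedding objects are assumed
		 * not to need reference counting for this sketch. */
		klist_init(&my_list, NULL, NULL);
		klist_add_tail(&a->node, &my_list);
		klist_add_head(&b->node, &my_list);
		klist_add_after(&c->node, &a->node);	/* c goes right after a */

		klist_iter_init(&my_list, &iter);
		while ((kn = klist_next(&iter))) {
			struct item *it = container_of(kn, struct item, node);
			/* The list may be modified here; the iterator holds
			 * a reference on the current node. */
			(void)it;
		}
		klist_iter_exit(&iter);	/* drops any reference still held */
	}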
diff --git a/lib/kobject.c b/lib/kobject.c index 0d03252f87a8..718e5101c263 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -58,11 +58,6 @@ static int create_dir(struct kobject *kobj) | |||
58 | return error; | 58 | return error; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline struct kobject *to_kobj(struct list_head *entry) | ||
62 | { | ||
63 | return container_of(entry, struct kobject, entry); | ||
64 | } | ||
65 | |||
66 | static int get_kobj_path_length(struct kobject *kobj) | 61 | static int get_kobj_path_length(struct kobject *kobj) |
67 | { | 62 | { |
68 | int length = 1; | 63 | int length = 1; |
@@ -95,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length) | |||
95 | } | 90 | } |
96 | 91 | ||
97 | pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), | 92 | pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), |
98 | kobj, __FUNCTION__, path); | 93 | kobj, __func__, path); |
99 | } | 94 | } |
100 | 95 | ||
101 | /** | 96 | /** |
@@ -186,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj) | |||
186 | } | 181 | } |
187 | 182 | ||
188 | pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", | 183 | pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", |
189 | kobject_name(kobj), kobj, __FUNCTION__, | 184 | kobject_name(kobj), kobj, __func__, |
190 | parent ? kobject_name(parent) : "<NULL>", | 185 | parent ? kobject_name(parent) : "<NULL>", |
191 | kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); | 186 | kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); |
192 | 187 | ||
@@ -201,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj) | |||
201 | printk(KERN_ERR "%s failed for %s with " | 196 | printk(KERN_ERR "%s failed for %s with " |
202 | "-EEXIST, don't try to register things with " | 197 | "-EEXIST, don't try to register things with " |
203 | "the same name in the same directory.\n", | 198 | "the same name in the same directory.\n", |
204 | __FUNCTION__, kobject_name(kobj)); | 199 | __func__, kobject_name(kobj)); |
205 | else | 200 | else |
206 | printk(KERN_ERR "%s failed for %s (%d)\n", | 201 | printk(KERN_ERR "%s failed for %s (%d)\n", |
207 | __FUNCTION__, kobject_name(kobj), error); | 202 | __func__, kobject_name(kobj), error); |
208 | dump_stack(); | 203 | dump_stack(); |
209 | } else | 204 | } else |
210 | kobj->state_in_sysfs = 1; | 205 | kobj->state_in_sysfs = 1; |
@@ -221,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj) | |||
221 | static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | 216 | static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, |
222 | va_list vargs) | 217 | va_list vargs) |
223 | { | 218 | { |
224 | va_list aq; | ||
225 | char *name; | ||
226 | |||
227 | va_copy(aq, vargs); | ||
228 | name = kvasprintf(GFP_KERNEL, fmt, vargs); | ||
229 | va_end(aq); | ||
230 | |||
231 | if (!name) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | /* Free the old name, if necessary. */ | 219 | /* Free the old name, if necessary. */ |
235 | kfree(kobj->name); | 220 | kfree(kobj->name); |
236 | 221 | ||
237 | /* Now, set the new name */ | 222 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); |
238 | kobj->name = name; | 223 | if (!kobj->name) |
224 | return -ENOMEM; | ||
239 | 225 | ||
240 | return 0; | 226 | return 0; |
241 | } | 227 | } |
@@ -251,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | |||
251 | */ | 237 | */ |
252 | int kobject_set_name(struct kobject *kobj, const char *fmt, ...) | 238 | int kobject_set_name(struct kobject *kobj, const char *fmt, ...) |
253 | { | 239 | { |
254 | va_list args; | 240 | va_list vargs; |
255 | int retval; | 241 | int retval; |
256 | 242 | ||
257 | va_start(args, fmt); | 243 | va_start(vargs, fmt); |
258 | retval = kobject_set_name_vargs(kobj, fmt, args); | 244 | retval = kobject_set_name_vargs(kobj, fmt, vargs); |
259 | va_end(args); | 245 | va_end(vargs); |
260 | 246 | ||
261 | return retval; | 247 | return retval; |
262 | } | 248 | } |
@@ -306,12 +292,9 @@ EXPORT_SYMBOL(kobject_init); | |||
306 | static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, | 292 | static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, |
307 | const char *fmt, va_list vargs) | 293 | const char *fmt, va_list vargs) |
308 | { | 294 | { |
309 | va_list aq; | ||
310 | int retval; | 295 | int retval; |
311 | 296 | ||
312 | va_copy(aq, vargs); | 297 | retval = kobject_set_name_vargs(kobj, fmt, vargs); |
313 | retval = kobject_set_name_vargs(kobj, fmt, aq); | ||
314 | va_end(aq); | ||
315 | if (retval) { | 298 | if (retval) { |
316 | printk(KERN_ERR "kobject: can not set name properly!\n"); | 299 | printk(KERN_ERR "kobject: can not set name properly!\n"); |
317 | return retval; | 300 | return retval; |
@@ -545,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj) | |||
545 | const char *name = kobj->name; | 528 | const char *name = kobj->name; |
546 | 529 | ||
547 | pr_debug("kobject: '%s' (%p): %s\n", | 530 | pr_debug("kobject: '%s' (%p): %s\n", |
548 | kobject_name(kobj), kobj, __FUNCTION__); | 531 | kobject_name(kobj), kobj, __func__); |
549 | 532 | ||
550 | if (t && !t->release) | 533 | if (t && !t->release) |
551 | pr_debug("kobject: '%s' (%p): does not have a release() " | 534 | pr_debug("kobject: '%s' (%p): does not have a release() " |
@@ -592,13 +575,20 @@ static void kobject_release(struct kref *kref) | |||
592 | */ | 575 | */ |
593 | void kobject_put(struct kobject *kobj) | 576 | void kobject_put(struct kobject *kobj) |
594 | { | 577 | { |
595 | if (kobj) | 578 | if (kobj) { |
579 | if (!kobj->state_initialized) { | ||
580 | printk(KERN_WARNING "kobject: '%s' (%p): is not " | ||
581 | "initialized, yet kobject_put() is being " | ||
582 | "called.\n", kobject_name(kobj), kobj); | ||
583 | WARN_ON(1); | ||
584 | } | ||
596 | kref_put(&kobj->kref, kobject_release); | 585 | kref_put(&kobj->kref, kobject_release); |
586 | } | ||
597 | } | 587 | } |
598 | 588 | ||
599 | static void dynamic_kobj_release(struct kobject *kobj) | 589 | static void dynamic_kobj_release(struct kobject *kobj) |
600 | { | 590 | { |
601 | pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__); | 591 | pr_debug("kobject: (%p): %s\n", kobj, __func__); |
602 | kfree(kobj); | 592 | kfree(kobj); |
603 | } | 593 | } |
604 | 594 | ||
@@ -655,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent) | |||
655 | retval = kobject_add(kobj, parent, "%s", name); | 645 | retval = kobject_add(kobj, parent, "%s", name); |
656 | if (retval) { | 646 | if (retval) { |
657 | printk(KERN_WARNING "%s: kobject_add error: %d\n", | 647 | printk(KERN_WARNING "%s: kobject_add error: %d\n", |
658 | __FUNCTION__, retval); | 648 | __func__, retval); |
659 | kobject_put(kobj); | 649 | kobject_put(kobj); |
660 | kobj = NULL; | 650 | kobj = NULL; |
661 | } | 651 | } |
@@ -745,12 +735,11 @@ void kset_unregister(struct kset *k) | |||
745 | */ | 735 | */ |
746 | struct kobject *kset_find_obj(struct kset *kset, const char *name) | 736 | struct kobject *kset_find_obj(struct kset *kset, const char *name) |
747 | { | 737 | { |
748 | struct list_head *entry; | 738 | struct kobject *k; |
749 | struct kobject *ret = NULL; | 739 | struct kobject *ret = NULL; |
750 | 740 | ||
751 | spin_lock(&kset->list_lock); | 741 | spin_lock(&kset->list_lock); |
752 | list_for_each(entry, &kset->list) { | 742 | list_for_each_entry(k, &kset->list, entry) { |
753 | struct kobject *k = to_kobj(entry); | ||
754 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { | 743 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { |
755 | ret = kobject_get(k); | 744 | ret = kobject_get(k); |
756 | break; | 745 | break; |
@@ -764,7 +753,7 @@ static void kset_release(struct kobject *kobj) | |||
764 | { | 753 | { |
765 | struct kset *kset = container_of(kobj, struct kset, kobj); | 754 | struct kset *kset = container_of(kobj, struct kset, kobj); |
766 | pr_debug("kobject: '%s' (%p): %s\n", | 755 | pr_debug("kobject: '%s' (%p): %s\n", |
767 | kobject_name(kobj), kobj, __FUNCTION__); | 756 | kobject_name(kobj), kobj, __func__); |
768 | kfree(kset); | 757 | kfree(kset); |
769 | } | 758 | } |
770 | 759 | ||
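A brief sketch of the reference-counting contract that the kset_find_obj() conversion above preserves; lookup_and_drop(), demo_kset and the "eth0" name are invented for illustration:

	#include <linux/kobject.h>

	static void lookup_and_drop(struct kset *demo_kset)
	{
		struct kobject *kobj;

		/* kset_find_obj() returns a kobject_get()ed reference
		 * (or NULL), so a successful lookup must be balanced
		 * with kobject_put(). */
		kobj = kset_find_obj(demo_kset, "eth0");
		if (kobj)
			kobject_put(kobj);
	}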
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 5b6d7f6956b9..2fa545a63160 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -15,11 +15,13 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/string.h> | ||
19 | #include <linux/kobject.h> | ||
20 | #include <linux/module.h> | ||
21 | |||
18 | #include <linux/socket.h> | 22 | #include <linux/socket.h> |
19 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
20 | #include <linux/netlink.h> | 24 | #include <linux/netlink.h> |
21 | #include <linux/string.h> | ||
22 | #include <linux/kobject.h> | ||
23 | #include <net/sock.h> | 25 | #include <net/sock.h> |
24 | 26 | ||
25 | 27 | ||
@@ -99,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
99 | int retval = 0; | 101 | int retval = 0; |
100 | 102 | ||
101 | pr_debug("kobject: '%s' (%p): %s\n", | 103 | pr_debug("kobject: '%s' (%p): %s\n", |
102 | kobject_name(kobj), kobj, __FUNCTION__); | 104 | kobject_name(kobj), kobj, __func__); |
103 | 105 | ||
104 | /* search the kset we belong to */ | 106 | /* search the kset we belong to */ |
105 | top_kobj = kobj; | 107 | top_kobj = kobj; |
@@ -109,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
109 | if (!top_kobj->kset) { | 111 | if (!top_kobj->kset) { |
110 | pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " | 112 | pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " |
111 | "without kset!\n", kobject_name(kobj), kobj, | 113 | "without kset!\n", kobject_name(kobj), kobj, |
112 | __FUNCTION__); | 114 | __func__); |
113 | return -EINVAL; | 115 | return -EINVAL; |
114 | } | 116 | } |
115 | 117 | ||
@@ -121,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
121 | if (!uevent_ops->filter(kset, kobj)) { | 123 | if (!uevent_ops->filter(kset, kobj)) { |
122 | pr_debug("kobject: '%s' (%p): %s: filter function " | 124 | pr_debug("kobject: '%s' (%p): %s: filter function " |
123 | "caused the event to drop!\n", | 125 | "caused the event to drop!\n", |
124 | kobject_name(kobj), kobj, __FUNCTION__); | 126 | kobject_name(kobj), kobj, __func__); |
125 | return 0; | 127 | return 0; |
126 | } | 128 | } |
127 | 129 | ||
@@ -133,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
133 | if (!subsystem) { | 135 | if (!subsystem) { |
134 | pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " | 136 | pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " |
135 | "event to drop!\n", kobject_name(kobj), kobj, | 137 | "event to drop!\n", kobject_name(kobj), kobj, |
136 | __FUNCTION__); | 138 | __func__); |
137 | return 0; | 139 | return 0; |
138 | } | 140 | } |
139 | 141 | ||
@@ -175,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
175 | if (retval) { | 177 | if (retval) { |
176 | pr_debug("kobject: '%s' (%p): %s: uevent() returned " | 178 | pr_debug("kobject: '%s' (%p): %s: uevent() returned " |
177 | "%d\n", kobject_name(kobj), kobj, | 179 | "%d\n", kobject_name(kobj), kobj, |
178 | __FUNCTION__, retval); | 180 | __func__, retval); |
179 | goto exit; | 181 | goto exit; |
180 | } | 182 | } |
181 | } | 183 | } |
diff --git a/lib/lmb.c b/lib/lmb.c new file mode 100644 index 000000000000..83287d3869a3 --- /dev/null +++ b/lib/lmb.c | |||
@@ -0,0 +1,509 @@ | |||
1 | /* | ||
2 | * Procedures for maintaining information about logical memory blocks. | ||
3 | * | ||
4 | * Peter Bergner, IBM Corp. June 2001. | ||
5 | * Copyright (C) 2001 Peter Bergner. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/bitops.h> | ||
16 | #include <linux/lmb.h> | ||
17 | |||
18 | #define LMB_ALLOC_ANYWHERE 0 | ||
19 | |||
20 | struct lmb lmb; | ||
21 | |||
22 | void lmb_dump_all(void) | ||
23 | { | ||
24 | #ifdef DEBUG | ||
25 | unsigned long i; | ||
26 | |||
27 | pr_debug("lmb_dump_all:\n"); | ||
28 | pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt); | ||
29 | pr_debug(" memory.size = 0x%llx\n", | ||
30 | (unsigned long long)lmb.memory.size); | ||
31 | for (i=0; i < lmb.memory.cnt ;i++) { | ||
32 | pr_debug(" memory.region[0x%x].base = 0x%llx\n", | ||
33 | i, (unsigned long long)lmb.memory.region[i].base); | ||
34 | pr_debug(" .size = 0x%llx\n", | ||
35 | (unsigned long long)lmb.memory.region[i].size); | ||
36 | } | ||
37 | |||
38 | pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); | ||
39 | pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size); | ||
40 | for (i=0; i < lmb.reserved.cnt ;i++) { | ||
41 | pr_debug(" reserved.region[0x%x].base = 0x%llx\n", | ||
42 | i, (unsigned long long)lmb.reserved.region[i].base); | ||
43 | pr_debug(" .size = 0x%llx\n", | ||
44 | (unsigned long long)lmb.reserved.region[i].size); | ||
45 | } | ||
46 | #endif /* DEBUG */ | ||
47 | } | ||
48 | |||
49 | static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, | ||
50 | u64 size2) | ||
51 | { | ||
52 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | ||
53 | } | ||
54 | |||
55 | static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) | ||
56 | { | ||
57 | if (base2 == base1 + size1) | ||
58 | return 1; | ||
59 | else if (base1 == base2 + size2) | ||
60 | return -1; | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static long lmb_regions_adjacent(struct lmb_region *rgn, | ||
66 | unsigned long r1, unsigned long r2) | ||
67 | { | ||
68 | u64 base1 = rgn->region[r1].base; | ||
69 | u64 size1 = rgn->region[r1].size; | ||
70 | u64 base2 = rgn->region[r2].base; | ||
71 | u64 size2 = rgn->region[r2].size; | ||
72 | |||
73 | return lmb_addrs_adjacent(base1, size1, base2, size2); | ||
74 | } | ||
75 | |||
76 | static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) | ||
77 | { | ||
78 | unsigned long i; | ||
79 | |||
80 | for (i = r; i < rgn->cnt - 1; i++) { | ||
81 | rgn->region[i].base = rgn->region[i + 1].base; | ||
82 | rgn->region[i].size = rgn->region[i + 1].size; | ||
83 | } | ||
84 | rgn->cnt--; | ||
85 | } | ||
86 | |||
87 | /* Assumption: base addr of region 1 < base addr of region 2 */ | ||
88 | static void lmb_coalesce_regions(struct lmb_region *rgn, | ||
89 | unsigned long r1, unsigned long r2) | ||
90 | { | ||
91 | rgn->region[r1].size += rgn->region[r2].size; | ||
92 | lmb_remove_region(rgn, r2); | ||
93 | } | ||
94 | |||
95 | void __init lmb_init(void) | ||
96 | { | ||
97 | /* Create a dummy zero size LMB which will get coalesced away later. | ||
98 | * This simplifies the lmb_add() code below... | ||
99 | */ | ||
100 | lmb.memory.region[0].base = 0; | ||
101 | lmb.memory.region[0].size = 0; | ||
102 | lmb.memory.cnt = 1; | ||
103 | |||
104 | /* Ditto. */ | ||
105 | lmb.reserved.region[0].base = 0; | ||
106 | lmb.reserved.region[0].size = 0; | ||
107 | lmb.reserved.cnt = 1; | ||
108 | } | ||
109 | |||
110 | void __init lmb_analyze(void) | ||
111 | { | ||
112 | int i; | ||
113 | |||
114 | lmb.memory.size = 0; | ||
115 | |||
116 | for (i = 0; i < lmb.memory.cnt; i++) | ||
117 | lmb.memory.size += lmb.memory.region[i].size; | ||
118 | } | ||
119 | |||
120 | static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) | ||
121 | { | ||
122 | unsigned long coalesced = 0; | ||
123 | long adjacent, i; | ||
124 | |||
125 | if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { | ||
126 | rgn->region[0].base = base; | ||
127 | rgn->region[0].size = size; | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* First try and coalesce this LMB with another. */ | ||
132 | for (i = 0; i < rgn->cnt; i++) { | ||
133 | u64 rgnbase = rgn->region[i].base; | ||
134 | u64 rgnsize = rgn->region[i].size; | ||
135 | |||
136 | if ((rgnbase == base) && (rgnsize == size)) | ||
137 | /* Already have this region, so we're done */ | ||
138 | return 0; | ||
139 | |||
140 | adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); | ||
141 | if (adjacent > 0) { | ||
142 | rgn->region[i].base -= size; | ||
143 | rgn->region[i].size += size; | ||
144 | coalesced++; | ||
145 | break; | ||
146 | } else if (adjacent < 0) { | ||
147 | rgn->region[i].size += size; | ||
148 | coalesced++; | ||
149 | break; | ||
150 | } | ||
151 | } | ||
152 | |||
153 | if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) { | ||
154 | lmb_coalesce_regions(rgn, i, i+1); | ||
155 | coalesced++; | ||
156 | } | ||
157 | |||
158 | if (coalesced) | ||
159 | return coalesced; | ||
160 | if (rgn->cnt >= MAX_LMB_REGIONS) | ||
161 | return -1; | ||
162 | |||
163 | /* Couldn't coalesce the LMB, so add it to the sorted table. */ | ||
164 | for (i = rgn->cnt - 1; i >= 0; i--) { | ||
165 | if (base < rgn->region[i].base) { | ||
166 | rgn->region[i+1].base = rgn->region[i].base; | ||
167 | rgn->region[i+1].size = rgn->region[i].size; | ||
168 | } else { | ||
169 | rgn->region[i+1].base = base; | ||
170 | rgn->region[i+1].size = size; | ||
171 | break; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | if (base < rgn->region[0].base) { | ||
176 | rgn->region[0].base = base; | ||
177 | rgn->region[0].size = size; | ||
178 | } | ||
179 | rgn->cnt++; | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | long lmb_add(u64 base, u64 size) | ||
185 | { | ||
186 | struct lmb_region *_rgn = &lmb.memory; | ||
187 | |||
188 | /* On pSeries LPAR systems, the first LMB is our RMO region. */ | ||
189 | if (base == 0) | ||
190 | lmb.rmo_size = size; | ||
191 | |||
192 | return lmb_add_region(_rgn, base, size); | ||
193 | |||
194 | } | ||
195 | |||
196 | long lmb_remove(u64 base, u64 size) | ||
197 | { | ||
198 | struct lmb_region *rgn = &(lmb.memory); | ||
199 | u64 rgnbegin, rgnend; | ||
200 | u64 end = base + size; | ||
201 | int i; | ||
202 | |||
203 | rgnbegin = rgnend = 0; /* suppress gcc warnings */ | ||
204 | |||
205 | /* Find the region where (base, size) belongs to */ | ||
206 | for (i=0; i < rgn->cnt; i++) { | ||
207 | rgnbegin = rgn->region[i].base; | ||
208 | rgnend = rgnbegin + rgn->region[i].size; | ||
209 | |||
210 | if ((rgnbegin <= base) && (end <= rgnend)) | ||
211 | break; | ||
212 | } | ||
213 | |||
214 | /* Didn't find the region */ | ||
215 | if (i == rgn->cnt) | ||
216 | return -1; | ||
217 | |||
218 | /* Check to see if we are removing entire region */ | ||
219 | if ((rgnbegin == base) && (rgnend == end)) { | ||
220 | lmb_remove_region(rgn, i); | ||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | /* Check to see if region is matching at the front */ | ||
225 | if (rgnbegin == base) { | ||
226 | rgn->region[i].base = end; | ||
227 | rgn->region[i].size -= size; | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | /* Check to see if the region is matching at the end */ | ||
232 | if (rgnend == end) { | ||
233 | rgn->region[i].size -= size; | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * We need to split the entry - adjust the current one to the | ||
239 | * beginning of the hole and add the region after the hole. | ||
240 | */ | ||
241 | rgn->region[i].size = base - rgn->region[i].base; | ||
242 | return lmb_add_region(rgn, end, rgnend - end); | ||
243 | } | ||
244 | |||
245 | long __init lmb_reserve(u64 base, u64 size) | ||
246 | { | ||
247 | struct lmb_region *_rgn = &lmb.reserved; | ||
248 | |||
249 | BUG_ON(0 == size); | ||
250 | |||
251 | return lmb_add_region(_rgn, base, size); | ||
252 | } | ||
253 | |||
254 | long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) | ||
255 | { | ||
256 | unsigned long i; | ||
257 | |||
258 | for (i = 0; i < rgn->cnt; i++) { | ||
259 | u64 rgnbase = rgn->region[i].base; | ||
260 | u64 rgnsize = rgn->region[i].size; | ||
261 | if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) | ||
262 | break; | ||
263 | } | ||
264 | |||
265 | return (i < rgn->cnt) ? i : -1; | ||
266 | } | ||
267 | |||
268 | static u64 lmb_align_down(u64 addr, u64 size) | ||
269 | { | ||
270 | return addr & ~(size - 1); | ||
271 | } | ||
272 | |||
273 | static u64 lmb_align_up(u64 addr, u64 size) | ||
274 | { | ||
275 | return (addr + (size - 1)) & ~(size - 1); | ||
276 | } | ||
277 | |||
278 | static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end, | ||
279 | u64 size, u64 align) | ||
280 | { | ||
281 | u64 base, res_base; | ||
282 | long j; | ||
283 | |||
284 | base = lmb_align_down((end - size), align); | ||
285 | while (start <= base) { | ||
286 | j = lmb_overlaps_region(&lmb.reserved, base, size); | ||
287 | if (j < 0) { | ||
288 | /* this area isn't reserved, take it */ | ||
289 | if (lmb_add_region(&lmb.reserved, base, | ||
290 | lmb_align_up(size, align)) < 0) | ||
291 | base = ~(u64)0; | ||
292 | return base; | ||
293 | } | ||
294 | res_base = lmb.reserved.region[j].base; | ||
295 | if (res_base < size) | ||
296 | break; | ||
297 | base = lmb_align_down(res_base - size, align); | ||
298 | } | ||
299 | |||
300 | return ~(u64)0; | ||
301 | } | ||
302 | |||
303 | static u64 __init lmb_alloc_nid_region(struct lmb_property *mp, | ||
304 | u64 (*nid_range)(u64, u64, int *), | ||
305 | u64 size, u64 align, int nid) | ||
306 | { | ||
307 | u64 start, end; | ||
308 | |||
309 | start = mp->base; | ||
310 | end = start + mp->size; | ||
311 | |||
312 | start = lmb_align_up(start, align); | ||
313 | while (start < end) { | ||
314 | u64 this_end; | ||
315 | int this_nid; | ||
316 | |||
317 | this_end = nid_range(start, end, &this_nid); | ||
318 | if (this_nid == nid) { | ||
319 | u64 ret = lmb_alloc_nid_unreserved(start, this_end, | ||
320 | size, align); | ||
321 | if (ret != ~(u64)0) | ||
322 | return ret; | ||
323 | } | ||
324 | start = this_end; | ||
325 | } | ||
326 | |||
327 | return ~(u64)0; | ||
328 | } | ||
329 | |||
330 | u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, | ||
331 | u64 (*nid_range)(u64 start, u64 end, int *nid)) | ||
332 | { | ||
333 | struct lmb_region *mem = &lmb.memory; | ||
334 | int i; | ||
335 | |||
336 | for (i = 0; i < mem->cnt; i++) { | ||
337 | u64 ret = lmb_alloc_nid_region(&mem->region[i], | ||
338 | nid_range, | ||
339 | size, align, nid); | ||
340 | if (ret != ~(u64)0) | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | return lmb_alloc(size, align); | ||
345 | } | ||
346 | |||
347 | u64 __init lmb_alloc(u64 size, u64 align) | ||
348 | { | ||
349 | return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); | ||
350 | } | ||
351 | |||
352 | u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr) | ||
353 | { | ||
354 | u64 alloc; | ||
355 | |||
356 | alloc = __lmb_alloc_base(size, align, max_addr); | ||
357 | |||
358 | if (alloc == 0) | ||
359 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | ||
360 | (unsigned long long) size, (unsigned long long) max_addr); | ||
361 | |||
362 | return alloc; | ||
363 | } | ||
364 | |||
365 | u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) | ||
366 | { | ||
367 | long i, j; | ||
368 | u64 base = 0; | ||
369 | u64 res_base; | ||
370 | |||
371 | BUG_ON(0 == size); | ||
372 | |||
373 | /* On some platforms, make sure we allocate lowmem */ | ||
374 | /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ | ||
375 | if (max_addr == LMB_ALLOC_ANYWHERE) | ||
376 | max_addr = LMB_REAL_LIMIT; | ||
377 | |||
378 | for (i = lmb.memory.cnt - 1; i >= 0; i--) { | ||
379 | u64 lmbbase = lmb.memory.region[i].base; | ||
380 | u64 lmbsize = lmb.memory.region[i].size; | ||
381 | |||
382 | if (lmbsize < size) | ||
383 | continue; | ||
384 | if (max_addr == LMB_ALLOC_ANYWHERE) | ||
385 | base = lmb_align_down(lmbbase + lmbsize - size, align); | ||
386 | else if (lmbbase < max_addr) { | ||
387 | base = min(lmbbase + lmbsize, max_addr); | ||
388 | base = lmb_align_down(base - size, align); | ||
389 | } else | ||
390 | continue; | ||
391 | |||
392 | while (base && lmbbase <= base) { | ||
393 | j = lmb_overlaps_region(&lmb.reserved, base, size); | ||
394 | if (j < 0) { | ||
395 | /* this area isn't reserved, take it */ | ||
396 | if (lmb_add_region(&lmb.reserved, base, | ||
397 | lmb_align_up(size, align)) < 0) | ||
398 | return 0; | ||
399 | return base; | ||
400 | } | ||
401 | res_base = lmb.reserved.region[j].base; | ||
402 | if (res_base < size) | ||
403 | break; | ||
404 | base = lmb_align_down(res_base - size, align); | ||
405 | } | ||
406 | } | ||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | /* You must call lmb_analyze() before this. */ | ||
411 | u64 __init lmb_phys_mem_size(void) | ||
412 | { | ||
413 | return lmb.memory.size; | ||
414 | } | ||
415 | |||
416 | u64 __init lmb_end_of_DRAM(void) | ||
417 | { | ||
418 | int idx = lmb.memory.cnt - 1; | ||
419 | |||
420 | return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); | ||
421 | } | ||
422 | |||
423 | /* You must call lmb_analyze() after this. */ | ||
424 | void __init lmb_enforce_memory_limit(u64 memory_limit) | ||
425 | { | ||
426 | unsigned long i; | ||
427 | u64 limit; | ||
428 | struct lmb_property *p; | ||
429 | |||
430 | if (!memory_limit) | ||
431 | return; | ||
432 | |||
433 | /* Truncate the lmb regions to satisfy the memory limit. */ | ||
434 | limit = memory_limit; | ||
435 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
436 | if (limit > lmb.memory.region[i].size) { | ||
437 | limit -= lmb.memory.region[i].size; | ||
438 | continue; | ||
439 | } | ||
440 | |||
441 | lmb.memory.region[i].size = limit; | ||
442 | lmb.memory.cnt = i + 1; | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | if (lmb.memory.region[0].size < lmb.rmo_size) | ||
447 | lmb.rmo_size = lmb.memory.region[0].size; | ||
448 | |||
449 | /* And truncate any reserves above the limit also. */ | ||
450 | for (i = 0; i < lmb.reserved.cnt; i++) { | ||
451 | p = &lmb.reserved.region[i]; | ||
452 | |||
453 | if (p->base > memory_limit) | ||
454 | p->size = 0; | ||
455 | else if ((p->base + p->size) > memory_limit) | ||
456 | p->size = memory_limit - p->base; | ||
457 | |||
458 | if (p->size == 0) { | ||
459 | lmb_remove_region(&lmb.reserved, i); | ||
460 | i--; | ||
461 | } | ||
462 | } | ||
463 | } | ||
464 | |||
465 | int __init lmb_is_reserved(u64 addr) | ||
466 | { | ||
467 | int i; | ||
468 | |||
469 | for (i = 0; i < lmb.reserved.cnt; i++) { | ||
470 | u64 upper = lmb.reserved.region[i].base + | ||
471 | lmb.reserved.region[i].size - 1; | ||
472 | if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) | ||
473 | return 1; | ||
474 | } | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | /* | ||
479 | * Given a <base, len>, find which memory regions belong to this range. | ||
480 | * Adjust the request and return a contiguous chunk. | ||
481 | */ | ||
482 | int lmb_find(struct lmb_property *res) | ||
483 | { | ||
484 | int i; | ||
485 | u64 rstart, rend; | ||
486 | |||
487 | rstart = res->base; | ||
488 | rend = rstart + res->size - 1; | ||
489 | |||
490 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
491 | u64 start = lmb.memory.region[i].base; | ||
492 | u64 end = start + lmb.memory.region[i].size - 1; | ||
493 | |||
494 | if (start > rend) | ||
495 | return -1; | ||
496 | |||
497 | if ((end >= rstart) && (start < rend)) { | ||
498 | /* adjust the request */ | ||
499 | if (rstart < start) | ||
500 | rstart = start; | ||
501 | if (rend > end) | ||
502 | rend = end; | ||
503 | res->base = rstart; | ||
504 | res->size = rend - rstart + 1; | ||
505 | return 0; | ||
506 | } | ||
507 | } | ||
508 | return -1; | ||
509 | } | ||
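A rough sketch of how an architecture's early setup code might drive the new lmb API; the addresses and sizes below are invented, and only the lmb_* calls come from this file:

	#include <linux/init.h>
	#include <linux/lmb.h>

	static void __init demo_lmb_setup(void)
	{
		lmb_init();

		/* Register the RAM the firmware reported, then protect the
		 * ranges that are already in use. */
		lmb_add(0x00000000, 0x10000000);	/* 256 MB of memory */
		lmb_reserve(0x00000000, 0x00400000);	/* first 4 MB reserved */

		lmb_analyze();				/* totals lmb.memory.size */

		/* Carve out a 1 MB, 1 MB-aligned block below 16 MB;
		 * lmb_alloc_base() panics if this cannot be satisfied. */
		(void)lmb_alloc_base(0x100000, 0x100000, 0x1000000);
	}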
diff --git a/lib/pcounter.c b/lib/pcounter.c deleted file mode 100644 index 9b56807da93b..000000000000 --- a/lib/pcounter.c +++ /dev/null | |||
@@ -1,58 +0,0 @@ | |||
1 | /* | ||
2 | * Define default pcounter functions | ||
3 | * Note that often used pcounters use dedicated functions to get a speed increase. | ||
4 | * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER) | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/pcounter.h> | ||
9 | #include <linux/smp.h> | ||
10 | #include <linux/cpumask.h> | ||
11 | |||
12 | static void pcounter_dyn_add(struct pcounter *self, int inc) | ||
13 | { | ||
14 | per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc; | ||
15 | } | ||
16 | |||
17 | static int pcounter_dyn_getval(const struct pcounter *self, int cpu) | ||
18 | { | ||
19 | return per_cpu_ptr(self->per_cpu_values, cpu)[0]; | ||
20 | } | ||
21 | |||
22 | int pcounter_getval(const struct pcounter *self) | ||
23 | { | ||
24 | int res = 0, cpu; | ||
25 | |||
26 | for_each_possible_cpu(cpu) | ||
27 | res += self->getval(self, cpu); | ||
28 | |||
29 | return res; | ||
30 | } | ||
31 | EXPORT_SYMBOL_GPL(pcounter_getval); | ||
32 | |||
33 | int pcounter_alloc(struct pcounter *self) | ||
34 | { | ||
35 | int rc = 0; | ||
36 | if (self->add == NULL) { | ||
37 | self->per_cpu_values = alloc_percpu(int); | ||
38 | if (self->per_cpu_values != NULL) { | ||
39 | self->add = pcounter_dyn_add; | ||
40 | self->getval = pcounter_dyn_getval; | ||
41 | } else | ||
42 | rc = 1; | ||
43 | } | ||
44 | return rc; | ||
45 | } | ||
46 | EXPORT_SYMBOL_GPL(pcounter_alloc); | ||
47 | |||
48 | void pcounter_free(struct pcounter *self) | ||
49 | { | ||
50 | if (self->per_cpu_values != NULL) { | ||
51 | free_percpu(self->per_cpu_values); | ||
52 | self->per_cpu_values = NULL; | ||
53 | self->getval = NULL; | ||
54 | self->add = NULL; | ||
55 | } | ||
56 | } | ||
57 | EXPORT_SYMBOL_GPL(pcounter_free); | ||
58 | |||
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 393a0e915c23..119174494cb5 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc) | |||
102 | return; | 102 | return; |
103 | 103 | ||
104 | free_percpu(fbc->counters); | 104 | free_percpu(fbc->counters); |
105 | fbc->counters = NULL; | ||
105 | #ifdef CONFIG_HOTPLUG_CPU | 106 | #ifdef CONFIG_HOTPLUG_CPU |
106 | mutex_lock(&percpu_counters_lock); | 107 | mutex_lock(&percpu_counters_lock); |
107 | list_del(&fbc->list); | 108 | list_del(&fbc->list); |
diff --git a/lib/proportions.c b/lib/proportions.c index 9508d9a7af3e..4f387a643d72 100644 --- a/lib/proportions.c +++ b/lib/proportions.c | |||
@@ -73,12 +73,6 @@ | |||
73 | #include <linux/proportions.h> | 73 | #include <linux/proportions.h> |
74 | #include <linux/rcupdate.h> | 74 | #include <linux/rcupdate.h> |
75 | 75 | ||
76 | /* | ||
77 | * Limit the time part in order to ensure there are some bits left for the | ||
78 | * cycle counter. | ||
79 | */ | ||
80 | #define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) | ||
81 | |||
82 | int prop_descriptor_init(struct prop_descriptor *pd, int shift) | 76 | int prop_descriptor_init(struct prop_descriptor *pd, int shift) |
83 | { | 77 | { |
84 | int err; | 78 | int err; |
@@ -268,6 +262,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) | |||
268 | } | 262 | } |
269 | 263 | ||
270 | /* | 264 | /* |
265 | * identical to __prop_inc_percpu, except that it limits this pl's fraction to | ||
266 | * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded. | ||
267 | */ | ||
268 | void __prop_inc_percpu_max(struct prop_descriptor *pd, | ||
269 | struct prop_local_percpu *pl, long frac) | ||
270 | { | ||
271 | struct prop_global *pg = prop_get_global(pd); | ||
272 | |||
273 | prop_norm_percpu(pg, pl); | ||
274 | |||
275 | if (unlikely(frac != PROP_FRAC_BASE)) { | ||
276 | unsigned long period_2 = 1UL << (pg->shift - 1); | ||
277 | unsigned long counter_mask = period_2 - 1; | ||
278 | unsigned long global_count; | ||
279 | long numerator, denominator; | ||
280 | |||
281 | numerator = percpu_counter_read_positive(&pl->events); | ||
282 | global_count = percpu_counter_read(&pg->events); | ||
283 | denominator = period_2 + (global_count & counter_mask); | ||
284 | |||
285 | if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT)) | ||
286 | goto out_put; | ||
287 | } | ||
288 | |||
289 | percpu_counter_add(&pl->events, 1); | ||
290 | percpu_counter_add(&pg->events, 1); | ||
291 | |||
292 | out_put: | ||
293 | prop_put_global(pd, pg); | ||
294 | } | ||
295 | |||
296 | /* | ||
271 | * Obtain a fraction of this proportion | 297 | * Obtain a fraction of this proportion |
272 | * | 298 | * |
273 | * p_{j} = x_{j} / (period/2 + t % period/2) | 299 | * p_{j} = x_{j} / (period/2 + t % period/2) |
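The comment on __prop_inc_percpu_max() is easier to read with numbers plugged in. Assuming PROP_FRAC_BASE == 1 << PROP_FRAC_SHIFT (which the @frac/PROP_FRAC_BASE wording implies), the guard simply drops events once this prop_local's share of the current half-period exceeds the requested fraction. A hypothetical worked example:

/*
 * pg->shift = 10            -> period_2 = 1 << 9 = 512, counter_mask = 511
 * global_count & mask = 100 -> denominator = 512 + 100 = 612
 * frac = PROP_FRAC_BASE / 4 -> (denominator * frac) >> PROP_FRAC_SHIFT = 612 / 4 = 153
 *
 * While numerator (this pl's events in the window) is at most 153, the event
 * is added to both pl and pg; once it would exceed that, the function jumps
 * to out_put and the event is ignored, capping this pl at roughly 25%.
 */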
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 65f0e758ec38..bd521716ab1a 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root) | |||
114 | } | 114 | } |
115 | } | 115 | } |
116 | if (ret == NULL) | 116 | if (ret == NULL) |
117 | ret = kmem_cache_alloc(radix_tree_node_cachep, | 117 | ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
118 | set_migrateflags(gfp_mask, __GFP_RECLAIMABLE)); | ||
119 | 118 | ||
120 | BUG_ON(radix_tree_is_indirect_ptr(ret)); | 119 | BUG_ON(radix_tree_is_indirect_ptr(ret)); |
121 | return ret; | 120 | return ret; |
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask) | |||
150 | rtp = &__get_cpu_var(radix_tree_preloads); | 149 | rtp = &__get_cpu_var(radix_tree_preloads); |
151 | while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { | 150 | while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { |
152 | preempt_enable(); | 151 | preempt_enable(); |
153 | node = kmem_cache_alloc(radix_tree_node_cachep, | 152 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
154 | set_migrateflags(gfp_mask, __GFP_RECLAIMABLE)); | ||
155 | if (node == NULL) | 153 | if (node == NULL) |
156 | goto out; | 154 | goto out; |
157 | preempt_disable(); | 155 | preempt_disable(); |
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void) | |||
1098 | { | 1096 | { |
1099 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", | 1097 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
1100 | sizeof(struct radix_tree_node), 0, | 1098 | sizeof(struct radix_tree_node), 0, |
1101 | SLAB_PANIC, radix_tree_node_ctor); | 1099 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
1100 | radix_tree_node_ctor); | ||
1102 | radix_tree_init_maxindex(); | 1101 | radix_tree_init_maxindex(); |
1103 | hotcpu_notifier(radix_tree_callback, 0); | 1102 | hotcpu_notifier(radix_tree_callback, 0); |
1104 | } | 1103 | } |
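The radix-tree hunks move reclaim accounting from each allocation's gfp_mask (the dropped set_migrateflags(..., __GFP_RECLAIMABLE) calls) onto the slab cache itself via SLAB_RECLAIM_ACCOUNT. The same pattern works for any cache whose objects the VM may reclaim; a sketch with hypothetical names:

struct example_node {
	unsigned long bits;
};

static struct kmem_cache *example_cachep;

static int example_cache_init(void)
{
	/* Account the whole cache as reclaimable once, instead of tagging
	 * every kmem_cache_alloc() call site. */
	example_cachep = kmem_cache_create("example_node",
					   sizeof(struct example_node), 0,
					   SLAB_RECLAIM_ACCOUNT, NULL);
	return example_cachep ? 0 : -ENOMEM;
}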
diff --git a/lib/ratelimit.c b/lib/ratelimit.c new file mode 100644 index 000000000000..485e3040dcd4 --- /dev/null +++ b/lib/ratelimit.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * ratelimit.c - generic rate limiting helper. | ||
3 | * | ||
4 | * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/jiffies.h> | ||
12 | #include <linux/module.h> | ||
13 | |||
14 | /* | ||
15 | * __ratelimit - rate limiting | ||
16 | * @ratelimit_jiffies: minimum time in jiffies between two callbacks | ||
17 | * @ratelimit_burst: number of callbacks we do before ratelimiting | ||
18 | * | ||
19 | * This enforces a rate limit: not more than @ratelimit_burst callbacks | ||
20 | * in every @ratelimit_jiffies window. | ||
21 | */ | ||
22 | int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) | ||
23 | { | ||
24 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
25 | static unsigned toks = 10 * 5 * HZ; | ||
26 | static unsigned long last_msg; | ||
27 | static int missed; | ||
28 | unsigned long flags; | ||
29 | unsigned long now = jiffies; | ||
30 | |||
31 | spin_lock_irqsave(&ratelimit_lock, flags); | ||
32 | toks += now - last_msg; | ||
33 | last_msg = now; | ||
34 | if (toks > (ratelimit_burst * ratelimit_jiffies)) | ||
35 | toks = ratelimit_burst * ratelimit_jiffies; | ||
36 | if (toks >= ratelimit_jiffies) { | ||
37 | int lost = missed; | ||
38 | |||
39 | missed = 0; | ||
40 | toks -= ratelimit_jiffies; | ||
41 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
42 | if (lost) | ||
43 | printk(KERN_WARNING "%s: %d messages suppressed\n", | ||
44 | __func__, lost); | ||
45 | return 1; | ||
46 | } | ||
47 | missed++; | ||
48 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
49 | return 0; | ||
50 | } | ||
51 | EXPORT_SYMBOL(__ratelimit); | ||
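__ratelimit() returns 1 when the caller may go ahead and 0 when the event should be suppressed, printing a "messages suppressed" summary once a quiet period opens up again. A sketch of the expected calling pattern, using the same burst/interval knobs as the function's parameters (the wrapper name and limits are illustrative):

/* Allow a burst of 10 reports, then at most one every 5 seconds. */
static int example_event_ratelimit(void)
{
	return __ratelimit(5 * HZ, 10);
}

static void example_report_fault(void)
{
	if (example_event_ratelimit())
		printk(KERN_WARNING "example: transient fault detected\n");
}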
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index 3ea2db94d5b0..06d04cfa9339 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/rslib.h> | 45 | #include <linux/rslib.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <asm/semaphore.h> | ||
49 | 48 | ||
50 | /* This list holds all currently allocated rs control structures */ | 49 | /* This list holds all currently allocated rs control structures */ |
51 | static LIST_HEAD (rslist); | 50 | static LIST_HEAD (rslist); |
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index acca4901046c..b80c21100d78 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/scatterlist.h> | 10 | #include <linux/scatterlist.h> |
11 | #include <linux/highmem.h> | ||
11 | 12 | ||
12 | /** | 13 | /** |
13 | * sg_next - return the next scatterlist entry in a list | 14 | * sg_next - return the next scatterlist entry in a list |
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
292 | return ret; | 293 | return ret; |
293 | } | 294 | } |
294 | EXPORT_SYMBOL(sg_alloc_table); | 295 | EXPORT_SYMBOL(sg_alloc_table); |
296 | |||
297 | /** | ||
298 | * sg_copy_buffer - Copy data between a linear buffer and an SG list | ||
299 | * @sgl: The SG list | ||
300 | * @nents: Number of SG entries | ||
301 | * @buf: The linear buffer to copy from or to | ||
302 | * @buflen: The number of bytes to copy | ||
303 | * @to_buffer: transfer direction (non-zero == from an SG list to a | ||
304 | * buffer, 0 == from a buffer to an SG list) | ||
305 | * | ||
306 | * Returns the number of copied bytes. | ||
307 | * | ||
308 | **/ | ||
309 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | ||
310 | void *buf, size_t buflen, int to_buffer) | ||
311 | { | ||
312 | struct scatterlist *sg; | ||
313 | size_t buf_off = 0; | ||
314 | int i; | ||
315 | |||
316 | WARN_ON(!irqs_disabled()); | ||
317 | |||
318 | for_each_sg(sgl, sg, nents, i) { | ||
319 | struct page *page; | ||
320 | int n = 0; | ||
321 | unsigned int sg_off = sg->offset; | ||
322 | unsigned int sg_copy = sg->length; | ||
323 | |||
324 | if (sg_copy > buflen) | ||
325 | sg_copy = buflen; | ||
326 | buflen -= sg_copy; | ||
327 | |||
328 | while (sg_copy > 0) { | ||
329 | unsigned int page_copy; | ||
330 | void *p; | ||
331 | |||
332 | page_copy = PAGE_SIZE - sg_off; | ||
333 | if (page_copy > sg_copy) | ||
334 | page_copy = sg_copy; | ||
335 | |||
336 | page = nth_page(sg_page(sg), n); | ||
337 | p = kmap_atomic(page, KM_BIO_SRC_IRQ); | ||
338 | |||
339 | if (to_buffer) | ||
340 | memcpy(buf + buf_off, p + sg_off, page_copy); | ||
341 | else { | ||
342 | memcpy(p + sg_off, buf + buf_off, page_copy); | ||
343 | flush_kernel_dcache_page(page); | ||
344 | } | ||
345 | |||
346 | kunmap_atomic(p, KM_BIO_SRC_IRQ); | ||
347 | |||
348 | buf_off += page_copy; | ||
349 | sg_off += page_copy; | ||
350 | if (sg_off == PAGE_SIZE) { | ||
351 | sg_off = 0; | ||
352 | n++; | ||
353 | } | ||
354 | sg_copy -= page_copy; | ||
355 | } | ||
356 | |||
357 | if (!buflen) | ||
358 | break; | ||
359 | } | ||
360 | |||
361 | return buf_off; | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * sg_copy_from_buffer - Copy from a linear buffer to an SG list | ||
366 | * @sgl: The SG list | ||
367 | * @nents: Number of SG entries | ||
368 | * @buf: Where to copy from | ||
369 | * @buflen: The number of bytes to copy | ||
370 | * | ||
371 | * Returns the number of copied bytes. | ||
372 | * | ||
373 | **/ | ||
374 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | ||
375 | void *buf, size_t buflen) | ||
376 | { | ||
377 | return sg_copy_buffer(sgl, nents, buf, buflen, 0); | ||
378 | } | ||
379 | EXPORT_SYMBOL(sg_copy_from_buffer); | ||
380 | |||
381 | /** | ||
382 | * sg_copy_to_buffer - Copy from an SG list to a linear buffer | ||
383 | * @sgl: The SG list | ||
384 | * @nents: Number of SG entries | ||
385 | * @buf: Where to copy to | ||
386 | * @buflen: The number of bytes to copy | ||
387 | * | ||
388 | * Returns the number of copied bytes. | ||
389 | * | ||
390 | **/ | ||
391 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
392 | void *buf, size_t buflen) | ||
393 | { | ||
394 | return sg_copy_buffer(sgl, nents, buf, buflen, 1); | ||
395 | } | ||
396 | EXPORT_SYMBOL(sg_copy_to_buffer); | ||
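sg_copy_from_buffer() and sg_copy_to_buffer() give drivers a simple bounce path between a linear kernel buffer and a scatterlist without open-coding the page walk and kmap_atomic() that sg_copy_buffer() performs. Note the WARN_ON(!irqs_disabled()): because the copy uses the KM_BIO_SRC_IRQ atomic kmap slot, callers are expected to run with interrupts off. A hypothetical caller:

/* Sketch: copy a small command block into an already-built SG table.
 * The irq-off section matches the WARN_ON(!irqs_disabled()) above. */
static int example_fill_sg(struct sg_table *table, void *cmd, size_t len)
{
	unsigned long flags;
	size_t copied;

	local_irq_save(flags);
	copied = sg_copy_from_buffer(table->sgl, table->nents, cmd, len);
	local_irq_restore(flags);

	return copied == len ? 0 : -EIO;
}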
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c deleted file mode 100644 index 0198782cdacb..000000000000 --- a/lib/semaphore-sleepers.c +++ /dev/null | |||
@@ -1,176 +0,0 @@ | |||
1 | /* | ||
2 | * i386 and x86-64 semaphore implementation. | ||
3 | * | ||
4 | * (C) Copyright 1999 Linus Torvalds | ||
5 | * | ||
6 | * Portions Copyright 1999 Red Hat, Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> | ||
14 | */ | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <asm/semaphore.h> | ||
19 | |||
20 | /* | ||
21 | * Semaphores are implemented using a two-way counter: | ||
22 | * The "count" variable is decremented for each process | ||
23 | * that tries to acquire the semaphore, while the "sleepers" | ||
24 | * variable counts the processes blocked waiting for it. | ||
25 | * | ||
26 | * Notably, the inline "up()" and "down()" functions can | ||
27 | * efficiently test if they need to do any extra work (up | ||
28 | * needs to do something only if count was negative before | ||
29 | * the increment operation). | ||
30 | * | ||
31 | * "sleeping" and the contention routine ordering is protected | ||
32 | * by the spinlock in the semaphore's waitqueue head. | ||
33 | * | ||
34 | * Note that these functions are only called when there is | ||
35 | * contention on the lock, and as such all this is the | ||
36 | * "non-critical" part of the whole semaphore business. The | ||
37 | * critical part is the inline stuff in <asm/semaphore.h> | ||
38 | * where we want to avoid any extra jumps and calls. | ||
39 | */ | ||
40 | |||
41 | /* | ||
42 | * Logic: | ||
43 | * - only on a boundary condition do we need to care. When we go | ||
44 | * from a negative count to a non-negative, we wake people up. | ||
45 | * - when we go from a non-negative count to a negative, we | ||
46 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
47 | * that we're on the wakeup list before we synchronize so that | ||
48 | * we cannot lose wakeup events. | ||
49 | */ | ||
50 | |||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | wake_up(&sem->wait); | ||
54 | } | ||
55 | |||
56 | void __sched __down(struct semaphore *sem) | ||
57 | { | ||
58 | struct task_struct *tsk = current; | ||
59 | DECLARE_WAITQUEUE(wait, tsk); | ||
60 | unsigned long flags; | ||
61 | |||
62 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
63 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
64 | add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
65 | |||
66 | sem->sleepers++; | ||
67 | for (;;) { | ||
68 | int sleepers = sem->sleepers; | ||
69 | |||
70 | /* | ||
71 | * Add "everybody else" into it. They aren't | ||
72 | * playing, because we own the spinlock in | ||
73 | * the wait_queue_head. | ||
74 | */ | ||
75 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
76 | sem->sleepers = 0; | ||
77 | break; | ||
78 | } | ||
79 | sem->sleepers = 1; /* us - see -1 above */ | ||
80 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
81 | |||
82 | schedule(); | ||
83 | |||
84 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
85 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
86 | } | ||
87 | remove_wait_queue_locked(&sem->wait, &wait); | ||
88 | wake_up_locked(&sem->wait); | ||
89 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
90 | tsk->state = TASK_RUNNING; | ||
91 | } | ||
92 | |||
93 | int __sched __down_interruptible(struct semaphore *sem) | ||
94 | { | ||
95 | int retval = 0; | ||
96 | struct task_struct *tsk = current; | ||
97 | DECLARE_WAITQUEUE(wait, tsk); | ||
98 | unsigned long flags; | ||
99 | |||
100 | tsk->state = TASK_INTERRUPTIBLE; | ||
101 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
102 | add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
103 | |||
104 | sem->sleepers++; | ||
105 | for (;;) { | ||
106 | int sleepers = sem->sleepers; | ||
107 | |||
108 | /* | ||
109 | * With signals pending, this turns into | ||
110 | * the trylock failure case - we won't be | ||
111 | * sleeping, and we can't get the lock as | ||
112 | * it has contention. Just correct the count | ||
113 | * and exit. | ||
114 | */ | ||
115 | if (signal_pending(current)) { | ||
116 | retval = -EINTR; | ||
117 | sem->sleepers = 0; | ||
118 | atomic_add(sleepers, &sem->count); | ||
119 | break; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Add "everybody else" into it. They aren't | ||
124 | * playing, because we own the spinlock in | ||
125 | * wait_queue_head. The "-1" is because we're | ||
126 | * still hoping to get the semaphore. | ||
127 | */ | ||
128 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
129 | sem->sleepers = 0; | ||
130 | break; | ||
131 | } | ||
132 | sem->sleepers = 1; /* us - see -1 above */ | ||
133 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
134 | |||
135 | schedule(); | ||
136 | |||
137 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
138 | tsk->state = TASK_INTERRUPTIBLE; | ||
139 | } | ||
140 | remove_wait_queue_locked(&sem->wait, &wait); | ||
141 | wake_up_locked(&sem->wait); | ||
142 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
143 | |||
144 | tsk->state = TASK_RUNNING; | ||
145 | return retval; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Trylock failed - make sure we correct for | ||
150 | * having decremented the count. | ||
151 | * | ||
152 | * We could have done the trylock with a | ||
153 | * single "cmpxchg" without failure cases, | ||
154 | * but then it wouldn't work on a 386. | ||
155 | */ | ||
156 | int __down_trylock(struct semaphore *sem) | ||
157 | { | ||
158 | int sleepers; | ||
159 | unsigned long flags; | ||
160 | |||
161 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
162 | sleepers = sem->sleepers + 1; | ||
163 | sem->sleepers = 0; | ||
164 | |||
165 | /* | ||
166 | * Add "everybody else" and us into it. They aren't | ||
167 | * playing, because we own the spinlock in the | ||
168 | * wait_queue_head. | ||
169 | */ | ||
170 | if (!atomic_add_negative(sleepers, &sem->count)) { | ||
171 | wake_up_locked(&sem->wait); | ||
172 | } | ||
173 | |||
174 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
175 | return 1; | ||
176 | } | ||
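Only the comments of the removed semaphore-sleepers.c survive here, so a short worked trace of the "two-way counter" scheme they describe may help (one holder A, one contender B, initial count of 1; this trace follows the removed code above and is not itself from the tree):

/*
 * down() by A:  count 1 -> 0, still non-negative: fast path, A holds it.
 * down() by B:  count 0 -> -1, slow path __down():
 *                 sleepers 0 -> 1;
 *                 atomic_add_negative(sleepers - 1 = 0, &count) leaves -1,
 *                 still negative, so B sets sleepers = 1 and schedules out.
 * up() by A:    count -1 -> 0; there was a waiter, so the fast path calls
 *                 __up(), which wakes B via wake_up(&sem->wait).
 * B resumes:    sleepers is 1, atomic_add_negative(0, &count) now sees 0,
 *                 which is not negative, so B clears sleepers and owns the
 *                 semaphore with count == 0.
 */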
diff --git a/lib/string.c b/lib/string.c index 5efafed3d6b6..b19b87af65a3 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct) | |||
493 | EXPORT_SYMBOL(strsep); | 493 | EXPORT_SYMBOL(strsep); |
494 | #endif | 494 | #endif |
495 | 495 | ||
496 | /** | ||
497 | * sysfs_streq - return true if strings are equal, modulo trailing newline | ||
498 | * @s1: one string | ||
499 | * @s2: another string | ||
500 | * | ||
501 | * This routine returns true iff two strings are equal, treating both | ||
502 | * NUL and newline-then-NUL as equivalent string terminations. It's | ||
503 | * geared for use with sysfs input strings, which generally terminate | ||
504 | * with newlines but are compared against values without newlines. | ||
505 | */ | ||
506 | bool sysfs_streq(const char *s1, const char *s2) | ||
507 | { | ||
508 | while (*s1 && *s1 == *s2) { | ||
509 | s1++; | ||
510 | s2++; | ||
511 | } | ||
512 | |||
513 | if (*s1 == *s2) | ||
514 | return true; | ||
515 | if (!*s1 && *s2 == '\n' && !s2[1]) | ||
516 | return true; | ||
517 | if (*s1 == '\n' && !s1[1] && !*s2) | ||
518 | return true; | ||
519 | return false; | ||
520 | } | ||
521 | EXPORT_SYMBOL(sysfs_streq); | ||
522 | |||
496 | #ifndef __HAVE_ARCH_MEMSET | 523 | #ifndef __HAVE_ARCH_MEMSET |
497 | /** | 524 | /** |
498 | * memset - Fill a region of memory with the given value | 525 | * memset - Fill a region of memory with the given value |
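sysfs_streq() exists so that store() handlers can compare the newline-terminated buffer arriving from userspace against plain keyword strings without stripping the '\n' first. A sketch of that pattern with a hypothetical device attribute:

static int example_fast_mode;

/* echo fast > /sys/.../mode  or  echo slow > /sys/.../mode */
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	if (sysfs_streq(buf, "fast"))
		example_fast_mode = 1;
	else if (sysfs_streq(buf, "slow"))
		example_fast_mode = 0;
	else
		return -EINVAL;		/* unknown keyword */
	return count;
}
static DEVICE_ATTR(mode, S_IWUSR, NULL, mode_store);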
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 025922807e6e..d568894df8cc 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/iommu-helper.h> | ||
34 | 35 | ||
35 | #define OFFSET(val,align) ((unsigned long) \ | 36 | #define OFFSET(val,align) ((unsigned long) \ |
36 | ( (val) & ( (align) - 1))) | 37 | ( (val) & ( (align) - 1))) |
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr) | |||
282 | return (addr & ~mask) != 0; | 283 | return (addr & ~mask) != 0; |
283 | } | 284 | } |
284 | 285 | ||
285 | static inline unsigned int is_span_boundary(unsigned int index, | ||
286 | unsigned int nslots, | ||
287 | unsigned long offset_slots, | ||
288 | unsigned long max_slots) | ||
289 | { | ||
290 | unsigned long offset = (offset_slots + index) & (max_slots - 1); | ||
291 | return offset + nslots > max_slots; | ||
292 | } | ||
293 | |||
294 | /* | 286 | /* |
295 | * Allocates bounce buffer and returns its kernel virtual address. | 287 | * Allocates bounce buffer and returns its kernel virtual address. |
296 | */ | 288 | */ |
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir) | |||
331 | * request and allocate a buffer from that IO TLB pool. | 323 | * request and allocate a buffer from that IO TLB pool. |
332 | */ | 324 | */ |
333 | spin_lock_irqsave(&io_tlb_lock, flags); | 325 | spin_lock_irqsave(&io_tlb_lock, flags); |
334 | { | 326 | index = ALIGN(io_tlb_index, stride); |
335 | index = ALIGN(io_tlb_index, stride); | 327 | if (index >= io_tlb_nslabs) |
336 | if (index >= io_tlb_nslabs) | 328 | index = 0; |
337 | index = 0; | 329 | wrap = index; |
338 | wrap = index; | 330 | |
339 | 331 | do { | |
340 | do { | 332 | while (iommu_is_span_boundary(index, nslots, offset_slots, |
341 | while (is_span_boundary(index, nslots, offset_slots, | 333 | max_slots)) { |
342 | max_slots)) { | ||
343 | index += stride; | ||
344 | if (index >= io_tlb_nslabs) | ||
345 | index = 0; | ||
346 | if (index == wrap) | ||
347 | goto not_found; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * If we find a slot that indicates we have 'nslots' | ||
352 | * number of contiguous buffers, we allocate the | ||
353 | * buffers from that slot and mark the entries as '0' | ||
354 | * indicating unavailable. | ||
355 | */ | ||
356 | if (io_tlb_list[index] >= nslots) { | ||
357 | int count = 0; | ||
358 | |||
359 | for (i = index; i < (int) (index + nslots); i++) | ||
360 | io_tlb_list[i] = 0; | ||
361 | for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) | ||
362 | io_tlb_list[i] = ++count; | ||
363 | dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); | ||
364 | |||
365 | /* | ||
366 | * Update the indices to avoid searching in | ||
367 | * the next round. | ||
368 | */ | ||
369 | io_tlb_index = ((index + nslots) < io_tlb_nslabs | ||
370 | ? (index + nslots) : 0); | ||
371 | |||
372 | goto found; | ||
373 | } | ||
374 | index += stride; | 334 | index += stride; |
375 | if (index >= io_tlb_nslabs) | 335 | if (index >= io_tlb_nslabs) |
376 | index = 0; | 336 | index = 0; |
377 | } while (index != wrap); | 337 | if (index == wrap) |
338 | goto not_found; | ||
339 | } | ||
378 | 340 | ||
379 | not_found: | 341 | /* |
380 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 342 | * If we find a slot that indicates we have 'nslots' number of |
381 | return NULL; | 343 | * contiguous buffers, we allocate the buffers from that slot |
382 | } | 344 | * and mark the entries as '0' indicating unavailable. |
383 | found: | 345 | */ |
346 | if (io_tlb_list[index] >= nslots) { | ||
347 | int count = 0; | ||
348 | |||
349 | for (i = index; i < (int) (index + nslots); i++) | ||
350 | io_tlb_list[i] = 0; | ||
351 | for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) | ||
352 | io_tlb_list[i] = ++count; | ||
353 | dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); | ||
354 | |||
355 | /* | ||
356 | * Update the indices to avoid searching in the next | ||
357 | * round. | ||
358 | */ | ||
359 | io_tlb_index = ((index + nslots) < io_tlb_nslabs | ||
360 | ? (index + nslots) : 0); | ||
361 | |||
362 | goto found; | ||
363 | } | ||
364 | index += stride; | ||
365 | if (index >= io_tlb_nslabs) | ||
366 | index = 0; | ||
367 | } while (index != wrap); | ||
368 | |||
369 | not_found: | ||
370 | spin_unlock_irqrestore(&io_tlb_lock, flags); | ||
371 | return NULL; | ||
372 | found: | ||
384 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 373 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
385 | 374 | ||
386 | /* | 375 | /* |
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | |||
566 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. | 555 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. |
567 | */ | 556 | */ |
568 | dma_addr_t | 557 | dma_addr_t |
569 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | 558 | swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, |
559 | int dir, struct dma_attrs *attrs) | ||
570 | { | 560 | { |
571 | dma_addr_t dev_addr = virt_to_bus(ptr); | 561 | dma_addr_t dev_addr = virt_to_bus(ptr); |
572 | void *map; | 562 | void *map; |
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | |||
599 | 589 | ||
600 | return dev_addr; | 590 | return dev_addr; |
601 | } | 591 | } |
592 | EXPORT_SYMBOL(swiotlb_map_single_attrs); | ||
593 | |||
594 | dma_addr_t | ||
595 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | ||
596 | { | ||
597 | return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); | ||
598 | } | ||
602 | 599 | ||
603 | /* | 600 | /* |
604 | * Unmap a single streaming mode DMA translation. The dma_addr and size must | 601 | * Unmap a single streaming mode DMA translation. The dma_addr and size must |
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | |||
609 | * whatever the device wrote there. | 606 | * whatever the device wrote there. |
610 | */ | 607 | */ |
611 | void | 608 | void |
612 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | 609 | swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, |
613 | int dir) | 610 | size_t size, int dir, struct dma_attrs *attrs) |
614 | { | 611 | { |
615 | char *dma_addr = bus_to_virt(dev_addr); | 612 | char *dma_addr = bus_to_virt(dev_addr); |
616 | 613 | ||
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | |||
620 | else if (dir == DMA_FROM_DEVICE) | 617 | else if (dir == DMA_FROM_DEVICE) |
621 | dma_mark_clean(dma_addr, size); | 618 | dma_mark_clean(dma_addr, size); |
622 | } | 619 | } |
620 | EXPORT_SYMBOL(swiotlb_unmap_single_attrs); | ||
623 | 621 | ||
622 | void | ||
623 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | ||
624 | int dir) | ||
625 | { | ||
626 | return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); | ||
627 | } | ||
624 | /* | 628 | /* |
625 | * Make physical memory consistent for a single streaming mode DMA translation | 629 | * Make physical memory consistent for a single streaming mode DMA translation |
626 | * after a transfer. | 630 | * after a transfer. |
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | |||
691 | SYNC_FOR_DEVICE); | 695 | SYNC_FOR_DEVICE); |
692 | } | 696 | } |
693 | 697 | ||
698 | void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, | ||
699 | struct dma_attrs *); | ||
694 | /* | 700 | /* |
695 | * Map a set of buffers described by scatterlist in streaming mode for DMA. | 701 | * Map a set of buffers described by scatterlist in streaming mode for DMA. |
696 | * This is the scatter-gather version of the above swiotlb_map_single | 702 | * This is the scatter-gather version of the above swiotlb_map_single |
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | |||
708 | * same here. | 714 | * same here. |
709 | */ | 715 | */ |
710 | int | 716 | int |
711 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 717 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
712 | int dir) | 718 | int dir, struct dma_attrs *attrs) |
713 | { | 719 | { |
714 | struct scatterlist *sg; | 720 | struct scatterlist *sg; |
715 | void *addr; | 721 | void *addr; |
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
727 | /* Don't panic here, we expect map_sg users | 733 | /* Don't panic here, we expect map_sg users |
728 | to do proper error handling. */ | 734 | to do proper error handling. */ |
729 | swiotlb_full(hwdev, sg->length, dir, 0); | 735 | swiotlb_full(hwdev, sg->length, dir, 0); |
730 | swiotlb_unmap_sg(hwdev, sgl, i, dir); | 736 | swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, |
737 | attrs); | ||
731 | sgl[0].dma_length = 0; | 738 | sgl[0].dma_length = 0; |
732 | return 0; | 739 | return 0; |
733 | } | 740 | } |
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
738 | } | 745 | } |
739 | return nelems; | 746 | return nelems; |
740 | } | 747 | } |
748 | EXPORT_SYMBOL(swiotlb_map_sg_attrs); | ||
749 | |||
750 | int | ||
751 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | ||
752 | int dir) | ||
753 | { | ||
754 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); | ||
755 | } | ||
741 | 756 | ||
742 | /* | 757 | /* |
743 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules | 758 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules |
744 | * concerning calls here are the same as for swiotlb_unmap_single() above. | 759 | * concerning calls here are the same as for swiotlb_unmap_single() above. |
745 | */ | 760 | */ |
746 | void | 761 | void |
747 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 762 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
748 | int dir) | 763 | int nelems, int dir, struct dma_attrs *attrs) |
749 | { | 764 | { |
750 | struct scatterlist *sg; | 765 | struct scatterlist *sg; |
751 | int i; | 766 | int i; |
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
760 | dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); | 775 | dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); |
761 | } | 776 | } |
762 | } | 777 | } |
778 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | ||
779 | |||
780 | void | ||
781 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | ||
782 | int dir) | ||
783 | { | ||
784 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); | ||
785 | } | ||
763 | 786 | ||
764 | /* | 787 | /* |
765 | * Make physical memory consistent for a set of streaming mode DMA translations | 788 | * Make physical memory consistent for a set of streaming mode DMA translations |
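The swiotlb changes keep the historical entry points as thin wrappers that pass a NULL struct dma_attrs, so existing callers behave exactly as before, while new callers can hand attributes down to the *_attrs variants. A sketch of the attribute-passing path; DEFINE_DMA_ATTRS(), dma_set_attr() and DMA_ATTR_WRITE_BARRIER are assumed to come from the dma-attrs infrastructure this series builds on:

/* Sketch only: map one buffer with a write-barrier attribute and later
 * unmap it with the same attributes, as the paired API expects. */
static dma_addr_t example_map(struct device *hwdev, void *buf, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);		/* assumed helper, <linux/dma-attrs.h> */

	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	return swiotlb_map_single_attrs(hwdev, buf, len, DMA_TO_DEVICE, &attrs);
}

static void example_unmap(struct device *hwdev, dma_addr_t handle, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	swiotlb_unmap_single_attrs(hwdev, handle, len, DMA_TO_DEVICE, &attrs);
}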