| author | Ingo Molnar <mingo@elte.hu> | 2008-08-15 12:15:17 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-08-15 12:15:17 -0400 |
| commit | f3efbe582b5396d134024c03a5fa253f2a85d9a6 (patch) | |
| tree | e4e15b7567b82d24cb1e7327398286a2b88df04c /lib | |
| parent | 05d3ed0a1fe3ea05ab9f3b8d32576a0bc2e19660 (diff) | |
| parent | b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff) | |
Merge branch 'linus' into x86/gart
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig.debug | 47 |
| -rw-r--r-- | lib/Kconfig.kgdb | 14 |
| -rw-r--r-- | lib/Makefile | 6 |
| -rw-r--r-- | lib/bcd.c | 14 |
| -rw-r--r-- | lib/bitmap.c | 11 |
| -rw-r--r-- | lib/cmdline.c | 16 |
| -rw-r--r-- | lib/cpumask.c | 9 |
| -rw-r--r-- | lib/debug_locks.c | 2 |
| -rw-r--r-- | lib/debugobjects.c | 19 |
| -rw-r--r-- | lib/idr.c | 142 |
| -rw-r--r-- | lib/inflate.c | 52 |
| -rw-r--r-- | lib/iomap.c | 3 |
| -rw-r--r-- | lib/kobject.c | 9 |
| -rw-r--r-- | lib/kobject_uevent.c | 6 |
| -rw-r--r-- | lib/list_debug.c | 50 |
| -rw-r--r-- | lib/lzo/lzo1x_decompress.c | 6 |
| -rw-r--r-- | lib/plist.c | 13 |
| -rw-r--r-- | lib/radix-tree.c | 180 |
| -rw-r--r-- | lib/random32.c | 48 |
| -rw-r--r-- | lib/ratelimit.c | 56 |
| -rw-r--r-- | lib/scatterlist.c | 176 |
| -rw-r--r-- | lib/show_mem.c | 63 |
| -rw-r--r-- | lib/smp_processor_id.c | 5 |
| -rw-r--r-- | lib/swiotlb.c | 4 |
| -rw-r--r-- | lib/syscall.c | 75 |
| -rw-r--r-- | lib/vsprintf.c | 2 |
26 files changed, 753 insertions, 275 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ba106db5a65b..800ac8485544 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -150,7 +150,7 @@ config DETECT_SOFTLOCKUP | |||
| 150 | help | 150 | help |
| 151 | Say Y here to enable the kernel to detect "soft lockups", | 151 | Say Y here to enable the kernel to detect "soft lockups", |
| 152 | which are bugs that cause the kernel to loop in kernel | 152 | which are bugs that cause the kernel to loop in kernel |
| 153 | mode for more than 10 seconds, without giving other tasks a | 153 | mode for more than 60 seconds, without giving other tasks a |
| 154 | chance to run. | 154 | chance to run. |
| 155 | 155 | ||
| 156 | When a soft-lockup is detected, the kernel will print the | 156 | When a soft-lockup is detected, the kernel will print the |
| @@ -162,6 +162,30 @@ config DETECT_SOFTLOCKUP | |||
| 162 | can be detected via the NMI-watchdog, on platforms that | 162 | can be detected via the NMI-watchdog, on platforms that |
| 163 | support it.) | 163 | support it.) |
| 164 | 164 | ||
| 165 | config BOOTPARAM_SOFTLOCKUP_PANIC | ||
| 166 | bool "Panic (Reboot) On Soft Lockups" | ||
| 167 | depends on DETECT_SOFTLOCKUP | ||
| 168 | help | ||
| 169 | Say Y here to enable the kernel to panic on "soft lockups", | ||
| 170 | which are bugs that cause the kernel to loop in kernel | ||
| 171 | mode for more than 60 seconds, without giving other tasks a | ||
| 172 | chance to run. | ||
| 173 | |||
| 174 | The panic can be used in combination with panic_timeout, | ||
| 175 | to cause the system to reboot automatically after a | ||
| 176 | lockup has been detected. This feature is useful for | ||
| 177 | high-availability systems that have uptime guarantees and | ||
| 178 | where a lockup must be resolved ASAP. | ||
| 179 | |||
| 180 | Say N if unsure. | ||
| 181 | |||
| 182 | config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | ||
| 183 | int | ||
| 184 | depends on DETECT_SOFTLOCKUP | ||
| 185 | range 0 1 | ||
| 186 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | ||
| 187 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | ||
| 188 | |||
| 165 | config SCHED_DEBUG | 189 | config SCHED_DEBUG |
| 166 | bool "Collect scheduler debugging info" | 190 | bool "Collect scheduler debugging info" |
| 167 | depends on DEBUG_KERNEL && PROC_FS | 191 | depends on DEBUG_KERNEL && PROC_FS |
| @@ -481,6 +505,18 @@ config DEBUG_WRITECOUNT | |||
| 481 | 505 | ||
| 482 | If unsure, say N. | 506 | If unsure, say N. |
| 483 | 507 | ||
| 508 | config DEBUG_MEMORY_INIT | ||
| 509 | bool "Debug memory initialisation" if EMBEDDED | ||
| 510 | default !EMBEDDED | ||
| 511 | help | ||
| 512 | Enable this for additional checks during memory initialisation. | ||
| 513 | The sanity checks verify aspects of the VM such as the memory model | ||
| 514 | and other information provided by the architecture. Verbose | ||
| 515 | information will be printed at KERN_DEBUG loglevel depending | ||
| 516 | on the mminit_loglevel= command-line option. | ||
| 517 | |||
| 518 | If unsure, say Y | ||
| 519 | |||
| 484 | config DEBUG_LIST | 520 | config DEBUG_LIST |
| 485 | bool "Debug linked list manipulation" | 521 | bool "Debug linked list manipulation" |
| 486 | depends on DEBUG_KERNEL | 522 | depends on DEBUG_KERNEL |
| @@ -699,6 +735,15 @@ config FIREWIRE_OHCI_REMOTE_DMA | |||
| 699 | 735 | ||
| 700 | If unsure, say N. | 736 | If unsure, say N. |
| 701 | 737 | ||
| 738 | menuconfig BUILD_DOCSRC | ||
| 739 | bool "Build targets in Documentation/ tree" | ||
| 740 | depends on HEADERS_CHECK | ||
| 741 | help | ||
| 742 | This option attempts to build objects from the source files in the | ||
| 743 | kernel Documentation/ tree. | ||
| 744 | |||
| 745 | Say N if you are unsure. | ||
| 746 | |||
| 702 | source "samples/Kconfig" | 747 | source "samples/Kconfig" |
| 703 | 748 | ||
| 704 | source "lib/Kconfig.kgdb" | 749 | source "lib/Kconfig.kgdb" |
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index a5d4b1dac2a5..9b5d1d7f2ef7 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb | |||
| @@ -1,20 +1,20 @@ | |||
| 1 | 1 | ||
| 2 | config HAVE_ARCH_KGDB_SHADOW_INFO | ||
| 3 | bool | ||
| 4 | |||
| 5 | config HAVE_ARCH_KGDB | 2 | config HAVE_ARCH_KGDB |
| 6 | bool | 3 | bool |
| 7 | 4 | ||
| 8 | menuconfig KGDB | 5 | menuconfig KGDB |
| 9 | bool "KGDB: kernel debugging with remote gdb" | 6 | bool "KGDB: kernel debugging with remote gdb" |
| 10 | select FRAME_POINTER | ||
| 11 | depends on HAVE_ARCH_KGDB | 7 | depends on HAVE_ARCH_KGDB |
| 12 | depends on DEBUG_KERNEL && EXPERIMENTAL | 8 | depends on DEBUG_KERNEL && EXPERIMENTAL |
| 13 | help | 9 | help |
| 14 | If you say Y here, it will be possible to remotely debug the | 10 | If you say Y here, it will be possible to remotely debug the |
| 15 | kernel using gdb. Documentation of kernel debugger is available | 11 | kernel using gdb. It is recommended but not required, that |
| 16 | at http://kgdb.sourceforge.net as well as in DocBook form | 12 | you also turn on the kernel config option |
| 17 | in Documentation/DocBook/. If unsure, say N. | 13 | CONFIG_FRAME_POINTER to aid in producing more reliable stack |
| 14 | backtraces in the external debugger. Documentation of | ||
| 15 | kernel debugger is available at http://kgdb.sourceforge.net | ||
| 16 | as well as in DocBook form in Documentation/DocBook/. If | ||
| 17 | unsure, say N. | ||
| 18 | 18 | ||
| 19 | if KGDB | 19 | if KGDB |
| 20 | 20 | ||
diff --git a/lib/Makefile b/lib/Makefile index 818c4d455518..3b1f94bbe9de 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -11,14 +11,14 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
| 11 | rbtree.o radix-tree.o dump_stack.o \ | 11 | rbtree.o radix-tree.o dump_stack.o \ |
| 12 | idr.o int_sqrt.o extable.o prio_tree.o \ | 12 | idr.o int_sqrt.o extable.o prio_tree.o \ |
| 13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
| 14 | proportions.o prio_heap.o ratelimit.o | 14 | proportions.o prio_heap.o ratelimit.o show_mem.o |
| 15 | 15 | ||
| 16 | lib-$(CONFIG_MMU) += ioremap.o | 16 | lib-$(CONFIG_MMU) += ioremap.o |
| 17 | lib-$(CONFIG_SMP) += cpumask.o | 17 | lib-$(CONFIG_SMP) += cpumask.o |
| 18 | 18 | ||
| 19 | lib-y += kobject.o kref.o klist.o | 19 | lib-y += kobject.o kref.o klist.o |
| 20 | 20 | ||
| 21 | obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 21 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
| 22 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o | 22 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o |
| 23 | 23 | ||
| 24 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 24 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| @@ -78,6 +78,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o | |||
| 78 | 78 | ||
| 79 | obj-$(CONFIG_HAVE_LMB) += lmb.o | 79 | obj-$(CONFIG_HAVE_LMB) += lmb.o |
| 80 | 80 | ||
| 81 | obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o | ||
| 82 | |||
| 81 | hostprogs-y := gen_crc32table | 83 | hostprogs-y := gen_crc32table |
| 82 | clean-files := crc32table.h | 84 | clean-files := crc32table.h |
| 83 | 85 | ||
diff --git a/lib/bcd.c b/lib/bcd.c new file mode 100644 index 000000000000..d74257fd0fe7 --- /dev/null +++ b/lib/bcd.c | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | #include <linux/bcd.h> | ||
| 2 | #include <linux/module.h> | ||
| 3 | |||
| 4 | unsigned bcd2bin(unsigned char val) | ||
| 5 | { | ||
| 6 | return (val & 0x0f) + (val >> 4) * 10; | ||
| 7 | } | ||
| 8 | EXPORT_SYMBOL(bcd2bin); | ||
| 9 | |||
| 10 | unsigned char bin2bcd(unsigned val) | ||
| 11 | { | ||
| 12 | return ((val / 10) << 4) + val % 10; | ||
| 13 | } | ||
| 14 | EXPORT_SYMBOL(bin2bcd); | ||
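The new lib/bcd.c gives drivers a shared pair of BCD helpers instead of per-driver BCD2BIN/BIN2BCD macros. A minimal usage sketch, assuming a hypothetical RTC register value (not part of the patch):

```c
#include <linux/bcd.h>

/* Hedged sketch: convert a BCD-encoded seconds register and back.
 * The 0x59 register value and this function are made up for illustration. */
static void example_rtc_convert(void)
{
        unsigned char raw_sec = 0x59;           /* BCD "59" read from an RTC */
        unsigned sec = bcd2bin(raw_sec);        /* -> 59 */
        unsigned char back = bin2bcd(45);       /* -> 0x45, ready to write back */

        (void)sec;
        (void)back;
}
```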
diff --git a/lib/bitmap.c b/lib/bitmap.c index 482df94ea21e..06fb57c86de0 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
| @@ -316,6 +316,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen, | |||
| 316 | EXPORT_SYMBOL(bitmap_scnprintf); | 316 | EXPORT_SYMBOL(bitmap_scnprintf); |
| 317 | 317 | ||
| 318 | /** | 318 | /** |
| 319 | * bitmap_scnprintf_len - return buffer length needed to convert | ||
| 320 | * bitmap to an ASCII hex string | ||
| 321 | * @nr_bits: number of bits to be converted | ||
| 322 | */ | ||
| 323 | int bitmap_scnprintf_len(unsigned int nr_bits) | ||
| 324 | { | ||
| 325 | unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4; | ||
| 326 | return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1; | ||
| 327 | } | ||
| 328 | |||
| 329 | /** | ||
| 319 | * __bitmap_parse - convert an ASCII hex string into a bitmap. | 330 | * __bitmap_parse - convert an ASCII hex string into a bitmap. |
| 320 | * @buf: pointer to buffer containing string. | 331 | * @buf: pointer to buffer containing string. |
| 321 | * @buflen: buffer size in bytes. If string is smaller than this | 332 | * @buflen: buffer size in bytes. If string is smaller than this |
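The new bitmap_scnprintf_len() reports how many characters bitmap_scnprintf() will emit for a given bit count (hex nibbles plus comma separators between 32-bit chunks). A hedged sizing sketch; the extra byte for the terminating NUL is an assumption about how the length is counted:

```c
#include <linux/bitmap.h>
#include <linux/slab.h>

/* Hedged sketch: allocate a buffer sized by bitmap_scnprintf_len() and
 * format @bits into it. Error handling is kept minimal on purpose. */
static char *example_format_bitmap(const unsigned long *bits, int nbits)
{
        int len = bitmap_scnprintf_len(nbits);
        char *buf = kmalloc(len + 1, GFP_KERNEL);  /* +1 for the NUL (assumed) */

        if (buf)
                bitmap_scnprintf(buf, len + 1, bits, nbits);
        return buf;
}
```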
diff --git a/lib/cmdline.c b/lib/cmdline.c index f596c08d213a..5ba8a942a478 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
| @@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
| 116 | /** | 116 | /** |
| 117 | * memparse - parse a string with mem suffixes into a number | 117 | * memparse - parse a string with mem suffixes into a number |
| 118 | * @ptr: Where parse begins | 118 | * @ptr: Where parse begins |
| 119 | * @retptr: (output) Pointer to next char after parse completes | 119 | * @retptr: (output) Optional pointer to next char after parse completes |
| 120 | * | 120 | * |
| 121 | * Parses a string into a number. The number stored at @ptr is | 121 | * Parses a string into a number. The number stored at @ptr is |
| 122 | * potentially suffixed with %K (for kilobytes, or 1024 bytes), | 122 | * potentially suffixed with %K (for kilobytes, or 1024 bytes), |
| @@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints) | |||
| 126 | * megabyte, or one gigabyte, respectively. | 126 | * megabyte, or one gigabyte, respectively. |
| 127 | */ | 127 | */ |
| 128 | 128 | ||
| 129 | unsigned long long memparse (char *ptr, char **retptr) | 129 | unsigned long long memparse(char *ptr, char **retptr) |
| 130 | { | 130 | { |
| 131 | unsigned long long ret = simple_strtoull (ptr, retptr, 0); | 131 | char *endptr; /* local pointer to end of parsed string */ |
| 132 | 132 | ||
| 133 | switch (**retptr) { | 133 | unsigned long long ret = simple_strtoull(ptr, &endptr, 0); |
| 134 | |||
| 135 | switch (*endptr) { | ||
| 134 | case 'G': | 136 | case 'G': |
| 135 | case 'g': | 137 | case 'g': |
| 136 | ret <<= 10; | 138 | ret <<= 10; |
| @@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr) | |||
| 140 | case 'K': | 142 | case 'K': |
| 141 | case 'k': | 143 | case 'k': |
| 142 | ret <<= 10; | 144 | ret <<= 10; |
| 143 | (*retptr)++; | 145 | endptr++; |
| 144 | default: | 146 | default: |
| 145 | break; | 147 | break; |
| 146 | } | 148 | } |
| 149 | |||
| 150 | if (retptr) | ||
| 151 | *retptr = endptr; | ||
| 152 | |||
| 147 | return ret; | 153 | return ret; |
| 148 | } | 154 | } |
| 149 | 155 | ||
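With @retptr now optional, callers that only need the parsed value can pass NULL instead of a throwaway pointer. A hedged sketch of a boot-parameter handler; the "example_size=" parameter name is made up:

```c
#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long long example_size;

/* Hedged sketch: parse "example_size=16M" style arguments; the NULL second
 * argument relies on the optional-@retptr behaviour introduced above. */
static int __init example_size_setup(char *str)
{
        example_size = memparse(str, NULL);
        return 1;
}
__setup("example_size=", example_size_setup);
```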
diff --git a/lib/cpumask.c b/lib/cpumask.c index bb4f76d3c3e7..5f97dc25ef9c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
| @@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp) | |||
| 15 | } | 15 | } |
| 16 | EXPORT_SYMBOL(__next_cpu); | 16 | EXPORT_SYMBOL(__next_cpu); |
| 17 | 17 | ||
| 18 | #if NR_CPUS > 64 | ||
| 19 | int __next_cpu_nr(int n, const cpumask_t *srcp) | ||
| 20 | { | ||
| 21 | return min_t(int, nr_cpu_ids, | ||
| 22 | find_next_bit(srcp->bits, nr_cpu_ids, n+1)); | ||
| 23 | } | ||
| 24 | EXPORT_SYMBOL(__next_cpu_nr); | ||
| 25 | #endif | ||
| 26 | |||
| 18 | int __any_online_cpu(const cpumask_t *mask) | 27 | int __any_online_cpu(const cpumask_t *mask) |
| 19 | { | 28 | { |
| 20 | int cpu; | 29 | int cpu; |
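__next_cpu_nr() lets the nr_cpu_ids-bounded iterators stop at the number of possible CPUs rather than NR_CPUS on large configurations. A hedged sketch, assuming the for_each_cpu_mask_nr() iterator from the same development cycle is available in the merged tree:

```c
#include <linux/cpumask.h>

/* Hedged sketch: count online CPUs by walking only bits 0..nr_cpu_ids-1. */
static int example_count_online_cpus(void)
{
        int cpu, n = 0;

        for_each_cpu_mask_nr(cpu, cpu_online_map)
                n++;
        return n;
}
```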
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0ef01d14727c..0218b4693dd8 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | * | 8 | * |
| 9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
| 10 | */ | 10 | */ |
| 11 | #include <linux/kernel.h> | ||
| 11 | #include <linux/rwsem.h> | 12 | #include <linux/rwsem.h> |
| 12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
| 13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| @@ -37,6 +38,7 @@ int debug_locks_off(void) | |||
| 37 | { | 38 | { |
| 38 | if (xchg(&debug_locks, 0)) { | 39 | if (xchg(&debug_locks, 0)) { |
| 39 | if (!debug_locks_silent) { | 40 | if (!debug_locks_silent) { |
| 41 | oops_in_progress = 1; | ||
| 40 | console_verbose(); | 42 | console_verbose(); |
| 41 | return 1; | 43 | return 1; |
| 42 | } | 44 | } |
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 85b18d79be89..45a6bde762d1 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
| @@ -205,9 +205,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg) | |||
| 205 | 205 | ||
| 206 | if (limit < 5 && obj->descr != descr_test) { | 206 | if (limit < 5 && obj->descr != descr_test) { |
| 207 | limit++; | 207 | limit++; |
| 208 | printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | 208 | WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, |
| 209 | obj_states[obj->state], obj->descr->name); | 209 | obj_states[obj->state], obj->descr->name); |
| 210 | WARN_ON(1); | ||
| 211 | } | 210 | } |
| 212 | debug_objects_warnings++; | 211 | debug_objects_warnings++; |
| 213 | } | 212 | } |
| @@ -226,15 +225,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), | |||
| 226 | 225 | ||
| 227 | static void debug_object_is_on_stack(void *addr, int onstack) | 226 | static void debug_object_is_on_stack(void *addr, int onstack) |
| 228 | { | 227 | { |
| 229 | void *stack = current->stack; | ||
| 230 | int is_on_stack; | 228 | int is_on_stack; |
| 231 | static int limit; | 229 | static int limit; |
| 232 | 230 | ||
| 233 | if (limit > 4) | 231 | if (limit > 4) |
| 234 | return; | 232 | return; |
| 235 | 233 | ||
| 236 | is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); | 234 | is_on_stack = object_is_on_stack(addr); |
| 237 | |||
| 238 | if (is_on_stack == onstack) | 235 | if (is_on_stack == onstack) |
| 239 | return; | 236 | return; |
| 240 | 237 | ||
| @@ -735,26 +732,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
| 735 | 732 | ||
| 736 | obj = lookup_object(addr, db); | 733 | obj = lookup_object(addr, db); |
| 737 | if (!obj && state != ODEBUG_STATE_NONE) { | 734 | if (!obj && state != ODEBUG_STATE_NONE) { |
| 738 | printk(KERN_ERR "ODEBUG: selftest object not found\n"); | 735 | WARN(1, KERN_ERR "ODEBUG: selftest object not found\n"); |
| 739 | WARN_ON(1); | ||
| 740 | goto out; | 736 | goto out; |
| 741 | } | 737 | } |
| 742 | if (obj && obj->state != state) { | 738 | if (obj && obj->state != state) { |
| 743 | printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", | 739 | WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", |
| 744 | obj->state, state); | 740 | obj->state, state); |
| 745 | WARN_ON(1); | ||
| 746 | goto out; | 741 | goto out; |
| 747 | } | 742 | } |
| 748 | if (fixups != debug_objects_fixups) { | 743 | if (fixups != debug_objects_fixups) { |
| 749 | printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", | 744 | WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", |
| 750 | fixups, debug_objects_fixups); | 745 | fixups, debug_objects_fixups); |
| 751 | WARN_ON(1); | ||
| 752 | goto out; | 746 | goto out; |
| 753 | } | 747 | } |
| 754 | if (warnings != debug_objects_warnings) { | 748 | if (warnings != debug_objects_warnings) { |
| 755 | printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", | 749 | WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", |
| 756 | warnings, debug_objects_warnings); | 750 | warnings, debug_objects_warnings); |
| 757 | WARN_ON(1); | ||
| 758 | goto out; | 751 | goto out; |
| 759 | } | 752 | } |
| 760 | res = 0; | 753 | res = 0; |
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | * Modified by George Anzinger to reuse immediately and to use | 6 | * Modified by George Anzinger to reuse immediately and to use |
| 7 | * find bit instructions. Also removed _irq on spinlocks. | 7 | * find bit instructions. Also removed _irq on spinlocks. |
| 8 | * | 8 | * |
| 9 | * Modified by Nadia Derbey to make it RCU safe. | ||
| 10 | * | ||
| 9 | * Small id to pointer translation service. | 11 | * Small id to pointer translation service. |
| 10 | * | 12 | * |
| 11 | * It uses a radix tree like structure as a sparse array indexed | 13 | * It uses a radix tree like structure as a sparse array indexed |
| @@ -35,7 +37,7 @@ | |||
| 35 | 37 | ||
| 36 | static struct kmem_cache *idr_layer_cache; | 38 | static struct kmem_cache *idr_layer_cache; |
| 37 | 39 | ||
| 38 | static struct idr_layer *alloc_layer(struct idr *idp) | 40 | static struct idr_layer *get_from_free_list(struct idr *idp) |
| 39 | { | 41 | { |
| 40 | struct idr_layer *p; | 42 | struct idr_layer *p; |
| 41 | unsigned long flags; | 43 | unsigned long flags; |
| @@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp) | |||
| 50 | return(p); | 52 | return(p); |
| 51 | } | 53 | } |
| 52 | 54 | ||
| 55 | static void idr_layer_rcu_free(struct rcu_head *head) | ||
| 56 | { | ||
| 57 | struct idr_layer *layer; | ||
| 58 | |||
| 59 | layer = container_of(head, struct idr_layer, rcu_head); | ||
| 60 | kmem_cache_free(idr_layer_cache, layer); | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline void free_layer(struct idr_layer *p) | ||
| 64 | { | ||
| 65 | call_rcu(&p->rcu_head, idr_layer_rcu_free); | ||
| 66 | } | ||
| 67 | |||
| 53 | /* only called when idp->lock is held */ | 68 | /* only called when idp->lock is held */ |
| 54 | static void __free_layer(struct idr *idp, struct idr_layer *p) | 69 | static void __move_to_free_list(struct idr *idp, struct idr_layer *p) |
| 55 | { | 70 | { |
| 56 | p->ary[0] = idp->id_free; | 71 | p->ary[0] = idp->id_free; |
| 57 | idp->id_free = p; | 72 | idp->id_free = p; |
| 58 | idp->id_free_cnt++; | 73 | idp->id_free_cnt++; |
| 59 | } | 74 | } |
| 60 | 75 | ||
| 61 | static void free_layer(struct idr *idp, struct idr_layer *p) | 76 | static void move_to_free_list(struct idr *idp, struct idr_layer *p) |
| 62 | { | 77 | { |
| 63 | unsigned long flags; | 78 | unsigned long flags; |
| 64 | 79 | ||
| @@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p) | |||
| 66 | * Depends on the return element being zeroed. | 81 | * Depends on the return element being zeroed. |
| 67 | */ | 82 | */ |
| 68 | spin_lock_irqsave(&idp->lock, flags); | 83 | spin_lock_irqsave(&idp->lock, flags); |
| 69 | __free_layer(idp, p); | 84 | __move_to_free_list(idp, p); |
| 70 | spin_unlock_irqrestore(&idp->lock, flags); | 85 | spin_unlock_irqrestore(&idp->lock, flags); |
| 71 | } | 86 | } |
| 72 | 87 | ||
| @@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
| 96 | * @gfp_mask: memory allocation flags | 111 | * @gfp_mask: memory allocation flags |
| 97 | * | 112 | * |
| 98 | * This function should be called prior to locking and calling the | 113 | * This function should be called prior to locking and calling the |
| 99 | * following function. It preallocates enough memory to satisfy | 114 | * idr_get_new* functions. It preallocates enough memory to satisfy |
| 100 | * the worst possible allocation. | 115 | * the worst possible allocation. |
| 101 | * | 116 | * |
| 102 | * If the system is REALLY out of memory this function returns 0, | 117 | * If the system is REALLY out of memory this function returns 0, |
| @@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | |||
| 109 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); | 124 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); |
| 110 | if (new == NULL) | 125 | if (new == NULL) |
| 111 | return (0); | 126 | return (0); |
| 112 | free_layer(idp, new); | 127 | move_to_free_list(idp, new); |
| 113 | } | 128 | } |
| 114 | return 1; | 129 | return 1; |
| 115 | } | 130 | } |
| @@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 143 | /* if already at the top layer, we need to grow */ | 158 | /* if already at the top layer, we need to grow */ |
| 144 | if (!(p = pa[l])) { | 159 | if (!(p = pa[l])) { |
| 145 | *starting_id = id; | 160 | *starting_id = id; |
| 146 | return -2; | 161 | return IDR_NEED_TO_GROW; |
| 147 | } | 162 | } |
| 148 | 163 | ||
| 149 | /* If we need to go up one layer, continue the | 164 | /* If we need to go up one layer, continue the |
| @@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 160 | id = ((id >> sh) ^ n ^ m) << sh; | 175 | id = ((id >> sh) ^ n ^ m) << sh; |
| 161 | } | 176 | } |
| 162 | if ((id >= MAX_ID_BIT) || (id < 0)) | 177 | if ((id >= MAX_ID_BIT) || (id < 0)) |
| 163 | return -3; | 178 | return IDR_NOMORE_SPACE; |
| 164 | if (l == 0) | 179 | if (l == 0) |
| 165 | break; | 180 | break; |
| 166 | /* | 181 | /* |
| 167 | * Create the layer below if it is missing. | 182 | * Create the layer below if it is missing. |
| 168 | */ | 183 | */ |
| 169 | if (!p->ary[m]) { | 184 | if (!p->ary[m]) { |
| 170 | if (!(new = alloc_layer(idp))) | 185 | new = get_from_free_list(idp); |
| 186 | if (!new) | ||
| 171 | return -1; | 187 | return -1; |
| 172 | p->ary[m] = new; | 188 | rcu_assign_pointer(p->ary[m], new); |
| 173 | p->count++; | 189 | p->count++; |
| 174 | } | 190 | } |
| 175 | pa[l--] = p; | 191 | pa[l--] = p; |
| @@ -192,7 +208,7 @@ build_up: | |||
| 192 | p = idp->top; | 208 | p = idp->top; |
| 193 | layers = idp->layers; | 209 | layers = idp->layers; |
| 194 | if (unlikely(!p)) { | 210 | if (unlikely(!p)) { |
| 195 | if (!(p = alloc_layer(idp))) | 211 | if (!(p = get_from_free_list(idp))) |
| 196 | return -1; | 212 | return -1; |
| 197 | layers = 1; | 213 | layers = 1; |
| 198 | } | 214 | } |
| @@ -204,7 +220,7 @@ build_up: | |||
| 204 | layers++; | 220 | layers++; |
| 205 | if (!p->count) | 221 | if (!p->count) |
| 206 | continue; | 222 | continue; |
| 207 | if (!(new = alloc_layer(idp))) { | 223 | if (!(new = get_from_free_list(idp))) { |
| 208 | /* | 224 | /* |
| 209 | * The allocation failed. If we built part of | 225 | * The allocation failed. If we built part of |
| 210 | * the structure tear it down. | 226 | * the structure tear it down. |
| @@ -214,7 +230,7 @@ build_up: | |||
| 214 | p = p->ary[0]; | 230 | p = p->ary[0]; |
| 215 | new->ary[0] = NULL; | 231 | new->ary[0] = NULL; |
| 216 | new->bitmap = new->count = 0; | 232 | new->bitmap = new->count = 0; |
| 217 | __free_layer(idp, new); | 233 | __move_to_free_list(idp, new); |
| 218 | } | 234 | } |
| 219 | spin_unlock_irqrestore(&idp->lock, flags); | 235 | spin_unlock_irqrestore(&idp->lock, flags); |
| 220 | return -1; | 236 | return -1; |
| @@ -225,10 +241,10 @@ build_up: | |||
| 225 | __set_bit(0, &new->bitmap); | 241 | __set_bit(0, &new->bitmap); |
| 226 | p = new; | 242 | p = new; |
| 227 | } | 243 | } |
| 228 | idp->top = p; | 244 | rcu_assign_pointer(idp->top, p); |
| 229 | idp->layers = layers; | 245 | idp->layers = layers; |
| 230 | v = sub_alloc(idp, &id, pa); | 246 | v = sub_alloc(idp, &id, pa); |
| 231 | if (v == -2) | 247 | if (v == IDR_NEED_TO_GROW) |
| 232 | goto build_up; | 248 | goto build_up; |
| 233 | return(v); | 249 | return(v); |
| 234 | } | 250 | } |
| @@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
| 244 | * Successfully found an empty slot. Install the user | 260 | * Successfully found an empty slot. Install the user |
| 245 | * pointer and mark the slot full. | 261 | * pointer and mark the slot full. |
| 246 | */ | 262 | */ |
| 247 | pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; | 263 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], |
| 264 | (struct idr_layer *)ptr); | ||
| 248 | pa[0]->count++; | 265 | pa[0]->count++; |
| 249 | idr_mark_full(pa, id); | 266 | idr_mark_full(pa, id); |
| 250 | } | 267 | } |
| @@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | |||
| 277 | * This is a cheap hack until the IDR code can be fixed to | 294 | * This is a cheap hack until the IDR code can be fixed to |
| 278 | * return proper error values. | 295 | * return proper error values. |
| 279 | */ | 296 | */ |
| 280 | if (rv < 0) { | 297 | if (rv < 0) |
| 281 | if (rv == -1) | 298 | return _idr_rc_to_errno(rv); |
| 282 | return -EAGAIN; | ||
| 283 | else /* Will be -3 */ | ||
| 284 | return -ENOSPC; | ||
| 285 | } | ||
| 286 | *id = rv; | 299 | *id = rv; |
| 287 | return 0; | 300 | return 0; |
| 288 | } | 301 | } |
| @@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id) | |||
| 312 | * This is a cheap hack until the IDR code can be fixed to | 325 | * This is a cheap hack until the IDR code can be fixed to |
| 313 | * return proper error values. | 326 | * return proper error values. |
| 314 | */ | 327 | */ |
| 315 | if (rv < 0) { | 328 | if (rv < 0) |
| 316 | if (rv == -1) | 329 | return _idr_rc_to_errno(rv); |
| 317 | return -EAGAIN; | ||
| 318 | else /* Will be -3 */ | ||
| 319 | return -ENOSPC; | ||
| 320 | } | ||
| 321 | *id = rv; | 330 | *id = rv; |
| 322 | return 0; | 331 | return 0; |
| 323 | } | 332 | } |
| @@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new); | |||
| 325 | 334 | ||
| 326 | static void idr_remove_warning(int id) | 335 | static void idr_remove_warning(int id) |
| 327 | { | 336 | { |
| 328 | printk("idr_remove called for id=%d which is not allocated.\n", id); | 337 | printk(KERN_WARNING |
| 338 | "idr_remove called for id=%d which is not allocated.\n", id); | ||
| 329 | dump_stack(); | 339 | dump_stack(); |
| 330 | } | 340 | } |
| 331 | 341 | ||
| @@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
| 334 | struct idr_layer *p = idp->top; | 344 | struct idr_layer *p = idp->top; |
| 335 | struct idr_layer **pa[MAX_LEVEL]; | 345 | struct idr_layer **pa[MAX_LEVEL]; |
| 336 | struct idr_layer ***paa = &pa[0]; | 346 | struct idr_layer ***paa = &pa[0]; |
| 347 | struct idr_layer *to_free; | ||
| 337 | int n; | 348 | int n; |
| 338 | 349 | ||
| 339 | *paa = NULL; | 350 | *paa = NULL; |
| @@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
| 349 | n = id & IDR_MASK; | 360 | n = id & IDR_MASK; |
| 350 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ | 361 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ |
| 351 | __clear_bit(n, &p->bitmap); | 362 | __clear_bit(n, &p->bitmap); |
| 352 | p->ary[n] = NULL; | 363 | rcu_assign_pointer(p->ary[n], NULL); |
| 364 | to_free = NULL; | ||
| 353 | while(*paa && ! --((**paa)->count)){ | 365 | while(*paa && ! --((**paa)->count)){ |
| 354 | free_layer(idp, **paa); | 366 | if (to_free) |
| 367 | free_layer(to_free); | ||
| 368 | to_free = **paa; | ||
| 355 | **paa-- = NULL; | 369 | **paa-- = NULL; |
| 356 | } | 370 | } |
| 357 | if (!*paa) | 371 | if (!*paa) |
| 358 | idp->layers = 0; | 372 | idp->layers = 0; |
| 373 | if (to_free) | ||
| 374 | free_layer(to_free); | ||
| 359 | } else | 375 | } else |
| 360 | idr_remove_warning(id); | 376 | idr_remove_warning(id); |
| 361 | } | 377 | } |
| @@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
| 368 | void idr_remove(struct idr *idp, int id) | 384 | void idr_remove(struct idr *idp, int id) |
| 369 | { | 385 | { |
| 370 | struct idr_layer *p; | 386 | struct idr_layer *p; |
| 387 | struct idr_layer *to_free; | ||
| 371 | 388 | ||
| 372 | /* Mask off upper bits we don't use for the search. */ | 389 | /* Mask off upper bits we don't use for the search. */ |
| 373 | id &= MAX_ID_MASK; | 390 | id &= MAX_ID_MASK; |
| 374 | 391 | ||
| 375 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | 392 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
| 376 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && | 393 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
| 377 | idp->top->ary[0]) { // We can drop a layer | 394 | idp->top->ary[0]) { |
| 378 | 395 | /* | |
| 396 | * Single child at leftmost slot: we can shrink the tree. | ||
| 397 | * This level is not needed anymore since when layers are | ||
| 398 | * inserted, they are inserted at the top of the existing | ||
| 399 | * tree. | ||
| 400 | */ | ||
| 401 | to_free = idp->top; | ||
| 379 | p = idp->top->ary[0]; | 402 | p = idp->top->ary[0]; |
| 380 | idp->top->bitmap = idp->top->count = 0; | 403 | rcu_assign_pointer(idp->top, p); |
| 381 | free_layer(idp, idp->top); | ||
| 382 | idp->top = p; | ||
| 383 | --idp->layers; | 404 | --idp->layers; |
| 405 | to_free->bitmap = to_free->count = 0; | ||
| 406 | free_layer(to_free); | ||
| 384 | } | 407 | } |
| 385 | while (idp->id_free_cnt >= IDR_FREE_MAX) { | 408 | while (idp->id_free_cnt >= IDR_FREE_MAX) { |
| 386 | p = alloc_layer(idp); | 409 | p = get_from_free_list(idp); |
| 410 | /* | ||
| 411 | * Note: we don't call the rcu callback here, since the only | ||
| 412 | * layers that fall into the freelist are those that have been | ||
| 413 | * preallocated. | ||
| 414 | */ | ||
| 387 | kmem_cache_free(idr_layer_cache, p); | 415 | kmem_cache_free(idr_layer_cache, p); |
| 388 | } | 416 | } |
| 389 | return; | 417 | return; |
| @@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp) | |||
| 424 | 452 | ||
| 425 | id += 1 << n; | 453 | id += 1 << n; |
| 426 | while (n < fls(id)) { | 454 | while (n < fls(id)) { |
| 427 | if (p) { | 455 | if (p) |
| 428 | memset(p, 0, sizeof *p); | 456 | free_layer(p); |
| 429 | free_layer(idp, p); | ||
| 430 | } | ||
| 431 | n += IDR_BITS; | 457 | n += IDR_BITS; |
| 432 | p = *--paa; | 458 | p = *--paa; |
| 433 | } | 459 | } |
| 434 | } | 460 | } |
| 435 | idp->top = NULL; | 461 | rcu_assign_pointer(idp->top, NULL); |
| 436 | idp->layers = 0; | 462 | idp->layers = 0; |
| 437 | } | 463 | } |
| 438 | EXPORT_SYMBOL(idr_remove_all); | 464 | EXPORT_SYMBOL(idr_remove_all); |
| @@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all); | |||
| 444 | void idr_destroy(struct idr *idp) | 470 | void idr_destroy(struct idr *idp) |
| 445 | { | 471 | { |
| 446 | while (idp->id_free_cnt) { | 472 | while (idp->id_free_cnt) { |
| 447 | struct idr_layer *p = alloc_layer(idp); | 473 | struct idr_layer *p = get_from_free_list(idp); |
| 448 | kmem_cache_free(idr_layer_cache, p); | 474 | kmem_cache_free(idr_layer_cache, p); |
| 449 | } | 475 | } |
| 450 | } | 476 | } |
| @@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy); | |||
| 459 | * return indicates that @id is not valid or you passed %NULL in | 485 | * return indicates that @id is not valid or you passed %NULL in |
| 460 | * idr_get_new(). | 486 | * idr_get_new(). |
| 461 | * | 487 | * |
| 462 | * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). | 488 | * This function can be called under rcu_read_lock(), given that the leaf |
| 489 | * pointers lifetimes are correctly managed. | ||
| 463 | */ | 490 | */ |
| 464 | void *idr_find(struct idr *idp, int id) | 491 | void *idr_find(struct idr *idp, int id) |
| 465 | { | 492 | { |
| @@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 467 | struct idr_layer *p; | 494 | struct idr_layer *p; |
| 468 | 495 | ||
| 469 | n = idp->layers * IDR_BITS; | 496 | n = idp->layers * IDR_BITS; |
| 470 | p = idp->top; | 497 | p = rcu_dereference(idp->top); |
| 471 | 498 | ||
| 472 | /* Mask off upper bits we don't use for the search. */ | 499 | /* Mask off upper bits we don't use for the search. */ |
| 473 | id &= MAX_ID_MASK; | 500 | id &= MAX_ID_MASK; |
| @@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 477 | 504 | ||
| 478 | while (n > 0 && p) { | 505 | while (n > 0 && p) { |
| 479 | n -= IDR_BITS; | 506 | n -= IDR_BITS; |
| 480 | p = p->ary[(id >> n) & IDR_MASK]; | 507 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); |
| 481 | } | 508 | } |
| 482 | return((void *)p); | 509 | return((void *)p); |
| 483 | } | 510 | } |
| @@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp, | |||
| 510 | struct idr_layer **paa = &pa[0]; | 537 | struct idr_layer **paa = &pa[0]; |
| 511 | 538 | ||
| 512 | n = idp->layers * IDR_BITS; | 539 | n = idp->layers * IDR_BITS; |
| 513 | p = idp->top; | 540 | p = rcu_dereference(idp->top); |
| 514 | max = 1 << n; | 541 | max = 1 << n; |
| 515 | 542 | ||
| 516 | id = 0; | 543 | id = 0; |
| @@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp, | |||
| 518 | while (n > 0 && p) { | 545 | while (n > 0 && p) { |
| 519 | n -= IDR_BITS; | 546 | n -= IDR_BITS; |
| 520 | *paa++ = p; | 547 | *paa++ = p; |
| 521 | p = p->ary[(id >> n) & IDR_MASK]; | 548 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); |
| 522 | } | 549 | } |
| 523 | 550 | ||
| 524 | if (p) { | 551 | if (p) { |
| @@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each); | |||
| 548 | * A -ENOENT return indicates that @id was not found. | 575 | * A -ENOENT return indicates that @id was not found. |
| 549 | * A -EINVAL return indicates that @id was not within valid constraints. | 576 | * A -EINVAL return indicates that @id was not within valid constraints. |
| 550 | * | 577 | * |
| 551 | * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). | 578 | * The caller must serialize with writers. |
| 552 | */ | 579 | */ |
| 553 | void *idr_replace(struct idr *idp, void *ptr, int id) | 580 | void *idr_replace(struct idr *idp, void *ptr, int id) |
| 554 | { | 581 | { |
| @@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
| 574 | return ERR_PTR(-ENOENT); | 601 | return ERR_PTR(-ENOENT); |
| 575 | 602 | ||
| 576 | old_p = p->ary[n]; | 603 | old_p = p->ary[n]; |
| 577 | p->ary[n] = ptr; | 604 | rcu_assign_pointer(p->ary[n], ptr); |
| 578 | 605 | ||
| 579 | return old_p; | 606 | return old_p; |
| 580 | } | 607 | } |
| 581 | EXPORT_SYMBOL(idr_replace); | 608 | EXPORT_SYMBOL(idr_replace); |
| 582 | 609 | ||
| 583 | static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) | 610 | static void idr_cache_ctor(void *idr_layer) |
| 584 | { | 611 | { |
| 585 | memset(idr_layer, 0, sizeof(struct idr_layer)); | 612 | memset(idr_layer, 0, sizeof(struct idr_layer)); |
| 586 | } | 613 | } |
| @@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
| 694 | restart: | 721 | restart: |
| 695 | /* get vacant slot */ | 722 | /* get vacant slot */ |
| 696 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); | 723 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); |
| 697 | if (t < 0) { | 724 | if (t < 0) |
| 698 | if (t == -1) | 725 | return _idr_rc_to_errno(t); |
| 699 | return -EAGAIN; | ||
| 700 | else /* will be -3 */ | ||
| 701 | return -ENOSPC; | ||
| 702 | } | ||
| 703 | 726 | ||
| 704 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) | 727 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) |
| 705 | return -ENOSPC; | 728 | return -ENOSPC; |
| @@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
| 720 | return -EAGAIN; | 743 | return -EAGAIN; |
| 721 | 744 | ||
| 722 | memset(bitmap, 0, sizeof(struct ida_bitmap)); | 745 | memset(bitmap, 0, sizeof(struct ida_bitmap)); |
| 723 | pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; | 746 | rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], |
| 747 | (void *)bitmap); | ||
| 724 | pa[0]->count++; | 748 | pa[0]->count++; |
| 725 | } | 749 | } |
| 726 | 750 | ||
| @@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
| 749 | * allocation. | 773 | * allocation. |
| 750 | */ | 774 | */ |
| 751 | if (ida->idr.id_free_cnt || ida->free_bitmap) { | 775 | if (ida->idr.id_free_cnt || ida->free_bitmap) { |
| 752 | struct idr_layer *p = alloc_layer(&ida->idr); | 776 | struct idr_layer *p = get_from_free_list(&ida->idr); |
| 753 | if (p) | 777 | if (p) |
| 754 | kmem_cache_free(idr_layer_cache, p); | 778 | kmem_cache_free(idr_layer_cache, p); |
| 755 | } | 779 | } |
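The idr changes above make lookups RCU-safe: idr_find() may now run under rcu_read_lock() while writers stay serialized among themselves. A hedged sketch of that split, using a made-up example_idr/example_lock pair and leaving RCU-deferred freeing of looked-up objects to the caller:

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* serializes writers only */
static DEFINE_IDR(example_idr);

/* Lockless reader: permitted per the new idr_find() documentation. */
static void *example_lookup(int id)
{
        void *p;

        rcu_read_lock();
        p = idr_find(&example_idr, id);
        rcu_read_unlock();
        return p;
}

/* Writer: preallocate outside the lock, retry on -EAGAIN. */
static int example_insert(void *ptr, int *id)
{
        int ret;

        do {
                if (!idr_pre_get(&example_idr, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&example_lock);
                ret = idr_get_new(&example_idr, ptr, id);
                spin_unlock(&example_lock);
        } while (ret == -EAGAIN);

        return ret;
}
```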
diff --git a/lib/inflate.c b/lib/inflate.c index 9762294be062..1a8e8a978128 100644 --- a/lib/inflate.c +++ b/lib/inflate.c | |||
| @@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = { | |||
| 230 | #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} | 230 | #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} |
| 231 | #define DUMPBITS(n) {b>>=(n);k-=(n);} | 231 | #define DUMPBITS(n) {b>>=(n);k-=(n);} |
| 232 | 232 | ||
| 233 | #ifndef NO_INFLATE_MALLOC | ||
| 234 | /* A trivial malloc implementation, adapted from | ||
| 235 | * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 | ||
| 236 | */ | ||
| 237 | |||
| 238 | static unsigned long malloc_ptr; | ||
| 239 | static int malloc_count; | ||
| 240 | |||
| 241 | static void *malloc(int size) | ||
| 242 | { | ||
| 243 | void *p; | ||
| 244 | |||
| 245 | if (size < 0) | ||
| 246 | error("Malloc error"); | ||
| 247 | if (!malloc_ptr) | ||
| 248 | malloc_ptr = free_mem_ptr; | ||
| 249 | |||
| 250 | malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ | ||
| 251 | |||
| 252 | p = (void *)malloc_ptr; | ||
| 253 | malloc_ptr += size; | ||
| 254 | |||
| 255 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) | ||
| 256 | error("Out of memory"); | ||
| 257 | |||
| 258 | malloc_count++; | ||
| 259 | return p; | ||
| 260 | } | ||
| 261 | |||
| 262 | static void free(void *where) | ||
| 263 | { | ||
| 264 | malloc_count--; | ||
| 265 | if (!malloc_count) | ||
| 266 | malloc_ptr = free_mem_ptr; | ||
| 267 | } | ||
| 268 | #else | ||
| 269 | #define malloc(a) kmalloc(a, GFP_KERNEL) | ||
| 270 | #define free(a) kfree(a) | ||
| 271 | #endif | ||
| 233 | 272 | ||
| 234 | /* | 273 | /* |
| 235 | Huffman code decoding is performed using a multi-level table lookup. | 274 | Huffman code decoding is performed using a multi-level table lookup. |
| @@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void) | |||
| 1045 | int e; /* last block flag */ | 1084 | int e; /* last block flag */ |
| 1046 | int r; /* result code */ | 1085 | int r; /* result code */ |
| 1047 | unsigned h; /* maximum struct huft's malloc'ed */ | 1086 | unsigned h; /* maximum struct huft's malloc'ed */ |
| 1048 | void *ptr; | ||
| 1049 | 1087 | ||
| 1050 | /* initialize window, bit buffer */ | 1088 | /* initialize window, bit buffer */ |
| 1051 | wp = 0; | 1089 | wp = 0; |
| @@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void) | |||
| 1057 | h = 0; | 1095 | h = 0; |
| 1058 | do { | 1096 | do { |
| 1059 | hufts = 0; | 1097 | hufts = 0; |
| 1060 | gzip_mark(&ptr); | 1098 | #ifdef ARCH_HAS_DECOMP_WDOG |
| 1061 | if ((r = inflate_block(&e)) != 0) { | 1099 | arch_decomp_wdog(); |
| 1062 | gzip_release(&ptr); | 1100 | #endif |
| 1063 | return r; | 1101 | r = inflate_block(&e); |
| 1064 | } | 1102 | if (r) |
| 1065 | gzip_release(&ptr); | 1103 | return r; |
| 1066 | if (hufts > h) | 1104 | if (hufts > h) |
| 1067 | h = hufts; | 1105 | h = hufts; |
| 1068 | } while (!e); | 1106 | } while (!e); |
diff --git a/lib/iomap.c b/lib/iomap.c index 37a3ea4cac9f..d32229385151 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
| @@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access) | |||
| 40 | static int count = 10; | 40 | static int count = 10; |
| 41 | if (count) { | 41 | if (count) { |
| 42 | count--; | 42 | count--; |
| 43 | printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); | 43 | WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); |
| 44 | WARN_ON(1); | ||
| 45 | } | 44 | } |
| 46 | } | 45 | } |
| 47 | 46 | ||
diff --git a/lib/kobject.c b/lib/kobject.c index 744401571ed7..bd732ffebc85 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
| @@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj) | |||
| 164 | return -ENOENT; | 164 | return -ENOENT; |
| 165 | 165 | ||
| 166 | if (!kobj->name || !kobj->name[0]) { | 166 | if (!kobj->name || !kobj->name[0]) { |
| 167 | pr_debug("kobject: (%p): attempted to be registered with empty " | 167 | WARN(1, "kobject: (%p): attempted to be registered with empty " |
| 168 | "name!\n", kobj); | 168 | "name!\n", kobj); |
| 169 | WARN_ON(1); | ||
| 170 | return -EINVAL; | 169 | return -EINVAL; |
| 171 | } | 170 | } |
| 172 | 171 | ||
| @@ -583,12 +582,10 @@ static void kobject_release(struct kref *kref) | |||
| 583 | void kobject_put(struct kobject *kobj) | 582 | void kobject_put(struct kobject *kobj) |
| 584 | { | 583 | { |
| 585 | if (kobj) { | 584 | if (kobj) { |
| 586 | if (!kobj->state_initialized) { | 585 | if (!kobj->state_initialized) |
| 587 | printk(KERN_WARNING "kobject: '%s' (%p): is not " | 586 | WARN(1, KERN_WARNING "kobject: '%s' (%p): is not " |
| 588 | "initialized, yet kobject_put() is being " | 587 | "initialized, yet kobject_put() is being " |
| 589 | "called.\n", kobject_name(kobj), kobj); | 588 | "called.\n", kobject_name(kobj), kobj); |
| 590 | WARN_ON(1); | ||
| 591 | } | ||
| 592 | kref_put(&kobj->kref, kobject_release); | 589 | kref_put(&kobj->kref, kobject_release); |
| 593 | } | 590 | } |
| 594 | } | 591 | } |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9f8d599459d1..3f914725bda8 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -285,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |||
| 285 | int len; | 285 | int len; |
| 286 | 286 | ||
| 287 | if (env->envp_idx >= ARRAY_SIZE(env->envp)) { | 287 | if (env->envp_idx >= ARRAY_SIZE(env->envp)) { |
| 288 | printk(KERN_ERR "add_uevent_var: too many keys\n"); | 288 | WARN(1, KERN_ERR "add_uevent_var: too many keys\n"); |
| 289 | WARN_ON(1); | ||
| 290 | return -ENOMEM; | 289 | return -ENOMEM; |
| 291 | } | 290 | } |
| 292 | 291 | ||
| @@ -297,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |||
| 297 | va_end(args); | 296 | va_end(args); |
| 298 | 297 | ||
| 299 | if (len >= (sizeof(env->buf) - env->buflen)) { | 298 | if (len >= (sizeof(env->buf) - env->buflen)) { |
| 300 | printk(KERN_ERR "add_uevent_var: buffer size too small\n"); | 299 | WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n"); |
| 301 | WARN_ON(1); | ||
| 302 | return -ENOMEM; | 300 | return -ENOMEM; |
| 303 | } | 301 | } |
| 304 | 302 | ||
diff --git a/lib/list_debug.c b/lib/list_debug.c index 4350ba9655bd..1a39f4e3ae1f 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
| @@ -20,18 +20,14 @@ void __list_add(struct list_head *new, | |||
| 20 | struct list_head *prev, | 20 | struct list_head *prev, |
| 21 | struct list_head *next) | 21 | struct list_head *next) |
| 22 | { | 22 | { |
| 23 | if (unlikely(next->prev != prev)) { | 23 | WARN(next->prev != prev, |
| 24 | printk(KERN_ERR "list_add corruption. next->prev should be " | 24 | "list_add corruption. next->prev should be " |
| 25 | "prev (%p), but was %p. (next=%p).\n", | 25 | "prev (%p), but was %p. (next=%p).\n", |
| 26 | prev, next->prev, next); | 26 | prev, next->prev, next); |
| 27 | BUG(); | 27 | WARN(prev->next != next, |
| 28 | } | 28 | "list_add corruption. prev->next should be " |
| 29 | if (unlikely(prev->next != next)) { | 29 | "next (%p), but was %p. (prev=%p).\n", |
| 30 | printk(KERN_ERR "list_add corruption. prev->next should be " | 30 | next, prev->next, prev); |
| 31 | "next (%p), but was %p. (prev=%p).\n", | ||
| 32 | next, prev->next, prev); | ||
| 33 | BUG(); | ||
| 34 | } | ||
| 35 | next->prev = new; | 31 | next->prev = new; |
| 36 | new->next = next; | 32 | new->next = next; |
| 37 | new->prev = prev; | 33 | new->prev = prev; |
| @@ -40,20 +36,6 @@ void __list_add(struct list_head *new, | |||
| 40 | EXPORT_SYMBOL(__list_add); | 36 | EXPORT_SYMBOL(__list_add); |
| 41 | 37 | ||
| 42 | /** | 38 | /** |
| 43 | * list_add - add a new entry | ||
| 44 | * @new: new entry to be added | ||
| 45 | * @head: list head to add it after | ||
| 46 | * | ||
| 47 | * Insert a new entry after the specified head. | ||
| 48 | * This is good for implementing stacks. | ||
| 49 | */ | ||
| 50 | void list_add(struct list_head *new, struct list_head *head) | ||
| 51 | { | ||
| 52 | __list_add(new, head, head->next); | ||
| 53 | } | ||
| 54 | EXPORT_SYMBOL(list_add); | ||
| 55 | |||
| 56 | /** | ||
| 57 | * list_del - deletes entry from list. | 39 | * list_del - deletes entry from list. |
| 58 | * @entry: the element to delete from the list. | 40 | * @entry: the element to delete from the list. |
| 59 | * Note: list_empty on entry does not return true after this, the entry is | 41 | * Note: list_empty on entry does not return true after this, the entry is |
| @@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add); | |||
| 61 | */ | 43 | */ |
| 62 | void list_del(struct list_head *entry) | 44 | void list_del(struct list_head *entry) |
| 63 | { | 45 | { |
| 64 | if (unlikely(entry->prev->next != entry)) { | 46 | WARN(entry->prev->next != entry, |
| 65 | printk(KERN_ERR "list_del corruption. prev->next should be %p, " | 47 | "list_del corruption. prev->next should be %p, " |
| 66 | "but was %p\n", entry, entry->prev->next); | 48 | "but was %p\n", entry, entry->prev->next); |
| 67 | BUG(); | 49 | WARN(entry->next->prev != entry, |
| 68 | } | 50 | "list_del corruption. next->prev should be %p, " |
| 69 | if (unlikely(entry->next->prev != entry)) { | 51 | "but was %p\n", entry, entry->next->prev); |
| 70 | printk(KERN_ERR "list_del corruption. next->prev should be %p, " | ||
| 71 | "but was %p\n", entry, entry->next->prev); | ||
| 72 | BUG(); | ||
| 73 | } | ||
| 74 | __list_del(entry->prev, entry->next); | 52 | __list_del(entry->prev, entry->next); |
| 75 | entry->next = LIST_POISON1; | 53 | entry->next = LIST_POISON1; |
| 76 | entry->prev = LIST_POISON2; | 54 | entry->prev = LIST_POISON2; |
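A recurring cleanup in the debugobjects, iomap, kobject, kobject_uevent and list_debug hunks above (and in plist below) folds a printk() followed by WARN_ON(1) into a single WARN(condition, fmt, ...), which emits the message and backtrace only when the condition holds. A hedged before/after sketch with a made-up check:

```c
#include <linux/kernel.h>

/* Old pattern: two calls, message and backtrace produced separately. */
static void example_check_old(int broken, void *obj)
{
        if (broken) {
                printk(KERN_ERR "example: bad object %p\n", obj);
                WARN_ON(1);
        }
}

/* New pattern: condition, message and backtrace in one WARN() call. */
static void example_check_new(int broken, void *obj)
{
        WARN(broken, "example: bad object %p\n", obj);
}
```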
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c index 77f0f9b775a9..5dc6b29c1575 100644 --- a/lib/lzo/lzo1x_decompress.c +++ b/lib/lzo/lzo1x_decompress.c | |||
| @@ -138,8 +138,7 @@ match: | |||
| 138 | t += 31 + *ip++; | 138 | t += 31 + *ip++; |
| 139 | } | 139 | } |
| 140 | m_pos = op - 1; | 140 | m_pos = op - 1; |
| 141 | m_pos -= le16_to_cpu(get_unaligned( | 141 | m_pos -= get_unaligned_le16(ip) >> 2; |
| 142 | (const unsigned short *)ip)) >> 2; | ||
| 143 | ip += 2; | 142 | ip += 2; |
| 144 | } else if (t >= 16) { | 143 | } else if (t >= 16) { |
| 145 | m_pos = op; | 144 | m_pos = op; |
| @@ -157,8 +156,7 @@ match: | |||
| 157 | } | 156 | } |
| 158 | t += 7 + *ip++; | 157 | t += 7 + *ip++; |
| 159 | } | 158 | } |
| 160 | m_pos -= le16_to_cpu(get_unaligned( | 159 | m_pos -= get_unaligned_le16(ip) >> 2; |
| 161 | (const unsigned short *)ip)) >> 2; | ||
| 162 | ip += 2; | 160 | ip += 2; |
| 163 | if (m_pos == op) | 161 | if (m_pos == op) |
| 164 | goto eof_found; | 162 | goto eof_found; |
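The lzo1x hunks swap the open-coded le16_to_cpu(get_unaligned(...)) cast for get_unaligned_le16(), which reads a little-endian 16-bit value from a possibly unaligned input position. A hedged sketch of the same computation the decompressor performs; the helper name comes from the patch, the wrapper function is made up:

```c
#include <asm/unaligned.h>

/* Hedged sketch: decode a 16-bit little-endian match distance field. */
static unsigned int example_read_distance(const unsigned char *ip)
{
        return get_unaligned_le16(ip) >> 2;
}
```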
diff --git a/lib/plist.c b/lib/plist.c index 3074a02272f3..d6c64a824e1d 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
| @@ -31,12 +31,13 @@ | |||
| 31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, | 31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, |
| 32 | struct list_head *n) | 32 | struct list_head *n) |
| 33 | { | 33 | { |
| 34 | if (n->prev != p || p->next != n) { | 34 | WARN(n->prev != p || p->next != n, |
| 35 | printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev); | 35 | "top: %p, n: %p, p: %p\n" |
| 36 | printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev); | 36 | "prev: %p, n: %p, p: %p\n" |
| 37 | printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev); | 37 | "next: %p, n: %p, p: %p\n", |
| 38 | WARN_ON(1); | 38 | t, t->next, t->prev, |
| 39 | } | 39 | p, p->next, p->prev, |
| 40 | n, n->next, n->prev); | ||
| 40 | } | 41 | } |
| 41 | 42 | ||
| 42 | static void plist_check_list(struct list_head *top) | 43 | static void plist_check_list(struct list_head *top) |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 56ec21a7f73d..be86b32bc874 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert); | |||
| 359 | * Returns: the slot corresponding to the position @index in the | 359 | * Returns: the slot corresponding to the position @index in the |
| 360 | * radix tree @root. This is useful for update-if-exists operations. | 360 | * radix tree @root. This is useful for update-if-exists operations. |
| 361 | * | 361 | * |
| 362 | * This function cannot be called under rcu_read_lock, it must be | 362 | * This function can be called under rcu_read_lock iff the slot is not |
| 363 | * excluded from writers, as must the returned slot for subsequent | 363 | * modified by radix_tree_replace_slot, otherwise it must be called |
| 364 | * use by radix_tree_deref_slot() and radix_tree_replace slot. | 364 | * exclusive from other writers. Any dereference of the slot must be done |
| 365 | * Caller must hold tree write locked across slot lookup and | 365 | * using radix_tree_deref_slot. |
| 366 | * replace. | ||
| 367 | */ | 366 | */ |
| 368 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | 367 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) |
| 369 | { | 368 | { |
| 370 | unsigned int height, shift; | 369 | unsigned int height, shift; |
| 371 | struct radix_tree_node *node, **slot; | 370 | struct radix_tree_node *node, **slot; |
| 372 | 371 | ||
| 373 | node = root->rnode; | 372 | node = rcu_dereference(root->rnode); |
| 374 | if (node == NULL) | 373 | if (node == NULL) |
| 375 | return NULL; | 374 | return NULL; |
| 376 | 375 | ||
| @@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | |||
| 390 | do { | 389 | do { |
| 391 | slot = (struct radix_tree_node **) | 390 | slot = (struct radix_tree_node **) |
| 392 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 391 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); |
| 393 | node = *slot; | 392 | node = rcu_dereference(*slot); |
| 394 | if (node == NULL) | 393 | if (node == NULL) |
| 395 | return NULL; | 394 | return NULL; |
| 396 | 395 | ||
| @@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, | |||
| 667 | EXPORT_SYMBOL(radix_tree_next_hole); | 666 | EXPORT_SYMBOL(radix_tree_next_hole); |
| 668 | 667 | ||
| 669 | static unsigned int | 668 | static unsigned int |
| 670 | __lookup(struct radix_tree_node *slot, void **results, unsigned long index, | 669 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, |
| 671 | unsigned int max_items, unsigned long *next_index) | 670 | unsigned int max_items, unsigned long *next_index) |
| 672 | { | 671 | { |
| 673 | unsigned int nr_found = 0; | 672 | unsigned int nr_found = 0; |
| @@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index, | |||
| 701 | 700 | ||
| 702 | /* Bottom level: grab some items */ | 701 | /* Bottom level: grab some items */ |
| 703 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { | 702 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { |
| 704 | struct radix_tree_node *node; | ||
| 705 | index++; | 703 | index++; |
| 706 | node = slot->slots[i]; | 704 | if (slot->slots[i]) { |
| 707 | if (node) { | 705 | results[nr_found++] = &(slot->slots[i]); |
| 708 | results[nr_found++] = rcu_dereference(node); | ||
| 709 | if (nr_found == max_items) | 706 | if (nr_found == max_items) |
| 710 | goto out; | 707 | goto out; |
| 711 | } | 708 | } |
| @@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 759 | 756 | ||
| 760 | ret = 0; | 757 | ret = 0; |
| 761 | while (ret < max_items) { | 758 | while (ret < max_items) { |
| 762 | unsigned int nr_found; | 759 | unsigned int nr_found, slots_found, i; |
| 763 | unsigned long next_index; /* Index of next search */ | 760 | unsigned long next_index; /* Index of next search */ |
| 764 | 761 | ||
| 765 | if (cur_index > max_index) | 762 | if (cur_index > max_index) |
| 766 | break; | 763 | break; |
| 767 | nr_found = __lookup(node, results + ret, cur_index, | 764 | slots_found = __lookup(node, (void ***)results + ret, cur_index, |
| 768 | max_items - ret, &next_index); | 765 | max_items - ret, &next_index); |
| 766 | nr_found = 0; | ||
| 767 | for (i = 0; i < slots_found; i++) { | ||
| 768 | struct radix_tree_node *slot; | ||
| 769 | slot = *(((void ***)results)[ret + i]); | ||
| 770 | if (!slot) | ||
| 771 | continue; | ||
| 772 | results[ret + nr_found] = rcu_dereference(slot); | ||
| 773 | nr_found++; | ||
| 774 | } | ||
| 769 | ret += nr_found; | 775 | ret += nr_found; |
| 770 | if (next_index == 0) | 776 | if (next_index == 0) |
| 771 | break; | 777 | break; |
| @@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 776 | } | 782 | } |
| 777 | EXPORT_SYMBOL(radix_tree_gang_lookup); | 783 | EXPORT_SYMBOL(radix_tree_gang_lookup); |
| 778 | 784 | ||
| 785 | /** | ||
| 786 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree | ||
| 787 | * @root: radix tree root | ||
| 788 | * @results: where the results of the lookup are placed | ||
| 789 | * @first_index: start the lookup from this key | ||
| 790 | * @max_items: place up to this many items at *results | ||
| 791 | * | ||
| 792 | * Performs an index-ascending scan of the tree for present items. Places | ||
| 793 | * their slots at *@results and returns the number of items which were | ||
| 794 | * placed at *@results. | ||
| 795 | * | ||
| 796 | * The implementation is naive. | ||
| 797 | * | ||
| 798 | * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must | ||
| 799 | * be dereferenced with radix_tree_deref_slot, and if using only RCU | ||
| 800 | * protection, radix_tree_deref_slot may fail requiring a retry. | ||
| 801 | */ | ||
| 802 | unsigned int | ||
| 803 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | ||
| 804 | unsigned long first_index, unsigned int max_items) | ||
| 805 | { | ||
| 806 | unsigned long max_index; | ||
| 807 | struct radix_tree_node *node; | ||
| 808 | unsigned long cur_index = first_index; | ||
| 809 | unsigned int ret; | ||
| 810 | |||
| 811 | node = rcu_dereference(root->rnode); | ||
| 812 | if (!node) | ||
| 813 | return 0; | ||
| 814 | |||
| 815 | if (!radix_tree_is_indirect_ptr(node)) { | ||
| 816 | if (first_index > 0) | ||
| 817 | return 0; | ||
| 818 | results[0] = (void **)&root->rnode; | ||
| 819 | return 1; | ||
| 820 | } | ||
| 821 | node = radix_tree_indirect_to_ptr(node); | ||
| 822 | |||
| 823 | max_index = radix_tree_maxindex(node->height); | ||
| 824 | |||
| 825 | ret = 0; | ||
| 826 | while (ret < max_items) { | ||
| 827 | unsigned int slots_found; | ||
| 828 | unsigned long next_index; /* Index of next search */ | ||
| 829 | |||
| 830 | if (cur_index > max_index) | ||
| 831 | break; | ||
| 832 | slots_found = __lookup(node, results + ret, cur_index, | ||
| 833 | max_items - ret, &next_index); | ||
| 834 | ret += slots_found; | ||
| 835 | if (next_index == 0) | ||
| 836 | break; | ||
| 837 | cur_index = next_index; | ||
| 838 | } | ||
| 839 | |||
| 840 | return ret; | ||
| 841 | } | ||
| 842 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | ||
| 843 | |||
| 779 | /* | 844 | /* |
| 780 | * FIXME: the two tag_get()s here should use find_next_bit() instead of | 845 | * FIXME: the two tag_get()s here should use find_next_bit() instead of |
| 781 | * open-coding the search. | 846 | * open-coding the search. |
| 782 | */ | 847 | */ |
| 783 | static unsigned int | 848 | static unsigned int |
| 784 | __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | 849 | __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, |
| 785 | unsigned int max_items, unsigned long *next_index, unsigned int tag) | 850 | unsigned int max_items, unsigned long *next_index, unsigned int tag) |
| 786 | { | 851 | { |
| 787 | unsigned int nr_found = 0; | 852 | unsigned int nr_found = 0; |
| @@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | |||
| 811 | unsigned long j = index & RADIX_TREE_MAP_MASK; | 876 | unsigned long j = index & RADIX_TREE_MAP_MASK; |
| 812 | 877 | ||
| 813 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { | 878 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { |
| 814 | struct radix_tree_node *node; | ||
| 815 | index++; | 879 | index++; |
| 816 | if (!tag_get(slot, tag, j)) | 880 | if (!tag_get(slot, tag, j)) |
| 817 | continue; | 881 | continue; |
| 818 | node = slot->slots[j]; | ||
| 819 | /* | 882 | /* |
| 820 | * Even though the tag was found set, we need to | 883 | * Even though the tag was found set, we need to |
| 821 | * recheck that we have a non-NULL node, because | 884 | * recheck that we have a non-NULL node, because |
| @@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | |||
| 826 | * lookup ->slots[x] without a lock (ie. can't | 889 | * lookup ->slots[x] without a lock (ie. can't |
| 827 | * rely on its value remaining the same). | 890 | * rely on its value remaining the same). |
| 828 | */ | 891 | */ |
| 829 | if (node) { | 892 | if (slot->slots[j]) { |
| 830 | node = rcu_dereference(node); | 893 | results[nr_found++] = &(slot->slots[j]); |
| 831 | results[nr_found++] = node; | ||
| 832 | if (nr_found == max_items) | 894 | if (nr_found == max_items) |
| 833 | goto out; | 895 | goto out; |
| 834 | } | 896 | } |
| @@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
| 887 | 949 | ||
| 888 | ret = 0; | 950 | ret = 0; |
| 889 | while (ret < max_items) { | 951 | while (ret < max_items) { |
| 890 | unsigned int nr_found; | 952 | unsigned int nr_found, slots_found, i; |
| 891 | unsigned long next_index; /* Index of next search */ | 953 | unsigned long next_index; /* Index of next search */ |
| 892 | 954 | ||
| 893 | if (cur_index > max_index) | 955 | if (cur_index > max_index) |
| 894 | break; | 956 | break; |
| 895 | nr_found = __lookup_tag(node, results + ret, cur_index, | 957 | slots_found = __lookup_tag(node, (void ***)results + ret, |
| 896 | max_items - ret, &next_index, tag); | 958 | cur_index, max_items - ret, &next_index, tag); |
| 959 | nr_found = 0; | ||
| 960 | for (i = 0; i < slots_found; i++) { | ||
| 961 | struct radix_tree_node *slot; | ||
| 962 | slot = *(((void ***)results)[ret + i]); | ||
| 963 | if (!slot) | ||
| 964 | continue; | ||
| 965 | results[ret + nr_found] = rcu_dereference(slot); | ||
| 966 | nr_found++; | ||
| 967 | } | ||
| 897 | ret += nr_found; | 968 | ret += nr_found; |
| 898 | if (next_index == 0) | 969 | if (next_index == 0) |
| 899 | break; | 970 | break; |
| @@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
| 905 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | 976 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); |
| 906 | 977 | ||
| 907 | /** | 978 | /** |
| 979 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a | ||
| 980 | * radix tree based on a tag | ||
| 981 | * @root: radix tree root | ||
| 982 | * @results: where the results of the lookup are placed | ||
| 983 | * @first_index: start the lookup from this key | ||
| 984 | * @max_items: place up to this many items at *results | ||
| 985 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | ||
| 986 | * | ||
| 987 | * Performs an index-ascending scan of the tree for present items which | ||
| 988 | * have the tag indexed by @tag set. Places the slots at *@results and | ||
| 989 | * returns the number of slots which were placed at *@results. | ||
| 990 | */ | ||
| 991 | unsigned int | ||
| 992 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | ||
| 993 | unsigned long first_index, unsigned int max_items, | ||
| 994 | unsigned int tag) | ||
| 995 | { | ||
| 996 | struct radix_tree_node *node; | ||
| 997 | unsigned long max_index; | ||
| 998 | unsigned long cur_index = first_index; | ||
| 999 | unsigned int ret; | ||
| 1000 | |||
| 1001 | /* check the root's tag bit */ | ||
| 1002 | if (!root_tag_get(root, tag)) | ||
| 1003 | return 0; | ||
| 1004 | |||
| 1005 | node = rcu_dereference(root->rnode); | ||
| 1006 | if (!node) | ||
| 1007 | return 0; | ||
| 1008 | |||
| 1009 | if (!radix_tree_is_indirect_ptr(node)) { | ||
| 1010 | if (first_index > 0) | ||
| 1011 | return 0; | ||
| 1012 | results[0] = (void **)&root->rnode; | ||
| 1013 | return 1; | ||
| 1014 | } | ||
| 1015 | node = radix_tree_indirect_to_ptr(node); | ||
| 1016 | |||
| 1017 | max_index = radix_tree_maxindex(node->height); | ||
| 1018 | |||
| 1019 | ret = 0; | ||
| 1020 | while (ret < max_items) { | ||
| 1021 | unsigned int slots_found; | ||
| 1022 | unsigned long next_index; /* Index of next search */ | ||
| 1023 | |||
| 1024 | if (cur_index > max_index) | ||
| 1025 | break; | ||
| 1026 | slots_found = __lookup_tag(node, results + ret, | ||
| 1027 | cur_index, max_items - ret, &next_index, tag); | ||
| 1028 | ret += slots_found; | ||
| 1029 | if (next_index == 0) | ||
| 1030 | break; | ||
| 1031 | cur_index = next_index; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | return ret; | ||
| 1035 | } | ||
| 1036 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | ||
| 1037 | |||
| 1038 | |||
| 1039 | /** | ||
| 908 | * radix_tree_shrink - shrink height of a radix tree to minimal | 1040 | * radix_tree_shrink - shrink height of a radix tree to minimal |
| 909 | * @root radix tree root | 1041 | * @root radix tree root |
| 910 | */ | 1042 | */ |
| @@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) | |||
| 1051 | EXPORT_SYMBOL(radix_tree_tagged); | 1183 | EXPORT_SYMBOL(radix_tree_tagged); |
| 1052 | 1184 | ||
| 1053 | static void | 1185 | static void |
| 1054 | radix_tree_node_ctor(struct kmem_cache *cachep, void *node) | 1186 | radix_tree_node_ctor(void *node) |
| 1055 | { | 1187 | { |
| 1056 | memset(node, 0, sizeof(struct radix_tree_node)); | 1188 | memset(node, 0, sizeof(struct radix_tree_node)); |
| 1057 | } | 1189 | } |
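For readers of the new slot-based gang lookups above, a minimal sketch of a caller under RCU-only protection; the tree, the item type and use_item() are hypothetical, and radix_tree_deref_slot() is the dereference helper the kerneldoc refers to:

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    struct my_item;                              /* hypothetical item type */
    extern void use_item(struct my_item *item);  /* hypothetical consumer */

    static void scan_items(struct radix_tree_root *tree)
    {
            void **slots[16];
            unsigned int i, nr;

            rcu_read_lock();
            nr = radix_tree_gang_lookup_slot(tree, slots, 0, 16);
            for (i = 0; i < nr; i++) {
                    /* deref may return NULL if the slot changed; retry if needed */
                    struct my_item *item = radix_tree_deref_slot(slots[i]);

                    if (!item)
                            continue;
                    use_item(item);
            }
            rcu_read_unlock();
    }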
diff --git a/lib/random32.c b/lib/random32.c index ca87d86992bd..217d5c4b666d 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
| @@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state) | |||
| 56 | return (state->s1 ^ state->s2 ^ state->s3); | 56 | return (state->s1 ^ state->s2 ^ state->s3); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static void __set_random32(struct rnd_state *state, unsigned long s) | 59 | /* |
| 60 | * Handle minimum values for seeds | ||
| 61 | */ | ||
| 62 | static inline u32 __seed(u32 x, u32 m) | ||
| 60 | { | 63 | { |
| 61 | if (s == 0) | 64 | return (x < m) ? x + m : x; |
| 62 | s = 1; /* default seed is 1 */ | ||
| 63 | |||
| 64 | #define LCG(n) (69069 * n) | ||
| 65 | state->s1 = LCG(s); | ||
| 66 | state->s2 = LCG(state->s1); | ||
| 67 | state->s3 = LCG(state->s2); | ||
| 68 | |||
| 69 | /* "warm it up" */ | ||
| 70 | __random32(state); | ||
| 71 | __random32(state); | ||
| 72 | __random32(state); | ||
| 73 | __random32(state); | ||
| 74 | __random32(state); | ||
| 75 | __random32(state); | ||
| 76 | } | 65 | } |
| 77 | 66 | ||
| 78 | /** | 67 | /** |
| @@ -107,7 +96,7 @@ void srandom32(u32 entropy) | |||
| 107 | */ | 96 | */ |
| 108 | for_each_possible_cpu (i) { | 97 | for_each_possible_cpu (i) { |
| 109 | struct rnd_state *state = &per_cpu(net_rand_state, i); | 98 | struct rnd_state *state = &per_cpu(net_rand_state, i); |
| 110 | __set_random32(state, state->s1 ^ entropy); | 99 | state->s1 = __seed(state->s1 ^ entropy, 1); |
| 111 | } | 100 | } |
| 112 | } | 101 | } |
| 113 | EXPORT_SYMBOL(srandom32); | 102 | EXPORT_SYMBOL(srandom32); |
| @@ -122,7 +111,19 @@ static int __init random32_init(void) | |||
| 122 | 111 | ||
| 123 | for_each_possible_cpu(i) { | 112 | for_each_possible_cpu(i) { |
| 124 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 113 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
| 125 | __set_random32(state, i + jiffies); | 114 | |
| 115 | #define LCG(x) ((x) * 69069) /* super-duper LCG */ | ||
| 116 | state->s1 = __seed(LCG(i + jiffies), 1); | ||
| 117 | state->s2 = __seed(LCG(state->s1), 7); | ||
| 118 | state->s3 = __seed(LCG(state->s2), 15); | ||
| 119 | |||
| 120 | /* "warm it up" */ | ||
| 121 | __random32(state); | ||
| 122 | __random32(state); | ||
| 123 | __random32(state); | ||
| 124 | __random32(state); | ||
| 125 | __random32(state); | ||
| 126 | __random32(state); | ||
| 126 | } | 127 | } |
| 127 | return 0; | 128 | return 0; |
| 128 | } | 129 | } |
| @@ -135,13 +136,18 @@ core_initcall(random32_init); | |||
| 135 | static int __init random32_reseed(void) | 136 | static int __init random32_reseed(void) |
| 136 | { | 137 | { |
| 137 | int i; | 138 | int i; |
| 138 | unsigned long seed; | ||
| 139 | 139 | ||
| 140 | for_each_possible_cpu(i) { | 140 | for_each_possible_cpu(i) { |
| 141 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 141 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
| 142 | u32 seeds[3]; | ||
| 143 | |||
| 144 | get_random_bytes(&seeds, sizeof(seeds)); | ||
| 145 | state->s1 = __seed(seeds[0], 1); | ||
| 146 | state->s2 = __seed(seeds[1], 7); | ||
| 147 | state->s3 = __seed(seeds[2], 15); | ||
| 142 | 148 | ||
| 143 | get_random_bytes(&seed, sizeof(seed)); | 149 | /* mix it in */ |
| 144 | __set_random32(state, seed); | 150 | __random32(state); |
| 145 | } | 151 | } |
| 146 | return 0; | 152 | return 0; |
| 147 | } | 153 | } |
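The new seeding path clamps each component with __seed() so it never drops below the generator's minimum (1, 7 and 15 in the patch) before the warm-up rounds run. A standalone sketch of just that clamping and LCG expansion, with the per-cpu and get_random_bytes() machinery stripped out; the three-word rnd_state layout is assumed from the patch:

    #include <stdio.h>

    struct rnd_state { unsigned int s1, s2, s3; };   /* assumed layout */

    /* Handle minimum values for seeds, as in the patch. */
    static unsigned int __seed(unsigned int x, unsigned int m)
    {
            return (x < m) ? x + m : x;
    }

    #define LCG(x) ((x) * 69069)    /* "super-duper" LCG from the patch */

    static void seed_state(struct rnd_state *state, unsigned int s)
    {
            state->s1 = __seed(LCG(s), 1);
            state->s2 = __seed(LCG(state->s1), 7);
            state->s3 = __seed(LCG(state->s2), 15);
            /* the kernel follows this with six warm-up __random32() calls */
    }

    int main(void)
    {
            struct rnd_state st;

            seed_state(&st, 12345);
            printf("%u %u %u\n", st.s1, st.s2, st.s3);
            return 0;
    }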
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 485e3040dcd4..26187edcc7ea 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
| @@ -3,6 +3,9 @@ | |||
| 3 | * | 3 | * |
| 4 | * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> | 4 | * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> |
| 5 | * | 5 | * |
| 6 | * 2008-05-01 rewrite the function and use a ratelimit_state data struct as a | ||
| 7 | * parameter. Now every user can use their own standalone ratelimit_state. | ||
| 8 | * | ||
| 6 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
| 7 | * | 10 | * |
| 8 | */ | 11 | */ |
| @@ -11,41 +14,44 @@ | |||
| 11 | #include <linux/jiffies.h> | 14 | #include <linux/jiffies.h> |
| 12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 13 | 16 | ||
| 17 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
| 18 | |||
| 14 | /* | 19 | /* |
| 15 | * __ratelimit - rate limiting | 20 | * __ratelimit - rate limiting |
| 16 | * @ratelimit_jiffies: minimum time in jiffies between two callbacks | 21 | * @rs: ratelimit_state data |
| 17 | * @ratelimit_burst: number of callbacks we do before ratelimiting | ||
| 18 | * | 22 | * |
| 19 | * This enforces a rate limit: not more than @ratelimit_burst callbacks | 23 | * This enforces a rate limit: not more than @rs->burst callbacks |
| 20 | * in every ratelimit_jiffies | 24 | * in every @rs->interval jiffies |
| 21 | */ | 25 | */ |
| 22 | int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) | 26 | int __ratelimit(struct ratelimit_state *rs) |
| 23 | { | 27 | { |
| 24 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
| 25 | static unsigned toks = 10 * 5 * HZ; | ||
| 26 | static unsigned long last_msg; | ||
| 27 | static int missed; | ||
| 28 | unsigned long flags; | 28 | unsigned long flags; |
| 29 | unsigned long now = jiffies; | ||
| 30 | 29 | ||
| 31 | spin_lock_irqsave(&ratelimit_lock, flags); | 30 | if (!rs->interval) |
| 32 | toks += now - last_msg; | ||
| 33 | last_msg = now; | ||
| 34 | if (toks > (ratelimit_burst * ratelimit_jiffies)) | ||
| 35 | toks = ratelimit_burst * ratelimit_jiffies; | ||
| 36 | if (toks >= ratelimit_jiffies) { | ||
| 37 | int lost = missed; | ||
| 38 | |||
| 39 | missed = 0; | ||
| 40 | toks -= ratelimit_jiffies; | ||
| 41 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
| 42 | if (lost) | ||
| 43 | printk(KERN_WARNING "%s: %d messages suppressed\n", | ||
| 44 | __func__, lost); | ||
| 45 | return 1; | 31 | return 1; |
| 32 | |||
| 33 | spin_lock_irqsave(&ratelimit_lock, flags); | ||
| 34 | if (!rs->begin) | ||
| 35 | rs->begin = jiffies; | ||
| 36 | |||
| 37 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | ||
| 38 | if (rs->missed) | ||
| 39 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | ||
| 40 | __func__, rs->missed); | ||
| 41 | rs->begin = 0; | ||
| 42 | rs->printed = 0; | ||
| 43 | rs->missed = 0; | ||
| 46 | } | 44 | } |
| 47 | missed++; | 45 | if (rs->burst && rs->burst > rs->printed) |
| 46 | goto print; | ||
| 47 | |||
| 48 | rs->missed++; | ||
| 48 | spin_unlock_irqrestore(&ratelimit_lock, flags); | 49 | spin_unlock_irqrestore(&ratelimit_lock, flags); |
| 49 | return 0; | 50 | return 0; |
| 51 | |||
| 52 | print: | ||
| 53 | rs->printed++; | ||
| 54 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
| 55 | return 1; | ||
| 50 | } | 56 | } |
| 51 | EXPORT_SYMBOL(__ratelimit); | 57 | EXPORT_SYMBOL(__ratelimit); |
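With the state moved into a struct, each call site can carry its own limiter. A minimal sketch of such a caller; the interval/burst values, the report_error() wrapper and the direct field initialization are illustrative only (the ratelimit header may also offer an initializer macro):

    #include <linux/ratelimit.h>
    #include <linux/kernel.h>

    /* Allow at most 10 messages per 5-second window for this call site. */
    static struct ratelimit_state my_rs = {
            .interval = 5 * HZ,
            .burst    = 10,
    };

    void report_error(int err)
    {
            if (__ratelimit(&my_rs))
                    printk(KERN_WARNING "operation failed: %d\n", err);
    }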
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index b80c21100d78..876ba6d5b670 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
| 295 | EXPORT_SYMBOL(sg_alloc_table); | 295 | EXPORT_SYMBOL(sg_alloc_table); |
| 296 | 296 | ||
| 297 | /** | 297 | /** |
| 298 | * sg_miter_start - start mapping iteration over a sg list | ||
| 299 | * @miter: sg mapping iter to be started | ||
| 300 | * @sgl: sg list to iterate over | ||
| 301 | * @nents: number of sg entries | ||
| 302 | * | ||
| 303 | * Description: | ||
| 304 | * Starts mapping iterator @miter. | ||
| 305 | * | ||
| 306 | * Context: | ||
| 307 | * Don't care. | ||
| 308 | */ | ||
| 309 | void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | ||
| 310 | unsigned int nents, unsigned int flags) | ||
| 311 | { | ||
| 312 | memset(miter, 0, sizeof(struct sg_mapping_iter)); | ||
| 313 | |||
| 314 | miter->__sg = sgl; | ||
| 315 | miter->__nents = nents; | ||
| 316 | miter->__offset = 0; | ||
| 317 | miter->__flags = flags; | ||
| 318 | } | ||
| 319 | EXPORT_SYMBOL(sg_miter_start); | ||
| 320 | |||
| 321 | /** | ||
| 322 | * sg_miter_next - proceed mapping iterator to the next mapping | ||
| 323 | * @miter: sg mapping iter to proceed | ||
| 324 | * | ||
| 325 | * Description: | ||
| 326 | * Proceeds @miter@ to the next mapping. @miter@ should have been | ||
| 327 | * started using sg_miter_start(). On successful return, | ||
| 328 | * @miter@->page, @miter@->addr and @miter@->length point to the | ||
| 329 | * current mapping. | ||
| 330 | * | ||
| 331 | * Context: | ||
| 332 | * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till | ||
| 333 | * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC. | ||
| 334 | * | ||
| 335 | * Returns: | ||
| 336 | * true if @miter contains the next mapping. false if end of sg | ||
| 337 | * list is reached. | ||
| 338 | */ | ||
| 339 | bool sg_miter_next(struct sg_mapping_iter *miter) | ||
| 340 | { | ||
| 341 | unsigned int off, len; | ||
| 342 | |||
| 343 | /* check for end and drop resources from the last iteration */ | ||
| 344 | if (!miter->__nents) | ||
| 345 | return false; | ||
| 346 | |||
| 347 | sg_miter_stop(miter); | ||
| 348 | |||
| 349 | /* get to the next sg if necessary. __offset is adjusted by stop */ | ||
| 350 | if (miter->__offset == miter->__sg->length && --miter->__nents) { | ||
| 351 | miter->__sg = sg_next(miter->__sg); | ||
| 352 | miter->__offset = 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | /* map the next page */ | ||
| 356 | off = miter->__sg->offset + miter->__offset; | ||
| 357 | len = miter->__sg->length - miter->__offset; | ||
| 358 | |||
| 359 | miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); | ||
| 360 | off &= ~PAGE_MASK; | ||
| 361 | miter->length = min_t(unsigned int, len, PAGE_SIZE - off); | ||
| 362 | miter->consumed = miter->length; | ||
| 363 | |||
| 364 | if (miter->__flags & SG_MITER_ATOMIC) | ||
| 365 | miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off; | ||
| 366 | else | ||
| 367 | miter->addr = kmap(miter->page) + off; | ||
| 368 | |||
| 369 | return true; | ||
| 370 | } | ||
| 371 | EXPORT_SYMBOL(sg_miter_next); | ||
| 372 | |||
| 373 | /** | ||
| 374 | * sg_miter_stop - stop mapping iteration | ||
| 375 | * @miter: sg mapping iter to be stopped | ||
| 376 | * | ||
| 377 | * Description: | ||
| 378 | * Stops mapping iterator @miter. @miter should have been started | ||
| 379 | * using sg_miter_start(). A stopped iteration can be | ||
| 380 | * resumed by calling sg_miter_next() on it. This is useful when | ||
| 381 | * resources (kmap) need to be released during iteration. | ||
| 382 | * | ||
| 383 | * Context: | ||
| 384 | * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. | ||
| 385 | */ | ||
| 386 | void sg_miter_stop(struct sg_mapping_iter *miter) | ||
| 387 | { | ||
| 388 | WARN_ON(miter->consumed > miter->length); | ||
| 389 | |||
| 390 | /* drop resources from the last iteration */ | ||
| 391 | if (miter->addr) { | ||
| 392 | miter->__offset += miter->consumed; | ||
| 393 | |||
| 394 | if (miter->__flags & SG_MITER_ATOMIC) { | ||
| 395 | WARN_ON(!irqs_disabled()); | ||
| 396 | kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); | ||
| 397 | } else | ||
| 398 | kunmap(miter->addr); | ||
| 399 | |||
| 400 | miter->page = NULL; | ||
| 401 | miter->addr = NULL; | ||
| 402 | miter->length = 0; | ||
| 403 | miter->consumed = 0; | ||
| 404 | } | ||
| 405 | } | ||
| 406 | EXPORT_SYMBOL(sg_miter_stop); | ||
| 407 | |||
| 408 | /** | ||
| 298 | * sg_copy_buffer - Copy data between a linear buffer and an SG list | 409 | * sg_copy_buffer - Copy data between a linear buffer and an SG list |
| 299 | * @sgl: The SG list | 410 | * @sgl: The SG list |
| 300 | * @nents: Number of SG entries | 411 | * @nents: Number of SG entries |
| @@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table); | |||
| 309 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | 420 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, |
| 310 | void *buf, size_t buflen, int to_buffer) | 421 | void *buf, size_t buflen, int to_buffer) |
| 311 | { | 422 | { |
| 312 | struct scatterlist *sg; | 423 | unsigned int offset = 0; |
| 313 | size_t buf_off = 0; | 424 | struct sg_mapping_iter miter; |
| 314 | int i; | 425 | |
| 315 | 426 | sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); | |
| 316 | WARN_ON(!irqs_disabled()); | 427 | |
| 317 | 428 | while (sg_miter_next(&miter) && offset < buflen) { | |
| 318 | for_each_sg(sgl, sg, nents, i) { | 429 | unsigned int len; |
| 319 | struct page *page; | 430 | |
| 320 | int n = 0; | 431 | len = min(miter.length, buflen - offset); |
| 321 | unsigned int sg_off = sg->offset; | 432 | |
| 322 | unsigned int sg_copy = sg->length; | 433 | if (to_buffer) |
| 323 | 434 | memcpy(buf + offset, miter.addr, len); | |
| 324 | if (sg_copy > buflen) | 435 | else { |
| 325 | sg_copy = buflen; | 436 | memcpy(miter.addr, buf + offset, len); |
| 326 | buflen -= sg_copy; | 437 | flush_kernel_dcache_page(miter.page); |
| 327 | |||
| 328 | while (sg_copy > 0) { | ||
| 329 | unsigned int page_copy; | ||
| 330 | void *p; | ||
| 331 | |||
| 332 | page_copy = PAGE_SIZE - sg_off; | ||
| 333 | if (page_copy > sg_copy) | ||
| 334 | page_copy = sg_copy; | ||
| 335 | |||
| 336 | page = nth_page(sg_page(sg), n); | ||
| 337 | p = kmap_atomic(page, KM_BIO_SRC_IRQ); | ||
| 338 | |||
| 339 | if (to_buffer) | ||
| 340 | memcpy(buf + buf_off, p + sg_off, page_copy); | ||
| 341 | else { | ||
| 342 | memcpy(p + sg_off, buf + buf_off, page_copy); | ||
| 343 | flush_kernel_dcache_page(page); | ||
| 344 | } | ||
| 345 | |||
| 346 | kunmap_atomic(p, KM_BIO_SRC_IRQ); | ||
| 347 | |||
| 348 | buf_off += page_copy; | ||
| 349 | sg_off += page_copy; | ||
| 350 | if (sg_off == PAGE_SIZE) { | ||
| 351 | sg_off = 0; | ||
| 352 | n++; | ||
| 353 | } | ||
| 354 | sg_copy -= page_copy; | ||
| 355 | } | 438 | } |
| 356 | 439 | ||
| 357 | if (!buflen) | 440 | offset += len; |
| 358 | break; | ||
| 359 | } | 441 | } |
| 360 | 442 | ||
| 361 | return buf_off; | 443 | sg_miter_stop(&miter); |
| 444 | |||
| 445 | return offset; | ||
| 362 | } | 446 | } |
| 363 | 447 | ||
| 364 | /** | 448 | /** |
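A sketch of the new mapping iterator in a caller's hands, mirroring the loop sg_copy_buffer() now uses internally; the zero-fill purpose and the function name are invented, and the non-atomic flags value (0) means the helper may sleep:

    #include <linux/scatterlist.h>
    #include <linux/string.h>

    /* Zero every byte described by an sg list, one mapped chunk at a time. */
    static void sg_zero(struct scatterlist *sgl, unsigned int nents)
    {
            struct sg_mapping_iter miter;

            sg_miter_start(&miter, sgl, nents, 0);  /* no SG_MITER_ATOMIC: plain kmap() */
            while (sg_miter_next(&miter))
                    memset(miter.addr, 0, miter.length);
            sg_miter_stop(&miter);                  /* drops the last mapping */
    }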
diff --git a/lib/show_mem.c b/lib/show_mem.c new file mode 100644 index 000000000000..238e72a18ce1 --- /dev/null +++ b/lib/show_mem.c | |||
| @@ -0,0 +1,63 @@ | |||
| 1 | /* | ||
| 2 | * Generic show_mem() implementation | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> | ||
| 5 | * All code subject to the GPL version 2. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <linux/nmi.h> | ||
| 10 | #include <linux/quicklist.h> | ||
| 11 | |||
| 12 | void show_mem(void) | ||
| 13 | { | ||
| 14 | pg_data_t *pgdat; | ||
| 15 | unsigned long total = 0, reserved = 0, shared = 0, | ||
| 16 | nonshared = 0, highmem = 0; | ||
| 17 | |||
| 18 | printk(KERN_INFO "Mem-Info:\n"); | ||
| 19 | show_free_areas(); | ||
| 20 | |||
| 21 | for_each_online_pgdat(pgdat) { | ||
| 22 | unsigned long i, flags; | ||
| 23 | |||
| 24 | pgdat_resize_lock(pgdat, &flags); | ||
| 25 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
| 26 | struct page *page; | ||
| 27 | unsigned long pfn = pgdat->node_start_pfn + i; | ||
| 28 | |||
| 29 | if (unlikely(!(i % MAX_ORDER_NR_PAGES))) | ||
| 30 | touch_nmi_watchdog(); | ||
| 31 | |||
| 32 | if (!pfn_valid(pfn)) | ||
| 33 | continue; | ||
| 34 | |||
| 35 | page = pfn_to_page(pfn); | ||
| 36 | |||
| 37 | if (PageHighMem(page)) | ||
| 38 | highmem++; | ||
| 39 | |||
| 40 | if (PageReserved(page)) | ||
| 41 | reserved++; | ||
| 42 | else if (page_count(page) == 1) | ||
| 43 | nonshared++; | ||
| 44 | else if (page_count(page) > 1) | ||
| 45 | shared += page_count(page) - 1; | ||
| 46 | |||
| 47 | total++; | ||
| 48 | } | ||
| 49 | pgdat_resize_unlock(pgdat, &flags); | ||
| 50 | } | ||
| 51 | |||
| 52 | printk(KERN_INFO "%lu pages RAM\n", total); | ||
| 53 | #ifdef CONFIG_HIGHMEM | ||
| 54 | printk(KERN_INFO "%lu pages HighMem\n", highmem); | ||
| 55 | #endif | ||
| 56 | printk(KERN_INFO "%lu pages reserved\n", reserved); | ||
| 57 | printk(KERN_INFO "%lu pages shared\n", shared); | ||
| 58 | printk(KERN_INFO "%lu pages non-shared\n", nonshared); | ||
| 59 | #ifdef CONFIG_QUICKLIST | ||
| 60 | printk(KERN_INFO "%lu pages in pagetable cache\n", | ||
| 61 | quicklist_total_size()); | ||
| 62 | #endif | ||
| 63 | } | ||
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 3b4dc098181e..0f8fc22ed103 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
| @@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void) | |||
| 11 | { | 11 | { |
| 12 | unsigned long preempt_count = preempt_count(); | 12 | unsigned long preempt_count = preempt_count(); |
| 13 | int this_cpu = raw_smp_processor_id(); | 13 | int this_cpu = raw_smp_processor_id(); |
| 14 | cpumask_t this_mask; | ||
| 15 | 14 | ||
| 16 | if (likely(preempt_count)) | 15 | if (likely(preempt_count)) |
| 17 | goto out; | 16 | goto out; |
| @@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) | |||
| 23 | * Kernel threads bound to a single CPU can safely use | 22 | * Kernel threads bound to a single CPU can safely use |
| 24 | * smp_processor_id(): | 23 | * smp_processor_id(): |
| 25 | */ | 24 | */ |
| 26 | this_mask = cpumask_of_cpu(this_cpu); | 25 | if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) |
| 27 | |||
| 28 | if (cpus_equal(current->cpus_allowed, this_mask)) | ||
| 29 | goto out; | 26 | goto out; |
| 30 | 27 | ||
| 31 | /* | 28 | /* |
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d568894df8cc..977edbdbc1de 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 492 | */ | 492 | */ |
| 493 | dma_addr_t handle; | 493 | dma_addr_t handle; |
| 494 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); | 494 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); |
| 495 | if (swiotlb_dma_mapping_error(handle)) | 495 | if (swiotlb_dma_mapping_error(hwdev, handle)) |
| 496 | return NULL; | 496 | return NULL; |
| 497 | 497 | ||
| 498 | ret = bus_to_virt(handle); | 498 | ret = bus_to_virt(handle); |
| @@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | |||
| 824 | } | 824 | } |
| 825 | 825 | ||
| 826 | int | 826 | int |
| 827 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) | 827 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) |
| 828 | { | 828 | { |
| 829 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); | 829 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); |
| 830 | } | 830 | } |
diff --git a/lib/syscall.c b/lib/syscall.c new file mode 100644 index 000000000000..a4f7067f72fa --- /dev/null +++ b/lib/syscall.c | |||
| @@ -0,0 +1,75 @@ | |||
| 1 | #include <linux/ptrace.h> | ||
| 2 | #include <linux/sched.h> | ||
| 3 | #include <linux/module.h> | ||
| 4 | #include <asm/syscall.h> | ||
| 5 | |||
| 6 | static int collect_syscall(struct task_struct *target, long *callno, | ||
| 7 | unsigned long args[6], unsigned int maxargs, | ||
| 8 | unsigned long *sp, unsigned long *pc) | ||
| 9 | { | ||
| 10 | struct pt_regs *regs = task_pt_regs(target); | ||
| 11 | if (unlikely(!regs)) | ||
| 12 | return -EAGAIN; | ||
| 13 | |||
| 14 | *sp = user_stack_pointer(regs); | ||
| 15 | *pc = instruction_pointer(regs); | ||
| 16 | |||
| 17 | *callno = syscall_get_nr(target, regs); | ||
| 18 | if (*callno != -1L && maxargs > 0) | ||
| 19 | syscall_get_arguments(target, regs, 0, maxargs, args); | ||
| 20 | |||
| 21 | return 0; | ||
| 22 | } | ||
| 23 | |||
| 24 | /** | ||
| 25 | * task_current_syscall - Discover what a blocked task is doing. | ||
| 26 | * @target: thread to examine | ||
| 27 | * @callno: filled with system call number or -1 | ||
| 28 | * @args: filled with @maxargs system call arguments | ||
| 29 | * @maxargs: number of elements in @args to fill | ||
| 30 | * @sp: filled with user stack pointer | ||
| 31 | * @pc: filled with user PC | ||
| 32 | * | ||
| 33 | * If @target is blocked in a system call, returns zero with *@callno | ||
| 34 | * set to the call's number and @args filled in with its arguments. | ||
| 35 | * Registers not used for system call arguments may not be available and | ||
| 36 | * it is not kosher to use &struct user_regset calls while the system | ||
| 37 | * call is still in progress. Note we may get this result if @target | ||
| 38 | * has finished its system call but not yet returned to user mode, such | ||
| 39 | * as when it's stopped for signal handling or syscall exit tracing. | ||
| 40 | * | ||
| 41 | * If @target is blocked in the kernel during a fault or exception, | ||
| 42 | * returns zero with *@callno set to -1 and does not fill in @args. | ||
| 43 | * If so, it's now safe to examine @target using &struct user_regset | ||
| 44 | * get() calls as long as we're sure @target won't return to user mode. | ||
| 45 | * | ||
| 46 | * Returns -%EAGAIN if @target does not remain blocked. | ||
| 47 | * | ||
| 48 | * Returns -%EINVAL if @maxargs is too large (maximum is six). | ||
| 49 | */ | ||
| 50 | int task_current_syscall(struct task_struct *target, long *callno, | ||
| 51 | unsigned long args[6], unsigned int maxargs, | ||
| 52 | unsigned long *sp, unsigned long *pc) | ||
| 53 | { | ||
| 54 | long state; | ||
| 55 | unsigned long ncsw; | ||
| 56 | |||
| 57 | if (unlikely(maxargs > 6)) | ||
| 58 | return -EINVAL; | ||
| 59 | |||
| 60 | if (target == current) | ||
| 61 | return collect_syscall(target, callno, args, maxargs, sp, pc); | ||
| 62 | |||
| 63 | state = target->state; | ||
| 64 | if (unlikely(!state)) | ||
| 65 | return -EAGAIN; | ||
| 66 | |||
| 67 | ncsw = wait_task_inactive(target, state); | ||
| 68 | if (unlikely(!ncsw) || | ||
| 69 | unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || | ||
| 70 | unlikely(wait_task_inactive(target, state) != ncsw)) | ||
| 71 | return -EAGAIN; | ||
| 72 | |||
| 73 | return 0; | ||
| 74 | } | ||
| 75 | EXPORT_SYMBOL_GPL(task_current_syscall); | ||
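A hedged sketch of a task_current_syscall() caller, e.g. from a debugging or watchdog path; the reporting format and the assumption that the task is already believed to be blocked are illustrative:

    #include <linux/sched.h>
    #include <linux/kernel.h>

    static void report_blocked_syscall(struct task_struct *task)
    {
            long nr;
            unsigned long args[6], sp, pc;

            if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
                    return;         /* -EAGAIN: task did not stay blocked */

            if (nr == -1L)
                    printk(KERN_INFO "%s[%d] blocked in a fault/exception, pc=%lx\n",
                           task->comm, task->pid, pc);
            else
                    printk(KERN_INFO "%s[%d] in syscall %ld, arg0=%lx, sp=%lx\n",
                           task->comm, task->pid, nr, args[0], sp);
    }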
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 1dc2d1d18fa8..d8d1d1142248 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -220,7 +220,7 @@ int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\ | |||
| 220 | if (len == 0) \ | 220 | if (len == 0) \ |
| 221 | return -EINVAL; \ | 221 | return -EINVAL; \ |
| 222 | \ | 222 | \ |
| 223 | val = simple_strtoul(cp, &tail, base); \ | 223 | val = simple_strtou##type(cp, &tail, base); \ |
| 224 | if ((*tail == '\0') || \ | 224 | if ((*tail == '\0') || \ |
| 225 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ | 225 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ |
| 226 | *res = val; \ | 226 | *res = val; \ |
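The one-character template fix above makes the generated strict_strtoull() parse through simple_strtoull() rather than simple_strtoul(), so values wider than unsigned long are no longer truncated on 32-bit builds. A hypothetical caller, relying only on the macro signature shown in the hunk:

    #include <linux/kernel.h>

    /* parse_size() is illustrative; base 0 lets the parser auto-detect 0x/0 prefixes */
    static int parse_size(const char *arg, unsigned long long *bytes)
    {
            return strict_strtoull(arg, 0, bytes);
    }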
