author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2011-03-25 11:41:20 -0400
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2011-03-25 11:41:20 -0400
commit     7bf7e370d5919112c223a269462cd0b546903829
tree       03ccc715239df14ae168277dbccc9d9cf4d8a2c8 /lib
parent     68b1a1e786f29c900fa1c516a402e24f0ece622a
parent     d39dd11c3e6a7af5c20bfac40594db36cf270f42
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus-1
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6: (9356 commits)
  [media] rc: update for bitop name changes
  fs: simplify iget & friends
  fs: pull inode->i_lock up out of writeback_single_inode
  fs: rename inode_lock to inode_hash_lock
  fs: move i_wb_list out from under inode_lock
  fs: move i_sb_list out from under inode_lock
  fs: remove inode_lock from iput_final and prune_icache
  fs: Lock the inode LRU list separately
  fs: factor inode disposal
  fs: protect inode->i_state with inode->i_lock
  lib, arch: add filter argument to show_mem and fix private implementations
  SLUB: Write to per cpu data when allocating it
  slub: Fix debugobjects with lockless fastpath
  autofs4: Do not potentially dereference NULL pointer returned by fget() in autofs_dev_ioctl_setpipefd()
  autofs4 - remove autofs4_lock
  autofs4 - fix d_manage() return on rcu-walk
  autofs4 - fix autofs4_expire_indirect() traversal
  autofs4 - fix dentry leak in autofs4_expire_direct()
  autofs4 - reinstate last used update on access
  vfs - check non-mountpoint dentry might block in __follow_mount_rcu()
  ...

NOTE! This merge commit was created to fix a compilation error. The block tree
was merged upstream and removed the 'elv_queue_empty()' function which the new
'mtdswap' driver is using. So a simple merge of the mtd tree with upstream does
not compile. And the mtd tree has already been published, so re-basing it is
not an option.

To fix this unfortunate situation, I had to merge upstream into the mtd-2.6.git
tree without committing, put the fixup patch on top of this, and then commit
this. The result is that we do not have commits which do not compile. In other
words, this merge commit "merges" 3 things: the MTD tree, the upstream tree,
and the fixup patch.
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                   16
-rw-r--r--   lib/Kconfig.debug             47
-rw-r--r--   lib/Makefile                   6
-rw-r--r--   lib/cpu_rmap.c               269
-rw-r--r--   lib/debugobjects.c             9
-rw-r--r--   lib/dynamic_debug.c           61
-rw-r--r--   lib/find_next_bit.c           18
-rw-r--r--   lib/kernel_lock.c            143
-rw-r--r--   lib/kstrtox.c                227
-rw-r--r--   lib/list_debug.c              39
-rw-r--r--   lib/nlattr.c                   2
-rw-r--r--   lib/plist.c                  135
-rw-r--r--   lib/radix-tree.c               7
-rw-r--r--   lib/rbtree.c                   3
-rw-r--r--   lib/rwsem.c                   10
-rw-r--r--   lib/show_mem.c                 4
-rw-r--r--   lib/swiotlb.c                  6
-rw-r--r--   lib/test-kstrtox.c           739
-rw-r--r--   lib/textsearch.c              10
-rw-r--r--   lib/vsprintf.c               157
-rw-r--r--   lib/zlib_deflate/deflate.c    31
-rw-r--r--   lib/zlib_deflate/defutil.h    17
22 files changed, 1568 insertions, 388 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index b9fef78ed04f..9c10e38fc609 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -22,6 +22,9 @@ config GENERIC_FIND_FIRST_BIT
 config GENERIC_FIND_NEXT_BIT
 	bool
 
+config GENERIC_FIND_BIT_LE
+	bool
+
 config GENERIC_FIND_LAST_BIT
 	bool
 	default y
@@ -240,6 +243,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 	bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
 	depends on EXPERIMENTAL && BROKEN
 
+config CPU_RMAP
+	bool
+	depends on SMP
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
@@ -256,6 +263,13 @@ config LRU_CACHE
 	tristate
 
 config AVERAGE
-	bool
+	bool "Averaging functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require averaging functions, but a module built outside
+	  the kernel tree does. Such modules that use library averaging
+	  functions require Y here.
+
+	  If unsure, say N.
 
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3967c2356e37..df9234c5f9d1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,6 +9,17 @@ config PRINTK_TIME
 	  operations. This is useful for identifying long delays
 	  in kernel startup.
 
+config DEFAULT_MESSAGE_LOGLEVEL
+	int "Default message log level (1-7)"
+	range 1 7
+	default "4"
+	help
+	  Default log level for printk statements with no specified priority.
+
+	  This was hard-coded to KERN_WARNING since at least 2.6.10 but folks
+	  that are auditing their logs closely may want to set it to a lower
+	  priority.
+
 config ENABLE_WARN_DEPRECATED
 	bool "Enable __deprecated logic"
 	default y
@@ -102,11 +113,6 @@ config HEADERS_CHECK
 
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
-	depends on UNDEFINED || (BLACKFIN)
-	default y
-	# This option is on purpose disabled for now.
-	# It will be enabled when we are down to a reasonable number
-	# of section mismatch warnings (< 10 for an allyesconfig build)
 	help
 	  The section mismatch analysis checks if there are illegal
 	  references from one section to another section.
@@ -176,6 +182,23 @@ config HARDLOCKUP_DETECTOR
 	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
 		 !ARCH_HAS_NMI_WATCHDOG
 
+config BOOTPARAM_HARDLOCKUP_PANIC
+	bool "Panic (Reboot) On Hard Lockups"
+	depends on LOCKUP_DETECTOR
+	help
+	  Say Y here to enable the kernel to panic on "hard lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode with interrupts disabled for more than 60 seconds.
+
+	  Say N if unsure.
+
+config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
+	int
+	depends on LOCKUP_DETECTOR
+	range 0 1
+	default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
+	default 1 if BOOTPARAM_HARDLOCKUP_PANIC
+
 config BOOTPARAM_SOFTLOCKUP_PANIC
 	bool "Panic (Reboot) On Soft Lockups"
 	depends on LOCKUP_DETECTOR
@@ -470,15 +493,6 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config BKL
-	bool "Big Kernel Lock" if (SMP || PREEMPT)
-	default y
-	help
-	  This is the traditional lock that is used in old code instead
-	  of proper locking. All drivers that use the BKL should depend
-	  on this symbol.
-	  Say Y here unless you are working on removing the BKL.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -805,7 +819,7 @@ config ARCH_WANT_FRAME_POINTERS
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || \
+		(CRIS || M68K || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
@@ -1236,3 +1250,6 @@ source "samples/Kconfig"
 source "lib/Kconfig.kgdb"
 
 source "lib/Kconfig.kmemcheck"
+
+config TEST_KSTRTOX
+	tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Makefile b/lib/Makefile
index 0544c814d827..ef0f28571156 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,6 +22,8 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o
+obj-y += kstrtox.o
+obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -38,12 +40,12 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o
 obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -111,6 +113,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
 obj-$(CONFIG_AVERAGE) += average.o
 
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 000000000000..987acfafeb83
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
1/*
2 * cpu_rmap.c: CPU affinity reverse-map support
3 * Copyright 2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/cpu_rmap.h>
11#ifdef CONFIG_GENERIC_HARDIRQS
12#include <linux/interrupt.h>
13#endif
14#include <linux/module.h>
15
16/*
17 * These functions maintain a mapping from CPUs to some ordered set of
18 * objects with CPU affinities. This can be seen as a reverse-map of
19 * CPU affinity. However, we do not assume that the object affinities
20 * cover all CPUs in the system. For those CPUs not directly covered
21 * by object affinities, we attempt to find a nearest object based on
22 * CPU topology.
23 */
24
25/**
26 * alloc_cpu_rmap - allocate CPU affinity reverse-map
27 * @size: Number of objects to be mapped
28 * @flags: Allocation flags e.g. %GFP_KERNEL
29 */
30struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
31{
32 struct cpu_rmap *rmap;
33 unsigned int cpu;
34 size_t obj_offset;
35
36 /* This is a silly number of objects, and we use u16 indices. */
37 if (size > 0xffff)
38 return NULL;
39
40 /* Offset of object pointer array from base structure */
41 obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
42 sizeof(void *));
43
44 rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
45 if (!rmap)
46 return NULL;
47
48 rmap->obj = (void **)((char *)rmap + obj_offset);
49
50 /* Initially assign CPUs to objects on a rota, since we have
51 * no idea where the objects are. Use infinite distance, so
52 * any object with known distance is preferable. Include the
53 * CPUs that are not present/online, since we definitely want
54 * any newly-hotplugged CPUs to have some object assigned.
55 */
56 for_each_possible_cpu(cpu) {
57 rmap->near[cpu].index = cpu % size;
58 rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
59 }
60
61 rmap->size = size;
62 return rmap;
63}
64EXPORT_SYMBOL(alloc_cpu_rmap);
65
66/* Reevaluate nearest object for given CPU, comparing with the given
67 * neighbours at the given distance.
68 */
69static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
70 const struct cpumask *mask, u16 dist)
71{
72 int neigh;
73
74 for_each_cpu(neigh, mask) {
75 if (rmap->near[cpu].dist > dist &&
76 rmap->near[neigh].dist <= dist) {
77 rmap->near[cpu].index = rmap->near[neigh].index;
78 rmap->near[cpu].dist = dist;
79 return true;
80 }
81 }
82 return false;
83}
84
85#ifdef DEBUG
86static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
87{
88 unsigned index;
89 unsigned int cpu;
90
91 pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
92
93 for_each_possible_cpu(cpu) {
94 index = rmap->near[cpu].index;
95 pr_info("cpu %d -> obj %u (distance %u)\n",
96 cpu, index, rmap->near[cpu].dist);
97 }
98}
99#else
100static inline void
101debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
102{
103}
104#endif
105
106/**
107 * cpu_rmap_add - add object to a rmap
108 * @rmap: CPU rmap allocated with alloc_cpu_rmap()
109 * @obj: Object to add to rmap
110 *
111 * Return index of object.
112 */
113int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
114{
115 u16 index;
116
117 BUG_ON(rmap->used >= rmap->size);
118 index = rmap->used++;
119 rmap->obj[index] = obj;
120 return index;
121}
122EXPORT_SYMBOL(cpu_rmap_add);
123
124/**
125 * cpu_rmap_update - update CPU rmap following a change of object affinity
126 * @rmap: CPU rmap to update
127 * @index: Index of object whose affinity changed
128 * @affinity: New CPU affinity of object
129 */
130int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
131 const struct cpumask *affinity)
132{
133 cpumask_var_t update_mask;
134 unsigned int cpu;
135
136 if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
137 return -ENOMEM;
138
139 /* Invalidate distance for all CPUs for which this used to be
140 * the nearest object. Mark those CPUs for update.
141 */
142 for_each_online_cpu(cpu) {
143 if (rmap->near[cpu].index == index) {
144 rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
145 cpumask_set_cpu(cpu, update_mask);
146 }
147 }
148
149 debug_print_rmap(rmap, "after invalidating old distances");
150
151 /* Set distance to 0 for all CPUs in the new affinity mask.
152 * Mark all CPUs within their NUMA nodes for update.
153 */
154 for_each_cpu(cpu, affinity) {
155 rmap->near[cpu].index = index;
156 rmap->near[cpu].dist = 0;
157 cpumask_or(update_mask, update_mask,
158 cpumask_of_node(cpu_to_node(cpu)));
159 }
160
161 debug_print_rmap(rmap, "after updating neighbours");
162
163 /* Update distances based on topology */
164 for_each_cpu(cpu, update_mask) {
165 if (cpu_rmap_copy_neigh(rmap, cpu,
166 topology_thread_cpumask(cpu), 1))
167 continue;
168 if (cpu_rmap_copy_neigh(rmap, cpu,
169 topology_core_cpumask(cpu), 2))
170 continue;
171 if (cpu_rmap_copy_neigh(rmap, cpu,
172 cpumask_of_node(cpu_to_node(cpu)), 3))
173 continue;
174 /* We could continue into NUMA node distances, but for now
175 * we give up.
176 */
177 }
178
179 debug_print_rmap(rmap, "after copying neighbours");
180
181 free_cpumask_var(update_mask);
182 return 0;
183}
184EXPORT_SYMBOL(cpu_rmap_update);
185
186#ifdef CONFIG_GENERIC_HARDIRQS
187
188/* Glue between IRQ affinity notifiers and CPU rmaps */
189
190struct irq_glue {
191 struct irq_affinity_notify notify;
192 struct cpu_rmap *rmap;
193 u16 index;
194};
195
196/**
197 * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
198 * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
199 *
200 * Must be called in process context, before freeing the IRQs, and
201 * without holding any locks required by global workqueue items.
202 */
203void free_irq_cpu_rmap(struct cpu_rmap *rmap)
204{
205 struct irq_glue *glue;
206 u16 index;
207
208 if (!rmap)
209 return;
210
211 for (index = 0; index < rmap->used; index++) {
212 glue = rmap->obj[index];
213 irq_set_affinity_notifier(glue->notify.irq, NULL);
214 }
215 irq_run_affinity_notifiers();
216
217 kfree(rmap);
218}
219EXPORT_SYMBOL(free_irq_cpu_rmap);
220
221static void
222irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
223{
224 struct irq_glue *glue =
225 container_of(notify, struct irq_glue, notify);
226 int rc;
227
228 rc = cpu_rmap_update(glue->rmap, glue->index, mask);
229 if (rc)
230 pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
231}
232
233static void irq_cpu_rmap_release(struct kref *ref)
234{
235 struct irq_glue *glue =
236 container_of(ref, struct irq_glue, notify.kref);
237 kfree(glue);
238}
239
240/**
241 * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
242 * @rmap: The reverse-map
243 * @irq: The IRQ number
244 *
245 * This adds an IRQ affinity notifier that will update the reverse-map
246 * automatically.
247 *
248 * Must be called in process context, after the IRQ is allocated but
249 * before it is bound with request_irq().
250 */
251int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
252{
253 struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
254 int rc;
255
256 if (!glue)
257 return -ENOMEM;
258 glue->notify.notify = irq_cpu_rmap_notify;
259 glue->notify.release = irq_cpu_rmap_release;
260 glue->rmap = rmap;
261 glue->index = cpu_rmap_add(rmap, glue);
262 rc = irq_set_affinity_notifier(irq, &glue->notify);
263 if (rc)
264 kfree(glue);
265 return rc;
266}
267EXPORT_SYMBOL(irq_cpu_rmap_add);
268
269#endif /* CONFIG_GENERIC_HARDIRQS */
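
The comment block at the top of cpu_rmap.c describes a CPU-to-object reverse map with nearest-neighbour fallback. As a hedged illustration only (not part of this commit), a driver with one IRQ per receive queue might wire itself into the new API roughly as follows; nr_queues, queue_irq[] and rx_rmap are hypothetical driver state, and only functions exported above are used.

/* Hedged sketch: hypothetical driver glue for the API added above. */
#include <linux/cpu_rmap.h>
#include <linux/gfp.h>

static struct cpu_rmap *rx_rmap;	/* hypothetical per-driver rmap */

static int setup_rx_cpu_rmap(unsigned int nr_queues, const int *queue_irq)
{
	unsigned int i;
	int rc;

	rx_rmap = alloc_cpu_rmap(nr_queues, GFP_KERNEL);
	if (!rx_rmap)
		return -ENOMEM;

	for (i = 0; i < nr_queues; i++) {
		/* registers an affinity notifier that keeps the map current */
		rc = irq_cpu_rmap_add(rx_rmap, queue_irq[i]);
		if (rc) {
			free_irq_cpu_rmap(rx_rmap);	/* also unhooks notifiers */
			rx_rmap = NULL;
			return rc;
		}
	}
	return 0;
}

The lookup side of the API lives in the accompanying <linux/cpu_rmap.h> header, which is outside this lib/-only diff.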
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index deebcc57d4e6..9d86e45086f5 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -249,14 +249,17 @@ static struct debug_bucket *get_bucket(unsigned long addr)
 
 static void debug_print_object(struct debug_obj *obj, char *msg)
 {
+	struct debug_obj_descr *descr = obj->descr;
 	static int limit;
 
-	if (limit < 5 && obj->descr != descr_test) {
+	if (limit < 5 && descr != descr_test) {
+		void *hint = descr->debug_hint ?
+			descr->debug_hint(obj->object) : NULL;
 		limit++;
 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
-				 "object type: %s\n",
+				 "object type: %s hint: %pS\n",
 			msg, obj_states[obj->state], obj->astate,
-			obj->descr->name);
+			descr->name, hint);
 	}
 	debug_objects_warnings++;
 }
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index b335acb43be2..75ca78f3a8c9 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -7,6 +7,7 @@
  * Copyright (C) 2008 Jason Baron <jbaron@redhat.com>
  * By Greg Banks <gnb@melbourne.sgi.com>
  * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
+ * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
  */
 
 #include <linux/kernel.h>
@@ -27,6 +28,8 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/jump_label.h>
+#include <linux/hardirq.h>
+#include <linux/sched.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
@@ -63,15 +66,25 @@ static inline const char *basename(const char *path)
 	return tail ? tail+1 : path;
 }
 
+static struct { unsigned flag:8; char opt_char; } opt_array[] = {
+	{ _DPRINTK_FLAGS_PRINT, 'p' },
+	{ _DPRINTK_FLAGS_INCL_MODNAME, 'm' },
+	{ _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' },
+	{ _DPRINTK_FLAGS_INCL_LINENO, 'l' },
+	{ _DPRINTK_FLAGS_INCL_TID, 't' },
+};
+
 /* format a string into buf[] which describes the _ddebug's flags */
 static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 				    size_t maxlen)
 {
 	char *p = buf;
+	int i;
 
 	BUG_ON(maxlen < 4);
-	if (dp->flags & _DPRINTK_FLAGS_PRINT)
-		*p++ = 'p';
+	for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
+		if (dp->flags & opt_array[i].flag)
+			*p++ = opt_array[i].opt_char;
 	if (p == buf)
 		*p++ = '-';
 	*p = '\0';
@@ -343,7 +356,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 				unsigned int *maskp)
 {
 	unsigned flags = 0;
-	int op = '=';
+	int op = '=', i;
 
 	switch (*str) {
 	case '+':
@@ -358,13 +371,14 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 	printk(KERN_INFO "%s: op='%c'\n", __func__, op);
 
 	for ( ; *str ; ++str) {
-		switch (*str) {
-		case 'p':
-			flags |= _DPRINTK_FLAGS_PRINT;
-			break;
-		default:
-			return -EINVAL;
+		for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
+			if (*str == opt_array[i].opt_char) {
+				flags |= opt_array[i].flag;
+				break;
+			}
 		}
+		if (i < 0)
+			return -EINVAL;
 	}
 	if (flags == 0)
 		return -EINVAL;
@@ -413,6 +427,35 @@ static int ddebug_exec_query(char *query_string)
 	return 0;
 }
 
+int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+{
+	va_list args;
+	int res;
+
+	BUG_ON(!descriptor);
+	BUG_ON(!fmt);
+
+	va_start(args, fmt);
+	res = printk(KERN_DEBUG);
+	if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
+		if (in_interrupt())
+			res += printk(KERN_CONT "<intr> ");
+		else
+			res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
+	}
+	if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+		res += printk(KERN_CONT "%s:", descriptor->modname);
+	if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+		res += printk(KERN_CONT "%s:", descriptor->function);
+	if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
+		res += printk(KERN_CONT "%d ", descriptor->lineno);
+	res += vprintk(fmt, args);
+	va_end(args);
+
+	return res;
+}
+EXPORT_SYMBOL(__dynamic_pr_debug);
+
 static __initdata char ddebug_setup_string[1024];
 static __init int ddebug_setup_query(char *str)
 {
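
For orientation only (not part of this commit): with CONFIG_DYNAMIC_DEBUG enabled, pr_debug() resolves to __dynamic_pr_debug() above, so an ordinary call site like the hedged sketch below picks up the 'm', 'f', 'l' and new 't' prefixes once those flag characters are enabled for it through the dynamic_debug control file (a query ending in, e.g., "+pflt"); the module below is purely illustrative.

/* Hedged sketch: an ordinary pr_debug() call site; nothing here is
 * specific to this patch, it merely shows what the flag handling
 * above decorates at runtime. */
#include <linux/kernel.h>
#include <linux/module.h>

static int __init ddebug_demo_init(void)
{
	pr_debug("demo: initialised, value=%d\n", 42);
	return 0;
}
module_init(ddebug_demo_init);

static void __exit ddebug_demo_exit(void)
{
	pr_debug("demo: exiting\n");
}
module_exit(ddebug_demo_exit);

MODULE_LICENSE("GPL");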
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 24c59ded47a0..b0a8767282bf 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -160,6 +160,7 @@ EXPORT_SYMBOL(find_first_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef __BIG_ENDIAN
+#ifdef CONFIG_GENERIC_FIND_BIT_LE
 
 /* include/linux/byteorder does not support "unsigned long" type */
 static inline unsigned long ext2_swabp(const unsigned long * x)
@@ -185,15 +186,16 @@ static inline unsigned long ext2_swab(const unsigned long y)
 #endif
 }
 
-unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
-	const unsigned long *p = addr + BITOP_WORD(offset);
+	const unsigned long *p = addr;
 	unsigned long result = offset & ~(BITS_PER_LONG - 1);
 	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
+	p += BITOP_WORD(offset);
 	size -= result;
 	offset &= (BITS_PER_LONG - 1UL);
 	if (offset) {
@@ -226,18 +228,18 @@ found_middle:
 found_middle_swap:
 	return result + ffz(ext2_swab(tmp));
 }
+EXPORT_SYMBOL(find_next_zero_bit_le);
 
-EXPORT_SYMBOL(generic_find_next_zero_le_bit);
-
-unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
+unsigned long find_next_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
-	const unsigned long *p = addr + BITOP_WORD(offset);
+	const unsigned long *p = addr;
 	unsigned long result = offset & ~(BITS_PER_LONG - 1);
 	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
+	p += BITOP_WORD(offset);
 	size -= result;
 	offset &= (BITS_PER_LONG - 1UL);
 	if (offset) {
@@ -271,5 +273,7 @@ found_middle:
 found_middle_swap:
 	return result + __ffs(ext2_swab(tmp));
 }
-EXPORT_SYMBOL(generic_find_next_le_bit);
+EXPORT_SYMBOL(find_next_bit_le);
+
+#endif /* CONFIG_GENERIC_FIND_BIT_LE */
 #endif /* __BIG_ENDIAN */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index b135d04aa48a..000000000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 * lib/kernel_lock.c
3 *
4 * This is the traditional BKL - big kernel lock. Largely
5 * relegated to obsolescence, but used by various less
6 * important (or lazy) subsystems.
7 */
8#include <linux/module.h>
9#include <linux/kallsyms.h>
10#include <linux/semaphore.h>
11#include <linux/smp_lock.h>
12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
15
16/*
17 * The 'big kernel lock'
18 *
19 * This spinlock is taken and released recursively by lock_kernel()
20 * and unlock_kernel(). It is transparently dropped and reacquired
21 * over schedule(). It is used to protect legacy code that hasn't
22 * been migrated to a proper locking design yet.
23 *
24 * Don't use in new code.
25 */
26static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
27
28
29/*
30 * Acquire/release the underlying lock from the scheduler.
31 *
32 * This is called with preemption disabled, and should
33 * return an error value if it cannot get the lock and
34 * TIF_NEED_RESCHED gets set.
35 *
36 * If it successfully gets the lock, it should increment
37 * the preemption count like any spinlock does.
38 *
39 * (This works on UP too - do_raw_spin_trylock will never
40 * return false in that case)
41 */
42int __lockfunc __reacquire_kernel_lock(void)
43{
44 while (!do_raw_spin_trylock(&kernel_flag)) {
45 if (need_resched())
46 return -EAGAIN;
47 cpu_relax();
48 }
49 preempt_disable();
50 return 0;
51}
52
53void __lockfunc __release_kernel_lock(void)
54{
55 do_raw_spin_unlock(&kernel_flag);
56 preempt_enable_no_resched();
57}
58
59/*
60 * These are the BKL spinlocks - we try to be polite about preemption.
61 * If SMP is not on (ie UP preemption), this all goes away because the
62 * do_raw_spin_trylock() will always succeed.
63 */
64#ifdef CONFIG_PREEMPT
65static inline void __lock_kernel(void)
66{
67 preempt_disable();
68 if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
69 /*
70 * If preemption was disabled even before this
71 * was called, there's nothing we can be polite
72 * about - just spin.
73 */
74 if (preempt_count() > 1) {
75 do_raw_spin_lock(&kernel_flag);
76 return;
77 }
78
79 /*
80 * Otherwise, let's wait for the kernel lock
81 * with preemption enabled..
82 */
83 do {
84 preempt_enable();
85 while (raw_spin_is_locked(&kernel_flag))
86 cpu_relax();
87 preempt_disable();
88 } while (!do_raw_spin_trylock(&kernel_flag));
89 }
90}
91
92#else
93
94/*
95 * Non-preemption case - just get the spinlock
96 */
97static inline void __lock_kernel(void)
98{
99 do_raw_spin_lock(&kernel_flag);
100}
101#endif
102
103static inline void __unlock_kernel(void)
104{
105 /*
106 * the BKL is not covered by lockdep, so we open-code the
107 * unlocking sequence (and thus avoid the dep-chain ops):
108 */
109 do_raw_spin_unlock(&kernel_flag);
110 preempt_enable();
111}
112
113/*
114 * Getting the big kernel lock.
115 *
116 * This cannot happen asynchronously, so we only need to
117 * worry about other CPU's.
118 */
119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
120{
121 int depth = current->lock_depth + 1;
122
123 trace_lock_kernel(func, file, line);
124
125 if (likely(!depth)) {
126 might_sleep();
127 __lock_kernel();
128 }
129 current->lock_depth = depth;
130}
131
132void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
133{
134 BUG_ON(current->lock_depth < 0);
135 if (likely(--current->lock_depth < 0))
136 __unlock_kernel();
137
138 trace_unlock_kernel(func, file, line);
139}
140
141EXPORT_SYMBOL(_lock_kernel);
142EXPORT_SYMBOL(_unlock_kernel);
143
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
new file mode 100644
index 000000000000..05672e819f8c
--- /dev/null
+++ b/lib/kstrtox.c
@@ -0,0 +1,227 @@
1/*
2 * Convert integer string representation to an integer.
3 * If an integer doesn't fit into specified type, -E is returned.
4 *
5 * Integer starts with optional sign.
6 * kstrtou*() functions do not accept sign "-".
7 *
8 * Radix 0 means autodetection: leading "0x" implies radix 16,
9 * leading "0" implies radix 8, otherwise radix is 10.
10 * Autodetection hints work after optional sign, but not before.
11 *
12 * If -E is returned, result is not touched.
13 */
14#include <linux/ctype.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/math64.h>
18#include <linux/module.h>
19#include <linux/types.h>
20
21static inline char _tolower(const char c)
22{
23 return c | 0x20;
24}
25
26static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
27{
28 unsigned long long acc;
29 int ok;
30
31 if (base == 0) {
32 if (s[0] == '0') {
33 if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
34 base = 16;
35 else
36 base = 8;
37 } else
38 base = 10;
39 }
40 if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
41 s += 2;
42
43 acc = 0;
44 ok = 0;
45 while (*s) {
46 unsigned int val;
47
48 if ('0' <= *s && *s <= '9')
49 val = *s - '0';
50 else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
51 val = _tolower(*s) - 'a' + 10;
52 else if (*s == '\n') {
53 if (*(s + 1) == '\0')
54 break;
55 else
56 return -EINVAL;
57 } else
58 return -EINVAL;
59
60 if (val >= base)
61 return -EINVAL;
62 if (acc > div_u64(ULLONG_MAX - val, base))
63 return -ERANGE;
64 acc = acc * base + val;
65 ok = 1;
66
67 s++;
68 }
69 if (!ok)
70 return -EINVAL;
71 *res = acc;
72 return 0;
73}
74
75int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
76{
77 if (s[0] == '+')
78 s++;
79 return _kstrtoull(s, base, res);
80}
81EXPORT_SYMBOL(kstrtoull);
82
83int kstrtoll(const char *s, unsigned int base, long long *res)
84{
85 unsigned long long tmp;
86 int rv;
87
88 if (s[0] == '-') {
89 rv = _kstrtoull(s + 1, base, &tmp);
90 if (rv < 0)
91 return rv;
92 if ((long long)(-tmp) >= 0)
93 return -ERANGE;
94 *res = -tmp;
95 } else {
96 rv = kstrtoull(s, base, &tmp);
97 if (rv < 0)
98 return rv;
99 if ((long long)tmp < 0)
100 return -ERANGE;
101 *res = tmp;
102 }
103 return 0;
104}
105EXPORT_SYMBOL(kstrtoll);
106
107/* Internal, do not use. */
108int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
109{
110 unsigned long long tmp;
111 int rv;
112
113 rv = kstrtoull(s, base, &tmp);
114 if (rv < 0)
115 return rv;
116 if (tmp != (unsigned long long)(unsigned long)tmp)
117 return -ERANGE;
118 *res = tmp;
119 return 0;
120}
121EXPORT_SYMBOL(_kstrtoul);
122
123/* Internal, do not use. */
124int _kstrtol(const char *s, unsigned int base, long *res)
125{
126 long long tmp;
127 int rv;
128
129 rv = kstrtoll(s, base, &tmp);
130 if (rv < 0)
131 return rv;
132 if (tmp != (long long)(long)tmp)
133 return -ERANGE;
134 *res = tmp;
135 return 0;
136}
137EXPORT_SYMBOL(_kstrtol);
138
139int kstrtouint(const char *s, unsigned int base, unsigned int *res)
140{
141 unsigned long long tmp;
142 int rv;
143
144 rv = kstrtoull(s, base, &tmp);
145 if (rv < 0)
146 return rv;
147 if (tmp != (unsigned long long)(unsigned int)tmp)
148 return -ERANGE;
149 *res = tmp;
150 return 0;
151}
152EXPORT_SYMBOL(kstrtouint);
153
154int kstrtoint(const char *s, unsigned int base, int *res)
155{
156 long long tmp;
157 int rv;
158
159 rv = kstrtoll(s, base, &tmp);
160 if (rv < 0)
161 return rv;
162 if (tmp != (long long)(int)tmp)
163 return -ERANGE;
164 *res = tmp;
165 return 0;
166}
167EXPORT_SYMBOL(kstrtoint);
168
169int kstrtou16(const char *s, unsigned int base, u16 *res)
170{
171 unsigned long long tmp;
172 int rv;
173
174 rv = kstrtoull(s, base, &tmp);
175 if (rv < 0)
176 return rv;
177 if (tmp != (unsigned long long)(u16)tmp)
178 return -ERANGE;
179 *res = tmp;
180 return 0;
181}
182EXPORT_SYMBOL(kstrtou16);
183
184int kstrtos16(const char *s, unsigned int base, s16 *res)
185{
186 long long tmp;
187 int rv;
188
189 rv = kstrtoll(s, base, &tmp);
190 if (rv < 0)
191 return rv;
192 if (tmp != (long long)(s16)tmp)
193 return -ERANGE;
194 *res = tmp;
195 return 0;
196}
197EXPORT_SYMBOL(kstrtos16);
198
199int kstrtou8(const char *s, unsigned int base, u8 *res)
200{
201 unsigned long long tmp;
202 int rv;
203
204 rv = kstrtoull(s, base, &tmp);
205 if (rv < 0)
206 return rv;
207 if (tmp != (unsigned long long)(u8)tmp)
208 return -ERANGE;
209 *res = tmp;
210 return 0;
211}
212EXPORT_SYMBOL(kstrtou8);
213
214int kstrtos8(const char *s, unsigned int base, s8 *res)
215{
216 long long tmp;
217 int rv;
218
219 rv = kstrtoll(s, base, &tmp);
220 if (rv < 0)
221 return rv;
222 if (tmp != (long long)(s8)tmp)
223 return -ERANGE;
224 *res = tmp;
225 return 0;
226}
227EXPORT_SYMBOL(kstrtos8);
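
The file-top comment spells out the parsing rules: optional sign, base autodetection when base is 0, and a single optional trailing newline. As a hedged usage sketch only (not part of this commit), those rules make the new helpers convenient in sysfs store callbacks; the attribute and the 'threshold' variable below are hypothetical.

/* Hedged sketch: hypothetical sysfs store handler using kstrtouint(). */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned int threshold;	/* hypothetical tunable */

static ssize_t threshold_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned int val;
	int rv;

	/* base 0: accepts "16", "0x10" or "020"; a trailing '\n' is fine */
	rv = kstrtouint(buf, 0, &val);
	if (rv < 0)
		return rv;

	threshold = val;
	return count;
}

static struct kobj_attribute threshold_attr =
	__ATTR(threshold, 0200, NULL, threshold_store);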
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710d16ca..b8029a5583ff 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);
 
+void __list_del_entry(struct list_head *entry)
+{
+	struct list_head *prev, *next;
+
+	prev = entry->prev;
+	next = entry->next;
+
+	if (WARN(next == LIST_POISON1,
+		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+		entry, LIST_POISON1) ||
+	    WARN(prev == LIST_POISON2,
+		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+		entry, LIST_POISON2) ||
+	    WARN(prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, prev->next) ||
+	    WARN(next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, next->prev))
+		return;
+
+	__list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
-	WARN(entry->next == LIST_POISON1,
-		"list_del corruption, next is LIST_POISON1 (%p)\n",
-		LIST_POISON1);
-	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-		"list_del corruption, prev is LIST_POISON2 (%p)\n",
-		LIST_POISON2);
-	WARN(entry->prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, entry->prev->next);
-	WARN(entry->next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, entry->next->prev);
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc34411..ac09f2226dc7 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
 {
 	int i, len = 0;
 
-	for (i = 0; i < n; i++) {
+	for (i = 0; i < n; i++, p++) {
 		if (p->len)
 			len += nla_total_size(p->len);
 		else if (nla_attr_minlen[p->type])
diff --git a/lib/plist.c b/lib/plist.c
index 1471988d9190..0ae7e6431726 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -28,6 +28,8 @@
 
 #ifdef CONFIG_DEBUG_PI_LIST
 
+static struct plist_head test_head;
+
 static void plist_check_prev_next(struct list_head *t, struct list_head *p,
 				  struct list_head *n)
 {
@@ -54,12 +56,13 @@ static void plist_check_list(struct list_head *top)
 
 static void plist_check_head(struct plist_head *head)
 {
-	WARN_ON(!head->rawlock && !head->spinlock);
+	WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
 	if (head->rawlock)
 		WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
 	if (head->spinlock)
 		WARN_ON_SMP(!spin_is_locked(head->spinlock));
-	plist_check_list(&head->prio_list);
+	if (!plist_head_empty(head))
+		plist_check_list(&plist_first(head)->prio_list);
 	plist_check_list(&head->node_list);
 }
 
@@ -75,25 +78,33 @@ static void plist_check_head(struct plist_head *head)
  */
 void plist_add(struct plist_node *node, struct plist_head *head)
 {
-	struct plist_node *iter;
+	struct plist_node *first, *iter, *prev = NULL;
+	struct list_head *node_next = &head->node_list;
 
 	plist_check_head(head);
 	WARN_ON(!plist_node_empty(node));
+	WARN_ON(!list_empty(&node->prio_list));
+
+	if (plist_head_empty(head))
+		goto ins_node;
 
-	list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
-		if (node->prio < iter->prio)
-			goto lt_prio;
-		else if (node->prio == iter->prio) {
-			iter = list_entry(iter->plist.prio_list.next,
-					struct plist_node, plist.prio_list);
-			goto eq_prio;
+	first = iter = plist_first(head);
+
+	do {
+		if (node->prio < iter->prio) {
+			node_next = &iter->node_list;
+			break;
 		}
-	}
 
-lt_prio:
-	list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
-eq_prio:
-	list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+		prev = iter;
+		iter = list_entry(iter->prio_list.next,
+				struct plist_node, prio_list);
+	} while (iter != first);
+
+	if (!prev || prev->prio != node->prio)
+		list_add_tail(&node->prio_list, &iter->prio_list);
+ins_node:
+	list_add_tail(&node->node_list, node_next);
 
 	plist_check_head(head);
 }
@@ -108,14 +119,98 @@ void plist_del(struct plist_node *node, struct plist_head *head)
 {
 	plist_check_head(head);
 
-	if (!list_empty(&node->plist.prio_list)) {
-		struct plist_node *next = plist_first(&node->plist);
+	if (!list_empty(&node->prio_list)) {
+		if (node->node_list.next != &head->node_list) {
+			struct plist_node *next;
+
+			next = list_entry(node->node_list.next,
+					struct plist_node, node_list);
 
-		list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
-		list_del_init(&node->plist.prio_list);
+			/* add the next plist_node into prio_list */
+			if (list_empty(&next->prio_list))
+				list_add(&next->prio_list, &node->prio_list);
+		}
+		list_del_init(&node->prio_list);
 	}
 
-	list_del_init(&node->plist.node_list);
+	list_del_init(&node->node_list);
 
 	plist_check_head(head);
 }
+
+#ifdef CONFIG_DEBUG_PI_LIST
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static struct plist_node __initdata test_node[241];
+
+static void __init plist_test_check(int nr_expect)
+{
+	struct plist_node *first, *prio_pos, *node_pos;
+
+	if (plist_head_empty(&test_head)) {
+		BUG_ON(nr_expect != 0);
+		return;
+	}
+
+	prio_pos = first = plist_first(&test_head);
+	plist_for_each(node_pos, &test_head) {
+		if (nr_expect-- < 0)
+			break;
+		if (node_pos == first)
+			continue;
+		if (node_pos->prio == prio_pos->prio) {
+			BUG_ON(!list_empty(&node_pos->prio_list));
+			continue;
+		}
+
+		BUG_ON(prio_pos->prio > node_pos->prio);
+		BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
+		prio_pos = node_pos;
+	}
+
+	BUG_ON(nr_expect != 0);
+	BUG_ON(prio_pos->prio_list.next != &first->prio_list);
+}
+
+static int __init plist_test(void)
+{
+	int nr_expect = 0, i, loop;
+	unsigned int r = local_clock();
+
+	printk(KERN_INFO "start plist test\n");
+	plist_head_init(&test_head, NULL);
+	for (i = 0; i < ARRAY_SIZE(test_node); i++)
+		plist_node_init(test_node + i, 0);
+
+	for (loop = 0; loop < 1000; loop++) {
+		r = r * 193939 % 47629;
+		i = r % ARRAY_SIZE(test_node);
+		if (plist_node_empty(test_node + i)) {
+			r = r * 193939 % 47629;
+			test_node[i].prio = r % 99;
+			plist_add(test_node + i, &test_head);
+			nr_expect++;
+		} else {
+			plist_del(test_node + i, &test_head);
+			nr_expect--;
+		}
+		plist_test_check(nr_expect);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
+		if (plist_node_empty(test_node + i))
+			continue;
+		plist_del(test_node + i, &test_head);
+		nr_expect--;
+		plist_test_check(nr_expect);
+	}
+
+	printk(KERN_INFO "end plist test\n");
+	return 0;
+}
+
+module_init(plist_test);
+
+#endif
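
As the self test above exercises, the reworked plist keeps node_list fully sorted while threading prio_list through the first node of each distinct priority. A hedged sketch (not part of this commit) of an ordinary caller follows; the 'job' structure, its lock and the queue functions are hypothetical, and plist_head_init() is used with an explicit spinlock so the DEBUG_PI_LIST checks above can verify it is held.

/* Hedged sketch: hypothetical priority-ordered job queue built on plist. */
#include <linux/plist.h>
#include <linux/spinlock.h>

struct job {
	struct plist_node node;
	int id;
};

static DEFINE_SPINLOCK(job_lock);
static struct plist_head job_head;

static void job_queue_setup(void)
{
	plist_head_init(&job_head, &job_lock);
}

static void job_enqueue(struct job *j, int prio)
{
	plist_node_init(&j->node, prio);
	spin_lock(&job_lock);
	plist_add(&j->node, &job_head);		/* keeps node_list sorted by prio */
	spin_unlock(&job_lock);
}

static struct job *job_dequeue(void)
{
	struct job *j = NULL;

	spin_lock(&job_lock);
	if (!plist_head_empty(&job_head)) {
		j = container_of(plist_first(&job_head), struct job, node);
		plist_del(&j->node, &job_head);
	}
	spin_unlock(&job_lock);
	return j;
}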
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5086bb962b4d..7ea2e033d715 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -736,10 +736,11 @@ next:
 		}
 	}
 	/*
-	 * The iftag must have been set somewhere because otherwise
-	 * we would return immediated at the beginning of the function
+	 * We need not to tag the root tag if there is no tag which is set with
+	 * settag within the range from *first_indexp to last_index.
 	 */
-	root_tag_set(root, settag);
+	if (tagged > 0)
+		root_tag_set(root, settag);
 	*first_indexp = index;
 
 	return tagged;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4693f79195d3..a16be19a1305 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -315,6 +315,7 @@ void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
 
 	rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_insert);
 
 /*
  * before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@ struct rb_node *rb_augment_erase_begin(struct rb_node *node)
 
 	return deepest;
 }
+EXPORT_SYMBOL(rb_augment_erase_begin);
 
 /*
  * after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
 	if (node)
 		rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_erase_end);
 
 /*
  * This function returns the first node (in sort order) of the tree.
diff --git a/lib/rwsem.c b/lib/rwsem.c
index f236d7cd5cf3..aa7c3052261f 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -222,8 +222,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 /*
  * wait for the read lock to be granted
  */
-asmregparm struct rw_semaphore __sched *
-rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
 					-RWSEM_ACTIVE_READ_BIAS);
@@ -232,8 +231,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 /*
  * wait for the write lock to be granted
  */
-asmregparm struct rw_semaphore __sched *
-rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
 					-RWSEM_ACTIVE_WRITE_BIAS);
@@ -243,7 +241,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
-asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
@@ -263,7 +261,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
  * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
-asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
diff --git a/lib/show_mem.c b/lib/show_mem.c
index fdc77c82f922..90cbe4bb5960 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,14 +9,14 @@
 #include <linux/nmi.h>
 #include <linux/quicklist.h>
 
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	pg_data_t *pgdat;
 	unsigned long total = 0, reserved = 0, shared = 0,
 		nonshared = 0, highmem = 0;
 
 	printk("Mem-Info:\n");
-	show_free_areas();
+	__show_free_areas(filter);
 
 	for_each_online_pgdat(pgdat) {
 		unsigned long i, flags;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b804..93ca08b8a451 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size))
-		panic("map_single: bounce buffer is not DMA'ble");
+	if (!dma_capable(dev, dev_addr, size)) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+	}
 
 	return dev_addr;
 }
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
new file mode 100644
index 000000000000..325c2f9ecebd
--- /dev/null
+++ b/lib/test-kstrtox.c
@@ -0,0 +1,739 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/module.h>
4
5#define for_each_test(i, test) \
6 for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
7
8struct test_fail {
9 const char *str;
10 unsigned int base;
11};
12
13#define DEFINE_TEST_FAIL(test) \
14 const struct test_fail test[] __initdata
15
16#define DECLARE_TEST_OK(type, test_type) \
17 test_type { \
18 const char *str; \
19 unsigned int base; \
20 type expected_res; \
21 }
22
23#define DEFINE_TEST_OK(type, test) \
24 const type test[] __initdata
25
26#define TEST_FAIL(fn, type, fmt, test) \
27{ \
28 unsigned int i; \
29 \
30 for_each_test(i, test) { \
31 const struct test_fail *t = &test[i]; \
32 type tmp; \
33 int rv; \
34 \
35 tmp = 0; \
36 rv = fn(t->str, t->base, &tmp); \
37 if (rv >= 0) { \
38 WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \
39 t->str, t->base, rv, tmp); \
40 continue; \
41 } \
42 } \
43}
44
45#define TEST_OK(fn, type, fmt, test) \
46{ \
47 unsigned int i; \
48 \
49 for_each_test(i, test) { \
50 const typeof(test[0]) *t = &test[i]; \
51 type res; \
52 int rv; \
53 \
54 rv = fn(t->str, t->base, &res); \
55 if (rv != 0) { \
56 WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \
57 t->str, t->base, t->expected_res, rv); \
58 continue; \
59 } \
60 if (res != t->expected_res) { \
61 WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \
62 t->str, t->base, t->expected_res, res); \
63 continue; \
64 } \
65 } \
66}
67
68static void __init test_kstrtoull_ok(void)
69{
70 DECLARE_TEST_OK(unsigned long long, struct test_ull);
71 static DEFINE_TEST_OK(struct test_ull, test_ull_ok) = {
72 {"0", 10, 0ULL},
73 {"1", 10, 1ULL},
74 {"127", 10, 127ULL},
75 {"128", 10, 128ULL},
76 {"129", 10, 129ULL},
77 {"255", 10, 255ULL},
78 {"256", 10, 256ULL},
79 {"257", 10, 257ULL},
80 {"32767", 10, 32767ULL},
81 {"32768", 10, 32768ULL},
82 {"32769", 10, 32769ULL},
83 {"65535", 10, 65535ULL},
84 {"65536", 10, 65536ULL},
85 {"65537", 10, 65537ULL},
86 {"2147483647", 10, 2147483647ULL},
87 {"2147483648", 10, 2147483648ULL},
88 {"2147483649", 10, 2147483649ULL},
89 {"4294967295", 10, 4294967295ULL},
90 {"4294967296", 10, 4294967296ULL},
91 {"4294967297", 10, 4294967297ULL},
92 {"9223372036854775807", 10, 9223372036854775807ULL},
93 {"9223372036854775808", 10, 9223372036854775808ULL},
94 {"9223372036854775809", 10, 9223372036854775809ULL},
95 {"18446744073709551614", 10, 18446744073709551614ULL},
96 {"18446744073709551615", 10, 18446744073709551615ULL},
97
98 {"00", 8, 00ULL},
99 {"01", 8, 01ULL},
100 {"0177", 8, 0177ULL},
101 {"0200", 8, 0200ULL},
102 {"0201", 8, 0201ULL},
103 {"0377", 8, 0377ULL},
104 {"0400", 8, 0400ULL},
105 {"0401", 8, 0401ULL},
106 {"077777", 8, 077777ULL},
107 {"0100000", 8, 0100000ULL},
108 {"0100001", 8, 0100001ULL},
109 {"0177777", 8, 0177777ULL},
110 {"0200000", 8, 0200000ULL},
111 {"0200001", 8, 0200001ULL},
112 {"017777777777", 8, 017777777777ULL},
113 {"020000000000", 8, 020000000000ULL},
114 {"020000000001", 8, 020000000001ULL},
115 {"037777777777", 8, 037777777777ULL},
116 {"040000000000", 8, 040000000000ULL},
117 {"040000000001", 8, 040000000001ULL},
118 {"0777777777777777777777", 8, 0777777777777777777777ULL},
119 {"01000000000000000000000", 8, 01000000000000000000000ULL},
120 {"01000000000000000000001", 8, 01000000000000000000001ULL},
121 {"01777777777777777777776", 8, 01777777777777777777776ULL},
122 {"01777777777777777777777", 8, 01777777777777777777777ULL},
123
124 {"0x0", 16, 0x0ULL},
125 {"0x1", 16, 0x1ULL},
126 {"0x7f", 16, 0x7fULL},
127 {"0x80", 16, 0x80ULL},
128 {"0x81", 16, 0x81ULL},
129 {"0xff", 16, 0xffULL},
130 {"0x100", 16, 0x100ULL},
131 {"0x101", 16, 0x101ULL},
132 {"0x7fff", 16, 0x7fffULL},
133 {"0x8000", 16, 0x8000ULL},
134 {"0x8001", 16, 0x8001ULL},
135 {"0xffff", 16, 0xffffULL},
136 {"0x10000", 16, 0x10000ULL},
137 {"0x10001", 16, 0x10001ULL},
138 {"0x7fffffff", 16, 0x7fffffffULL},
139 {"0x80000000", 16, 0x80000000ULL},
140 {"0x80000001", 16, 0x80000001ULL},
141 {"0xffffffff", 16, 0xffffffffULL},
142 {"0x100000000", 16, 0x100000000ULL},
143 {"0x100000001", 16, 0x100000001ULL},
144 {"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL},
145 {"0x8000000000000000", 16, 0x8000000000000000ULL},
146 {"0x8000000000000001", 16, 0x8000000000000001ULL},
147 {"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL},
148 {"0xffffffffffffffff", 16, 0xffffffffffffffffULL},
149
150 {"0\n", 0, 0ULL},
151 };
152 TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok);
153}
154
155static void __init test_kstrtoull_fail(void)
156{
157 static DEFINE_TEST_FAIL(test_ull_fail) = {
158 {"", 0},
159 {"", 8},
160 {"", 10},
161 {"", 16},
162 {"\n", 0},
163 {"\n", 8},
164 {"\n", 10},
165 {"\n", 16},
166 {"\n0", 0},
167 {"\n0", 8},
168 {"\n0", 10},
169 {"\n0", 16},
170 {"+", 0},
171 {"+", 8},
172 {"+", 10},
173 {"+", 16},
174 {"-", 0},
175 {"-", 8},
176 {"-", 10},
177 {"-", 16},
178 {"0x", 0},
179 {"0x", 16},
180 {"0X", 0},
181 {"0X", 16},
182 {"0 ", 0},
183 {"1+", 0},
184 {"1-", 0},
185 {" 2", 0},
186 /* base autodetection */
187 {"0x0z", 0},
188 {"0z", 0},
189 {"a", 0},
190 /* digit >= base */
191 {"2", 2},
192 {"8", 8},
193 {"a", 10},
194 {"A", 10},
195 {"g", 16},
196 {"G", 16},
197 /* overflow */
198 {"10000000000000000000000000000000000000000000000000000000000000000", 2},
199 {"2000000000000000000000", 8},
200 {"18446744073709551616", 10},
201 {"10000000000000000", 16},
202 /* negative */
203 {"-0", 0},
204 {"-0", 8},
205 {"-0", 10},
206 {"-0", 16},
207 {"-1", 0},
208 {"-1", 8},
209 {"-1", 10},
210 {"-1", 16},
211 /* sign is first character if any */
212 {"-+1", 0},
213 {"-+1", 8},
214 {"-+1", 10},
215 {"-+1", 16},
216 /* nothing after \n */
217 {"0\n0", 0},
218 {"0\n0", 8},
219 {"0\n0", 10},
220 {"0\n0", 16},
221 {"0\n+", 0},
222 {"0\n+", 8},
223 {"0\n+", 10},
224 {"0\n+", 16},
225 {"0\n-", 0},
226 {"0\n-", 8},
227 {"0\n-", 10},
228 {"0\n-", 16},
229 {"0\n ", 0},
230 {"0\n ", 8},
231 {"0\n ", 10},
232 {"0\n ", 16},
233 };
234 TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail);
235}
236
237static void __init test_kstrtoll_ok(void)
238{
239 DECLARE_TEST_OK(long long, struct test_ll);
240 static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = {
241 {"0", 10, 0LL},
242 {"1", 10, 1LL},
243 {"127", 10, 127LL},
244 {"128", 10, 128LL},
245 {"129", 10, 129LL},
246 {"255", 10, 255LL},
247 {"256", 10, 256LL},
248 {"257", 10, 257LL},
249 {"32767", 10, 32767LL},
250 {"32768", 10, 32768LL},
251 {"32769", 10, 32769LL},
252 {"65535", 10, 65535LL},
253 {"65536", 10, 65536LL},
254 {"65537", 10, 65537LL},
255 {"2147483647", 10, 2147483647LL},
256 {"2147483648", 10, 2147483648LL},
257 {"2147483649", 10, 2147483649LL},
258 {"4294967295", 10, 4294967295LL},
259 {"4294967296", 10, 4294967296LL},
260 {"4294967297", 10, 4294967297LL},
261 {"9223372036854775807", 10, 9223372036854775807LL},
262
263 {"-1", 10, -1LL},
264 {"-2", 10, -2LL},
265 {"-9223372036854775808", 10, LLONG_MIN},
266 };
267 TEST_OK(kstrtoll, long long, "%lld", test_ll_ok);
268}
269
270static void __init test_kstrtoll_fail(void)
271{
272 static DEFINE_TEST_FAIL(test_ll_fail) = {
273 {"9223372036854775808", 10},
274 {"9223372036854775809", 10},
275 {"18446744073709551614", 10},
276 {"18446744073709551615", 10},
277 {"-9223372036854775809", 10},
278 {"-18446744073709551614", 10},
279 {"-18446744073709551615", 10},
280 /* negative zero isn't an integer in Linux */
281 {"-0", 0},
282 {"-0", 8},
283 {"-0", 10},
284 {"-0", 16},
285 /* sign is first character if any */
286 {"-+1", 0},
287 {"-+1", 8},
288 {"-+1", 10},
289 {"-+1", 16},
290 };
291 TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail);
292}
293
294static void __init test_kstrtou64_ok(void)
295{
296 DECLARE_TEST_OK(u64, struct test_u64);
297 static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = {
298 {"0", 10, 0},
299 {"1", 10, 1},
300 {"126", 10, 126},
301 {"127", 10, 127},
302 {"128", 10, 128},
303 {"129", 10, 129},
304 {"254", 10, 254},
305 {"255", 10, 255},
306 {"256", 10, 256},
307 {"257", 10, 257},
308 {"32766", 10, 32766},
309 {"32767", 10, 32767},
310 {"32768", 10, 32768},
311 {"32769", 10, 32769},
312 {"65534", 10, 65534},
313 {"65535", 10, 65535},
314 {"65536", 10, 65536},
315 {"65537", 10, 65537},
316 {"2147483646", 10, 2147483646},
317 {"2147483647", 10, 2147483647},
318 {"2147483648", 10, 2147483648},
319 {"2147483649", 10, 2147483649},
320 {"4294967294", 10, 4294967294},
321 {"4294967295", 10, 4294967295},
322 {"4294967296", 10, 4294967296},
323 {"4294967297", 10, 4294967297},
324 {"9223372036854775806", 10, 9223372036854775806ULL},
325 {"9223372036854775807", 10, 9223372036854775807ULL},
326 {"9223372036854775808", 10, 9223372036854775808ULL},
327 {"9223372036854775809", 10, 9223372036854775809ULL},
328 {"18446744073709551614", 10, 18446744073709551614ULL},
329 {"18446744073709551615", 10, 18446744073709551615ULL},
330 };
331 TEST_OK(kstrtou64, u64, "%llu", test_u64_ok);
332}
333
334static void __init test_kstrtou64_fail(void)
335{
336 static DEFINE_TEST_FAIL(test_u64_fail) = {
337 {"-2", 10},
338 {"-1", 10},
339 {"18446744073709551616", 10},
340 {"18446744073709551617", 10},
341 };
342 TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail);
343}
344
345static void __init test_kstrtos64_ok(void)
346{
347 DECLARE_TEST_OK(s64, struct test_s64);
348 static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = {
349 {"-128", 10, -128},
350 {"-127", 10, -127},
351 {"-1", 10, -1},
352 {"0", 10, 0},
353 {"1", 10, 1},
354 {"126", 10, 126},
355 {"127", 10, 127},
356 {"128", 10, 128},
357 {"129", 10, 129},
358 {"254", 10, 254},
359 {"255", 10, 255},
360 {"256", 10, 256},
361 {"257", 10, 257},
362 {"32766", 10, 32766},
363 {"32767", 10, 32767},
364 {"32768", 10, 32768},
365 {"32769", 10, 32769},
366 {"65534", 10, 65534},
367 {"65535", 10, 65535},
368 {"65536", 10, 65536},
369 {"65537", 10, 65537},
370 {"2147483646", 10, 2147483646},
371 {"2147483647", 10, 2147483647},
372 {"2147483648", 10, 2147483648},
373 {"2147483649", 10, 2147483649},
374 {"4294967294", 10, 4294967294},
375 {"4294967295", 10, 4294967295},
376 {"4294967296", 10, 4294967296},
377 {"4294967297", 10, 4294967297},
378 {"9223372036854775806", 10, 9223372036854775806LL},
379 {"9223372036854775807", 10, 9223372036854775807LL},
380 };
381 TEST_OK(kstrtos64, s64, "%lld", test_s64_ok);
382}
383
384static void __init test_kstrtos64_fail(void)
385{
386 static DEFINE_TEST_FAIL(test_s64_fail) = {
387 {"9223372036854775808", 10},
388 {"9223372036854775809", 10},
389 {"18446744073709551614", 10},
390 {"18446744073709551615", 10},
391 {"18446744073709551616", 10},
392 {"18446744073709551617", 10},
393 };
394 TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail);
395}
396
397static void __init test_kstrtou32_ok(void)
398{
399 DECLARE_TEST_OK(u32, struct test_u32);
400 static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = {
401 {"0", 10, 0},
402 {"1", 10, 1},
403 {"126", 10, 126},
404 {"127", 10, 127},
405 {"128", 10, 128},
406 {"129", 10, 129},
407 {"254", 10, 254},
408 {"255", 10, 255},
409 {"256", 10, 256},
410 {"257", 10, 257},
411 {"32766", 10, 32766},
412 {"32767", 10, 32767},
413 {"32768", 10, 32768},
414 {"32769", 10, 32769},
415 {"65534", 10, 65534},
416 {"65535", 10, 65535},
417 {"65536", 10, 65536},
418 {"65537", 10, 65537},
419 {"2147483646", 10, 2147483646},
420 {"2147483647", 10, 2147483647},
421 {"2147483648", 10, 2147483648},
422 {"2147483649", 10, 2147483649},
423 {"4294967294", 10, 4294967294},
424 {"4294967295", 10, 4294967295},
425 };
426 TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
427}
428
429static void __init test_kstrtou32_fail(void)
430{
431 static DEFINE_TEST_FAIL(test_u32_fail) = {
432 {"-2", 10},
433 {"-1", 10},
434 {"4294967296", 10},
435 {"4294967297", 10},
436 {"9223372036854775806", 10},
437 {"9223372036854775807", 10},
438 {"9223372036854775808", 10},
439 {"9223372036854775809", 10},
440 {"18446744073709551614", 10},
441 {"18446744073709551615", 10},
442 {"18446744073709551616", 10},
443 {"18446744073709551617", 10},
444 };
445 TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail);
446}
447
448static void __init test_kstrtos32_ok(void)
449{
450 DECLARE_TEST_OK(s32, struct test_s32);
451 static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = {
452 {"-128", 10, -128},
453 {"-127", 10, -127},
454 {"-1", 10, -1},
455 {"0", 10, 0},
456 {"1", 10, 1},
457 {"126", 10, 126},
458 {"127", 10, 127},
459 {"128", 10, 128},
460 {"129", 10, 129},
461 {"254", 10, 254},
462 {"255", 10, 255},
463 {"256", 10, 256},
464 {"257", 10, 257},
465 {"32766", 10, 32766},
466 {"32767", 10, 32767},
467 {"32768", 10, 32768},
468 {"32769", 10, 32769},
469 {"65534", 10, 65534},
470 {"65535", 10, 65535},
471 {"65536", 10, 65536},
472 {"65537", 10, 65537},
473 {"2147483646", 10, 2147483646},
474 {"2147483647", 10, 2147483647},
475 };
476 TEST_OK(kstrtos32, s32, "%d", test_s32_ok);
477}
478
479static void __init test_kstrtos32_fail(void)
480{
481 static DEFINE_TEST_FAIL(test_s32_fail) = {
482 {"2147483648", 10},
483 {"2147483649", 10},
484 {"4294967294", 10},
485 {"4294967295", 10},
486 {"4294967296", 10},
487 {"4294967297", 10},
488 {"9223372036854775806", 10},
489 {"9223372036854775807", 10},
490 {"9223372036854775808", 10},
491 {"9223372036854775809", 10},
492 {"18446744073709551614", 10},
493 {"18446744073709551615", 10},
494 {"18446744073709551616", 10},
495 {"18446744073709551617", 10},
496 };
497 TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail);
498}
499
500static void __init test_kstrtou16_ok(void)
501{
502 DECLARE_TEST_OK(u16, struct test_u16);
503 static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = {
504 {"0", 10, 0},
505 {"1", 10, 1},
506 {"126", 10, 126},
507 {"127", 10, 127},
508 {"128", 10, 128},
509 {"129", 10, 129},
510 {"254", 10, 254},
511 {"255", 10, 255},
512 {"256", 10, 256},
513 {"257", 10, 257},
514 {"32766", 10, 32766},
515 {"32767", 10, 32767},
516 {"32768", 10, 32768},
517 {"32769", 10, 32769},
518 {"65534", 10, 65534},
519 {"65535", 10, 65535},
520 };
521 TEST_OK(kstrtou16, u16, "%hu", test_u16_ok);
522}
523
524static void __init test_kstrtou16_fail(void)
525{
526 static DEFINE_TEST_FAIL(test_u16_fail) = {
527 {"-2", 10},
528 {"-1", 10},
529 {"65536", 10},
530 {"65537", 10},
531 {"2147483646", 10},
532 {"2147483647", 10},
533 {"2147483648", 10},
534 {"2147483649", 10},
535 {"4294967294", 10},
536 {"4294967295", 10},
537 {"4294967296", 10},
538 {"4294967297", 10},
539 {"9223372036854775806", 10},
540 {"9223372036854775807", 10},
541 {"9223372036854775808", 10},
542 {"9223372036854775809", 10},
543 {"18446744073709551614", 10},
544 {"18446744073709551615", 10},
545 {"18446744073709551616", 10},
546 {"18446744073709551617", 10},
547 };
548 TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail);
549}
550
551static void __init test_kstrtos16_ok(void)
552{
553 DECLARE_TEST_OK(s16, struct test_s16);
554 static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = {
555 {"-130", 10, -130},
556 {"-129", 10, -129},
557 {"-128", 10, -128},
558 {"-127", 10, -127},
559 {"-1", 10, -1},
560 {"0", 10, 0},
561 {"1", 10, 1},
562 {"126", 10, 126},
563 {"127", 10, 127},
564 {"128", 10, 128},
565 {"129", 10, 129},
566 {"254", 10, 254},
567 {"255", 10, 255},
568 {"256", 10, 256},
569 {"257", 10, 257},
570 {"32766", 10, 32766},
571 {"32767", 10, 32767},
572 };
573 TEST_OK(kstrtos16, s16, "%hd", test_s16_ok);
574}
575
576static void __init test_kstrtos16_fail(void)
577{
578 static DEFINE_TEST_FAIL(test_s16_fail) = {
579 {"32768", 10},
580 {"32769", 10},
581 {"65534", 10},
582 {"65535", 10},
583 {"65536", 10},
584 {"65537", 10},
585 {"2147483646", 10},
586 {"2147483647", 10},
587 {"2147483648", 10},
588 {"2147483649", 10},
589 {"4294967294", 10},
590 {"4294967295", 10},
591 {"4294967296", 10},
592 {"4294967297", 10},
593 {"9223372036854775806", 10},
594 {"9223372036854775807", 10},
595 {"9223372036854775808", 10},
596 {"9223372036854775809", 10},
597 {"18446744073709551614", 10},
598 {"18446744073709551615", 10},
599 {"18446744073709551616", 10},
600 {"18446744073709551617", 10},
601 };
602 TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail);
603}
604
605static void __init test_kstrtou8_ok(void)
606{
607 DECLARE_TEST_OK(u8, struct test_u8);
608 static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = {
609 {"0", 10, 0},
610 {"1", 10, 1},
611 {"126", 10, 126},
612 {"127", 10, 127},
613 {"128", 10, 128},
614 {"129", 10, 129},
615 {"254", 10, 254},
616 {"255", 10, 255},
617 };
618 TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok);
619}
620
621static void __init test_kstrtou8_fail(void)
622{
623 static DEFINE_TEST_FAIL(test_u8_fail) = {
624 {"-2", 10},
625 {"-1", 10},
626 {"256", 10},
627 {"257", 10},
628 {"32766", 10},
629 {"32767", 10},
630 {"32768", 10},
631 {"32769", 10},
632 {"65534", 10},
633 {"65535", 10},
634 {"65536", 10},
635 {"65537", 10},
636 {"2147483646", 10},
637 {"2147483647", 10},
638 {"2147483648", 10},
639 {"2147483649", 10},
640 {"4294967294", 10},
641 {"4294967295", 10},
642 {"4294967296", 10},
643 {"4294967297", 10},
644 {"9223372036854775806", 10},
645 {"9223372036854775807", 10},
646 {"9223372036854775808", 10},
647 {"9223372036854775809", 10},
648 {"18446744073709551614", 10},
649 {"18446744073709551615", 10},
650 {"18446744073709551616", 10},
651 {"18446744073709551617", 10},
652 };
653 TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail);
654}
655
656static void __init test_kstrtos8_ok(void)
657{
658 DECLARE_TEST_OK(s8, struct test_s8);
659 static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = {
660 {"-128", 10, -128},
661 {"-127", 10, -127},
662 {"-1", 10, -1},
663 {"0", 10, 0},
664 {"1", 10, 1},
665 {"126", 10, 126},
666 {"127", 10, 127},
667 };
668 TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok);
669}
670
671static void __init test_kstrtos8_fail(void)
672{
673 static DEFINE_TEST_FAIL(test_s8_fail) = {
674 {"-130", 10},
675 {"-129", 10},
676 {"128", 10},
677 {"129", 10},
678 {"254", 10},
679 {"255", 10},
680 {"256", 10},
681 {"257", 10},
682 {"32766", 10},
683 {"32767", 10},
684 {"32768", 10},
685 {"32769", 10},
686 {"65534", 10},
687 {"65535", 10},
688 {"65536", 10},
689 {"65537", 10},
690 {"2147483646", 10},
691 {"2147483647", 10},
692 {"2147483648", 10},
693 {"2147483649", 10},
694 {"4294967294", 10},
695 {"4294967295", 10},
696 {"4294967296", 10},
697 {"4294967297", 10},
698 {"9223372036854775806", 10},
699 {"9223372036854775807", 10},
700 {"9223372036854775808", 10},
701 {"9223372036854775809", 10},
702 {"18446744073709551614", 10},
703 {"18446744073709551615", 10},
704 {"18446744073709551616", 10},
705 {"18446744073709551617", 10},
706 };
707 TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail);
708}
709
710static int __init test_kstrtox_init(void)
711{
712 test_kstrtoull_ok();
713 test_kstrtoull_fail();
714 test_kstrtoll_ok();
715 test_kstrtoll_fail();
716
717 test_kstrtou64_ok();
718 test_kstrtou64_fail();
719 test_kstrtos64_ok();
720 test_kstrtos64_fail();
721
722 test_kstrtou32_ok();
723 test_kstrtou32_fail();
724 test_kstrtos32_ok();
725 test_kstrtos32_fail();
726
727 test_kstrtou16_ok();
728 test_kstrtou16_fail();
729 test_kstrtos16_ok();
730 test_kstrtos16_fail();
731
732 test_kstrtou8_ok();
733 test_kstrtou8_fail();
734 test_kstrtos8_ok();
735 test_kstrtos8_fail();
736 return -EINVAL;
737}
738module_init(test_kstrtox_init);
739MODULE_LICENSE("Dual BSD/GPL");
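
Taken together, the tables above pin down the contract of the new kstrto*() helpers: digits in the requested (or auto-detected) base, an optional sign only as the very first character, range checking against the destination type, and at most one trailing newline with nothing after it. A minimal sketch of the usual consumer follows; the store handler and the copybreak variable are invented for illustration, only kstrtoul() itself is the real interface, and "echo 1024 > ..." is exactly the case the trailing-newline rule exists for.

#include <linux/kernel.h>
#include <linux/device.h>

static unsigned long copybreak;	/* hypothetical tunable */

/* Sketch only: "1024\n" from `echo 1024 > .../copybreak` parses cleanly,
 * while "1024 bytes", "0x" or an out-of-range value is rejected. */
static ssize_t copybreak_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* base 0: "0x..." and "0..." accepted too */
	if (ret)
		return ret;		/* negative errno on bad input or overflow */

	copybreak = val;
	return count;
}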
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331b3e47..e0cc0146ae62 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
13 * 13 *
14 * INTRODUCTION 14 * INTRODUCTION
15 * 15 *
16 * The textsearch infrastructure provides text searching facitilies for 16 * The textsearch infrastructure provides text searching facilities for
17 * both linear and non-linear data. Individual search algorithms are 17 * both linear and non-linear data. Individual search algorithms are
18 * implemented in modules and chosen by the user. 18 * implemented in modules and chosen by the user.
19 * 19 *
@@ -43,7 +43,7 @@
43 * to the algorithm to store persistent variables. 43 * to the algorithm to store persistent variables.
44 * (4) Core eventually resets the search offset and forwards the find() 44 * (4) Core eventually resets the search offset and forwards the find()
45 * request to the algorithm. 45 * request to the algorithm.
46 * (5) Algorithm calls get_next_block() provided by the user continously 46 * (5) Algorithm calls get_next_block() provided by the user continuously
47 * to fetch the data to be searched in block by block. 47 * to fetch the data to be searched in block by block.
48 * (6) Algorithm invokes finish() after the last call to get_next_block 48 * (6) Algorithm invokes finish() after the last call to get_next_block
49 * to clean up any leftovers from get_next_block. (Optional) 49 * to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
58 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE 58 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
59 * to perform case insensitive matching. But it might slow down 59 * to perform case insensitive matching. But it might slow down
60 * performance of the algorithm, so you should use it at your own risk. 60 * performance of the algorithm, so you should use it at your own risk.
61 * The returned configuration may then be used for an arbitary 61 * The returned configuration may then be used for an arbitrary
62 * amount of times and even in parallel as long as a separate struct 62 * amount of times and even in parallel as long as a separate struct
63 * ts_state variable is provided to every instance. 63 * ts_state variable is provided to every instance.
64 * 64 *
65 * The actual search is performed by either calling textsearch_find_- 65 * The actual search is performed by either calling textsearch_find_-
66 * continuous() for linear data or by providing your own get_next_block() 66 * continuous() for linear data or by providing your own get_next_block()
67 * implementation and calling textsearch_find(). Both functions return 67 * implementation and calling textsearch_find(). Both functions return
68 * the position of the first occurrence of the patern or UINT_MAX if 68 * the position of the first occurrence of the pattern or UINT_MAX if
69 * no match was found. Subsequent occurences can be found by calling 69 * no match was found. Subsequent occurrences can be found by calling
70 * textsearch_next() regardless of the linearity of the data. 70 * textsearch_next() regardless of the linearity of the data.
71 * 71 *
72 * Once you're done using a configuration it must be given back via 72 * Once you're done using a configuration it must be given back via
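
As a companion to the numbered steps above, here is a compact sketch of the linear-data flow, assuming the "kmp" algorithm module is available; the haystack, pattern and function name are invented, while the textsearch_*() calls are the documented interface:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/textsearch.h>

static int demo_textsearch(void)
{
	static const char haystack[] = "linear data, more data, end of data";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "data", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* First hit or UINT_MAX; textsearch_next() walks any further hits. */
	for (pos = textsearch_find_continuous(conf, &state, haystack,
					      sizeof(haystack) - 1);
	     pos != UINT_MAX;
	     pos = textsearch_next(conf, &state))
		pr_info("match at offset %u\n", pos);

	textsearch_destroy(conf);	/* give the configuration back */
	return 0;
}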
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d3023df8477f..02bcdd5feac4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -120,147 +120,6 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
120} 120}
121EXPORT_SYMBOL(simple_strtoll); 121EXPORT_SYMBOL(simple_strtoll);
122 122
123/**
124 * strict_strtoul - convert a string to an unsigned long strictly
125 * @cp: The string to be converted
126 * @base: The number base to use
127 * @res: The converted result value
128 *
129 * strict_strtoul converts a string to an unsigned long only if the
130 * string is really an unsigned long string, any string containing
131 * any invalid char at the tail will be rejected and -EINVAL is returned,
132 * only a newline char at the tail is acceptible because people generally
133 * change a module parameter in the following way:
134 *
135 * echo 1024 > /sys/module/e1000/parameters/copybreak
136 *
137 * echo will append a newline to the tail.
138 *
139 * It returns 0 if conversion is successful and *res is set to the converted
140 * value, otherwise it returns -EINVAL and *res is set to 0.
141 *
142 * simple_strtoul just ignores the successive invalid characters and
143 * return the converted value of prefix part of the string.
144 */
145int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
146{
147 char *tail;
148 unsigned long val;
149
150 *res = 0;
151 if (!*cp)
152 return -EINVAL;
153
154 val = simple_strtoul(cp, &tail, base);
155 if (tail == cp)
156 return -EINVAL;
157
158 if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
159 *res = val;
160 return 0;
161 }
162
163 return -EINVAL;
164}
165EXPORT_SYMBOL(strict_strtoul);
166
167/**
168 * strict_strtol - convert a string to a long strictly
169 * @cp: The string to be converted
170 * @base: The number base to use
171 * @res: The converted result value
172 *
173 * strict_strtol is similiar to strict_strtoul, but it allows the first
174 * character of a string is '-'.
175 *
176 * It returns 0 if conversion is successful and *res is set to the converted
177 * value, otherwise it returns -EINVAL and *res is set to 0.
178 */
179int strict_strtol(const char *cp, unsigned int base, long *res)
180{
181 int ret;
182 if (*cp == '-') {
183 ret = strict_strtoul(cp + 1, base, (unsigned long *)res);
184 if (!ret)
185 *res = -(*res);
186 } else {
187 ret = strict_strtoul(cp, base, (unsigned long *)res);
188 }
189
190 return ret;
191}
192EXPORT_SYMBOL(strict_strtol);
193
194/**
195 * strict_strtoull - convert a string to an unsigned long long strictly
196 * @cp: The string to be converted
197 * @base: The number base to use
198 * @res: The converted result value
199 *
200 * strict_strtoull converts a string to an unsigned long long only if the
201 * string is really an unsigned long long string, any string containing
202 * any invalid char at the tail will be rejected and -EINVAL is returned,
203 * only a newline char at the tail is acceptible because people generally
204 * change a module parameter in the following way:
205 *
206 * echo 1024 > /sys/module/e1000/parameters/copybreak
207 *
208 * echo will append a newline to the tail of the string.
209 *
210 * It returns 0 if conversion is successful and *res is set to the converted
211 * value, otherwise it returns -EINVAL and *res is set to 0.
212 *
213 * simple_strtoull just ignores the successive invalid characters and
214 * return the converted value of prefix part of the string.
215 */
216int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
217{
218 char *tail;
219 unsigned long long val;
220
221 *res = 0;
222 if (!*cp)
223 return -EINVAL;
224
225 val = simple_strtoull(cp, &tail, base);
226 if (tail == cp)
227 return -EINVAL;
228 if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
229 *res = val;
230 return 0;
231 }
232
233 return -EINVAL;
234}
235EXPORT_SYMBOL(strict_strtoull);
236
237/**
238 * strict_strtoll - convert a string to a long long strictly
239 * @cp: The string to be converted
240 * @base: The number base to use
241 * @res: The converted result value
242 *
243 * strict_strtoll is similiar to strict_strtoull, but it allows the first
244 * character of a string is '-'.
245 *
246 * It returns 0 if conversion is successful and *res is set to the converted
247 * value, otherwise it returns -EINVAL and *res is set to 0.
248 */
249int strict_strtoll(const char *cp, unsigned int base, long long *res)
250{
251 int ret;
252 if (*cp == '-') {
253 ret = strict_strtoull(cp + 1, base, (unsigned long long *)res);
254 if (!ret)
255 *res = -(*res);
256 } else {
257 ret = strict_strtoull(cp, base, (unsigned long long *)res);
258 }
259
260 return ret;
261}
262EXPORT_SYMBOL(strict_strtoll);
263
264static noinline_for_stack 123static noinline_for_stack
265int skip_atoi(const char **s) 124int skip_atoi(const char **s)
266{ 125{
@@ -991,7 +850,7 @@ static noinline_for_stack
991char *pointer(const char *fmt, char *buf, char *end, void *ptr, 850char *pointer(const char *fmt, char *buf, char *end, void *ptr,
992 struct printf_spec spec) 851 struct printf_spec spec)
993{ 852{
994 if (!ptr) { 853 if (!ptr && *fmt != 'K') {
995 /* 854 /*
996 * Print (null) with the same width as a pointer so it makes 855 * Print (null) with the same width as a pointer so it makes
997 * tabular output look nice. 856 * tabular output look nice.
@@ -1047,16 +906,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1047 if (spec.field_width == -1) 906 if (spec.field_width == -1)
1048 spec.field_width = 2 * sizeof(void *); 907 spec.field_width = 2 * sizeof(void *);
1049 return string(buf, end, "pK-error", spec); 908 return string(buf, end, "pK-error", spec);
1050 } else if ((kptr_restrict == 0) ||
1051 (kptr_restrict == 1 &&
1052 has_capability_noaudit(current, CAP_SYSLOG)))
1053 break;
1054
1055 if (spec.field_width == -1) {
1056 spec.field_width = 2 * sizeof(void *);
1057 spec.flags |= ZEROPAD;
1058 } 909 }
1059 return number(buf, end, 0, spec); 910 if (!((kptr_restrict == 0) ||
911 (kptr_restrict == 1 &&
912 has_capability_noaudit(current, CAP_SYSLOG))))
913 ptr = NULL;
914 break;
1060 } 915 }
1061 spec.flags |= SMALL; 916 spec.flags |= SMALL;
1062 if (spec.field_width == -1) { 917 if (spec.field_width == -1) {
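
The reworked '%pK' handling above no longer short-circuits a NULL argument to "(null)"; instead, when kptr_restrict forbids disclosure, the pointer is degraded to NULL and falls through to the numeric path, so it prints as zero-padded zeroes rather than leaking an address. A hedged caller-side example (the function and message are invented; '%pK' and kptr_restrict are the real mechanism):

#include <linux/kernel.h>

/* Illustrative only: obj stands in for any kernel address a driver logs. */
static void log_object(const void *obj)
{
	/*
	 * kptr_restrict == 0: everyone sees the real address.
	 * kptr_restrict == 1: only tasks with CAP_SYSLOG see it; everyone
	 * else gets e.g. "object at 0000000000000000".
	 */
	pr_info("object at %pK\n", obj);
}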
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 46a31e5f49c3..d63381e8e333 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -176,6 +176,7 @@ int zlib_deflateInit2(
176 deflate_state *s; 176 deflate_state *s;
177 int noheader = 0; 177 int noheader = 0;
178 deflate_workspace *mem; 178 deflate_workspace *mem;
179 char *next;
179 180
180 ush *overlay; 181 ush *overlay;
181 /* We overlay pending_buf and d_buf+l_buf. This works since the average 182 /* We overlay pending_buf and d_buf+l_buf. This works since the average
@@ -199,6 +200,21 @@ int zlib_deflateInit2(
199 strategy < 0 || strategy > Z_HUFFMAN_ONLY) { 200 strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
200 return Z_STREAM_ERROR; 201 return Z_STREAM_ERROR;
201 } 202 }
203
204 /*
205 * Direct the workspace's pointers to the chunks that were allocated
206 * along with the deflate_workspace struct.
207 */
208 next = (char *) mem;
209 next += sizeof(*mem);
210 mem->window_memory = (Byte *) next;
211 next += zlib_deflate_window_memsize(windowBits);
212 mem->prev_memory = (Pos *) next;
213 next += zlib_deflate_prev_memsize(windowBits);
214 mem->head_memory = (Pos *) next;
215 next += zlib_deflate_head_memsize(memLevel);
216 mem->overlay_memory = next;
217
202 s = (deflate_state *) &(mem->deflate_memory); 218 s = (deflate_state *) &(mem->deflate_memory);
203 strm->state = (struct internal_state *)s; 219 strm->state = (struct internal_state *)s;
204 s->strm = strm; 220 s->strm = strm;
@@ -1247,7 +1263,18 @@ static block_state deflate_slow(
1247 return flush == Z_FINISH ? finish_done : block_done; 1263 return flush == Z_FINISH ? finish_done : block_done;
1248} 1264}
1249 1265
1250int zlib_deflate_workspacesize(void) 1266int zlib_deflate_workspacesize(int windowBits, int memLevel)
1251{ 1267{
1252 return sizeof(deflate_workspace); 1268 if (windowBits < 0) /* undocumented feature: suppress zlib header */
1269 windowBits = -windowBits;
1270
1271 /* Since the return value is typically passed to vmalloc() unchecked... */
1272 BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
1273 windowBits > 15);
1274
1275 return sizeof(deflate_workspace)
1276 + zlib_deflate_window_memsize(windowBits)
1277 + zlib_deflate_prev_memsize(windowBits)
1278 + zlib_deflate_head_memsize(memLevel)
1279 + zlib_deflate_overlay_memsize(memLevel);
1253} 1280}
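
With zlib_deflate_workspacesize() now parameterised, a caller sizes the workspace for the windowBits/memLevel pair it will actually hand to zlib_deflateInit2() instead of always reserving the MAX_WBITS/MAX_MEM_LEVEL worst case. A rough sketch of that calling sequence (the wrapper function is invented; stream setup and the deflate loop are omitted):

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

static int demo_deflate_init(z_stream *strm, int windowBits, int memLevel)
{
	/* The BUG_ON() in zlib_deflate_workspacesize() keeps bogus sizes out of vmalloc(). */
	strm->workspace = vmalloc(zlib_deflate_workspacesize(windowBits, memLevel));
	if (!strm->workspace)
		return -ENOMEM;

	if (zlib_deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      windowBits, memLevel, Z_DEFAULT_STRATEGY) != Z_OK) {
		vfree(strm->workspace);
		return -EINVAL;
	}
	return 0;
}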
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index 6b15a909ca3f..b640b6402e99 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -241,12 +241,21 @@ typedef struct deflate_state {
241typedef struct deflate_workspace { 241typedef struct deflate_workspace {
242 /* State memory for the deflator */ 242 /* State memory for the deflator */
243 deflate_state deflate_memory; 243 deflate_state deflate_memory;
244 Byte window_memory[2 * (1 << MAX_WBITS)]; 244 Byte *window_memory;
245 Pos prev_memory[1 << MAX_WBITS]; 245 Pos *prev_memory;
246 Pos head_memory[1 << (MAX_MEM_LEVEL + 7)]; 246 Pos *head_memory;
247 char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)]; 247 char *overlay_memory;
248} deflate_workspace; 248} deflate_workspace;
249 249
250#define zlib_deflate_window_memsize(windowBits) \
251 (2 * (1 << (windowBits)) * sizeof(Byte))
252#define zlib_deflate_prev_memsize(windowBits) \
253 ((1 << (windowBits)) * sizeof(Pos))
254#define zlib_deflate_head_memsize(memLevel) \
255 ((1 << ((memLevel)+7)) * sizeof(Pos))
256#define zlib_deflate_overlay_memsize(memLevel) \
257 ((1 << ((memLevel)+6)) * (sizeof(ush)+2))
258
250/* Output a byte on the stream. 259/* Output a byte on the stream.
251 * IN assertion: there is enough room in pending_buf. 260 * IN assertion: there is enough room in pending_buf.
252 */ 261 */
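
For a sense of scale, with the common defaults of windowBits = 15 and memLevel = 8, and assuming the usual 1-byte Byte and 2-byte Pos/ush types, the four chunks come to 64 KiB each, roughly 256 KiB plus the deflate_state header, and shrink accordingly for smaller windows:

/*
 * Worked example (hypothetical parameter values; sizes assume
 * sizeof(Byte) == 1 and sizeof(Pos) == sizeof(ush) == 2):
 *
 *   zlib_deflate_window_memsize(15)  = 2 * (1 << 15) * 1      = 65536
 *   zlib_deflate_prev_memsize(15)    =     (1 << 15) * 2      = 65536
 *   zlib_deflate_head_memsize(8)     =     (1 << (8 + 7)) * 2 = 65536
 *   zlib_deflate_overlay_memsize(8)  =     (1 << (8 + 6)) * 4 = 65536
 *
 * Total: 262144 bytes + sizeof(deflate_workspace) for the deflate_state.
 */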