Diffstat (limited to 'lib')
 lib/Kconfig              |  17
 lib/Kconfig.debug        |  32
 lib/Makefile             |   3
 lib/bug.c                |  19
 lib/bust_spinlocks.c     |   2
 lib/cpumask.c            |  62
 lib/debugobjects.c       |   4
 lib/dynamic_printk.c     |  58
 lib/fault-inject.c       |   1
 lib/find_last_bit.c      |  45
 lib/is_single_threaded.c |  45
 lib/klist.c              |  43
 lib/kobject_uevent.c     |   8
 lib/libcrc32c.c          | 182
 lib/percpu_counter.c     |  36
 lib/prio_heap.c          |   2
 lib/proportions.c        |   8
 lib/radix-tree.c         |  13
 lib/sort.c               |  30
 lib/swiotlb.c            | 259
 lib/vsprintf.c           |  84
 21 files changed, 628 insertions(+), 325 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 85cf7ea978aa..03c2c24b9083 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -13,6 +13,10 @@ config GENERIC_FIND_FIRST_BIT
 config GENERIC_FIND_NEXT_BIT
 	bool
 
+config GENERIC_FIND_LAST_BIT
+	bool
+	default y
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
@@ -64,6 +68,8 @@ config CRC7
 
 config LIBCRC32C
 	tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
+	select CRYPTO
+	select CRYPTO_CRC32C
 	help
 	  This option is provided for the case where no in-kernel-tree
 	  modules require CRC32c functions, but a module built outside the
@@ -157,4 +163,15 @@ config CHECK_SIGNATURE
 config HAVE_LMB
 	boolean
 
+config CPUMASK_OFFSTACK
+	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+	help
+	  Use dynamic allocation for cpumask_var_t, instead of putting
+	  them on the stack.  This is a bit more expensive, but avoids
+	  stack overflow.
+
+config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
+	bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
+	depends on EXPERIMENTAL && BROKEN
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f239e443bc..2e75478e9c69 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
 	  timer routines to track the life time of timer objects and
 	  validate the timer operations.
 
+config DEBUG_OBJECTS_ENABLE_DEFAULT
+	int "debug_objects bootup default value (0-1)"
+	range 0 1
+	default "1"
+	depends on DEBUG_OBJECTS
+	help
+	  Debug objects boot parameter default value
+
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
@@ -545,6 +553,16 @@ config DEBUG_SG
 
 	  If unsure, say N.
 
+config DEBUG_NOTIFIERS
+	bool "Debug notifier call chains"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on sanity checking for notifier call chains.
+	  This is most useful for kernel developers to make sure that
+	  modules properly unregister themselves from notifier chains.
+	  This is a relatively cheap check but if you care about maximum
+	  performance, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR
 
 	  Say N if you are unsure.
 
+config RCU_CPU_STALL_DETECTOR
+	bool "Check for stalled CPUs delaying RCU grace periods"
+	depends on CLASSIC_RCU || TREE_RCU
+	default n
+	help
+	  This option causes RCU to printk information on which
+	  CPUs are delaying the current grace period, but only when
+	  the grace period extends for excessive time periods.
+
+	  Say Y if you want RCU to perform such checks.
+
+	  Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -699,6 +730,7 @@ config FAULT_INJECTION
 config FAILSLAB
 	bool "Fault-injection capability for kmalloc"
 	depends on FAULT_INJECTION
+	depends on SLAB || SLUB
 	help
 	  Provide fault-injection capability for kmalloc.
 
diff --git a/lib/Makefile b/lib/Makefile
index 7cb65d85aeb0..32b0e64ded27 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o show_mem.o
+	 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -37,6 +37,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
diff --git a/lib/bug.c b/lib/bug.c
index bfeafd60ee9f..300e41afbf97 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
 
   CONFIG_BUG - emit BUG traps.  Nothing happens without this.
   CONFIG_GENERIC_BUG - enable this code.
+  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
+	the containing struct bug_entry for bug_addr and file.
   CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
 
   CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -43,6 +45,15 @@
 
 extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
 
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+	return bug->bug_addr;
+#else
+	return (unsigned long)bug + bug->bug_addr_disp;
+#endif
+}
+
 #ifdef CONFIG_MODULES
 static LIST_HEAD(module_bug_list);
 
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 		unsigned i;
 
 		for (i = 0; i < mod->num_bugs; ++i, ++bug)
-			if (bugaddr == bug->bug_addr)
+			if (bugaddr == bug_addr(bug))
 				return bug;
 	}
 	return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 	const struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-		if (bugaddr == bug->bug_addr)
+		if (bugaddr == bug_addr(bug))
 			return bug;
 
 	return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 
 	if (bug) {
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 		file = bug->file;
+#else
+		file = (const char *)bug + bug->file_disp;
+#endif
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
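
For context, a sketch of the two struct bug_entry layouts this change implies. This is a reconstruction for illustration (the real definition lives in include/asm-generic/bug.h), but the field names match those used by bug_addr() and report_bug() above:

	struct bug_entry {
	#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
		unsigned long	bug_addr;	/* absolute address of the BUG */
	#else
		signed int	bug_addr_disp;	/* 32-bit offset from the entry itself */
	#endif
	#ifdef CONFIG_DEBUG_BUGVERBOSE
	#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
		const char	*file;
	#else
		signed int	file_disp;	/* likewise, offset to the file name */
	#endif
		unsigned short	line;
	#endif
		unsigned short	flags;
	};

Storing 32-bit displacements instead of pointers shrinks the bug table on 64-bit kernels and makes the entries position-independent.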
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 486da62b2b07..9681d54b95d1 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -12,6 +12,7 @@
 #include <linux/tty.h>
 #include <linux/wait.h>
 #include <linux/vt_kern.h>
+#include <linux/console.h>
 
 
 void __attribute__((weak)) bust_spinlocks(int yes)
@@ -22,6 +23,7 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 #ifdef CONFIG_VT
 		unblank_screen();
 #endif
+		console_unblank();
 		if (--oops_in_progress == 0)
 			wake_up_klogd();
 	}
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 8d03f22c6ced..3389e2440da0 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -76,15 +76,28 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+/**
+ * alloc_cpumask_var_node - allocate a struct cpumask on a given node
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>)
+ * Returns TRUE if memory allocation succeeded, FALSE otherwise.
+ *
+ * In addition, mask will be NULL if this fails.  Note that gcc is
+ * usually smart enough to know that mask can never be NULL if
+ * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
+ * too.
+ */
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
 	if (likely(slab_is_available()))
-		*mask = kmalloc(cpumask_size(), flags);
+		*mask = kmalloc_node(cpumask_size(), flags, node);
 	else {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 		printk(KERN_ERR
 			"=> alloc_cpumask_var: kmalloc not available!\n");
-		dump_stack();
 #endif
 		*mask = NULL;
 	}
@@ -94,21 +107,64 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 		dump_stack();
 	}
 #endif
+	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
+	if (*mask) {
+		unsigned int tail;
+		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
+		memset(cpumask_bits(*mask) + cpumask_size() - tail,
+		       0, tail);
+	}
+
 	return *mask != NULL;
 }
+EXPORT_SYMBOL(alloc_cpumask_var_node);
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ */
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var_node(mask, flags, numa_node_id());
+}
 EXPORT_SYMBOL(alloc_cpumask_var);
 
+/**
+ * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop (in <linux/cpumask.h>).
+ * Either returns an allocated (zero-filled) cpumask, or causes the
+ * system to panic.
+ */
 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
 	*mask = alloc_bootmem(cpumask_size());
 }
 
+/**
+ * free_cpumask_var - frees memory allocated for a struct cpumask.
+ * @mask: cpumask to free
+ *
+ * This is safe on a NULL mask.
+ */
 void free_cpumask_var(cpumask_var_t mask)
 {
 	kfree(mask);
 }
 EXPORT_SYMBOL(free_cpumask_var);
 
+/**
+ * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
+ * @mask: cpumask to free
+ */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
 	free_bootmem((unsigned long)mask, cpumask_size());
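
A minimal usage sketch (hypothetical caller, not part of this commit). The point of the API is that the same source works with CONFIG_CPUMASK_OFFSTACK both on and off, since the =n case turns the allocation into a no-op that always succeeds:

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	static int count_online_in(const struct cpumask *limit)
	{
		cpumask_var_t tmp;
		int n;

		/* kmalloc'ed when OFFSTACK=y, plain stack storage otherwise */
		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmp, limit, cpu_online_mask);
		n = cpumask_weight(tmp);
		free_cpumask_var(tmp);
		return n;
	}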
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e3ab374e1334..5d99be1fd988 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache;
 static int			debug_objects_maxchain __read_mostly;
 static int			debug_objects_fixups __read_mostly;
 static int			debug_objects_warnings __read_mostly;
-static int			debug_objects_enabled __read_mostly;
+static int			debug_objects_enabled __read_mostly
+				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+
 static struct debug_obj_descr	*descr_test  __read_mostly;
 
 static int __init enable_object_debug(char *str)
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index 8e30295e8566..165a19763dc9 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -277,40 +277,34 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 			dynamic_enabled = DYNAMIC_ENABLED_NONE;
 		}
 		err = 0;
-	} else {
-		if (elem) {
-			if (value && (elem->enable == 0)) {
-				dynamic_printk_enabled |=
-					(1LL << elem->hash1);
-				dynamic_printk_enabled2 |=
-					(1LL << elem->hash2);
-				elem->enable = 1;
-				num_enabled++;
-				dynamic_enabled = DYNAMIC_ENABLED_SOME;
-				err = 0;
-				printk(KERN_DEBUG
-					"debugging enabled for module %s\n",
-					elem->name);
-			} else if (!value && (elem->enable == 1)) {
-				elem->enable = 0;
-				num_enabled--;
-				if (disabled_hash(elem->hash1, true))
-					dynamic_printk_enabled &=
+	} else if (elem) {
+		if (value && (elem->enable == 0)) {
+			dynamic_printk_enabled |= (1LL << elem->hash1);
+			dynamic_printk_enabled2 |= (1LL << elem->hash2);
+			elem->enable = 1;
+			num_enabled++;
+			dynamic_enabled = DYNAMIC_ENABLED_SOME;
+			err = 0;
+			printk(KERN_DEBUG
+			       "debugging enabled for module %s\n",
+			       elem->name);
+		} else if (!value && (elem->enable == 1)) {
+			elem->enable = 0;
+			num_enabled--;
+			if (disabled_hash(elem->hash1, true))
+				dynamic_printk_enabled &=
 					~(1LL << elem->hash1);
 			if (disabled_hash(elem->hash2, false))
 				dynamic_printk_enabled2 &=
 					~(1LL << elem->hash2);
 			if (num_enabled)
-				dynamic_enabled =
-					DYNAMIC_ENABLED_SOME;
-			else
-				dynamic_enabled =
-					DYNAMIC_ENABLED_NONE;
-			err = 0;
-			printk(KERN_DEBUG
-				"debugging disabled for module "
-				"%s\n", elem->name);
-		}
+				dynamic_enabled = DYNAMIC_ENABLED_SOME;
+			else
+				dynamic_enabled = DYNAMIC_ENABLED_NONE;
+			err = 0;
+			printk(KERN_DEBUG
+			       "debugging disabled for module %s\n",
+			       elem->name);
 		}
 	}
 }
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index a50a311554cc..f97af55bdd96 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -6,7 +6,6 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/unwind.h>
 #include <linux/stacktrace.h>
 #include <linux/kallsyms.h>
 #include <linux/fault-inject.h>
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
new file mode 100644
index 000000000000..5d202e36bdd8
--- /dev/null
+++ b/lib/find_last_bit.c
@@ -0,0 +1,45 @@
+/* find_last_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2008 IBM Corporation
+ * Written by Rusty Russell <rusty@rustcorp.com.au>
+ * (Inspired by David Howell's find_next_bit implementation)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+	unsigned long words;
+	unsigned long tmp;
+
+	/* Start at final word. */
+	words = size / BITS_PER_LONG;
+
+	/* Partial final word? */
+	if (size & (BITS_PER_LONG-1)) {
+		tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
+					 - (size & (BITS_PER_LONG-1)))));
+		if (tmp)
+			goto found;
+	}
+
+	while (words) {
+		tmp = addr[--words];
+		if (tmp) {
+found:
+			return words * BITS_PER_LONG + __fls(tmp);
+		}
+	}
+
+	/* Not found */
+	return size;
+}
+EXPORT_SYMBOL(find_last_bit);
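
A usage sketch (hypothetical caller): the "not found" convention mirrors find_first_bit()/find_next_bit(), i.e. the size argument comes back when no bit is set:

	#include <linux/bitops.h>

	#define MY_NBITS 128	/* hypothetical bitmap width */

	static int highest_set(const unsigned long *bitmap)
	{
		unsigned long bit = find_last_bit(bitmap, MY_NBITS);

		if (bit == MY_NBITS)	/* empty bitmap */
			return -1;
		return bit;
	}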
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
new file mode 100644
index 000000000000..f1ed2fe76c65
--- /dev/null
+++ b/lib/is_single_threaded.c
@@ -0,0 +1,45 @@
+/* Function to determine if a thread group is single threaded or not
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from security/selinux/hooks.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+
+/**
+ * is_single_threaded - Determine if a thread group is single-threaded or not
+ * @p: A task in the thread group in question
+ *
+ * This returns true if the thread group to which a task belongs is single
+ * threaded, false if it is not.
+ */
+bool is_single_threaded(struct task_struct *p)
+{
+	struct task_struct *g, *t;
+	struct mm_struct *mm = p->mm;
+
+	if (atomic_read(&p->signal->count) != 1)
+		goto no;
+
+	if (atomic_read(&p->mm->mm_users) != 1) {
+		read_lock(&tasklist_lock);
+		do_each_thread(g, t) {
+			if (t->mm == mm && t != p)
+				goto no_unlock;
+		} while_each_thread(g, t);
+		read_unlock(&tasklist_lock);
+	}
+
+	return true;
+
+no_unlock:
+	read_unlock(&tasklist_lock);
+no:
+	return false;
+}
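
Note the tasklist walk: even when the signal group has a single thread, another process created with CLONE_VM can share the mm, which is why mm_users > 1 forces a scan of every thread in the system. A caller sketch (hypothetical):

	#include <linux/sched.h>

	static bool can_take_fast_path(void)
	{
		/* true only if no other task can touch current's mm */
		return is_single_threaded(current);
	}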
diff --git a/lib/klist.c b/lib/klist.c
index bbdd3015c2c7..573d6068a42e 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -36,6 +36,7 @@
 
 #include <linux/klist.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 
 /*
  * Use the lowest bit of n_klist to mark deleted nodes and exclude
@@ -108,7 +109,6 @@ static void add_tail(struct klist *k, struct klist_node *n)
 static void klist_node_init(struct klist *k, struct klist_node *n)
 {
 	INIT_LIST_HEAD(&n->n_node);
-	init_completion(&n->n_removed);
 	kref_init(&n->n_ref);
 	knode_set_klist(n, k);
 	if (k->get)
@@ -171,13 +171,34 @@ void klist_add_before(struct klist_node *n, struct klist_node *pos)
 }
 EXPORT_SYMBOL_GPL(klist_add_before);
 
+struct klist_waiter {
+	struct list_head list;
+	struct klist_node *node;
+	struct task_struct *process;
+	int woken;
+};
+
+static DEFINE_SPINLOCK(klist_remove_lock);
+static LIST_HEAD(klist_remove_waiters);
+
 static void klist_release(struct kref *kref)
 {
+	struct klist_waiter *waiter, *tmp;
 	struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
 	WARN_ON(!knode_dead(n));
 	list_del(&n->n_node);
-	complete(&n->n_removed);
+	spin_lock(&klist_remove_lock);
+	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
+		if (waiter->node != n)
+			continue;
+
+		waiter->woken = 1;
+		mb();
+		wake_up_process(waiter->process);
+		list_del(&waiter->list);
+	}
+	spin_unlock(&klist_remove_lock);
 	knode_set_klist(n, NULL);
 }
 
@@ -217,8 +238,24 @@ EXPORT_SYMBOL_GPL(klist_del);
  */
 void klist_remove(struct klist_node *n)
 {
+	struct klist_waiter waiter;
+
+	waiter.node = n;
+	waiter.process = current;
+	waiter.woken = 0;
+	spin_lock(&klist_remove_lock);
+	list_add(&waiter.list, &klist_remove_waiters);
+	spin_unlock(&klist_remove_lock);
+
 	klist_del(n);
-	wait_for_completion(&n->n_removed);
+
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (waiter.woken)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
 }
 EXPORT_SYMBOL_GPL(klist_remove);
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 3f914725bda8..318328ddbd1c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -165,7 +165,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	/* keys passed in from the caller */
 	if (envp_ext) {
 		for (i = 0; envp_ext[i]; i++) {
-			retval = add_uevent_var(env, envp_ext[i]);
+			retval = add_uevent_var(env, "%s", envp_ext[i]);
 			if (retval)
 				goto exit;
 		}
@@ -225,8 +225,10 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		}
 
 			NETLINK_CB(skb).dst_group = 1;
-			netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL);
-		}
+			retval = netlink_broadcast(uevent_sock, skb, 0, 1,
+						   GFP_KERNEL);
+		} else
+			retval = -ENOMEM;
 	}
 #endif
 
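
Two separate hardenings here: routing envp_ext[i] through a "%s" format keeps a '%' in a caller-supplied string from being interpreted by add_uevent_var()'s printf-style formatting, and the netlink branch now reports allocation failure (-ENOMEM) and the netlink_broadcast() result instead of silently discarding both.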
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index b5c3287d8ea4..244f5480c898 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -30,168 +30,52 @@
  * any later version.
  *
  */
-#include <linux/crc32c.h>
-#include <linux/compiler.h>
-#include <linux/module.h>
-
-MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
-MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
-MODULE_LICENSE("GPL");
 
-#define CRC32C_POLY_BE 0x1EDC6F41
-#define CRC32C_POLY_LE 0x82F63B78
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 
-#ifndef CRC_LE_BITS
-# define CRC_LE_BITS 8
-#endif
+static struct crypto_shash *tfm;
 
+u32 crc32c(u32 crc, const void *address, unsigned int length)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(tfm)];
+	} desc;
+	int err;
 
-/*
- * Haven't generated a big-endian table yet, but the bit-wise version
- * should at least work.
- */
-#if defined CRC_BE_BITS && CRC_BE_BITS != 1
-#undef CRC_BE_BITS
-#endif
-#ifndef CRC_BE_BITS
-# define CRC_BE_BITS 1
-#endif
+	desc.shash.tfm = tfm;
+	desc.shash.flags = 0;
+	*(u32 *)desc.ctx = crc;
 
-EXPORT_SYMBOL(crc32c_le);
+	err = crypto_shash_update(&desc.shash, address, length);
+	BUG_ON(err);
 
-#if CRC_LE_BITS == 1
-/*
- * Compute things bit-wise, as done in crc32.c. We could share the tight
- * loop below with crc32 and vary the POLY if we don't find value in terms
- * of space and maintainability in keeping the two modules separate.
- */
-u32 __pure
-crc32c_le(u32 crc, unsigned char const *p, size_t len)
-{
-	int i;
-	while (len--) {
-		crc ^= *p++;
-		for (i = 0; i < 8; i++)
-			crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY_LE : 0);
-	}
-	return crc;
+	return *(u32 *)desc.ctx;
 }
-#else
-
-/*
- * This is the CRC-32C table
- * Generated with:
- * width = 32 bits
- * poly = 0x1EDC6F41
- * reflect input bytes = true
- * reflect output bytes = true
- */
-
-static const u32 crc32c_table[256] = {
-	0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
-	0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
-	0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
-	0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
-	0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
-	0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
-	0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
-	0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
-	0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
-	0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
-	0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
-	0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
-	0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
-	0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
-	0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
-	0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
-	0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
-	0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
-	0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
-	0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
-	0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
-	0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
-	0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
-	0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
-	0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
-	0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
-	0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
-	0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
-	0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
-	0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
-	0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
-	0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
-	0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
-	0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
-	0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
-	0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
-	0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
-	0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
-	0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
-	0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
-	0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
-	0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
-	0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
-	0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
-	0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
-	0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
-	0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
-	0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
-	0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
-	0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
-	0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
-	0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
-	0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
-	0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
-	0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
-	0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
-	0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
-	0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
-	0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
-	0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
-	0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
-	0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
-	0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
-	0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
-};
 
-/*
- * Steps through buffer one byte at at time, calculates reflected
- * crc using table.
- */
+EXPORT_SYMBOL(crc32c);
 
-u32 __pure
-crc32c_le(u32 crc, unsigned char const *data, size_t length)
+static int __init libcrc32c_mod_init(void)
 {
-	while (length--)
-		crc =
-		    crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
+	tfm = crypto_alloc_shash("crc32c", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
 
-	return crc;
+	return 0;
 }
 
-#endif	/* CRC_LE_BITS == 8 */
-
-EXPORT_SYMBOL(crc32c_be);
-
-#if CRC_BE_BITS == 1
-u32 __pure
-crc32c_be(u32 crc, unsigned char const *p, size_t len)
+static void __exit libcrc32c_mod_fini(void)
 {
-	int i;
-	while (len--) {
-		crc ^= *p++ << 24;
-		for (i = 0; i < 8; i++)
-			crc =
-			    (crc << 1) ^ ((crc & 0x80000000) ? CRC32C_POLY_BE :
-			     0);
-	}
-	return crc;
+	crypto_free_shash(tfm);
 }
-#endif
 
-/*
- * Unit test
- *
- * A small unit test suite is implemented as part of the crypto suite.
- * Select CRYPTO_CRC32C and use the tcrypt module to run the tests.
- */
+module_init(libcrc32c_mod_init);
+module_exit(libcrc32c_mod_fini);
+
+MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
+MODULE_LICENSE("GPL");
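
The exported interface is unchanged, so existing callers keep working; a sketch of typical use (hypothetical caller, customary all-ones seed):

	#include <linux/crc32c.h>

	static u32 csum_block(const void *data, unsigned int len)
	{
		/* seed with ~0, as e.g. the iSCSI on-wire format does */
		return crc32c(~0U, data, len);
	}

Going through crypto_alloc_shash("crc32c", ...) also means the highest-priority registered implementation wins, so a hardware-accelerated crc32c (where one exists) is picked up without any change here.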
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index b255b939bc1b..aeaa6d734447 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -9,10 +9,8 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 
-#ifdef CONFIG_HOTPLUG_CPU
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
-#endif
 
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
@@ -68,11 +66,11 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-static struct lock_class_key percpu_counter_irqsafe;
-
-int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+			  struct lock_class_key *key)
 {
 	spin_lock_init(&fbc->lock);
+	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
 	if (!fbc->counters)
@@ -84,17 +82,7 @@ int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
 #endif
 	return 0;
 }
-EXPORT_SYMBOL(percpu_counter_init);
-
-int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
-{
-	int err;
-
-	err = percpu_counter_init(fbc, amount);
-	if (!err)
-		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
-	return err;
-}
+EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
@@ -111,13 +99,24 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
-#ifdef CONFIG_HOTPLUG_CPU
+int percpu_counter_batch __read_mostly = 32;
+EXPORT_SYMBOL(percpu_counter_batch);
+
+static void compute_batch_value(void)
+{
+	int nr = num_online_cpus();
+
+	percpu_counter_batch = max(32, nr*2);
+}
+
 static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 					unsigned long action, void *hcpu)
 {
+#ifdef CONFIG_HOTPLUG_CPU
 	unsigned int cpu;
 	struct percpu_counter *fbc;
 
+	compute_batch_value();
 	if (action != CPU_DEAD)
 		return NOTIFY_OK;
 
@@ -134,13 +133,14 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
+#endif
 	return NOTIFY_OK;
 }
 
 static int __init percpu_counter_startup(void)
 {
+	compute_batch_value();
 	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
 	return 0;
 }
 module_init(percpu_counter_startup);
-#endif
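
The matching header change is not shown in this diff, but the new lock_class_key parameter implies percpu_counter_init() becomes a wrapper macro giving each call site its own lockdep class. A reconstruction (an assumption, for illustration only):

	#define percpu_counter_init(fbc, value)				\
		({							\
			static struct lock_class_key __key;		\
									\
			__percpu_counter_init(fbc, value, &__key);	\
		})

With a per-callsite class there is no longer any need for the separate percpu_counter_init_irq() variant removed above, which is why lib/proportions.c below can switch back to plain percpu_counter_init().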
diff --git a/lib/prio_heap.c b/lib/prio_heap.c
index 471944a54e23..a7af6f85eca8 100644
--- a/lib/prio_heap.c
+++ b/lib/prio_heap.c
@@ -31,7 +31,7 @@ void *heap_insert(struct ptr_heap *heap, void *p)
 
 	if (heap->size < heap->max) {
 		/* Heap insertion */
-		int pos = heap->size++;
+		pos = heap->size++;
 		while (pos > 0 && heap->gt(p, ptrs[(pos-1)/2])) {
 			ptrs[pos] = ptrs[(pos-1)/2];
 			pos = (pos-1)/2;
diff --git a/lib/proportions.c b/lib/proportions.c
index 4f387a643d72..d50746a79de2 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
 	pd->index = 0;
 	pd->pg[0].shift = shift;
 	mutex_init(&pd->mutex);
-	err = percpu_counter_init_irq(&pd->pg[0].events, 0);
+	err = percpu_counter_init(&pd->pg[0].events, 0);
 	if (err)
 		goto out;
 
-	err = percpu_counter_init_irq(&pd->pg[1].events, 0);
+	err = percpu_counter_init(&pd->pg[1].events, 0);
 	if (err)
 		percpu_counter_destroy(&pd->pg[0].events);
 
@@ -147,6 +147,7 @@ out:
  * this is used to track the active references.
  */
 static struct prop_global *prop_get_global(struct prop_descriptor *pd)
+__acquires(RCU)
 {
 	int index;
 
@@ -160,6 +161,7 @@ static struct prop_global *prop_get_global(struct prop_descriptor *pd)
 }
 
 static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
+__releases(RCU)
 {
 	rcu_read_unlock();
 }
@@ -191,7 +193,7 @@ int prop_local_init_percpu(struct prop_local_percpu *pl)
 	spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
-	return percpu_counter_init_irq(&pl->events, 0);
+	return percpu_counter_init(&pl->events, 0);
 }
 
 void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index be86b32bc874..4bb42a0344ec 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -81,7 +81,7 @@ struct radix_tree_preload {
 	int nr;
 	struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
 };
-DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
@@ -640,13 +640,14 @@ EXPORT_SYMBOL(radix_tree_tag_get);
  *
  *	Returns: the index of the hole if found, otherwise returns an index
  *	outside of the set specified (in which case 'return - index >= max_scan'
- *	will be true).
+ *	will be true).  In rare cases of index wrap-around, 0 will be returned.
  *
  *	radix_tree_next_hole may be called under rcu_read_lock. However, like
- *	radix_tree_gang_lookup, this will not atomically search a snapshot of the
- *	tree at a single point in time. For example, if a hole is created at index
- *	5, then subsequently a hole is created at index 10, radix_tree_next_hole
- *	covering both indexes may return 10 if called under rcu_read_lock.
+ *	radix_tree_gang_lookup, this will not atomically search a snapshot of
+ *	the tree at a single point in time. For example, if a hole is created
+ *	at index 5, then subsequently a hole is created at index 10,
+ *	radix_tree_next_hole covering both indexes may return 10 if called
+ *	under rcu_read_lock.
  */
 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan)
diff --git a/lib/sort.c b/lib/sort.c
index 6abbaf3d5858..926d00429ed2 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -32,11 +32,11 @@ static void generic_swap(void *a, void *b, int size)
  * @base: pointer to data to sort
  * @num: number of elements
  * @size: size of each element
- * @cmp: pointer to comparison function
- * @swap: pointer to swap function or NULL
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
  *
  * This function does a heapsort on the given array. You may provide a
- * swap function optimized to your element type.
+ * swap_func function optimized to your element type.
  *
  * Sorting time is O(n log n) both on average and worst-case. While
  * qsort is about 20% faster on average, it suffers from exploitable
@@ -45,37 +45,39 @@ static void generic_swap(void *a, void *b, int size)
  */
 
 void sort(void *base, size_t num, size_t size,
-	  int (*cmp)(const void *, const void *),
-	  void (*swap)(void *, void *, int size))
+	  int (*cmp_func)(const void *, const void *),
+	  void (*swap_func)(void *, void *, int size))
 {
 	/* pre-scale counters for performance */
 	int i = (num/2 - 1) * size, n = num * size, c, r;
 
-	if (!swap)
-		swap = (size == 4 ? u32_swap : generic_swap);
+	if (!swap_func)
+		swap_func = (size == 4 ? u32_swap : generic_swap);
 
 	/* heapify */
 	for ( ; i >= 0; i -= size) {
 		for (r = i; r * 2 + size < n; r  = c) {
 			c = r * 2 + size;
-			if (c < n - size && cmp(base + c, base + c + size) < 0)
+			if (c < n - size &&
+					cmp_func(base + c, base + c + size) < 0)
 				c += size;
-			if (cmp(base + r, base + c) >= 0)
+			if (cmp_func(base + r, base + c) >= 0)
 				break;
-			swap(base + r, base + c, size);
+			swap_func(base + r, base + c, size);
 		}
 	}
 
 	/* sort */
 	for (i = n - size; i > 0; i -= size) {
-		swap(base, base + i, size);
+		swap_func(base, base + i, size);
 		for (r = 0; r * 2 + size < i; r = c) {
 			c = r * 2 + size;
-			if (c < i - size && cmp(base + c, base + c + size) < 0)
+			if (c < i - size &&
+					cmp_func(base + c, base + c + size) < 0)
 				c += size;
-			if (cmp(base + r, base + c) >= 0)
+			if (cmp_func(base + r, base + c) >= 0)
 				break;
-			swap(base + r, base + c, size);
+			swap_func(base + r, base + c, size);
 		}
 	}
 }
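
A caller sketch (hypothetical) showing the NULL swap_func path, which is unchanged by the rename:

	#include <linux/sort.h>

	static int cmp_int(const void *a, const void *b)
	{
		return *(const int *)a - *(const int *)b;
	}

	static void sort_ints(int *v, size_t n)
	{
		/* NULL swap_func: sort() substitutes u32_swap since size == 4 */
		sort(v, n, sizeof(*v), cmp_int, NULL);
	}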
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5f6c629a924d..1f991acc2a05 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -14,6 +14,7 @@
14 * 04/07/.. ak Better overflow handling. Assorted fixes. 14 * 04/07/.. ak Better overflow handling. Assorted fixes.
15 * 05/09/10 linville Add support for syncing ranges, support syncing for 15 * 05/09/10 linville Add support for syncing ranges, support syncing for
16 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. 16 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
17 * 08/12/11 beckyb Add highmem support
17 */ 18 */
18 19
19#include <linux/cache.h> 20#include <linux/cache.h>
@@ -22,8 +23,11 @@
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/swiotlb.h>
27#include <linux/pfn.h>
25#include <linux/types.h> 28#include <linux/types.h>
26#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/highmem.h>
27 31
28#include <asm/io.h> 32#include <asm/io.h>
29#include <asm/dma.h> 33#include <asm/dma.h>
@@ -36,22 +40,6 @@
36#define OFFSET(val,align) ((unsigned long) \ 40#define OFFSET(val,align) ((unsigned long) \
37 ( (val) & ( (align) - 1))) 41 ( (val) & ( (align) - 1)))
38 42
39#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
40#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
41
42/*
43 * Maximum allowable number of contiguous slabs to map,
44 * must be a power of 2. What is the appropriate value ?
45 * The complexity of {map,unmap}_single is linearly dependent on this value.
46 */
47#define IO_TLB_SEGSIZE 128
48
49/*
50 * log of the size of each IO TLB slab. The number of slabs is command line
51 * controllable.
52 */
53#define IO_TLB_SHIFT 11
54
55#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 43#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
56 44
57/* 45/*
@@ -102,7 +90,7 @@ static unsigned int io_tlb_index;
102 * We need to save away the original address corresponding to a mapped entry 90 * We need to save away the original address corresponding to a mapped entry
103 * for the sync operations. 91 * for the sync operations.
104 */ 92 */
105static unsigned char **io_tlb_orig_addr; 93static phys_addr_t *io_tlb_orig_addr;
106 94
107/* 95/*
108 * Protect the above data structures in the map and unmap calls 96 * Protect the above data structures in the map and unmap calls
@@ -126,6 +114,56 @@ setup_io_tlb_npages(char *str)
126__setup("swiotlb=", setup_io_tlb_npages); 114__setup("swiotlb=", setup_io_tlb_npages);
127/* make io_tlb_overflow tunable too? */ 115/* make io_tlb_overflow tunable too? */
128 116
117void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
118{
119 return alloc_bootmem_low_pages(size);
120}
121
122void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
123{
124 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
125}
126
127dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
128{
129 return paddr;
130}
131
132phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
133{
134 return baddr;
135}
136
137static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
138 volatile void *address)
139{
140 return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
141}
142
143static void *swiotlb_bus_to_virt(dma_addr_t address)
144{
145 return phys_to_virt(swiotlb_bus_to_phys(address));
146}
147
148int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
149{
150 return 0;
151}
152
153static void swiotlb_print_info(unsigned long bytes)
154{
155 phys_addr_t pstart, pend;
156
157 pstart = virt_to_phys(io_tlb_start);
158 pend = virt_to_phys(io_tlb_end);
159
160 printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
161 bytes >> 20, io_tlb_start, io_tlb_end);
162 printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
163 (unsigned long long)pstart,
164 (unsigned long long)pend);
165}
166
129/* 167/*
130 * Statically reserve bounce buffer space and initialize bounce buffer data 168 * Statically reserve bounce buffer space and initialize bounce buffer data
131 * structures for the software IO TLB used to implement the DMA API. 169 * structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +183,7 @@ swiotlb_init_with_default_size(size_t default_size)
145 /* 183 /*
146 * Get IO TLB memory from the low pages 184 * Get IO TLB memory from the low pages
147 */ 185 */
148 io_tlb_start = alloc_bootmem_low_pages(bytes); 186 io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
149 if (!io_tlb_start) 187 if (!io_tlb_start)
150 panic("Cannot allocate SWIOTLB buffer"); 188 panic("Cannot allocate SWIOTLB buffer");
151 io_tlb_end = io_tlb_start + bytes; 189 io_tlb_end = io_tlb_start + bytes;
@@ -159,7 +197,7 @@ swiotlb_init_with_default_size(size_t default_size)
159 for (i = 0; i < io_tlb_nslabs; i++) 197 for (i = 0; i < io_tlb_nslabs; i++)
160 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 198 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
161 io_tlb_index = 0; 199 io_tlb_index = 0;
162 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); 200 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
163 201
164 /* 202 /*
165 * Get the overflow emergency buffer 203 * Get the overflow emergency buffer
@@ -168,8 +206,7 @@ swiotlb_init_with_default_size(size_t default_size)
168 if (!io_tlb_overflow_buffer) 206 if (!io_tlb_overflow_buffer)
169 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 207 panic("Cannot allocate SWIOTLB overflow buffer!\n");
170 208
171 printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", 209 swiotlb_print_info(bytes);
172 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
173} 210}
174 211
175void __init 212void __init
@@ -202,8 +239,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
202 bytes = io_tlb_nslabs << IO_TLB_SHIFT; 239 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
203 240
204 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 241 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
205 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 242 io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
206 order);
207 if (io_tlb_start) 243 if (io_tlb_start)
208 break; 244 break;
209 order--; 245 order--;
@@ -235,12 +271,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
235 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 271 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
236 io_tlb_index = 0; 272 io_tlb_index = 0;
237 273
238 io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 274 io_tlb_orig_addr = (phys_addr_t *)
239 get_order(io_tlb_nslabs * sizeof(char *))); 275 __get_free_pages(GFP_KERNEL,
276 get_order(io_tlb_nslabs *
277 sizeof(phys_addr_t)));
240 if (!io_tlb_orig_addr) 278 if (!io_tlb_orig_addr)
241 goto cleanup3; 279 goto cleanup3;
242 280
243 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 281 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
244 282
245 /* 283 /*
246 * Get the overflow emergency buffer 284 * Get the overflow emergency buffer
@@ -250,15 +288,13 @@ swiotlb_late_init_with_default_size(size_t default_size)
250 if (!io_tlb_overflow_buffer) 288 if (!io_tlb_overflow_buffer)
251 goto cleanup4; 289 goto cleanup4;
252 290
253 printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " 291 swiotlb_print_info(bytes);
254 "0x%lx\n", bytes >> 20,
255 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
256 292
257 return 0; 293 return 0;
258 294
259cleanup4: 295cleanup4:
260 free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * 296 free_pages((unsigned long)io_tlb_orig_addr,
261 sizeof(char *))); 297 get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
262 io_tlb_orig_addr = NULL; 298 io_tlb_orig_addr = NULL;
263cleanup3: 299cleanup3:
264 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * 300 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -279,16 +315,62 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
279 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); 315 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
280} 316}
281 317
318static inline int range_needs_mapping(void *ptr, size_t size)
319{
320 return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
321}
322
282static int is_swiotlb_buffer(char *addr) 323static int is_swiotlb_buffer(char *addr)
283{ 324{
284 return addr >= io_tlb_start && addr < io_tlb_end; 325 return addr >= io_tlb_start && addr < io_tlb_end;
285} 326}
286 327
287/* 328/*
329 * Bounce: copy the swiotlb buffer back to the original dma location
330 */
331static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
332 enum dma_data_direction dir)
333{
334 unsigned long pfn = PFN_DOWN(phys);
335
336 if (PageHighMem(pfn_to_page(pfn))) {
337 /* The buffer does not have a mapping. Map it in and copy */
338 unsigned int offset = phys & ~PAGE_MASK;
339 char *buffer;
340 unsigned int sz = 0;
341 unsigned long flags;
342
343 while (size) {
344 sz = min(PAGE_SIZE - offset, size);
345
346 local_irq_save(flags);
347 buffer = kmap_atomic(pfn_to_page(pfn),
348 KM_BOUNCE_READ);
349 if (dir == DMA_TO_DEVICE)
350 memcpy(dma_addr, buffer + offset, sz);
351 else
352 memcpy(buffer + offset, dma_addr, sz);
353 kunmap_atomic(buffer, KM_BOUNCE_READ);
354 local_irq_restore(flags);
355
356 size -= sz;
357 pfn++;
358 dma_addr += sz;
359 offset = 0;
360 }
361 } else {
362 if (dir == DMA_TO_DEVICE)
363 memcpy(dma_addr, phys_to_virt(phys), size);
364 else
365 memcpy(phys_to_virt(phys), dma_addr, size);
366 }
367}
368
369/*
288 * Allocates bounce buffer and returns its kernel virtual address. 370 * Allocates bounce buffer and returns its kernel virtual address.
289 */ 371 */
290static void * 372static void *
291map_single(struct device *hwdev, char *buffer, size_t size, int dir) 373map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
292{ 374{
293 unsigned long flags; 375 unsigned long flags;
294 char *dma_addr; 376 char *dma_addr;
@@ -300,9 +382,13 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
300 unsigned long max_slots; 382 unsigned long max_slots;
301 383
302 mask = dma_get_seg_boundary(hwdev); 384 mask = dma_get_seg_boundary(hwdev);
303 start_dma_addr = virt_to_bus(io_tlb_start) & mask; 385 start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
304 386
305 offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 387 offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
388
389 /*
390 * Carefully handle integer overflow which can occur when mask == ~0UL.
391 */
306 max_slots = mask + 1 392 max_slots = mask + 1
307 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT 393 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
308 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); 394 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -379,9 +465,9 @@ found:
379 * needed. 465 * needed.
380 */ 466 */
381 for (i = 0; i < nslots; i++) 467 for (i = 0; i < nslots; i++)
382 io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT); 468 io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
383 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 469 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
384 memcpy(dma_addr, buffer, size); 470 swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
385 471
386 return dma_addr; 472 return dma_addr;
387} 473}
@@ -395,17 +481,13 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
395 unsigned long flags; 481 unsigned long flags;
396 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 482 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
397 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 483 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
398 char *buffer = io_tlb_orig_addr[index]; 484 phys_addr_t phys = io_tlb_orig_addr[index];
399 485
400 /* 486 /*
401 * First, sync the memory before unmapping the entry 487 * First, sync the memory before unmapping the entry
402 */ 488 */
403 if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 489 if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
404 /* 490 swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
405 * bounce... copy the data back into the original buffer * and
406 * delete the bounce buffer.
407 */
408 memcpy(buffer, dma_addr, size);
409 491
410 /* 492 /*
411 * Return the buffer to the free list by setting the corresponding 493 * Return the buffer to the free list by setting the corresponding
@@ -438,20 +520,20 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
438 int dir, int target) 520 int dir, int target)
439{ 521{
440 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 522 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
441 char *buffer = io_tlb_orig_addr[index]; 523 phys_addr_t phys = io_tlb_orig_addr[index];
442 524
443 buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1)); 525 phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
444 526
445 switch (target) { 527 switch (target) {
446 case SYNC_FOR_CPU: 528 case SYNC_FOR_CPU:
447 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 529 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
448 memcpy(buffer, dma_addr, size); 530 swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
449 else 531 else
450 BUG_ON(dir != DMA_TO_DEVICE); 532 BUG_ON(dir != DMA_TO_DEVICE);
451 break; 533 break;
452 case SYNC_FOR_DEVICE: 534 case SYNC_FOR_DEVICE:
453 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 535 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
454 memcpy(dma_addr, buffer, size); 536 swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
455 else 537 else
456 BUG_ON(dir != DMA_FROM_DEVICE); 538 BUG_ON(dir != DMA_FROM_DEVICE);
457 break; 539 break;
@@ -473,7 +555,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
473 dma_mask = hwdev->coherent_dma_mask; 555 dma_mask = hwdev->coherent_dma_mask;
474 556
475 ret = (void *)__get_free_pages(flags, order); 557 ret = (void *)__get_free_pages(flags, order);
476 if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) { 558 if (ret &&
559 !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
560 size)) {
477 /* 561 /*
478 * The allocated memory isn't reachable by the device. 562 * The allocated memory isn't reachable by the device.
479 * Fall back on swiotlb_map_single(). 563 * Fall back on swiotlb_map_single().
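Every virt_to_bus()/bus_to_virt() in this file becomes swiotlb_virt_to_bus()/swiotlb_bus_to_virt(), and the former now takes the device, so the virtual-to-bus translation no longer has to be the global 1:1 one. The default definitions are outside this hunk; one plausible shape, assuming they are overridable weak symbols (for, say, a paravirtualized port), would be:

	/* Sketch only; the patch's real defaults may be structured differently. */
	dma_addr_t __weak swiotlb_virt_to_bus(struct device *hwdev,
					      volatile void *address)
	{
		return virt_to_bus(address);	/* identity on bare metal */
	}

	void * __weak swiotlb_bus_to_virt(dma_addr_t address)
	{
		return bus_to_virt(address);
	}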
@@ -488,13 +572,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
488 * swiotlb_map_single(), which will grab memory from 572 * swiotlb_map_single(), which will grab memory from
489 * the lowest available address range. 573 * the lowest available address range.
490 */ 574 */
491 ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE); 575 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
492 if (!ret) 576 if (!ret)
493 return NULL; 577 return NULL;
494 } 578 }
495 579
496 memset(ret, 0, size); 580 memset(ret, 0, size);
497 dev_addr = virt_to_bus(ret); 581 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
498 582
499 /* Confirm address can be DMA'd by device */ 583 /* Confirm address can be DMA'd by device */
500 if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { 584 if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
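The allocation is two-stage: take whatever __get_free_pages() returns and, only if the device cannot reach it, retry via a slot from the bounce pool, which was allocated low at boot. The reachability test amounts to asking whether the whole buffer fits under the mask; a sketch of the meaning (not the header's exact body):

	/* Sketch: every byte of [addr, addr + size) must be addressable. */
	static inline int buffer_fits_mask(u64 mask, dma_addr_t addr, size_t size)
	{
		return addr + size - 1 <= mask;
	}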
@@ -509,6 +593,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
509 *dma_handle = dev_addr; 593 *dma_handle = dev_addr;
510 return ret; 594 return ret;
511} 595}
596EXPORT_SYMBOL(swiotlb_alloc_coherent);
512 597
513void 598void
514swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 599swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -521,6 +606,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
521 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 606 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
522 unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 607 unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
523} 608}
609EXPORT_SYMBOL(swiotlb_free_coherent);
524 610
525static void 611static void
526swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 612swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -533,7 +619,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
533 * the damage, or panic when the transfer is too big. 619 * the damage, or panic when the transfer is too big.
534 */ 620 */
535 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " 621 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
536 "device %s\n", size, dev ? dev->bus_id : "?"); 622 "device %s\n", size, dev ? dev_name(dev) : "?");
537 623
538 if (size > io_tlb_overflow && do_panic) { 624 if (size > io_tlb_overflow && do_panic) {
539 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 625 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
@@ -554,7 +640,7 @@ dma_addr_t
554swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, 640swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
555 int dir, struct dma_attrs *attrs) 641 int dir, struct dma_attrs *attrs)
556{ 642{
557 dma_addr_t dev_addr = virt_to_bus(ptr); 643 dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
558 void *map; 644 void *map;
559 645
560 BUG_ON(dir == DMA_NONE); 646 BUG_ON(dir == DMA_NONE);
@@ -563,19 +649,20 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
563 * we can safely return the device addr and not worry about bounce 649 * we can safely return the device addr and not worry about bounce
564 * buffering it. 650 * buffering it.
565 */ 651 */
566 if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force) 652 if (!address_needs_mapping(hwdev, dev_addr, size) &&
653 !range_needs_mapping(ptr, size))
567 return dev_addr; 654 return dev_addr;
568 655
569 /* 656 /*
570 * Oh well, have to allocate and map a bounce buffer. 657 * Oh well, have to allocate and map a bounce buffer.
571 */ 658 */
572 map = map_single(hwdev, ptr, size, dir); 659 map = map_single(hwdev, virt_to_phys(ptr), size, dir);
573 if (!map) { 660 if (!map) {
574 swiotlb_full(hwdev, size, dir, 1); 661 swiotlb_full(hwdev, size, dir, 1);
575 map = io_tlb_overflow_buffer; 662 map = io_tlb_overflow_buffer;
576 } 663 }
577 664
578 dev_addr = virt_to_bus(map); 665 dev_addr = swiotlb_virt_to_bus(hwdev, map);
579 666
580 /* 667 /*
581 * Ensure that the address returned is DMA'ble 668 * Ensure that the address returned is DMA'ble
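When map_single() fails here, the function does not return an error code; it hands back the overflow buffer so the driver keeps running, and swiotlb_dma_mapping_error() further down flags that address as unusable. Callers therefore have to check the result through the generic API; a hedged caller-side sketch (dev, buf, len illustrative):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* bounce pool exhausted; do not use handle */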
@@ -592,6 +679,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
592{ 679{
593 return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); 680 return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
594} 681}
682EXPORT_SYMBOL(swiotlb_map_single);
595 683
596/* 684/*
597 * Unmap a single streaming mode DMA translation. The dma_addr and size must 685 * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -605,7 +693,7 @@ void
605swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, 693swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
606 size_t size, int dir, struct dma_attrs *attrs) 694 size_t size, int dir, struct dma_attrs *attrs)
607{ 695{
608 char *dma_addr = bus_to_virt(dev_addr); 696 char *dma_addr = swiotlb_bus_to_virt(dev_addr);
609 697
610 BUG_ON(dir == DMA_NONE); 698 BUG_ON(dir == DMA_NONE);
611 if (is_swiotlb_buffer(dma_addr)) 699 if (is_swiotlb_buffer(dma_addr))
@@ -621,6 +709,8 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
621{ 709{
622 return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); 710 return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
623} 711}
712EXPORT_SYMBOL(swiotlb_unmap_single);
713
624/* 714/*
625 * Make physical memory consistent for a single streaming mode DMA translation 715 * Make physical memory consistent for a single streaming mode DMA translation
626 * after a transfer. 716 * after a transfer.
@@ -635,7 +725,7 @@ static void
635swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 725swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
636 size_t size, int dir, int target) 726 size_t size, int dir, int target)
637{ 727{
638 char *dma_addr = bus_to_virt(dev_addr); 728 char *dma_addr = swiotlb_bus_to_virt(dev_addr);
639 729
640 BUG_ON(dir == DMA_NONE); 730 BUG_ON(dir == DMA_NONE);
641 if (is_swiotlb_buffer(dma_addr)) 731 if (is_swiotlb_buffer(dma_addr))
@@ -650,6 +740,7 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
650{ 740{
651 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); 741 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
652} 742}
743EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
653 744
654void 745void
655swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, 746swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -657,6 +748,7 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
657{ 748{
658 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); 749 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
659} 750}
751EXPORT_SYMBOL(swiotlb_sync_single_for_device);
660 752
661/* 753/*
662 * Same as above, but for a sub-range of the mapping. 754 * Same as above, but for a sub-range of the mapping.
@@ -666,7 +758,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
666 unsigned long offset, size_t size, 758 unsigned long offset, size_t size,
667 int dir, int target) 759 int dir, int target)
668{ 760{
669 char *dma_addr = bus_to_virt(dev_addr) + offset; 761 char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
670 762
671 BUG_ON(dir == DMA_NONE); 763 BUG_ON(dir == DMA_NONE);
672 if (is_swiotlb_buffer(dma_addr)) 764 if (is_swiotlb_buffer(dma_addr))
@@ -682,6 +774,7 @@ swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
682 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, 774 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
683 SYNC_FOR_CPU); 775 SYNC_FOR_CPU);
684} 776}
777EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
685 778
686void 779void
687swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, 780swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -690,9 +783,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
690 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, 783 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
691 SYNC_FOR_DEVICE); 784 SYNC_FOR_DEVICE);
692} 785}
786EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
693 787
694void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
695 struct dma_attrs *);
696/* 788/*
697 * Map a set of buffers described by scatterlist in streaming mode for DMA. 789 * Map a set of buffers described by scatterlist in streaming mode for DMA.
698 * This is the scatter-gather version of the above swiotlb_map_single 790 * This is the scatter-gather version of the above swiotlb_map_single
@@ -714,18 +806,18 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
714 int dir, struct dma_attrs *attrs) 806 int dir, struct dma_attrs *attrs)
715{ 807{
716 struct scatterlist *sg; 808 struct scatterlist *sg;
717 void *addr;
718 dma_addr_t dev_addr;
719 int i; 809 int i;
720 810
721 BUG_ON(dir == DMA_NONE); 811 BUG_ON(dir == DMA_NONE);
722 812
723 for_each_sg(sgl, sg, nelems, i) { 813 for_each_sg(sgl, sg, nelems, i) {
724 addr = SG_ENT_VIRT_ADDRESS(sg); 814 void *addr = sg_virt(sg);
725 dev_addr = virt_to_bus(addr); 815 dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr);
726 if (swiotlb_force || 816
817 if (range_needs_mapping(addr, sg->length) ||
727 address_needs_mapping(hwdev, dev_addr, sg->length)) { 818 address_needs_mapping(hwdev, dev_addr, sg->length)) {
728 void *map = map_single(hwdev, addr, sg->length, dir); 819 void *map = map_single(hwdev, sg_phys(sg),
820 sg->length, dir);
729 if (!map) { 821 if (!map) {
730 /* Don't panic here, we expect map_sg users 822 /* Don't panic here, we expect map_sg users
731 to do proper error handling. */ 823 to do proper error handling. */
@@ -735,7 +827,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
735 sgl[0].dma_length = 0; 827 sgl[0].dma_length = 0;
736 return 0; 828 return 0;
737 } 829 }
738 sg->dma_address = virt_to_bus(map); 830 sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
739 } else 831 } else
740 sg->dma_address = dev_addr; 832 sg->dma_address = dev_addr;
741 sg->dma_length = sg->length; 833 sg->dma_length = sg->length;
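Each scatterlist entry gets the single-mapping treatment: use the bus address directly when the device can reach it, otherwise bounce it, and on exhaustion unmap everything done so far and report zero mapped entries. The caller side, as a hedged sketch (sgl, nents and the descriptor helper are illustrative):

	struct scatterlist *sg;
	int i, n;

	n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (n == 0)
		return -ENOMEM;			/* matches the 'return 0' above */

	for_each_sg(sgl, sg, n, i)		/* program each mapped segment */
		hw_fill_desc(sg_dma_address(sg), sg_dma_len(sg));	/* hypothetical */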
@@ -750,6 +842,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
750{ 842{
751 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); 843 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
752} 844}
845EXPORT_SYMBOL(swiotlb_map_sg);
753 846
754/* 847/*
755 * Unmap a set of streaming mode DMA translations. Again, cpu read rules 848 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -765,11 +858,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
765 BUG_ON(dir == DMA_NONE); 858 BUG_ON(dir == DMA_NONE);
766 859
767 for_each_sg(sgl, sg, nelems, i) { 860 for_each_sg(sgl, sg, nelems, i) {
768 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 861 if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
769 unmap_single(hwdev, bus_to_virt(sg->dma_address), 862 unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
770 sg->dma_length, dir); 863 sg->dma_length, dir);
771 else if (dir == DMA_FROM_DEVICE) 864 else if (dir == DMA_FROM_DEVICE)
772 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 865 dma_mark_clean(sg_virt(sg), sg->dma_length);
773 } 866 }
774} 867}
775EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); 868EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -780,6 +873,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
780{ 873{
781 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); 874 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
782} 875}
876EXPORT_SYMBOL(swiotlb_unmap_sg);
783 877
784/* 878/*
785 * Make physical memory consistent for a set of streaming mode DMA translations 879 * Make physical memory consistent for a set of streaming mode DMA translations
@@ -798,11 +892,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
798 BUG_ON(dir == DMA_NONE); 892 BUG_ON(dir == DMA_NONE);
799 893
800 for_each_sg(sgl, sg, nelems, i) { 894 for_each_sg(sgl, sg, nelems, i) {
801 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 895 if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
802 sync_single(hwdev, bus_to_virt(sg->dma_address), 896 sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
803 sg->dma_length, dir, target); 897 sg->dma_length, dir, target);
804 else if (dir == DMA_FROM_DEVICE) 898 else if (dir == DMA_FROM_DEVICE)
805 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 899 dma_mark_clean(sg_virt(sg), sg->dma_length);
806 } 900 }
807} 901}
808 902
@@ -812,6 +906,7 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
812{ 906{
813 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); 907 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
814} 908}
909EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
815 910
816void 911void
817swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 912swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -819,12 +914,14 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
819{ 914{
820 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 915 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
821} 916}
917EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
822 918
823int 919int
824swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 920swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
825{ 921{
826 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); 922 return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
827} 923}
924EXPORT_SYMBOL(swiotlb_dma_mapping_error);
828 925
829/* 926/*
830 * Return whether the given device DMA address mask can be supported 927 * Return whether the given device DMA address mask can be supported
@@ -835,20 +932,6 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
835int 932int
836swiotlb_dma_supported(struct device *hwdev, u64 mask) 933swiotlb_dma_supported(struct device *hwdev, u64 mask)
837{ 934{
838 return virt_to_bus(io_tlb_end - 1) <= mask; 935 return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
839} 936}
840
841EXPORT_SYMBOL(swiotlb_map_single);
842EXPORT_SYMBOL(swiotlb_unmap_single);
843EXPORT_SYMBOL(swiotlb_map_sg);
844EXPORT_SYMBOL(swiotlb_unmap_sg);
845EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
846EXPORT_SYMBOL(swiotlb_sync_single_for_device);
847EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
848EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
849EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
850EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
851EXPORT_SYMBOL(swiotlb_dma_mapping_error);
852EXPORT_SYMBOL(swiotlb_alloc_coherent);
853EXPORT_SYMBOL(swiotlb_free_coherent);
854EXPORT_SYMBOL(swiotlb_dma_supported); 937EXPORT_SYMBOL(swiotlb_dma_supported);
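swiotlb_dma_supported() asks only whether the device's mask covers the last byte of the bounce pool, since that pool is the fallback of last resort for every mapping. For example (a sketch; DMA_BIT_MASK() is the generic mask helper):

	/* A 31-bit-limited device is only supportable if the whole bounce
	 * pool sits below 2 GiB: */
	if (!swiotlb_dma_supported(hwdev, DMA_BIT_MASK(31)))
		return -EIO;			/* illustrative error handling */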
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a013bbc23717..0fbd0121d91d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -170,6 +170,8 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
170 return -EINVAL; 170 return -EINVAL;
171 171
172 val = simple_strtoul(cp, &tail, base); 172 val = simple_strtoul(cp, &tail, base);
173 if (tail == cp)
174 return -EINVAL;
173 if ((*tail == '\0') || 175 if ((*tail == '\0') ||
174 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { 176 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
175 *res = val; 177 *res = val;
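simple_strtoul() leaves tail equal to cp when it consumed no digits, so before this guard an input of just "\n" passed the success test below (len == 1 == tail - cp + 1 and *tail == '\n') and returned 0 with *res still zero. With the guard in place (strict_strtoull below gets the identical fix):

	unsigned long val;
	int err;

	err = strict_strtoul("\n", 0, &val);	/* now -EINVAL, was success */
	err = strict_strtoul("42\n", 0, &val);	/* still 0, val == 42 */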
@@ -241,6 +243,8 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
241 return -EINVAL; 243 return -EINVAL;
242 244
243 val = simple_strtoull(cp, &tail, base); 245 val = simple_strtoull(cp, &tail, base);
246 if (tail == cp)
247 return -EINVAL;
244 if ((*tail == '\0') || 248 if ((*tail == '\0') ||
245 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { 249 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
246 *res = val; 250 *res = val;
@@ -581,6 +585,62 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
581 return string(buf, end, sym, field_width, precision, flags); 585 return string(buf, end, sym, field_width, precision, flags);
582} 586}
583 587
588static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width,
589 int precision, int flags)
590{
591 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
592 char *p = mac_addr;
593 int i;
594
595 for (i = 0; i < 6; i++) {
596 p = pack_hex_byte(p, addr[i]);
597 if (!(flags & SPECIAL) && i != 5)
598 *p++ = ':';
599 }
600 *p = '\0';
601
602 return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL);
603}
604
605static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width,
606 int precision, int flags)
607{
608 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
609 char *p = ip6_addr;
610 int i;
611
612 for (i = 0; i < 8; i++) {
613 p = pack_hex_byte(p, addr[2 * i]);
614 p = pack_hex_byte(p, addr[2 * i + 1]);
615 if (!(flags & SPECIAL) && i != 7)
616 *p++ = ':';
617 }
618 *p = '\0';
619
620 return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL);
621}
622
623static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
624 int precision, int flags)
625{
626 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
627 char temp[3]; /* hold each IP quad in reverse order */
628 char *p = ip4_addr;
629 int i, digits;
630
631 for (i = 0; i < 4; i++) {
632 digits = put_dec_trunc(temp, addr[i]) - temp;
633 /* reverse the digits in the quad */
634 while (digits--)
635 *p++ = temp[digits];
636 if (i != 3)
637 *p++ = '.';
638 }
639 *p = '\0';
640
641 return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL);
642}
643
584/* 644/*
585 * Show a '%p' thing. A kernel extension is that the '%p' is followed 645 * Show a '%p' thing. A kernel extension is that the '%p' is followed
586 * by an extra set of alphanumeric characters that are extended format 646 * by an extra set of alphanumeric characters that are extended format
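The digit reversal in ip4_addr_string() is needed because put_dec_trunc() emits the least significant digit first and returns the position just past the last digit written:

	char temp[3];
	int digits = put_dec_trunc(temp, 192) - temp;	/* temp = "291", digits = 3 */

	while (digits--)
		*p++ = temp[digits];			/* emits '1', '9', '2' */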
@@ -592,6 +652,12 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
592 * - 'S' For symbolic direct pointers 652 * - 'S' For symbolic direct pointers
593 * - 'R' For a struct resource pointer, it prints the range of 653 * - 'R' For a struct resource pointer, it prints the range of
594 * addresses (not the name nor the flags) 654 * addresses (not the name nor the flags)
655 * - 'M' For a 6-byte MAC address, it prints the address in the
656 * usual colon-separated hex notation
657 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated
658 * decimal for v4 and colon separated network-order 16 bit hex for v6)
659 * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is
660 * currently the same
595 * 661 *
596 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 662 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
597 * function pointers are really function descriptors, which contain a 663 * function pointers are really function descriptors, which contain a
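Usage of the new specifiers, as a hedged sketch (the addresses are made up):

	u8 mac[6] = { 0x00, 0x16, 0x3e, 0x01, 0x02, 0x03 };
	u8 ip[4]  = { 192, 168, 0, 1 };

	printk(KERN_INFO "dev %pM addr %pI4\n", mac, ip);
	/* -> "dev 00:16:3e:01:02:03 addr 192.168.0.1" */
	printk(KERN_INFO "raw %pm\n", mac);
	/* -> "raw 00163e010203"; SPECIAL suppresses the colons */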
@@ -599,6 +665,9 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
599 */ 665 */
600static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) 666static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
601{ 667{
668 if (!ptr)
669 return string(buf, end, "(null)", field_width, precision, flags);
670
602 switch (*fmt) { 671 switch (*fmt) {
603 case 'F': 672 case 'F':
604 ptr = dereference_function_descriptor(ptr); 673 ptr = dereference_function_descriptor(ptr);
@@ -607,6 +676,21 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
607 return symbol_string(buf, end, ptr, field_width, precision, flags); 676 return symbol_string(buf, end, ptr, field_width, precision, flags);
608 case 'R': 677 case 'R':
609 return resource_string(buf, end, ptr, field_width, precision, flags); 678 return resource_string(buf, end, ptr, field_width, precision, flags);
679 case 'm':
680 flags |= SPECIAL;
681 /* Fallthrough */
682 case 'M':
683 return mac_address_string(buf, end, ptr, field_width, precision, flags);
684 case 'i':
685 flags |= SPECIAL;
686 /* Fallthrough */
687 case 'I':
688 if (fmt[1] == '6')
689 return ip6_addr_string(buf, end, ptr, field_width, precision, flags);
690 if (fmt[1] == '4')
691 return ip4_addr_string(buf, end, ptr, field_width, precision, flags);
692 flags &= ~SPECIAL;
693 break;
610 } 694 }
611 flags |= SMALL; 695 flags |= SMALL;
612 if (field_width == -1) { 696 if (field_width == -1) {
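The NULL check added at the top of pointer() makes every %p extension safe against a NULL argument:

	printk(KERN_DEBUG "mac %pM\n", NULL);	/* prints "mac (null)" */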