Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                 6
-rw-r--r--   lib/Kconfig.debug          92
-rw-r--r--   lib/Makefile                6
-rw-r--r--   lib/bitmap.c               33
-rw-r--r--   lib/bug.c                  19
-rw-r--r--   lib/cpumask.c              79
-rw-r--r--   lib/debugobjects.c          4
-rw-r--r--   lib/dynamic_printk.c      420
-rw-r--r--   lib/idr.c                  22
-rw-r--r--   lib/iommu-helper.c          9
-rw-r--r--   lib/is_single_threaded.c   45
-rw-r--r--   lib/kobject.c              35
-rw-r--r--   lib/libcrc32c.c           182
-rw-r--r--   lib/percpu_counter.c        7
-rw-r--r--   lib/scatterlist.c           2
-rw-r--r--   lib/string_helpers.c       34
-rw-r--r--   lib/swiotlb.c             269
-rw-r--r--   lib/vsprintf.c            361
18 files changed, 1238 insertions(+), 387 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index c7ad7a5b3535..fd4118e097f0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -8,10 +8,10 @@ config BITREVERSE
8 tristate 8 tristate
9 9
10config GENERIC_FIND_FIRST_BIT 10config GENERIC_FIND_FIRST_BIT
11 def_bool n 11 bool
12 12
13config GENERIC_FIND_NEXT_BIT 13config GENERIC_FIND_NEXT_BIT
14 def_bool n 14 bool
15 15
16config CRC_CCITT 16config CRC_CCITT
17 tristate "CRC-CCITT functions" 17 tristate "CRC-CCITT functions"
@@ -64,6 +64,8 @@ config CRC7
64 64
65config LIBCRC32C 65config LIBCRC32C
66 tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check" 66 tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
67 select CRYPTO
68 select CRYPTO_CRC32C
67 help 69 help
68 This option is provided for the case where no in-kernel-tree 70 This option is provided for the case where no in-kernel-tree
69 modules require CRC32c functions, but a module built outside the 71 modules require CRC32c functions, but a module built outside the
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index aa81d2848448..2e75478e9c69 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
252 timer routines to track the life time of timer objects and 252 timer routines to track the life time of timer objects and
253 validate the timer operations. 253 validate the timer operations.
254 254
255config DEBUG_OBJECTS_ENABLE_DEFAULT
256 int "debug_objects bootup default value (0-1)"
257 range 0 1
258 default "1"
259 depends on DEBUG_OBJECTS
260 help
261 Debug objects boot parameter default value
262
255config DEBUG_SLAB 263config DEBUG_SLAB
256 bool "Debug slab memory allocations" 264 bool "Debug slab memory allocations"
257 depends on DEBUG_KERNEL && SLAB 265 depends on DEBUG_KERNEL && SLAB
@@ -545,6 +553,16 @@ config DEBUG_SG
545 553
546 If unsure, say N. 554 If unsure, say N.
547 555
556config DEBUG_NOTIFIERS
557 bool "Debug notifier call chains"
558 depends on DEBUG_KERNEL
559 help
560 Enable this to turn on sanity checking for notifier call chains.
561 This is most useful for kernel developers to make sure that
562 modules properly unregister themselves from notifier chains.
563 This is a relatively cheap check but if you care about maximum
564 performance, say N.
565
548config FRAME_POINTER 566config FRAME_POINTER
549 bool "Compile the kernel with frame pointers" 567 bool "Compile the kernel with frame pointers"
550 depends on DEBUG_KERNEL && \ 568 depends on DEBUG_KERNEL && \
@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR
619 637
620 Say N if you are unsure. 638 Say N if you are unsure.
621 639
640config RCU_CPU_STALL_DETECTOR
641 bool "Check for stalled CPUs delaying RCU grace periods"
642 depends on CLASSIC_RCU || TREE_RCU
643 default n
644 help
645 This option causes RCU to printk information on which
646 CPUs are delaying the current grace period, but only when
647 the grace period extends for excessive time periods.
648
649 Say Y if you want RCU to perform such checks.
650
651 Say N if you are unsure.
652
622config KPROBES_SANITY_TEST 653config KPROBES_SANITY_TEST
623 bool "Kprobes sanity tests" 654 bool "Kprobes sanity tests"
624 depends on DEBUG_KERNEL 655 depends on DEBUG_KERNEL
@@ -652,6 +683,11 @@ config DEBUG_BLOCK_EXT_DEVT
652 depends on BLOCK 683 depends on BLOCK
653 default n 684 default n
654 help 685 help
686 BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
687 SOME DISTRIBUTIONS. DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
688 YOU ARE DOING. Distros, please enable this and fix whatever
689 is broken.
690
655 Conventionally, block device numbers are allocated from 691 Conventionally, block device numbers are allocated from
656 predetermined contiguous area. However, extended block area 692 predetermined contiguous area. However, extended block area
657 may introduce non-contiguous block device numbers. This 693 may introduce non-contiguous block device numbers. This
@@ -694,6 +730,7 @@ config FAULT_INJECTION
694config FAILSLAB 730config FAILSLAB
695 bool "Fault-injection capability for kmalloc" 731 bool "Fault-injection capability for kmalloc"
696 depends on FAULT_INJECTION 732 depends on FAULT_INJECTION
733 depends on SLAB || SLUB
697 help 734 help
698 Provide fault-injection capability for kmalloc. 735 Provide fault-injection capability for kmalloc.
699 736
@@ -807,6 +844,61 @@ menuconfig BUILD_DOCSRC
807 844
808 Say N if you are unsure. 845 Say N if you are unsure.
809 846
847config DYNAMIC_PRINTK_DEBUG
848 bool "Enable dynamic printk() call support"
849 default n
850 depends on PRINTK
851 select PRINTK_DEBUG
852 help
853
854 Compiles debug level messages into the kernel, which would not
855 otherwise be available at runtime. These messages can then be
856 enabled/disabled on a per module basis. This mechanism implicitly
857 enables all pr_debug() and dev_dbg() calls. The impact of this
858 compile option is a larger kernel text size of about 2%.
859
860 Usage:
861
862 Dynamic debugging is controlled by the debugfs file,
863 dynamic_printk/modules. This file contains a list of the modules that
864 can be enabled. The format of the file is the module name, followed
865 by a set of flags that can be enabled. The first flag is always the
866 'enabled' flag. For example:
867
868 <module_name> <enabled=0/1>
869 .
870 .
871 .
872
873 <module_name> : Name of the module in which the debug call resides
874 <enabled=0/1> : whether the messages are enabled or not
875
876 From a live system:
877
878 snd_hda_intel enabled=0
879 fixup enabled=0
880 driver enabled=0
881
882 Enable a module:
883
884 $echo "set enabled=1 <module_name>" > dynamic_printk/modules
885
886 Disable a module:
887
888 $echo "set enabled=0 <module_name>" > dynamic_printk/modules
889
890 Enable all modules:
891
892 $echo "set enabled=1 all" > dynamic_printk/modules
893
894 Disable all modules:
895
896 $echo "set enabled=0 all" > dynamic_printk/modules
897
898 Finally, passing "dynamic_printk" at the command line enables
899 debugging for all modules. This mode can be turned off via the above
900 disable command.
901
810source "samples/Kconfig" 902source "samples/Kconfig"
811 903
812source "lib/Kconfig.kgdb" 904source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 44001af76a7d..80fe8a3ec12a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for some libs needed in the kernel. 2# Makefile for some libs needed in the kernel.
3# 3#
4 4
5ifdef CONFIG_FTRACE 5ifdef CONFIG_FUNCTION_TRACER
6ORIG_CFLAGS := $(KBUILD_CFLAGS) 6ORIG_CFLAGS := $(KBUILD_CFLAGS)
7KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 7KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
8endif 8endif
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o \ 11 rbtree.o radix-tree.o dump_stack.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o 14 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
15 15
16lib-$(CONFIG_MMU) += ioremap.o 16lib-$(CONFIG_MMU) += ioremap.o
17lib-$(CONFIG_SMP) += cpumask.o 17lib-$(CONFIG_SMP) += cpumask.o
@@ -81,6 +81,8 @@ obj-$(CONFIG_HAVE_LMB) += lmb.o
81 81
82obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o 82obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
83 83
84obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o
85
84hostprogs-y := gen_crc32table 86hostprogs-y := gen_crc32table
85clean-files := crc32table.h 87clean-files := crc32table.h
86 88
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06fb57c86de0..1338469ac849 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,17 +316,6 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
316EXPORT_SYMBOL(bitmap_scnprintf); 316EXPORT_SYMBOL(bitmap_scnprintf);
317 317
318/** 318/**
319 * bitmap_scnprintf_len - return buffer length needed to convert
320 * bitmap to an ASCII hex string
321 * @nr_bits: number of bits to be converted
322 */
323int bitmap_scnprintf_len(unsigned int nr_bits)
324{
325 unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;
326 return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
327}
328
329/**
330 * __bitmap_parse - convert an ASCII hex string into a bitmap. 319 * __bitmap_parse - convert an ASCII hex string into a bitmap.
331 * @buf: pointer to buffer containing string. 320 * @buf: pointer to buffer containing string.
332 * @buflen: buffer size in bytes. If string is smaller than this 321 * @buflen: buffer size in bytes. If string is smaller than this
@@ -1007,3 +996,25 @@ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
1007 return 0; 996 return 0;
1008} 997}
1009EXPORT_SYMBOL(bitmap_allocate_region); 998EXPORT_SYMBOL(bitmap_allocate_region);
999
1000/**
1001 * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
1002 * @dst: destination buffer
1003 * @src: bitmap to copy
1004 * @nbits: number of bits in the bitmap
1005 *
1006 * Require nbits % BITS_PER_LONG == 0.
1007 */
1008void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
1009{
1010 unsigned long *d = dst;
1011 int i;
1012
1013 for (i = 0; i < nbits/BITS_PER_LONG; i++) {
1014 if (BITS_PER_LONG == 64)
1015 d[i] = cpu_to_le64(src[i]);
1016 else
1017 d[i] = cpu_to_le32(src[i]);
1018 }
1019}
1020EXPORT_SYMBOL(bitmap_copy_le);
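A minimal usage sketch for the new helper (caller name hypothetical, and assuming the matching declaration lands in <linux/bitmap.h>): fill a CPU-order bitmap and serialise it into a little-endian buffer, e.g. before handing it to a device or firmware interface. As the kernel-doc above notes, nbits must be a multiple of BITS_PER_LONG.

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_NBITS 64	/* must be a multiple of BITS_PER_LONG */

/* hypothetical caller: export a bitmap in little-endian word order */
static void example_export_bitmap(void *le_buf)
{
	DECLARE_BITMAP(map, EXAMPLE_NBITS);

	bitmap_zero(map, EXAMPLE_NBITS);
	__set_bit(3, map);
	__set_bit(40, map);

	bitmap_copy_le(le_buf, map, EXAMPLE_NBITS);
}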
diff --git a/lib/bug.c b/lib/bug.c
index bfeafd60ee9f..300e41afbf97 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
5 5
6 CONFIG_BUG - emit BUG traps. Nothing happens without this. 6 CONFIG_BUG - emit BUG traps. Nothing happens without this.
7 CONFIG_GENERIC_BUG - enable this code. 7 CONFIG_GENERIC_BUG - enable this code.
8 CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
9 the containing struct bug_entry for bug_addr and file.
8 CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG 10 CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
9 11
10 CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable 12 CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -43,6 +45,15 @@
43 45
44extern const struct bug_entry __start___bug_table[], __stop___bug_table[]; 46extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
45 47
48static inline unsigned long bug_addr(const struct bug_entry *bug)
49{
50#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
51 return bug->bug_addr;
52#else
53 return (unsigned long)bug + bug->bug_addr_disp;
54#endif
55}
56
46#ifdef CONFIG_MODULES 57#ifdef CONFIG_MODULES
47static LIST_HEAD(module_bug_list); 58static LIST_HEAD(module_bug_list);
48 59
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
55 unsigned i; 66 unsigned i;
56 67
57 for (i = 0; i < mod->num_bugs; ++i, ++bug) 68 for (i = 0; i < mod->num_bugs; ++i, ++bug)
58 if (bugaddr == bug->bug_addr) 69 if (bugaddr == bug_addr(bug))
59 return bug; 70 return bug;
60 } 71 }
61 return NULL; 72 return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
108 const struct bug_entry *bug; 119 const struct bug_entry *bug;
109 120
110 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) 121 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
111 if (bugaddr == bug->bug_addr) 122 if (bugaddr == bug_addr(bug))
112 return bug; 123 return bug;
113 124
114 return module_find_bug(bugaddr); 125 return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
133 144
134 if (bug) { 145 if (bug) {
135#ifdef CONFIG_DEBUG_BUGVERBOSE 146#ifdef CONFIG_DEBUG_BUGVERBOSE
147#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
136 file = bug->file; 148 file = bug->file;
149#else
150 file = (const char *)bug + bug->file_disp;
151#endif
137 line = bug->line; 152 line = bug->line;
138#endif 153#endif
139 warning = (bug->flags & BUGFLAG_WARNING) != 0; 154 warning = (bug->flags & BUGFLAG_WARNING) != 0;
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5f97dc25ef9c..8d03f22c6ced 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -2,6 +2,7 @@
2#include <linux/bitops.h> 2#include <linux/bitops.h>
3#include <linux/cpumask.h> 3#include <linux/cpumask.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/bootmem.h>
5 6
6int __first_cpu(const cpumask_t *srcp) 7int __first_cpu(const cpumask_t *srcp)
7{ 8{
@@ -35,3 +36,81 @@ int __any_online_cpu(const cpumask_t *mask)
35 return cpu; 36 return cpu;
36} 37}
37EXPORT_SYMBOL(__any_online_cpu); 38EXPORT_SYMBOL(__any_online_cpu);
39
40/**
41 * cpumask_next_and - get the next cpu in *src1p & *src2p
42 * @n: the cpu prior to the place to search (ie. return will be > @n)
43 * @src1p: the first cpumask pointer
44 * @src2p: the second cpumask pointer
45 *
46 * Returns >= nr_cpu_ids if no further cpus set in both.
47 */
48int cpumask_next_and(int n, const struct cpumask *src1p,
49 const struct cpumask *src2p)
50{
51 while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
52 if (cpumask_test_cpu(n, src2p))
53 break;
54 return n;
55}
56EXPORT_SYMBOL(cpumask_next_and);
57
58/**
59 * cpumask_any_but - return a "random" in a cpumask, but not this one.
60 * @mask: the cpumask to search
61 * @cpu: the cpu to ignore.
62 *
63 * Often used to find any cpu but smp_processor_id() in a mask.
64 * Returns >= nr_cpu_ids if no cpus set.
65 */
66int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
67{
68 unsigned int i;
69
70 cpumask_check(cpu);
71 for_each_cpu(i, mask)
72 if (i != cpu)
73 break;
74 return i;
75}
76
77/* These are not inline because of header tangles. */
78#ifdef CONFIG_CPUMASK_OFFSTACK
79bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
80{
81 if (likely(slab_is_available()))
82 *mask = kmalloc(cpumask_size(), flags);
83 else {
84#ifdef CONFIG_DEBUG_PER_CPU_MAPS
85 printk(KERN_ERR
86 "=> alloc_cpumask_var: kmalloc not available!\n");
87 dump_stack();
88#endif
89 *mask = NULL;
90 }
91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
92 if (!*mask) {
93 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
94 dump_stack();
95 }
96#endif
97 return *mask != NULL;
98}
99EXPORT_SYMBOL(alloc_cpumask_var);
100
101void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
102{
103 *mask = alloc_bootmem(cpumask_size());
104}
105
106void free_cpumask_var(cpumask_var_t mask)
107{
108 kfree(mask);
109}
110EXPORT_SYMBOL(free_cpumask_var);
111
112void __init free_bootmem_cpumask_var(cpumask_var_t mask)
113{
114 free_bootmem((unsigned long)mask, cpumask_size());
115}
116#endif
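A short sketch of the allocation pattern the CONFIG_CPUMASK_OFFSTACK helpers above enable (function name hypothetical; assumes the rest of the new cpumask API from the same series, e.g. cpumask_copy() and cpu_online_mask, is available): allocate a temporary mask, pick any online CPU other than the current one with cpumask_any_but(), then free the mask.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/smp.h>

/* hypothetical caller: any online CPU other than this one, or -ENODEV */
static int example_pick_other_cpu(void)
{
	cpumask_var_t tmp;
	int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_online_mask);
	cpu = cpumask_any_but(tmp, smp_processor_id()); /* >= nr_cpu_ids if none */
	free_cpumask_var(tmp);

	return cpu < nr_cpu_ids ? cpu : -ENODEV;
}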
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e3ab374e1334..5d99be1fd988 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache;
45static int debug_objects_maxchain __read_mostly; 45static int debug_objects_maxchain __read_mostly;
46static int debug_objects_fixups __read_mostly; 46static int debug_objects_fixups __read_mostly;
47static int debug_objects_warnings __read_mostly; 47static int debug_objects_warnings __read_mostly;
48static int debug_objects_enabled __read_mostly; 48static int debug_objects_enabled __read_mostly
49 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
50
49static struct debug_obj_descr *descr_test __read_mostly; 51static struct debug_obj_descr *descr_test __read_mostly;
50 52
51static int __init enable_object_debug(char *str) 53static int __init enable_object_debug(char *str)
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
new file mode 100644
index 000000000000..8e30295e8566
--- /dev/null
+++ b/lib/dynamic_printk.c
@@ -0,0 +1,420 @@
1/*
2 * lib/dynamic_printk.c
3 *
 4 * make pr_debug()/dev_dbg() calls runtime configurable based upon
 5 * their source module.
6 *
7 * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/uaccess.h>
13#include <linux/seq_file.h>
14#include <linux/debugfs.h>
15#include <linux/fs.h>
16
17extern struct mod_debug __start___verbose[];
18extern struct mod_debug __stop___verbose[];
19
20struct debug_name {
21 struct hlist_node hlist;
22 struct hlist_node hlist2;
23 int hash1;
24 int hash2;
25 char *name;
26 int enable;
27 int type;
28};
29
30static int nr_entries;
31static int num_enabled;
32int dynamic_enabled = DYNAMIC_ENABLED_NONE;
33static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] =
34 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
35static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] =
36 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
37static DECLARE_MUTEX(debug_list_mutex);
38
39/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
40 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
41 * use independent hash functions, to reduce the chance of false positives.
42 */
43long long dynamic_printk_enabled;
44EXPORT_SYMBOL_GPL(dynamic_printk_enabled);
45long long dynamic_printk_enabled2;
46EXPORT_SYMBOL_GPL(dynamic_printk_enabled2);
47
48/* returns the debug module pointer. */
49static struct debug_name *find_debug_module(char *module_name)
50{
51 int i;
52 struct hlist_head *head;
53 struct hlist_node *node;
54 struct debug_name *element;
55
56 element = NULL;
57 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
58 head = &module_table[i];
59 hlist_for_each_entry_rcu(element, node, head, hlist)
60 if (!strcmp(element->name, module_name))
61 return element;
62 }
63 return NULL;
64}
65
66/* returns the debug module pointer. */
67static struct debug_name *find_debug_module_hash(char *module_name, int hash)
68{
69 struct hlist_head *head;
70 struct hlist_node *node;
71 struct debug_name *element;
72
73 element = NULL;
74 head = &module_table[hash];
75 hlist_for_each_entry_rcu(element, node, head, hlist)
76 if (!strcmp(element->name, module_name))
77 return element;
78 return NULL;
79}
80
81/* caller must hold mutex*/
82static int __add_debug_module(char *mod_name, int hash, int hash2)
83{
84 struct debug_name *new;
85 char *module_name;
86 int ret = 0;
87
88 if (find_debug_module(mod_name)) {
89 ret = -EINVAL;
90 goto out;
91 }
92 module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL);
93 if (!module_name) {
94 ret = -ENOMEM;
95 goto out;
96 }
97 module_name = strcpy(module_name, mod_name);
98 module_name[strlen(mod_name)] = '\0';
99 new = kzalloc(sizeof(struct debug_name), GFP_KERNEL);
100 if (!new) {
101 kfree(module_name);
102 ret = -ENOMEM;
103 goto out;
104 }
105 INIT_HLIST_NODE(&new->hlist);
106 INIT_HLIST_NODE(&new->hlist2);
107 new->name = module_name;
108 new->hash1 = hash;
109 new->hash2 = hash2;
110 hlist_add_head_rcu(&new->hlist, &module_table[hash]);
111 hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]);
112 nr_entries++;
113out:
114 return ret;
115}
116
117int unregister_dynamic_debug_module(char *mod_name)
118{
119 struct debug_name *element;
120 int ret = 0;
121
122 down(&debug_list_mutex);
123 element = find_debug_module(mod_name);
124 if (!element) {
125 ret = -EINVAL;
126 goto out;
127 }
128 hlist_del_rcu(&element->hlist);
129 hlist_del_rcu(&element->hlist2);
130 synchronize_rcu();
131 kfree(element->name);
132 if (element->enable)
133 num_enabled--;
134 kfree(element);
135 nr_entries--;
136out:
137 up(&debug_list_mutex);
138 return ret;
139}
140EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
141
142int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
143 char *flags, int hash, int hash2)
144{
145 struct debug_name *elem;
146 int ret = 0;
147
148 down(&debug_list_mutex);
149 elem = find_debug_module(mod_name);
150 if (!elem) {
151 if (__add_debug_module(mod_name, hash, hash2))
152 goto out;
153 elem = find_debug_module(mod_name);
154 if (dynamic_enabled == DYNAMIC_ENABLED_ALL &&
155 !strcmp(mod_name, share_name)) {
156 elem->enable = true;
157 num_enabled++;
158 }
159 }
160 elem->type |= type;
161out:
162 up(&debug_list_mutex);
163 return ret;
164}
165EXPORT_SYMBOL_GPL(register_dynamic_debug_module);
166
167int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash)
168{
169 struct debug_name *elem;
170 int ret = 0;
171
172 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
173 return 1;
174 rcu_read_lock();
175 elem = find_debug_module_hash(mod_name, hash);
176 if (elem && elem->enable)
177 ret = 1;
178 rcu_read_unlock();
179 return ret;
180}
181EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper);
182
183static void set_all(bool enable)
184{
185 struct debug_name *e;
186 struct hlist_node *node;
187 int i;
188 long long enable_mask;
189
190 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
191 if (module_table[i].first != NULL) {
192 hlist_for_each_entry(e, node, &module_table[i], hlist) {
193 e->enable = enable;
194 }
195 }
196 }
197 if (enable)
198 enable_mask = ULLONG_MAX;
199 else
200 enable_mask = 0;
201 dynamic_printk_enabled = enable_mask;
202 dynamic_printk_enabled2 = enable_mask;
203}
204
205static int disabled_hash(int i, bool first_table)
206{
207 struct debug_name *e;
208 struct hlist_node *node;
209
210 if (first_table) {
211 hlist_for_each_entry(e, node, &module_table[i], hlist) {
212 if (e->enable)
213 return 0;
214 }
215 } else {
216 hlist_for_each_entry(e, node, &module_table2[i], hlist2) {
217 if (e->enable)
218 return 0;
219 }
220 }
221 return 1;
222}
223
224static ssize_t pr_debug_write(struct file *file, const char __user *buf,
225 size_t length, loff_t *ppos)
226{
227 char *buffer, *s, *value_str, *setting_str;
228 int err, value;
229 struct debug_name *elem = NULL;
230 int all = 0;
231
232 if (length > PAGE_SIZE || length < 0)
233 return -EINVAL;
234
235 buffer = (char *)__get_free_page(GFP_KERNEL);
236 if (!buffer)
237 return -ENOMEM;
238
239 err = -EFAULT;
240 if (copy_from_user(buffer, buf, length))
241 goto out;
242
243 err = -EINVAL;
244 if (length < PAGE_SIZE)
245 buffer[length] = '\0';
246 else if (buffer[PAGE_SIZE-1])
247 goto out;
248
249 err = -EINVAL;
250 down(&debug_list_mutex);
251
252 if (strncmp("set", buffer, 3))
253 goto out_up;
254 s = buffer + 3;
255 setting_str = strsep(&s, "=");
256 if (s == NULL)
257 goto out_up;
258 setting_str = strstrip(setting_str);
259 value_str = strsep(&s, " ");
260 if (s == NULL)
261 goto out_up;
262 s = strstrip(s);
263 if (!strncmp(s, "all", 3))
264 all = 1;
265 else
266 elem = find_debug_module(s);
267 if (!strncmp(setting_str, "enable", 6)) {
268 value = !!simple_strtol(value_str, NULL, 10);
269 if (all) {
270 if (value) {
271 set_all(true);
272 num_enabled = nr_entries;
273 dynamic_enabled = DYNAMIC_ENABLED_ALL;
274 } else {
275 set_all(false);
276 num_enabled = 0;
277 dynamic_enabled = DYNAMIC_ENABLED_NONE;
278 }
279 err = 0;
280 } else {
281 if (elem) {
282 if (value && (elem->enable == 0)) {
283 dynamic_printk_enabled |=
284 (1LL << elem->hash1);
285 dynamic_printk_enabled2 |=
286 (1LL << elem->hash2);
287 elem->enable = 1;
288 num_enabled++;
289 dynamic_enabled = DYNAMIC_ENABLED_SOME;
290 err = 0;
291 printk(KERN_DEBUG
292 "debugging enabled for module %s\n",
293 elem->name);
294 } else if (!value && (elem->enable == 1)) {
295 elem->enable = 0;
296 num_enabled--;
297 if (disabled_hash(elem->hash1, true))
298 dynamic_printk_enabled &=
299 ~(1LL << elem->hash1);
300 if (disabled_hash(elem->hash2, false))
301 dynamic_printk_enabled2 &=
302 ~(1LL << elem->hash2);
303 if (num_enabled)
304 dynamic_enabled =
305 DYNAMIC_ENABLED_SOME;
306 else
307 dynamic_enabled =
308 DYNAMIC_ENABLED_NONE;
309 err = 0;
310 printk(KERN_DEBUG
311 "debugging disabled for module "
312 "%s\n", elem->name);
313 }
314 }
315 }
316 }
317 if (!err)
318 err = length;
319out_up:
320 up(&debug_list_mutex);
321out:
322 free_page((unsigned long)buffer);
323 return err;
324}
325
326static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos)
327{
328 return (*pos < DEBUG_HASH_TABLE_SIZE) ? pos : NULL;
329}
330
331static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos)
332{
333 (*pos)++;
334 if (*pos >= DEBUG_HASH_TABLE_SIZE)
335 return NULL;
336 return pos;
337}
338
339static void pr_debug_seq_stop(struct seq_file *s, void *v)
340{
341 /* Nothing to do */
342}
343
344static int pr_debug_seq_show(struct seq_file *s, void *v)
345{
346 struct hlist_head *head;
347 struct hlist_node *node;
348 struct debug_name *elem;
349 unsigned int i = *(loff_t *) v;
350
351 rcu_read_lock();
352 head = &module_table[i];
353 hlist_for_each_entry_rcu(elem, node, head, hlist) {
354 seq_printf(s, "%s enabled=%d", elem->name, elem->enable);
355 seq_printf(s, "\n");
356 }
357 rcu_read_unlock();
358 return 0;
359}
360
361static struct seq_operations pr_debug_seq_ops = {
362 .start = pr_debug_seq_start,
363 .next = pr_debug_seq_next,
364 .stop = pr_debug_seq_stop,
365 .show = pr_debug_seq_show
366};
367
368static int pr_debug_open(struct inode *inode, struct file *filp)
369{
370 return seq_open(filp, &pr_debug_seq_ops);
371}
372
373static const struct file_operations pr_debug_operations = {
374 .open = pr_debug_open,
375 .read = seq_read,
376 .write = pr_debug_write,
377 .llseek = seq_lseek,
378 .release = seq_release,
379};
380
381static int __init dynamic_printk_init(void)
382{
383 struct dentry *dir, *file;
384 struct mod_debug *iter;
385 unsigned long value;
386
387 dir = debugfs_create_dir("dynamic_printk", NULL);
388 if (!dir)
389 return -ENOMEM;
390 file = debugfs_create_file("modules", 0644, dir, NULL,
391 &pr_debug_operations);
392 if (!file) {
393 debugfs_remove(dir);
394 return -ENOMEM;
395 }
396 for (value = (unsigned long)__start___verbose;
397 value < (unsigned long)__stop___verbose;
398 value += sizeof(struct mod_debug)) {
399 iter = (struct mod_debug *)value;
400 register_dynamic_debug_module(iter->modname,
401 iter->type,
402 iter->logical_modname,
403 iter->flag_names, iter->hash, iter->hash2);
404 }
405 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
406 set_all(true);
407 return 0;
408}
409module_init(dynamic_printk_init);
410/* may want to move this earlier so we can get traces as early as possible */
411
412static int __init dynamic_printk_setup(char *str)
413{
414 if (str)
415 return -ENOENT;
416 dynamic_enabled = DYNAMIC_ENABLED_ALL;
417 return 0;
418}
419/* Use early_param(), so we can get debug output as early as possible */
420early_param("dynamic_printk", dynamic_printk_setup);
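For context, a hypothetical driver snippet showing what the option buys: with CONFIG_DYNAMIC_PRINTK_DEBUG=y the call below is compiled in but stays silent until the module is switched on through the debugfs file described in the Kconfig help above.

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	/* silent until: echo "set enabled=1 example" > dynamic_printk/modules */
	pr_debug("example: loaded, debug output is now runtime-switchable\n");
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");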
diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4d..1c4f9281f412 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
185 new = get_from_free_list(idp); 185 new = get_from_free_list(idp);
186 if (!new) 186 if (!new)
187 return -1; 187 return -1;
188 new->layer = l-1;
188 rcu_assign_pointer(p->ary[m], new); 189 rcu_assign_pointer(p->ary[m], new);
189 p->count++; 190 p->count++;
190 } 191 }
@@ -210,6 +211,7 @@ build_up:
210 if (unlikely(!p)) { 211 if (unlikely(!p)) {
211 if (!(p = get_from_free_list(idp))) 212 if (!(p = get_from_free_list(idp)))
212 return -1; 213 return -1;
214 p->layer = 0;
213 layers = 1; 215 layers = 1;
214 } 216 }
215 /* 217 /*
@@ -218,8 +220,14 @@ build_up:
218 */ 220 */
219 while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { 221 while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
220 layers++; 222 layers++;
221 if (!p->count) 223 if (!p->count) {
224 /* special case: if the tree is currently empty,
225 * then we grow the tree by moving the top node
226 * upwards.
227 */
228 p->layer++;
222 continue; 229 continue;
230 }
223 if (!(new = get_from_free_list(idp))) { 231 if (!(new = get_from_free_list(idp))) {
224 /* 232 /*
225 * The allocation failed. If we built part of 233 * The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
237 } 245 }
238 new->ary[0] = p; 246 new->ary[0] = p;
239 new->count = 1; 247 new->count = 1;
248 new->layer = layers-1;
240 if (p->bitmap == IDR_FULL) 249 if (p->bitmap == IDR_FULL)
241 __set_bit(0, &new->bitmap); 250 __set_bit(0, &new->bitmap);
242 p = new; 251 p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
493 int n; 502 int n;
494 struct idr_layer *p; 503 struct idr_layer *p;
495 504
496 n = idp->layers * IDR_BITS;
497 p = rcu_dereference(idp->top); 505 p = rcu_dereference(idp->top);
506 if (!p)
507 return NULL;
508 n = (p->layer+1) * IDR_BITS;
498 509
499 /* Mask off upper bits we don't use for the search. */ 510 /* Mask off upper bits we don't use for the search. */
500 id &= MAX_ID_MASK; 511 id &= MAX_ID_MASK;
501 512
502 if (id >= (1 << n)) 513 if (id >= (1 << n))
503 return NULL; 514 return NULL;
515 BUG_ON(n == 0);
504 516
505 while (n > 0 && p) { 517 while (n > 0 && p) {
506 n -= IDR_BITS; 518 n -= IDR_BITS;
519 BUG_ON(n != p->layer*IDR_BITS);
507 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 520 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
508 } 521 }
509 return((void *)p); 522 return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
582 int n; 595 int n;
583 struct idr_layer *p, *old_p; 596 struct idr_layer *p, *old_p;
584 597
585 n = idp->layers * IDR_BITS;
586 p = idp->top; 598 p = idp->top;
599 if (!p)
600 return ERR_PTR(-EINVAL);
601
602 n = (p->layer+1) * IDR_BITS;
587 603
588 id &= MAX_ID_MASK; 604 id &= MAX_ID_MASK;
589 605
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 5d90074dca75..75dbda03f4fb 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -79,3 +79,12 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
79 } 79 }
80} 80}
81EXPORT_SYMBOL(iommu_area_free); 81EXPORT_SYMBOL(iommu_area_free);
82
83unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
84 unsigned long io_page_size)
85{
86 unsigned long size = (addr & (io_page_size - 1)) + len;
87
88 return DIV_ROUND_UP(size, io_page_size);
89}
90EXPORT_SYMBOL(iommu_num_pages);
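A worked example of the new helper (header location assumed, caller name hypothetical): a buffer that starts 0x800 bytes into a 4 KiB IO page and is 0x2100 bytes long occupies 0x800 + 0x2100 = 0x2900 bytes of IO-page-aligned space, which rounds up to three 4 KiB IO pages.

#include <linux/iommu-helper.h>

static unsigned long example_npages(void)
{
	/* (0x800 + 0x2100) rounded up to 4 KiB pages == 3 */
	return iommu_num_pages(0x12340800UL, 0x2100, 4096);
}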
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
new file mode 100644
index 000000000000..f1ed2fe76c65
--- /dev/null
+++ b/lib/is_single_threaded.c
@@ -0,0 +1,45 @@
1/* Function to determine if a thread group is single threaded or not
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from security/selinux/hooks.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public Licence
9 * as published by the Free Software Foundation; either version
10 * 2 of the Licence, or (at your option) any later version.
11 */
12
13#include <linux/sched.h>
14
15/**
16 * is_single_threaded - Determine if a thread group is single-threaded or not
17 * @p: A task in the thread group in question
18 *
19 * This returns true if the thread group to which a task belongs is single
20 * threaded, false if it is not.
21 */
22bool is_single_threaded(struct task_struct *p)
23{
24 struct task_struct *g, *t;
25 struct mm_struct *mm = p->mm;
26
27 if (atomic_read(&p->signal->count) != 1)
28 goto no;
29
30 if (atomic_read(&p->mm->mm_users) != 1) {
31 read_lock(&tasklist_lock);
32 do_each_thread(g, t) {
33 if (t->mm == mm && t != p)
34 goto no_unlock;
35 } while_each_thread(g, t);
36 read_unlock(&tasklist_lock);
37 }
38
39 return true;
40
41no_unlock:
42 read_unlock(&tasklist_lock);
43no:
44 return false;
45}
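A hypothetical caller of the kind the header comment alludes to (the SELinux-derived check): take a fast path only when no other thread in the group can race on the task's state. The declaration is assumed to live in <linux/sched.h>.

#include <linux/sched.h>

static bool example_can_take_fast_path(void)
{
	/* true only if current's thread group has exactly one thread */
	return is_single_threaded(current);
}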
diff --git a/lib/kobject.c b/lib/kobject.c
index fbf0ae282376..0487d1f64806 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -387,11 +387,17 @@ EXPORT_SYMBOL_GPL(kobject_init_and_add);
387 * kobject_rename - change the name of an object 387 * kobject_rename - change the name of an object
388 * @kobj: object in question. 388 * @kobj: object in question.
389 * @new_name: object's new name 389 * @new_name: object's new name
390 *
391 * It is the responsibility of the caller to provide mutual
392 * exclusion between two different calls of kobject_rename
393 * on the same kobject and to ensure that new_name is valid and
394 * won't conflict with other kobjects.
390 */ 395 */
391int kobject_rename(struct kobject *kobj, const char *new_name) 396int kobject_rename(struct kobject *kobj, const char *new_name)
392{ 397{
393 int error = 0; 398 int error = 0;
394 const char *devpath = NULL; 399 const char *devpath = NULL;
400 const char *dup_name = NULL, *name;
395 char *devpath_string = NULL; 401 char *devpath_string = NULL;
396 char *envp[2]; 402 char *envp[2];
397 403
@@ -401,19 +407,6 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
401 if (!kobj->parent) 407 if (!kobj->parent)
402 return -EINVAL; 408 return -EINVAL;
403 409
404 /* see if this name is already in use */
405 if (kobj->kset) {
406 struct kobject *temp_kobj;
407 temp_kobj = kset_find_obj(kobj->kset, new_name);
408 if (temp_kobj) {
409 printk(KERN_WARNING "kobject '%s' cannot be renamed "
410 "to '%s' as '%s' is already in existence.\n",
411 kobject_name(kobj), new_name, new_name);
412 kobject_put(temp_kobj);
413 return -EINVAL;
414 }
415 }
416
417 devpath = kobject_get_path(kobj, GFP_KERNEL); 410 devpath = kobject_get_path(kobj, GFP_KERNEL);
418 if (!devpath) { 411 if (!devpath) {
419 error = -ENOMEM; 412 error = -ENOMEM;
@@ -428,15 +421,27 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
428 envp[0] = devpath_string; 421 envp[0] = devpath_string;
429 envp[1] = NULL; 422 envp[1] = NULL;
430 423
424 name = dup_name = kstrdup(new_name, GFP_KERNEL);
425 if (!name) {
426 error = -ENOMEM;
427 goto out;
428 }
429
431 error = sysfs_rename_dir(kobj, new_name); 430 error = sysfs_rename_dir(kobj, new_name);
431 if (error)
432 goto out;
433
434 /* Install the new kobject name */
435 dup_name = kobj->name;
436 kobj->name = name;
432 437
433 /* This function is mostly/only used for network interface. 438 /* This function is mostly/only used for network interface.
434 * Some hotplug package track interfaces by their name and 439 * Some hotplug package track interfaces by their name and
435 * therefore want to know when the name is changed by the user. */ 440 * therefore want to know when the name is changed by the user. */
436 if (!error) 441 kobject_uevent_env(kobj, KOBJ_MOVE, envp);
437 kobject_uevent_env(kobj, KOBJ_MOVE, envp);
438 442
439out: 443out:
444 kfree(dup_name);
440 kfree(devpath_string); 445 kfree(devpath_string);
441 kfree(devpath); 446 kfree(devpath);
442 kobject_put(kobj); 447 kobject_put(kobj);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index b5c3287d8ea4..244f5480c898 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -30,168 +30,52 @@
30 * any later version. 30 * any later version.
31 * 31 *
32 */ 32 */
33#include <linux/crc32c.h>
34#include <linux/compiler.h>
35#include <linux/module.h>
36
37MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
38MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
39MODULE_LICENSE("GPL");
40 33
41#define CRC32C_POLY_BE 0x1EDC6F41 34#include <crypto/hash.h>
42#define CRC32C_POLY_LE 0x82F63B78 35#include <linux/err.h>
36#include <linux/init.h>
37#include <linux/kernel.h>
38#include <linux/module.h>
43 39
44#ifndef CRC_LE_BITS 40static struct crypto_shash *tfm;
45# define CRC_LE_BITS 8
46#endif
47 41
42u32 crc32c(u32 crc, const void *address, unsigned int length)
43{
44 struct {
45 struct shash_desc shash;
46 char ctx[crypto_shash_descsize(tfm)];
47 } desc;
48 int err;
48 49
49/* 50 desc.shash.tfm = tfm;
50 * Haven't generated a big-endian table yet, but the bit-wise version 51 desc.shash.flags = 0;
51 * should at least work. 52 *(u32 *)desc.ctx = crc;
52 */
53#if defined CRC_BE_BITS && CRC_BE_BITS != 1
54#undef CRC_BE_BITS
55#endif
56#ifndef CRC_BE_BITS
57# define CRC_BE_BITS 1
58#endif
59 53
60EXPORT_SYMBOL(crc32c_le); 54 err = crypto_shash_update(&desc.shash, address, length);
55 BUG_ON(err);
61 56
62#if CRC_LE_BITS == 1 57 return *(u32 *)desc.ctx;
63/*
64 * Compute things bit-wise, as done in crc32.c. We could share the tight
65 * loop below with crc32 and vary the POLY if we don't find value in terms
66 * of space and maintainability in keeping the two modules separate.
67 */
68u32 __pure
69crc32c_le(u32 crc, unsigned char const *p, size_t len)
70{
71 int i;
72 while (len--) {
73 crc ^= *p++;
74 for (i = 0; i < 8; i++)
75 crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY_LE : 0);
76 }
77 return crc;
78} 58}
79#else
80
81/*
82 * This is the CRC-32C table
83 * Generated with:
84 * width = 32 bits
85 * poly = 0x1EDC6F41
86 * reflect input bytes = true
87 * reflect output bytes = true
88 */
89
90static const u32 crc32c_table[256] = {
91 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
92 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
93 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
94 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
95 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
96 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
97 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
98 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
99 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
100 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
101 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
102 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
103 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
104 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
105 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
106 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
107 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
108 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
109 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
110 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
111 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
112 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
113 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
114 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
115 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
116 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
117 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
118 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
119 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
120 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
121 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
122 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
123 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
124 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
125 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
126 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
127 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
128 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
129 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
130 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
131 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
132 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
133 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
134 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
135 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
136 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
137 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
138 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
139 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
140 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
141 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
142 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
143 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
144 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
145 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
146 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
147 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
148 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
149 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
150 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
151 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
152 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
153 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
154 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
155};
156 59
157/* 60EXPORT_SYMBOL(crc32c);
158 * Steps through buffer one byte at at time, calculates reflected
159 * crc using table.
160 */
161 61
162u32 __pure 62static int __init libcrc32c_mod_init(void)
163crc32c_le(u32 crc, unsigned char const *data, size_t length)
164{ 63{
165 while (length--) 64 tfm = crypto_alloc_shash("crc32c", 0, 0);
166 crc = 65 if (IS_ERR(tfm))
167 crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); 66 return PTR_ERR(tfm);
168 67
169 return crc; 68 return 0;
170} 69}
171 70
172#endif /* CRC_LE_BITS == 8 */ 71static void __exit libcrc32c_mod_fini(void)
173
174EXPORT_SYMBOL(crc32c_be);
175
176#if CRC_BE_BITS == 1
177u32 __pure
178crc32c_be(u32 crc, unsigned char const *p, size_t len)
179{ 72{
180 int i; 73 crypto_free_shash(tfm);
181 while (len--) {
182 crc ^= *p++ << 24;
183 for (i = 0; i < 8; i++)
184 crc =
185 (crc << 1) ^ ((crc & 0x80000000) ? CRC32C_POLY_BE :
186 0);
187 }
188 return crc;
189} 74}
190#endif
191 75
192/* 76module_init(libcrc32c_mod_init);
193 * Unit test 77module_exit(libcrc32c_mod_fini);
194 * 78
195 * A small unit test suite is implemented as part of the crypto suite. 79MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
196 * Select CRYPTO_CRC32C and use the tcrypt module to run the tests. 80MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
197 */ 81MODULE_LICENSE("GPL");
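The exported interface is unchanged by the rewrite, so a minimal caller looks the same as before; the crypto layer is simply used underneath. Seed choice is protocol-specific (~0 shown here), and the caller name is hypothetical.

#include <linux/crc32c.h>

static u32 example_checksum(const void *buf, unsigned int len)
{
	/* conventional ~0 seed; any final inversion is up to the protocol */
	return crc32c(~0U, buf, len);
}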
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
62 for_each_online_cpu(cpu) { 62 for_each_online_cpu(cpu) {
63 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); 63 s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
64 ret += *pcount; 64 ret += *pcount;
65 *pcount = 0;
66 } 65 }
67 fbc->count = ret;
68
69 spin_unlock(&fbc->lock); 66 spin_unlock(&fbc->lock);
70 return ret; 67 return ret;
71} 68}
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
104 if (!fbc->counters) 101 if (!fbc->counters)
105 return; 102 return;
106 103
107 free_percpu(fbc->counters);
108 fbc->counters = NULL;
109#ifdef CONFIG_HOTPLUG_CPU 104#ifdef CONFIG_HOTPLUG_CPU
110 mutex_lock(&percpu_counters_lock); 105 mutex_lock(&percpu_counters_lock);
111 list_del(&fbc->list); 106 list_del(&fbc->list);
112 mutex_unlock(&percpu_counters_lock); 107 mutex_unlock(&percpu_counters_lock);
113#endif 108#endif
109 free_percpu(fbc->counters);
110 fbc->counters = NULL;
114} 111}
115EXPORT_SYMBOL(percpu_counter_destroy); 112EXPORT_SYMBOL(percpu_counter_destroy);
116 113
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
395 WARN_ON(!irqs_disabled()); 395 WARN_ON(!irqs_disabled());
396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); 396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
397 } else 397 } else
398 kunmap(miter->addr); 398 kunmap(miter->page);
399 399
400 miter->page = NULL; 400 miter->page = NULL;
401 miter->addr = NULL; 401 miter->addr = NULL;
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 8347925030ff..ab431d4cc970 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -23,7 +23,7 @@
23int string_get_size(u64 size, const enum string_size_units units, 23int string_get_size(u64 size, const enum string_size_units units,
24 char *buf, int len) 24 char *buf, int len)
25{ 25{
26 const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB", 26 const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
27 "EB", "ZB", "YB", NULL}; 27 "EB", "ZB", "YB", NULL};
28 const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", 28 const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
29 "EiB", "ZiB", "YiB", NULL }; 29 "EiB", "ZiB", "YiB", NULL };
@@ -31,7 +31,7 @@ int string_get_size(u64 size, const enum string_size_units units,
31 [STRING_UNITS_10] = units_10, 31 [STRING_UNITS_10] = units_10,
32 [STRING_UNITS_2] = units_2, 32 [STRING_UNITS_2] = units_2,
33 }; 33 };
34 const int divisor[] = { 34 const unsigned int divisor[] = {
35 [STRING_UNITS_10] = 1000, 35 [STRING_UNITS_10] = 1000,
36 [STRING_UNITS_2] = 1024, 36 [STRING_UNITS_2] = 1024,
37 }; 37 };
@@ -40,23 +40,27 @@ int string_get_size(u64 size, const enum string_size_units units,
40 char tmp[8]; 40 char tmp[8];
41 41
42 tmp[0] = '\0'; 42 tmp[0] = '\0';
43 i = 0;
44 if (size >= divisor[units]) {
45 while (size >= divisor[units] && units_str[units][i]) {
46 remainder = do_div(size, divisor[units]);
47 i++;
48 }
43 49
44 for (i = 0; size > divisor[units] && units_str[units][i]; i++) 50 sf_cap = size;
45 remainder = do_div(size, divisor[units]); 51 for (j = 0; sf_cap*10 < 1000; j++)
52 sf_cap *= 10;
46 53
47 sf_cap = size; 54 if (j) {
48 for (j = 0; sf_cap*10 < 1000; j++) 55 remainder *= 1000;
49 sf_cap *= 10; 56 do_div(remainder, divisor[units]);
50 57 snprintf(tmp, sizeof(tmp), ".%03lld",
51 if (j) { 58 (unsigned long long)remainder);
52 remainder *= 1000; 59 tmp[j+1] = '\0';
53 do_div(remainder, divisor[units]); 60 }
54 snprintf(tmp, sizeof(tmp), ".%03lld",
55 (unsigned long long)remainder);
56 tmp[j+1] = '\0';
57 } 61 }
58 62
59 snprintf(buf, len, "%lld%s%s", (unsigned long long)size, 63 snprintf(buf, len, "%lld%s %s", (unsigned long long)size,
60 tmp, units_str[units][i]); 64 tmp, units_str[units][i]);
61 65
62 return 0; 66 return 0;
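An illustrative call showing the two formatting fixes above (lower-case "kB" for the SI kilo prefix and a space before the unit); the output is traced from the code and the caller name is hypothetical.

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void example_print_size(void)
{
	char buf[16];

	/* 2,998,272 bytes with SI units -> "2.99 MB" (three significant figures) */
	string_get_size(2998272ULL, STRING_UNITS_10, buf, sizeof(buf));
	printk(KERN_INFO "size: %s\n", buf);
}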
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f8eebd489149..fa2dc4e5f9ba 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -21,9 +21,12 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/swiotlb.h>
24#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/swiotlb.h>
25#include <linux/types.h> 27#include <linux/types.h>
26#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/highmem.h>
27 30
28#include <asm/io.h> 31#include <asm/io.h>
29#include <asm/dma.h> 32#include <asm/dma.h>
@@ -36,22 +39,6 @@
36#define OFFSET(val,align) ((unsigned long) \ 39#define OFFSET(val,align) ((unsigned long) \
37 ( (val) & ( (align) - 1))) 40 ( (val) & ( (align) - 1)))
38 41
39#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
40#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
41
42/*
43 * Maximum allowable number of contiguous slabs to map,
44 * must be a power of 2. What is the appropriate value ?
45 * The complexity of {map,unmap}_single is linearly dependent on this value.
46 */
47#define IO_TLB_SEGSIZE 128
48
49/*
50 * log of the size of each IO TLB slab. The number of slabs is command line
51 * controllable.
52 */
53#define IO_TLB_SHIFT 11
54
55#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 42#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
56 43
57/* 44/*
@@ -102,7 +89,10 @@ static unsigned int io_tlb_index;
102 * We need to save away the original address corresponding to a mapped entry 89 * We need to save away the original address corresponding to a mapped entry
103 * for the sync operations. 90 * for the sync operations.
104 */ 91 */
105static unsigned char **io_tlb_orig_addr; 92static struct swiotlb_phys_addr {
93 struct page *page;
94 unsigned int offset;
95} *io_tlb_orig_addr;
106 96
107/* 97/*
108 * Protect the above data structures in the map and unmap calls 98 * Protect the above data structures in the map and unmap calls
@@ -126,6 +116,72 @@ setup_io_tlb_npages(char *str)
126__setup("swiotlb=", setup_io_tlb_npages); 116__setup("swiotlb=", setup_io_tlb_npages);
127/* make io_tlb_overflow tunable too? */ 117/* make io_tlb_overflow tunable too? */
128 118
119void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
120{
121 return alloc_bootmem_low_pages(size);
122}
123
124void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
125{
126 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
127}
128
129dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
130{
131 return paddr;
132}
133
134phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
135{
136 return baddr;
137}
138
139static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
140{
141 return swiotlb_phys_to_bus(virt_to_phys(address));
142}
143
144static void *swiotlb_bus_to_virt(dma_addr_t address)
145{
146 return phys_to_virt(swiotlb_bus_to_phys(address));
147}
148
149int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
150{
151 return 0;
152}
153
154static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
155{
156 return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
157}
158
159static void swiotlb_print_info(unsigned long bytes)
160{
161 phys_addr_t pstart, pend;
162 dma_addr_t bstart, bend;
163
164 pstart = virt_to_phys(io_tlb_start);
165 pend = virt_to_phys(io_tlb_end);
166
167 bstart = swiotlb_phys_to_bus(pstart);
168 bend = swiotlb_phys_to_bus(pend);
169
170 printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
171 bytes >> 20, io_tlb_start, io_tlb_end);
172 if (pstart != bstart || pend != bend)
173 printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
174 " bus %#llx - %#llx\n",
175 (unsigned long long)pstart,
176 (unsigned long long)pend,
177 (unsigned long long)bstart,
178 (unsigned long long)bend);
179 else
180 printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
181 (unsigned long long)pstart,
182 (unsigned long long)pend);
183}
184
129/* 185/*
130 * Statically reserve bounce buffer space and initialize bounce buffer data 186 * Statically reserve bounce buffer space and initialize bounce buffer data
131 * structures for the software IO TLB used to implement the DMA API. 187 * structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +201,7 @@ swiotlb_init_with_default_size(size_t default_size)
145 /* 201 /*
146 * Get IO TLB memory from the low pages 202 * Get IO TLB memory from the low pages
147 */ 203 */
148 io_tlb_start = alloc_bootmem_low_pages(bytes); 204 io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
149 if (!io_tlb_start) 205 if (!io_tlb_start)
150 panic("Cannot allocate SWIOTLB buffer"); 206 panic("Cannot allocate SWIOTLB buffer");
151 io_tlb_end = io_tlb_start + bytes; 207 io_tlb_end = io_tlb_start + bytes;
@@ -159,7 +215,7 @@ swiotlb_init_with_default_size(size_t default_size)
159 for (i = 0; i < io_tlb_nslabs; i++) 215 for (i = 0; i < io_tlb_nslabs; i++)
160 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 216 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
161 io_tlb_index = 0; 217 io_tlb_index = 0;
162 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); 218 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
163 219
164 /* 220 /*
165 * Get the overflow emergency buffer 221 * Get the overflow emergency buffer
@@ -168,8 +224,7 @@ swiotlb_init_with_default_size(size_t default_size)
168 if (!io_tlb_overflow_buffer) 224 if (!io_tlb_overflow_buffer)
169 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 225 panic("Cannot allocate SWIOTLB overflow buffer!\n");
170 226
171 printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", 227 swiotlb_print_info(bytes);
172 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
173} 228}
174 229
175void __init 230void __init
@@ -202,8 +257,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
202 bytes = io_tlb_nslabs << IO_TLB_SHIFT; 257 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
203 258
204 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 259 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
205 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 260 io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
206 order);
207 if (io_tlb_start) 261 if (io_tlb_start)
208 break; 262 break;
209 order--; 263 order--;
@@ -235,12 +289,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
235 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 289 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
236 io_tlb_index = 0; 290 io_tlb_index = 0;
237 291
238 io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 292 io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
239 get_order(io_tlb_nslabs * sizeof(char *))); 293 get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
240 if (!io_tlb_orig_addr) 294 if (!io_tlb_orig_addr)
241 goto cleanup3; 295 goto cleanup3;
242 296
243 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 297 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
244 298
245 /* 299 /*
246 * Get the overflow emergency buffer 300 * Get the overflow emergency buffer
@@ -250,9 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
250 if (!io_tlb_overflow_buffer) 304 if (!io_tlb_overflow_buffer)
251 goto cleanup4; 305 goto cleanup4;
252 306
253 printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " 307 swiotlb_print_info(bytes);
254 "0x%lx\n", bytes >> 20,
255 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
256 308
257 return 0; 309 return 0;
258 310
@@ -279,16 +331,69 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
279 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); 331 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
280} 332}
281 333
334static inline int range_needs_mapping(void *ptr, size_t size)
335{
336 return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
337}
338
282static int is_swiotlb_buffer(char *addr) 339static int is_swiotlb_buffer(char *addr)
283{ 340{
284 return addr >= io_tlb_start && addr < io_tlb_end; 341 return addr >= io_tlb_start && addr < io_tlb_end;
285} 342}
286 343
344static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
345{
346 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
347 struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
348 buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
349 buffer.page += buffer.offset >> PAGE_SHIFT;
350 buffer.offset &= PAGE_SIZE - 1;
351 return buffer;
352}
353
354static void
355__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
356{
357 if (PageHighMem(buffer.page)) {
358 size_t len, bytes;
359 char *dev, *host, *kmp;
360
361 len = size;
362 while (len != 0) {
363 unsigned long flags;
364
365 bytes = len;
366 if ((bytes + buffer.offset) > PAGE_SIZE)
367 bytes = PAGE_SIZE - buffer.offset;
368 local_irq_save(flags); /* protects KM_BOUNCE_READ */
369 kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
370 dev = dma_addr + size - len;
371 host = kmp + buffer.offset;
372 if (dir == DMA_FROM_DEVICE)
373 memcpy(host, dev, bytes);
374 else
375 memcpy(dev, host, bytes);
376 kunmap_atomic(kmp, KM_BOUNCE_READ);
377 local_irq_restore(flags);
378 len -= bytes;
379 buffer.page++;
380 buffer.offset = 0;
381 }
382 } else {
383 void *v = page_address(buffer.page) + buffer.offset;
384
385 if (dir == DMA_TO_DEVICE)
386 memcpy(dma_addr, v, size);
387 else
388 memcpy(v, dma_addr, size);
389 }
390}
391
287/* 392/*
288 * Allocates bounce buffer and returns its kernel virtual address. 393 * Allocates bounce buffer and returns its kernel virtual address.
289 */ 394 */
290static void * 395static void *
291map_single(struct device *hwdev, char *buffer, size_t size, int dir) 396map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
292{ 397{
293 unsigned long flags; 398 unsigned long flags;
294 char *dma_addr; 399 char *dma_addr;
@@ -298,11 +403,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
298 unsigned long mask; 403 unsigned long mask;
299 unsigned long offset_slots; 404 unsigned long offset_slots;
300 unsigned long max_slots; 405 unsigned long max_slots;
406 struct swiotlb_phys_addr slot_buf;
301 407
302 mask = dma_get_seg_boundary(hwdev); 408 mask = dma_get_seg_boundary(hwdev);
303 start_dma_addr = virt_to_bus(io_tlb_start) & mask; 409 start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
304 410
305 offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 411 offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
412
413 /*
414 * Carefully handle integer overflow which can occur when mask == ~0UL.
415 */
306 max_slots = mask + 1 416 max_slots = mask + 1
307 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT 417 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
308 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); 418 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
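The ternary above guards a wrap-around worth spelling out: when a device has no segment boundary, dma_get_seg_boundary() returns ~0UL, so mask + 1 overflows to 0 and ALIGN() would compute zero slots; the fallback arm substitutes the largest representable slot count instead. A minimal user-space sketch (ALIGN and the constants are re-defined locally for illustration):

#include <stdio.h>

#define IO_TLB_SHIFT  11
#define BITS_PER_LONG ((int)(sizeof(long) * 8))
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long mask = ~0UL;	/* "no segment boundary" case */
	unsigned long max_slots;

	/* mask + 1 wraps to 0 here; ALIGN(0, 2048) would yield 0 slots,
	 * so the ternary falls back to the maximum possible count. */
	max_slots = mask + 1
		? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	printf("max_slots = 0x%lx\n", max_slots);	/* 1UL << 53 on 64-bit */
	return 0;
}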
@@ -378,10 +488,15 @@ found:
378 * This is needed when we sync the memory. Then we sync the buffer if 488 * This is needed when we sync the memory. Then we sync the buffer if
379 * needed. 489 * needed.
380 */ 490 */
381 for (i = 0; i < nslots; i++) 491 slot_buf = buffer;
382 io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT); 492 for (i = 0; i < nslots; i++) {
493 slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
494 slot_buf.offset &= PAGE_SIZE - 1;
495 io_tlb_orig_addr[index+i] = slot_buf;
496 slot_buf.offset += 1 << IO_TLB_SHIFT;
497 }
383 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 498 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
384 memcpy(dma_addr, buffer, size); 499 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
385 500
386 return dma_addr; 501 return dma_addr;
387} 502}
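The slot-recording loop above advances by one 2 KiB slab per iteration and folds any offset overflow into the page number before storing each entry, so later syncs recover a valid page/offset pair for every slot. A small user-space sketch of that bookkeeping (constants and the struct are illustrative stand-ins):

#include <stdio.h>

#define IO_TLB_SHIFT 11			/* 2 KiB bounce slabs (assumed) */
#define PAGE_SHIFT   12			/* 4 KiB pages (assumed) */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)

struct phys_addr { unsigned long page; unsigned long offset; };

int main(void)
{
	/* Original buffer starts at page 100, offset 3000; map three slots. */
	struct phys_addr slot_buf = { .page = 100, .offset = 3000 };
	int i, nslots = 3;

	for (i = 0; i < nslots; i++) {
		slot_buf.page   += slot_buf.offset >> PAGE_SHIFT; /* fold carry */
		slot_buf.offset &= PAGE_SIZE - 1;
		printf("slot %d -> page %lu offset %lu\n",
		       i, slot_buf.page, slot_buf.offset);
		slot_buf.offset += 1 << IO_TLB_SHIFT;		  /* next slab */
	}
	/* prints: 100/3000, 101/952, 101/3000 */
	return 0;
}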
@@ -395,17 +510,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
395 unsigned long flags; 510 unsigned long flags;
396 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 511 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
397 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 512 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
398 char *buffer = io_tlb_orig_addr[index]; 513 struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
399 514
400 /* 515 /*
401 * First, sync the memory before unmapping the entry 516 * First, sync the memory before unmapping the entry
402 */ 517 */
403 if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 518 if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
404 /* 519 /*
405 * bounce... copy the data back into the original buffer and 520
406 * delete the bounce buffer. 521 * delete the bounce buffer.
407 */ 522 */
408 memcpy(buffer, dma_addr, size); 523 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
409 524
410 /* 525 /*
411 * Return the buffer to the free list by setting the corresponding 526 * Return the buffer to the free list by setting the corresponding
@@ -437,21 +552,18 @@ static void
437sync_single(struct device *hwdev, char *dma_addr, size_t size, 552sync_single(struct device *hwdev, char *dma_addr, size_t size,
438 int dir, int target) 553 int dir, int target)
439{ 554{
440 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 555 struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
441 char *buffer = io_tlb_orig_addr[index];
442
443 buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
444 556
445 switch (target) { 557 switch (target) {
446 case SYNC_FOR_CPU: 558 case SYNC_FOR_CPU:
447 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 559 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
448 memcpy(buffer, dma_addr, size); 560 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
449 else 561 else
450 BUG_ON(dir != DMA_TO_DEVICE); 562 BUG_ON(dir != DMA_TO_DEVICE);
451 break; 563 break;
452 case SYNC_FOR_DEVICE: 564 case SYNC_FOR_DEVICE:
453 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 565 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
454 memcpy(dma_addr, buffer, size); 566 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
455 else 567 else
456 BUG_ON(dir != DMA_FROM_DEVICE); 568 BUG_ON(dir != DMA_FROM_DEVICE);
457 break; 569 break;
@@ -467,9 +579,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
467 dma_addr_t dev_addr; 579 dma_addr_t dev_addr;
468 void *ret; 580 void *ret;
469 int order = get_order(size); 581 int order = get_order(size);
582 u64 dma_mask = DMA_32BIT_MASK;
583
584 if (hwdev && hwdev->coherent_dma_mask)
585 dma_mask = hwdev->coherent_dma_mask;
470 586
471 ret = (void *)__get_free_pages(flags, order); 587 ret = (void *)__get_free_pages(flags, order);
472 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) { 588 if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
473 /* 589 /*
474 * The allocated memory isn't reachable by the device. 590 * The allocated memory isn't reachable by the device.
475 * Fall back on swiotlb_map_single(). 591 * Fall back on swiotlb_map_single().
@@ -484,21 +600,26 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
484 * swiotlb_map_single(), which will grab memory from 600 * swiotlb_map_single(), which will grab memory from
485 * the lowest available address range. 601 * the lowest available address range.
486 */ 602 */
487 ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE); 603 struct swiotlb_phys_addr buffer;
604 buffer.page = virt_to_page(NULL);
605 buffer.offset = 0;
606 ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
488 if (!ret) 607 if (!ret)
489 return NULL; 608 return NULL;
490 } 609 }
491 610
492 memset(ret, 0, size); 611 memset(ret, 0, size);
493 dev_addr = virt_to_bus(ret); 612 dev_addr = swiotlb_virt_to_bus(ret);
494 613
495 /* Confirm address can be DMA'd by device */ 614 /* Confirm address can be DMA'd by device */
496 if (address_needs_mapping(hwdev, dev_addr, size)) { 615 if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
497 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 616 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
498 (unsigned long long)*hwdev->dma_mask, 617 (unsigned long long)dma_mask,
499 (unsigned long long)dev_addr); 618 (unsigned long long)dev_addr);
500 panic("swiotlb_alloc_coherent: allocated memory is out of " 619
501 "range for device"); 620 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
621 unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
622 return NULL;
502 } 623 }
503 *dma_handle = dev_addr; 624 *dma_handle = dev_addr;
504 return ret; 625 return ret;
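Since swiotlb_alloc_coherent() now unwinds and returns NULL instead of panicking when the allocation is unreachable by the device, callers see an ordinary allocation failure. A hypothetical driver fragment (the function name is illustrative, not from this patch) showing the caller-side handling:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical caller: memory the device cannot reach is now unwound
 * and reported as NULL rather than panicking the machine. */
static int example_alloc_ring(struct device *dev, size_t bytes,
			      void **ring, dma_addr_t *ring_dma)
{
	*ring = dma_alloc_coherent(dev, bytes, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;
	return 0;
}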
@@ -548,8 +669,9 @@ dma_addr_t
548swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, 669swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
549 int dir, struct dma_attrs *attrs) 670 int dir, struct dma_attrs *attrs)
550{ 671{
551 dma_addr_t dev_addr = virt_to_bus(ptr); 672 dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
552 void *map; 673 void *map;
674 struct swiotlb_phys_addr buffer;
553 675
554 BUG_ON(dir == DMA_NONE); 676 BUG_ON(dir == DMA_NONE);
555 /* 677 /*
@@ -557,19 +679,22 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
557 * we can safely return the device addr and not worry about bounce 679 * we can safely return the device addr and not worry about bounce
558 * buffering it. 680 * buffering it.
559 */ 681 */
560 if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force) 682 if (!address_needs_mapping(hwdev, dev_addr, size) &&
683 !range_needs_mapping(ptr, size))
561 return dev_addr; 684 return dev_addr;
562 685
563 /* 686 /*
564 * Oh well, have to allocate and map a bounce buffer. 687 * Oh well, have to allocate and map a bounce buffer.
565 */ 688 */
566 map = map_single(hwdev, ptr, size, dir); 689 buffer.page = virt_to_page(ptr);
690 buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
691 map = map_single(hwdev, buffer, size, dir);
567 if (!map) { 692 if (!map) {
568 swiotlb_full(hwdev, size, dir, 1); 693 swiotlb_full(hwdev, size, dir, 1);
569 map = io_tlb_overflow_buffer; 694 map = io_tlb_overflow_buffer;
570 } 695 }
571 696
572 dev_addr = virt_to_bus(map); 697 dev_addr = swiotlb_virt_to_bus(map);
573 698
574 /* 699 /*
575 * Ensure that the address returned is DMA'ble 700 * Ensure that the address returned is DMA'ble
@@ -599,7 +724,7 @@ void
599swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, 724swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
600 size_t size, int dir, struct dma_attrs *attrs) 725 size_t size, int dir, struct dma_attrs *attrs)
601{ 726{
602 char *dma_addr = bus_to_virt(dev_addr); 727 char *dma_addr = swiotlb_bus_to_virt(dev_addr);
603 728
604 BUG_ON(dir == DMA_NONE); 729 BUG_ON(dir == DMA_NONE);
605 if (is_swiotlb_buffer(dma_addr)) 730 if (is_swiotlb_buffer(dma_addr))
@@ -629,7 +754,7 @@ static void
629swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 754swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
630 size_t size, int dir, int target) 755 size_t size, int dir, int target)
631{ 756{
632 char *dma_addr = bus_to_virt(dev_addr); 757 char *dma_addr = swiotlb_bus_to_virt(dev_addr);
633 758
634 BUG_ON(dir == DMA_NONE); 759 BUG_ON(dir == DMA_NONE);
635 if (is_swiotlb_buffer(dma_addr)) 760 if (is_swiotlb_buffer(dma_addr))
@@ -660,7 +785,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
660 unsigned long offset, size_t size, 785 unsigned long offset, size_t size,
661 int dir, int target) 786 int dir, int target)
662{ 787{
663 char *dma_addr = bus_to_virt(dev_addr) + offset; 788 char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
664 789
665 BUG_ON(dir == DMA_NONE); 790 BUG_ON(dir == DMA_NONE);
666 if (is_swiotlb_buffer(dma_addr)) 791 if (is_swiotlb_buffer(dma_addr))
@@ -708,18 +833,20 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
708 int dir, struct dma_attrs *attrs) 833 int dir, struct dma_attrs *attrs)
709{ 834{
710 struct scatterlist *sg; 835 struct scatterlist *sg;
711 void *addr; 836 struct swiotlb_phys_addr buffer;
712 dma_addr_t dev_addr; 837 dma_addr_t dev_addr;
713 int i; 838 int i;
714 839
715 BUG_ON(dir == DMA_NONE); 840 BUG_ON(dir == DMA_NONE);
716 841
717 for_each_sg(sgl, sg, nelems, i) { 842 for_each_sg(sgl, sg, nelems, i) {
718 addr = SG_ENT_VIRT_ADDRESS(sg); 843 dev_addr = swiotlb_sg_to_bus(sg);
719 dev_addr = virt_to_bus(addr); 844 if (range_needs_mapping(sg_virt(sg), sg->length) ||
720 if (swiotlb_force ||
721 address_needs_mapping(hwdev, dev_addr, sg->length)) { 845 address_needs_mapping(hwdev, dev_addr, sg->length)) {
722 void *map = map_single(hwdev, addr, sg->length, dir); 846 void *map;
847 buffer.page = sg_page(sg);
848 buffer.offset = sg->offset;
849 map = map_single(hwdev, buffer, sg->length, dir);
723 if (!map) { 850 if (!map) {
724 /* Don't panic here, we expect map_sg users 851 /* Don't panic here, we expect map_sg users
725 to do proper error handling. */ 852 to do proper error handling. */
@@ -729,7 +856,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
729 sgl[0].dma_length = 0; 856 sgl[0].dma_length = 0;
730 return 0; 857 return 0;
731 } 858 }
732 sg->dma_address = virt_to_bus(map); 859 sg->dma_address = swiotlb_virt_to_bus(map);
733 } else 860 } else
734 sg->dma_address = dev_addr; 861 sg->dma_address = dev_addr;
735 sg->dma_length = sg->length; 862 sg->dma_length = sg->length;
@@ -759,11 +886,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
759 BUG_ON(dir == DMA_NONE); 886 BUG_ON(dir == DMA_NONE);
760 887
761 for_each_sg(sgl, sg, nelems, i) { 888 for_each_sg(sgl, sg, nelems, i) {
762 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 889 if (sg->dma_address != swiotlb_sg_to_bus(sg))
763 unmap_single(hwdev, bus_to_virt(sg->dma_address), 890 unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
764 sg->dma_length, dir); 891 sg->dma_length, dir);
765 else if (dir == DMA_FROM_DEVICE) 892 else if (dir == DMA_FROM_DEVICE)
766 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 893 dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
767 } 894 }
768} 895}
769EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); 896EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -792,11 +919,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
792 BUG_ON(dir == DMA_NONE); 919 BUG_ON(dir == DMA_NONE);
793 920
794 for_each_sg(sgl, sg, nelems, i) { 921 for_each_sg(sgl, sg, nelems, i) {
795 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 922 if (sg->dma_address != swiotlb_sg_to_bus(sg))
796 sync_single(hwdev, bus_to_virt(sg->dma_address), 923 sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
797 sg->dma_length, dir, target); 924 sg->dma_length, dir, target);
798 else if (dir == DMA_FROM_DEVICE) 925 else if (dir == DMA_FROM_DEVICE)
799 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 926 dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
800 } 927 }
801} 928}
802 929
@@ -817,7 +944,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
817int 944int
818swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 945swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
819{ 946{
820 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); 947 return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
821} 948}
822 949
823/* 950/*
@@ -829,7 +956,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
829int 956int
830swiotlb_dma_supported(struct device *hwdev, u64 mask) 957swiotlb_dma_supported(struct device *hwdev, u64 mask)
831{ 958{
832 return virt_to_bus(io_tlb_end - 1) <= mask; 959 return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
833} 960}
834 961
835EXPORT_SYMBOL(swiotlb_map_single); 962EXPORT_SYMBOL(swiotlb_map_single);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c399bc1093cb..3b777025d876 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/kallsyms.h> 25#include <linux/kallsyms.h>
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/ioport.h>
27 28
28#include <asm/page.h> /* for PAGE_SIZE */ 29#include <asm/page.h> /* for PAGE_SIZE */
29#include <asm/div64.h> 30#include <asm/div64.h>
@@ -32,40 +33,48 @@
32/* Works only for digits and letters, but small and fast */ 33/* Works only for digits and letters, but small and fast */
33#define TOLOWER(x) ((x) | 0x20) 34#define TOLOWER(x) ((x) | 0x20)
34 35
36static unsigned int simple_guess_base(const char *cp)
37{
38 if (cp[0] == '0') {
39 if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
40 return 16;
41 else
42 return 8;
43 } else {
44 return 10;
45 }
46}
47
35/** 48/**
36 * simple_strtoul - convert a string to an unsigned long 49 * simple_strtoul - convert a string to an unsigned long
37 * @cp: The start of the string 50 * @cp: The start of the string
38 * @endp: A pointer to the end of the parsed string will be placed here 51 * @endp: A pointer to the end of the parsed string will be placed here
39 * @base: The number base to use 52 * @base: The number base to use
40 */ 53 */
41unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base) 54unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
42{ 55{
43 unsigned long result = 0,value; 56 unsigned long result = 0;
44 57
45 if (!base) { 58 if (!base)
46 base = 10; 59 base = simple_guess_base(cp);
47 if (*cp == '0') { 60
48 base = 8; 61 if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
49 cp++; 62 cp += 2;
50 if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { 63
51 cp++; 64 while (isxdigit(*cp)) {
52 base = 16; 65 unsigned int value;
53 } 66
54 } 67 value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
55 } else if (base == 16) { 68 if (value >= base)
56 if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') 69 break;
57 cp += 2; 70 result = result * base + value;
58 }
59 while (isxdigit(*cp) &&
60 (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) {
61 result = result*base + value;
62 cp++; 71 cp++;
63 } 72 }
73
64 if (endp) 74 if (endp)
65 *endp = (char *)cp; 75 *endp = (char *)cp;
66 return result; 76 return result;
67} 77}
68
69EXPORT_SYMBOL(simple_strtoul); 78EXPORT_SYMBOL(simple_strtoul);
70 79
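The base-guessing rule factored out into simple_guess_base() is small enough to check by hand: a leading "0x" followed by a hex digit selects base 16, any other leading '0' selects base 8, and everything else base 10. A stand-alone user-space restatement of the same rule (not the kernel function itself):

#include <ctype.h>
#include <stdio.h>

#define TOLOWER(x) ((x) | 0x20)

/* User-space restatement of the base-guessing rule, for illustration. */
static unsigned int guess_base(const char *cp)
{
	if (cp[0] == '0')
		return (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) ? 16 : 8;
	return 10;
}

int main(void)
{
	printf("%u %u %u\n",
	       guess_base("0x1a"), guess_base("017"), guess_base("42"));
	/* prints: 16 8 10 */
	return 0;
}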
71/** 80/**
@@ -74,13 +83,12 @@ EXPORT_SYMBOL(simple_strtoul);
74 * @endp: A pointer to the end of the parsed string will be placed here 83 * @endp: A pointer to the end of the parsed string will be placed here
75 * @base: The number base to use 84 * @base: The number base to use
76 */ 85 */
77long simple_strtol(const char *cp,char **endp,unsigned int base) 86long simple_strtol(const char *cp, char **endp, unsigned int base)
78{ 87{
79 if(*cp=='-') 88 if(*cp == '-')
80 return -simple_strtoul(cp+1,endp,base); 89 return -simple_strtoul(cp + 1, endp, base);
81 return simple_strtoul(cp,endp,base); 90 return simple_strtoul(cp, endp, base);
82} 91}
83
84EXPORT_SYMBOL(simple_strtol); 92EXPORT_SYMBOL(simple_strtol);
85 93
86/** 94/**
@@ -89,34 +97,30 @@ EXPORT_SYMBOL(simple_strtol);
89 * @endp: A pointer to the end of the parsed string will be placed here 97 * @endp: A pointer to the end of the parsed string will be placed here
90 * @base: The number base to use 98 * @base: The number base to use
91 */ 99 */
92unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base) 100unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
93{ 101{
94 unsigned long long result = 0,value; 102 unsigned long long result = 0;
95 103
96 if (!base) { 104 if (!base)
97 base = 10; 105 base = simple_guess_base(cp);
98 if (*cp == '0') { 106
99 base = 8; 107 if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
100 cp++; 108 cp += 2;
101 if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { 109
102 cp++; 110 while (isxdigit(*cp)) {
103 base = 16; 111 unsigned int value;
104 } 112
105 } 113 value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
106 } else if (base == 16) { 114 if (value >= base)
107 if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') 115 break;
108 cp += 2; 116 result = result * base + value;
109 }
110 while (isxdigit(*cp)
111 && (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) {
112 result = result*base + value;
113 cp++; 117 cp++;
114 } 118 }
119
115 if (endp) 120 if (endp)
116 *endp = (char *)cp; 121 *endp = (char *)cp;
117 return result; 122 return result;
118} 123}
119
120EXPORT_SYMBOL(simple_strtoull); 124EXPORT_SYMBOL(simple_strtoull);
121 125
122/** 126/**
@@ -125,14 +129,13 @@ EXPORT_SYMBOL(simple_strtoull);
125 * @endp: A pointer to the end of the parsed string will be placed here 129 * @endp: A pointer to the end of the parsed string will be placed here
126 * @base: The number base to use 130 * @base: The number base to use
127 */ 131 */
128long long simple_strtoll(const char *cp,char **endp,unsigned int base) 132long long simple_strtoll(const char *cp, char **endp, unsigned int base)
129{ 133{
130 if(*cp=='-') 134 if(*cp=='-')
131 return -simple_strtoull(cp+1,endp,base); 135 return -simple_strtoull(cp + 1, endp, base);
132 return simple_strtoull(cp,endp,base); 136 return simple_strtoull(cp, endp, base);
133} 137}
134 138
135
136/** 139/**
137 * strict_strtoul - convert a string to an unsigned long strictly 140 * strict_strtoul - convert a string to an unsigned long strictly
138 * @cp: The string to be converted 141 * @cp: The string to be converted
@@ -155,7 +158,27 @@ long long simple_strtoll(const char *cp,char **endp,unsigned int base)
155 * simple_strtoul just ignores the successive invalid characters and 158 * simple_strtoul just ignores the successive invalid characters and
156 * returns the converted value of the prefix part of the string. 159
157 */ 160 */
158int strict_strtoul(const char *cp, unsigned int base, unsigned long *res); 161int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
162{
163 char *tail;
164 unsigned long val;
165 size_t len;
166
167 *res = 0;
168 len = strlen(cp);
169 if (len == 0)
170 return -EINVAL;
171
172 val = simple_strtoul(cp, &tail, base);
173 if ((*tail == '\0') ||
174 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
175 *res = val;
176 return 0;
177 }
178
179 return -EINVAL;
180}
181EXPORT_SYMBOL(strict_strtoul);
159 182
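Unlike simple_strtoul(), the strict variant above only succeeds when the whole string converts, optionally with a single trailing newline, which is exactly what sysfs store handlers want. A hypothetical store callback using it (the attribute and the static variable are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static unsigned long example_threshold;		/* illustrative module state */

/* Hypothetical sysfs store callback: strict_strtoul() accepts "123" or
 * "123\n" but rejects "123abc", which simple_strtoul() would silently
 * parse as 123. */
static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	example_threshold = val;
	return count;
}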
160/** 183/**
161 * strict_strtol - convert a string to a long strictly 184 * strict_strtol - convert a string to a long strictly
@@ -169,7 +192,20 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res);
169 * It returns 0 if conversion is successful and *res is set to the converted 192 * It returns 0 if conversion is successful and *res is set to the converted
170 * value, otherwise it returns -EINVAL and *res is set to 0. 193 * value, otherwise it returns -EINVAL and *res is set to 0.
171 */ 194 */
172int strict_strtol(const char *cp, unsigned int base, long *res); 195int strict_strtol(const char *cp, unsigned int base, long *res)
196{
197 int ret;
198 if (*cp == '-') {
199 ret = strict_strtoul(cp + 1, base, (unsigned long *)res);
200 if (!ret)
201 *res = -(*res);
202 } else {
203 ret = strict_strtoul(cp, base, (unsigned long *)res);
204 }
205
206 return ret;
207}
208EXPORT_SYMBOL(strict_strtol);
173 209
174/** 210/**
175 * strict_strtoull - convert a string to an unsigned long long strictly 211 * strict_strtoull - convert a string to an unsigned long long strictly
@@ -193,7 +229,27 @@ int strict_strtol(const char *cp, unsigned int base, long *res);
193 * simple_strtoull just ignores the successive invalid characters and 229 * simple_strtoull just ignores the successive invalid characters and
194 * returns the converted value of the prefix part of the string. 230
195 */ 231 */
196int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res); 232int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
233{
234 char *tail;
235 unsigned long long val;
236 size_t len;
237
238 *res = 0;
239 len = strlen(cp);
240 if (len == 0)
241 return -EINVAL;
242
243 val = simple_strtoull(cp, &tail, base);
244 if ((*tail == '\0') ||
245 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
246 *res = val;
247 return 0;
248 }
249
250 return -EINVAL;
251}
252EXPORT_SYMBOL(strict_strtoull);
197 253
198/** 254/**
199 * strict_strtoll - convert a string to a long long strictly 255 * strict_strtoll - convert a string to a long long strictly
@@ -207,53 +263,20 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res);
207 * It returns 0 if conversion is successful and *res is set to the converted 263 * It returns 0 if conversion is successful and *res is set to the converted
208 * value, otherwise it returns -EINVAL and *res is set to 0. 264 * value, otherwise it returns -EINVAL and *res is set to 0.
209 */ 265 */
210int strict_strtoll(const char *cp, unsigned int base, long long *res); 266int strict_strtoll(const char *cp, unsigned int base, long long *res)
211 267{
212#define define_strict_strtoux(type, valtype) \ 268 int ret;
213int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\ 269 if (*cp == '-') {
214{ \ 270 ret = strict_strtoull(cp + 1, base, (unsigned long long *)res);
215 char *tail; \ 271 if (!ret)
216 valtype val; \ 272 *res = -(*res);
217 size_t len; \ 273 } else {
218 \ 274 ret = strict_strtoull(cp, base, (unsigned long long *)res);
219 *res = 0; \ 275 }
220 len = strlen(cp); \
221 if (len == 0) \
222 return -EINVAL; \
223 \
224 val = simple_strtou##type(cp, &tail, base); \
225 if ((*tail == '\0') || \
226 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\
227 *res = val; \
228 return 0; \
229 } \
230 \
231 return -EINVAL; \
232} \
233
234#define define_strict_strtox(type, valtype) \
235int strict_strto##type(const char *cp, unsigned int base, valtype *res) \
236{ \
237 int ret; \
238 if (*cp == '-') { \
239 ret = strict_strtou##type(cp+1, base, res); \
240 if (!ret) \
241 *res = -(*res); \
242 } else \
243 ret = strict_strtou##type(cp, base, res); \
244 \
245 return ret; \
246} \
247
248define_strict_strtoux(l, unsigned long)
249define_strict_strtox(l, long)
250define_strict_strtoux(ll, unsigned long long)
251define_strict_strtox(ll, long long)
252 276
253EXPORT_SYMBOL(strict_strtoul); 277 return ret;
254EXPORT_SYMBOL(strict_strtol); 278}
255EXPORT_SYMBOL(strict_strtoll); 279EXPORT_SYMBOL(strict_strtoll);
256EXPORT_SYMBOL(strict_strtoull);
257 280
258static int skip_atoi(const char **s) 281static int skip_atoi(const char **s)
259{ 282{
@@ -528,18 +551,113 @@ static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int
528#endif 551#endif
529} 552}
530 553
554static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags)
555{
556#ifndef IO_RSRC_PRINTK_SIZE
557#define IO_RSRC_PRINTK_SIZE 4
558#endif
559
560#ifndef MEM_RSRC_PRINTK_SIZE
561#define MEM_RSRC_PRINTK_SIZE 8
562#endif
563
564 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
565 char sym[4*sizeof(resource_size_t) + 8];
566 char *p = sym, *pend = sym + sizeof(sym);
567 int size = -1;
568
569 if (res->flags & IORESOURCE_IO)
570 size = IO_RSRC_PRINTK_SIZE;
571 else if (res->flags & IORESOURCE_MEM)
572 size = MEM_RSRC_PRINTK_SIZE;
573
574 *p++ = '[';
575 p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
576 *p++ = '-';
577 p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
578 *p++ = ']';
579 *p = 0;
580
581 return string(buf, end, sym, field_width, precision, flags);
582}
583
584static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width,
585 int precision, int flags)
586{
587 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
588 char *p = mac_addr;
589 int i;
590
591 for (i = 0; i < 6; i++) {
592 p = pack_hex_byte(p, addr[i]);
593 if (!(flags & SPECIAL) && i != 5)
594 *p++ = ':';
595 }
596 *p = '\0';
597
598 return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL);
599}
600
601static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width,
602 int precision, int flags)
603{
604 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
605 char *p = ip6_addr;
606 int i;
607
608 for (i = 0; i < 8; i++) {
609 p = pack_hex_byte(p, addr[2 * i]);
610 p = pack_hex_byte(p, addr[2 * i + 1]);
611 if (!(flags & SPECIAL) && i != 7)
612 *p++ = ':';
613 }
614 *p = '\0';
615
616 return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL);
617}
618
619static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
620 int precision, int flags)
621{
622 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
623 char temp[3]; /* hold each IP quad in reverse order */
624 char *p = ip4_addr;
625 int i, digits;
626
627 for (i = 0; i < 4; i++) {
628 digits = put_dec_trunc(temp, addr[i]) - temp;
629 /* reverse the digits in the quad */
630 while (digits--)
631 *p++ = temp[digits];
632 if (i != 3)
633 *p++ = '.';
634 }
635 *p = '\0';
636
637 return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL);
638}
639
531/* 640/*
532 * Show a '%p' thing. A kernel extension is that the '%p' is followed 641 * Show a '%p' thing. A kernel extension is that the '%p' is followed
533 * by an extra set of alphanumeric characters that are extended format 642 * by an extra set of alphanumeric characters that are extended format
534 * specifiers. 643 * specifiers.
535 * 644 *
536 * Right now we just handle 'F' (for symbolic Function descriptor pointers) 645 * Right now we handle:
537 * and 'S' (for Symbolic direct pointers), but this can easily be 646 *
538 * extended in the future (network address types etc). 647 * - 'F' For symbolic function descriptor pointers
648 * - 'S' For symbolic direct pointers
649 * - 'R' For a struct resource pointer, it prints the range of
650 * addresses (not the name nor the flags)
651 * - 'M' For a 6-byte MAC address, it prints the address in the
652 * usual colon-separated hex notation
653 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated
654 * decimal for v4 and colon separated network-order 16 bit hex for v6)
655 * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is
656 * currently the same
539 * 657 *
540 * The difference between 'S' and 'F' is that on ia64 and ppc64 function 658 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
541 * pointers are really function descriptors, which contain a pointer the 659 * function pointers are really function descriptors, which contain a
542 * real address. 660 * pointer to the real address.
543 */ 661 */
544static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) 662static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
545{ 663{
@@ -549,6 +667,23 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
549 /* Fallthrough */ 667 /* Fallthrough */
550 case 'S': 668 case 'S':
551 return symbol_string(buf, end, ptr, field_width, precision, flags); 669 return symbol_string(buf, end, ptr, field_width, precision, flags);
670 case 'R':
671 return resource_string(buf, end, ptr, field_width, precision, flags);
672 case 'm':
673 flags |= SPECIAL;
674 /* Fallthrough */
675 case 'M':
676 return mac_address_string(buf, end, ptr, field_width, precision, flags);
677 case 'i':
678 flags |= SPECIAL;
679 /* Fallthrough */
680 case 'I':
681 if (fmt[1] == '6')
682 return ip6_addr_string(buf, end, ptr, field_width, precision, flags);
683 if (fmt[1] == '4')
684 return ip4_addr_string(buf, end, ptr, field_width, precision, flags);
685 flags &= ~SPECIAL;
686 break;
552 } 687 }
553 flags |= SMALL; 688 flags |= SMALL;
554 if (field_width == -1) { 689 if (field_width == -1) {
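With the new cases wired into pointer(), format strings can hand typed pointers straight to printk(). An illustrative (hypothetical) driver snippet using the extensions added here:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: how a driver might use the new %p extensions. */
static void example_report(struct resource *res, const u8 *mac,
			   const u8 *ip4, const u8 *ip6)
{
	printk(KERN_INFO "BAR:  %pR\n", res);	/* e.g. [0xf0000000-0xf0003fff] */
	printk(KERN_INFO "MAC:  %pM\n", mac);	/* e.g. 00:1b:21:0a:2b:3c */
	printk(KERN_INFO "IPv4: %pI4  IPv6: %pI6\n", ip4, ip6);
}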
@@ -565,6 +700,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
565 * @fmt: The format string to use 700 * @fmt: The format string to use
566 * @args: Arguments for the format string 701 * @args: Arguments for the format string
567 * 702 *
703 * This function follows C99 vsnprintf, but has some extensions:
704 * %pS output the name of a text symbol
705 * %pF output the name of a function pointer
706 * %pR output the address range in a struct resource
707 *
568 * The return value is the number of characters which would 708 * The return value is the number of characters which would
569 * be generated for the given input, excluding the trailing 709 * be generated for the given input, excluding the trailing
570 * '\0', as per ISO C99. If you want to have the exact 710 * '\0', as per ISO C99. If you want to have the exact
@@ -790,7 +930,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
790 /* the trailing null byte doesn't count towards the total */ 930 /* the trailing null byte doesn't count towards the total */
791 return str-buf; 931 return str-buf;
792} 932}
793
794EXPORT_SYMBOL(vsnprintf); 933EXPORT_SYMBOL(vsnprintf);
795 934
796/** 935/**
@@ -806,6 +945,8 @@ EXPORT_SYMBOL(vsnprintf);
806 * 945 *
807 * Call this function if you are already dealing with a va_list. 946 * Call this function if you are already dealing with a va_list.
808 * You probably want scnprintf() instead. 947 * You probably want scnprintf() instead.
948 *
949 * See the vsnprintf() documentation for format string extensions over C99.
809 */ 950 */
810int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) 951int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
811{ 952{
@@ -814,7 +955,6 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
814 i=vsnprintf(buf,size,fmt,args); 955 i=vsnprintf(buf,size,fmt,args);
815 return (i >= size) ? (size - 1) : i; 956 return (i >= size) ? (size - 1) : i;
816} 957}
817
818EXPORT_SYMBOL(vscnprintf); 958EXPORT_SYMBOL(vscnprintf);
819 959
820/** 960/**
@@ -828,6 +968,8 @@ EXPORT_SYMBOL(vscnprintf);
828 * generated for the given input, excluding the trailing null, 968 * generated for the given input, excluding the trailing null,
829 * as per ISO C99. If the return is greater than or equal to 969 * as per ISO C99. If the return is greater than or equal to
830 * @size, the resulting string is truncated. 970 * @size, the resulting string is truncated.
971 *
972 * See the vsnprintf() documentation for format string extensions over C99.
831 */ 973 */
832int snprintf(char * buf, size_t size, const char *fmt, ...) 974int snprintf(char * buf, size_t size, const char *fmt, ...)
833{ 975{
@@ -839,7 +981,6 @@ int snprintf(char * buf, size_t size, const char *fmt, ...)
839 va_end(args); 981 va_end(args);
840 return i; 982 return i;
841} 983}
842
843EXPORT_SYMBOL(snprintf); 984EXPORT_SYMBOL(snprintf);
844 985
845/** 986/**
@@ -877,12 +1018,13 @@ EXPORT_SYMBOL(scnprintf);
877 * 1018 *
878 * Call this function if you are already dealing with a va_list. 1019 * Call this function if you are already dealing with a va_list.
879 * You probably want sprintf() instead. 1020 * You probably want sprintf() instead.
1021 *
1022 * See the vsnprintf() documentation for format string extensions over C99.
880 */ 1023 */
881int vsprintf(char *buf, const char *fmt, va_list args) 1024int vsprintf(char *buf, const char *fmt, va_list args)
882{ 1025{
883 return vsnprintf(buf, INT_MAX, fmt, args); 1026 return vsnprintf(buf, INT_MAX, fmt, args);
884} 1027}
885
886EXPORT_SYMBOL(vsprintf); 1028EXPORT_SYMBOL(vsprintf);
887 1029
888/** 1030/**
@@ -894,6 +1036,8 @@ EXPORT_SYMBOL(vsprintf);
894 * The function returns the number of characters written 1036 * The function returns the number of characters written
895 * into @buf. Use snprintf() or scnprintf() in order to avoid 1037 * into @buf. Use snprintf() or scnprintf() in order to avoid
896 * buffer overflows. 1038 * buffer overflows.
1039 *
1040 * See the vsnprintf() documentation for format string extensions over C99.
897 */ 1041 */
898int sprintf(char * buf, const char *fmt, ...) 1042int sprintf(char * buf, const char *fmt, ...)
899{ 1043{
@@ -905,7 +1049,6 @@ int sprintf(char * buf, const char *fmt, ...)
905 va_end(args); 1049 va_end(args);
906 return i; 1050 return i;
907} 1051}
908
909EXPORT_SYMBOL(sprintf); 1052EXPORT_SYMBOL(sprintf);
910 1053
911/** 1054/**
@@ -1134,7 +1277,6 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1134 1277
1135 return num; 1278 return num;
1136} 1279}
1137
1138EXPORT_SYMBOL(vsscanf); 1280EXPORT_SYMBOL(vsscanf);
1139 1281
1140/** 1282/**
@@ -1153,5 +1295,4 @@ int sscanf(const char * buf, const char * fmt, ...)
1153 va_end(args); 1295 va_end(args);
1154 return i; 1296 return i;
1155} 1297}
1156
1157EXPORT_SYMBOL(sscanf); 1298EXPORT_SYMBOL(sscanf);