author    Ingo Molnar <mingo@elte.hu>  2009-01-10 06:04:41 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-01-10 06:04:41 -0500
commit    b17304245f0db0ac69b795c411407808f3f2796d (patch)
tree      63ed3915d9295bd08f640bf25c322064ba787fad /lib
parent    889c92d21db40be0b7d22a59395060237895bb85 (diff)
parent    9a100a4464917b5ffff3a8ce1c2758088fd9bb32 (diff)

Merge branch 'linus' into x86/setup-lzma

Conflicts:
	init/do_mounts_rd.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |   7
-rw-r--r--  lib/bust_spinlocks.c |   2
-rw-r--r--  lib/dynamic_printk.c |  58
-rw-r--r--  lib/fault-inject.c   |   1
-rw-r--r--  lib/klist.c          |  43
-rw-r--r--  lib/kobject_uevent.c |   8
-rw-r--r--  lib/percpu_counter.c |  36
-rw-r--r--  lib/prio_heap.c      |   2
-rw-r--r--  lib/proportions.c    |   8
-rw-r--r--  lib/radix-tree.c     |  13
-rw-r--r--  lib/sort.c           |  30
-rw-r--r--  lib/swiotlb.c        | 240
-rw-r--r--  lib/vsprintf.c       |   4
13 files changed, 232 insertions(+), 220 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2e75478e9c69..d0a32aab03ff 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -512,6 +512,13 @@ config DEBUG_VIRTUAL
 
 	  If unsure, say N.
 
+config DEBUG_NOMMU_REGIONS
+	bool "Debug the global anon/private NOMMU mapping region tree"
+	depends on DEBUG_KERNEL && !MMU
+	help
+	  This option causes the global tree of anonymous and private mapping
+	  regions to be regularly checked for invalid topology.
+
 config DEBUG_WRITECOUNT
 	bool "Debug filesystem writers count"
 	depends on DEBUG_KERNEL
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 486da62b2b07..9681d54b95d1 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -12,6 +12,7 @@
 #include <linux/tty.h>
 #include <linux/wait.h>
 #include <linux/vt_kern.h>
+#include <linux/console.h>
 
 
 void __attribute__((weak)) bust_spinlocks(int yes)
@@ -22,6 +23,7 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 #ifdef CONFIG_VT
 		unblank_screen();
 #endif
+		console_unblank();
 		if (--oops_in_progress == 0)
 			wake_up_klogd();
 	}
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index 8e30295e8566..165a19763dc9 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -277,40 +277,34 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 			dynamic_enabled = DYNAMIC_ENABLED_NONE;
 		}
 		err = 0;
-	} else {
-		if (elem) {
-			if (value && (elem->enable == 0)) {
-				dynamic_printk_enabled |=
-						(1LL << elem->hash1);
-				dynamic_printk_enabled2 |=
-						(1LL << elem->hash2);
-				elem->enable = 1;
-				num_enabled++;
-				dynamic_enabled = DYNAMIC_ENABLED_SOME;
-				err = 0;
-				printk(KERN_DEBUG
-					"debugging enabled for module %s\n",
-					elem->name);
-			} else if (!value && (elem->enable == 1)) {
-				elem->enable = 0;
-				num_enabled--;
-				if (disabled_hash(elem->hash1, true))
-					dynamic_printk_enabled &=
+	} else if (elem) {
+		if (value && (elem->enable == 0)) {
+			dynamic_printk_enabled |= (1LL << elem->hash1);
+			dynamic_printk_enabled2 |= (1LL << elem->hash2);
+			elem->enable = 1;
+			num_enabled++;
+			dynamic_enabled = DYNAMIC_ENABLED_SOME;
+			err = 0;
+			printk(KERN_DEBUG
+			       "debugging enabled for module %s\n",
+			       elem->name);
+		} else if (!value && (elem->enable == 1)) {
+			elem->enable = 0;
+			num_enabled--;
+			if (disabled_hash(elem->hash1, true))
+				dynamic_printk_enabled &=
 						~(1LL << elem->hash1);
-				if (disabled_hash(elem->hash2, false))
-					dynamic_printk_enabled2 &=
-						~(1LL << elem->hash2);
-				if (num_enabled)
-					dynamic_enabled =
-						DYNAMIC_ENABLED_SOME;
-				else
-					dynamic_enabled =
-						DYNAMIC_ENABLED_NONE;
-				err = 0;
-				printk(KERN_DEBUG
-					"debugging disabled for module "
-					"%s\n", elem->name);
-			}
+			if (disabled_hash(elem->hash2, false))
+				dynamic_printk_enabled2 &=
+						~(1LL << elem->hash2);
+			if (num_enabled)
+				dynamic_enabled = DYNAMIC_ENABLED_SOME;
+			else
+				dynamic_enabled = DYNAMIC_ENABLED_NONE;
+			err = 0;
+			printk(KERN_DEBUG
+			       "debugging disabled for module %s\n",
+			       elem->name);
 		}
 	}
 }
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index a50a311554cc..f97af55bdd96 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -6,7 +6,6 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/unwind.h>
 #include <linux/stacktrace.h>
 #include <linux/kallsyms.h>
 #include <linux/fault-inject.h>
diff --git a/lib/klist.c b/lib/klist.c
index bbdd3015c2c7..573d6068a42e 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -36,6 +36,7 @@
 
 #include <linux/klist.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 
 /*
  * Use the lowest bit of n_klist to mark deleted nodes and exclude
@@ -108,7 +109,6 @@ static void add_tail(struct klist *k, struct klist_node *n)
 static void klist_node_init(struct klist *k, struct klist_node *n)
 {
 	INIT_LIST_HEAD(&n->n_node);
-	init_completion(&n->n_removed);
 	kref_init(&n->n_ref);
 	knode_set_klist(n, k);
 	if (k->get)
@@ -171,13 +171,34 @@ void klist_add_before(struct klist_node *n, struct klist_node *pos)
 }
 EXPORT_SYMBOL_GPL(klist_add_before);
 
+struct klist_waiter {
+	struct list_head list;
+	struct klist_node *node;
+	struct task_struct *process;
+	int woken;
+};
+
+static DEFINE_SPINLOCK(klist_remove_lock);
+static LIST_HEAD(klist_remove_waiters);
+
 static void klist_release(struct kref *kref)
 {
+	struct klist_waiter *waiter, *tmp;
 	struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
 	WARN_ON(!knode_dead(n));
 	list_del(&n->n_node);
-	complete(&n->n_removed);
+	spin_lock(&klist_remove_lock);
+	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
+		if (waiter->node != n)
+			continue;
+
+		waiter->woken = 1;
+		mb();
+		wake_up_process(waiter->process);
+		list_del(&waiter->list);
+	}
+	spin_unlock(&klist_remove_lock);
 	knode_set_klist(n, NULL);
 }
 
@@ -217,8 +238,24 @@ EXPORT_SYMBOL_GPL(klist_del);
  */
 void klist_remove(struct klist_node *n)
 {
+	struct klist_waiter waiter;
+
+	waiter.node = n;
+	waiter.process = current;
+	waiter.woken = 0;
+	spin_lock(&klist_remove_lock);
+	list_add(&waiter.list, &klist_remove_waiters);
+	spin_unlock(&klist_remove_lock);
+
 	klist_del(n);
-	wait_for_completion(&n->n_removed);
+
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (waiter.woken)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
 }
 EXPORT_SYMBOL_GPL(klist_remove);
 
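
The klist change above replaces a per-node completion with a global list of
on-stack waiters, so klist_release() can wake the remover without touching
node memory that may already be freed. A rough userspace analogue of that
pattern (pthreads, illustrative names only, not kernel code):

#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	void *node;                 /* object whose release we wait for */
	int woken;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct waiter *waiters;

/* Releaser side: unlink and flag every waiter for this node, then wake. */
static void release_node(void *node)
{
	pthread_mutex_lock(&lock);
	for (struct waiter **w = &waiters; *w; ) {
		if ((*w)->node == node) {
			(*w)->woken = 1;
			*w = (*w)->next;        /* unlink before waking */
		} else
			w = &(*w)->next;
	}
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Remover side: register *before* triggering release, then wait on the flag. */
static void remove_and_wait(void *node)
{
	struct waiter self = { .node = node, .woken = 0 };

	pthread_mutex_lock(&lock);
	self.next = waiters;
	waiters = &self;
	pthread_mutex_unlock(&lock);

	release_node(node);             /* stand-in for klist_del() */

	pthread_mutex_lock(&lock);
	while (!self.woken)             /* equivalent of the woken loop */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int obj;
	remove_and_wait(&obj);
	puts("node released");
	return 0;
}
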
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 3f914725bda8..318328ddbd1c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -165,7 +165,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	/* keys passed in from the caller */
 	if (envp_ext) {
 		for (i = 0; envp_ext[i]; i++) {
-			retval = add_uevent_var(env, envp_ext[i]);
+			retval = add_uevent_var(env, "%s", envp_ext[i]);
 			if (retval)
 				goto exit;
 		}
@@ -225,8 +225,10 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 			}
 
 			NETLINK_CB(skb).dst_group = 1;
-			netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL);
-		}
+			retval = netlink_broadcast(uevent_sock, skb, 0, 1,
+						   GFP_KERNEL);
+		} else
+			retval = -ENOMEM;
 	}
 #endif
 
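
The add_uevent_var() hunk above is a format-string fix: the caller-supplied
string is now passed as data behind a "%s" format instead of being used as
the format itself. A minimal userspace illustration of the same hazard, with
printf and a made-up input string:

#include <stdio.h>

int main(void)
{
	const char *from_caller = "DEVPATH=100%full";   /* contains a '%' */

	/* BAD: "%fu" would be parsed as a conversion; undefined behavior.
	 * printf(from_caller);
	 */

	/* GOOD: the data is only ever an argument, never a format. */
	printf("%s\n", from_caller);
	return 0;
}
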
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index b255b939bc1b..aeaa6d734447 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -9,10 +9,8 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 
-#ifdef CONFIG_HOTPLUG_CPU
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
-#endif
 
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
@@ -68,11 +66,11 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-static struct lock_class_key percpu_counter_irqsafe;
-
-int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+			  struct lock_class_key *key)
 {
 	spin_lock_init(&fbc->lock);
+	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
 	if (!fbc->counters)
@@ -84,17 +82,7 @@ int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
 #endif
 	return 0;
 }
-EXPORT_SYMBOL(percpu_counter_init);
-
-int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
-{
-	int err;
-
-	err = percpu_counter_init(fbc, amount);
-	if (!err)
-		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
-	return err;
-}
+EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
@@ -111,13 +99,24 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
-#ifdef CONFIG_HOTPLUG_CPU
+int percpu_counter_batch __read_mostly = 32;
+EXPORT_SYMBOL(percpu_counter_batch);
+
+static void compute_batch_value(void)
+{
+	int nr = num_online_cpus();
+
+	percpu_counter_batch = max(32, nr*2);
+}
+
 static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 					unsigned long action, void *hcpu)
 {
+#ifdef CONFIG_HOTPLUG_CPU
 	unsigned int cpu;
 	struct percpu_counter *fbc;
 
+	compute_batch_value();
 	if (action != CPU_DEAD)
 		return NOTIFY_OK;
 
@@ -134,13 +133,14 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
+#endif
 	return NOTIFY_OK;
 }
 
 static int __init percpu_counter_startup(void)
 {
+	compute_batch_value();
 	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
 	return 0;
 }
 module_init(percpu_counter_startup);
-#endif
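
The percpu_counter change folds the lockdep-class setup into a single
__percpu_counter_init() and makes the batch threshold scale with CPU count,
percpu_counter_batch = max(32, nr*2). A userspace sketch of the batching
idea only (illustrative names, not the kernel implementation): each thread
keeps a small local delta and takes the shared lock only when the delta
exceeds the batch.

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4
static int batch;                 /* computed like compute_batch_value() */
static long long global_count;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct local_counter { int delta; };

static void counter_add(struct local_counter *lc, int amount)
{
	lc->delta += amount;
	if (lc->delta >= batch || lc->delta <= -batch) {
		pthread_mutex_lock(&lock);
		global_count += lc->delta;    /* fold local delta into global */
		pthread_mutex_unlock(&lock);
		lc->delta = 0;
	}
}

int main(void)
{
	int nr = NCPUS;
	batch = nr * 2 > 32 ? nr * 2 : 32;    /* max(32, nr*2) */

	struct local_counter lc = { 0 };
	for (int i = 0; i < 1000; i++)
		counter_add(&lc, 1);

	pthread_mutex_lock(&lock);
	global_count += lc.delta;             /* final fold, like a _sum() */
	pthread_mutex_unlock(&lock);
	printf("count = %lld\n", global_count);
	return 0;
}
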
diff --git a/lib/prio_heap.c b/lib/prio_heap.c
index 471944a54e23..a7af6f85eca8 100644
--- a/lib/prio_heap.c
+++ b/lib/prio_heap.c
@@ -31,7 +31,7 @@ void *heap_insert(struct ptr_heap *heap, void *p)
 
 	if (heap->size < heap->max) {
 		/* Heap insertion */
-		int pos = heap->size++;
+		pos = heap->size++;
 		while (pos > 0 && heap->gt(p, ptrs[(pos-1)/2])) {
 			ptrs[pos] = ptrs[(pos-1)/2];
 			pos = (pos-1)/2;
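
The prio_heap one-liner drops a declaration that shadowed the function-scope
pos, which left the outer variable stale after the block. A standalone
illustration of the shadowing pitfall (hypothetical code, not from the file):

#include <stdio.h>

int main(void)
{
	int pos = -1;
	int size = 0;

	if (size < 10) {
		int pos = size++;   /* BUG: shadows the outer pos */
		(void)pos;
	}
	printf("outer pos is still %d\n", pos);   /* prints -1 */
	return 0;
}
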
diff --git a/lib/proportions.c b/lib/proportions.c
index 4f387a643d72..d50746a79de2 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
 	pd->index = 0;
 	pd->pg[0].shift = shift;
 	mutex_init(&pd->mutex);
-	err = percpu_counter_init_irq(&pd->pg[0].events, 0);
+	err = percpu_counter_init(&pd->pg[0].events, 0);
 	if (err)
 		goto out;
 
-	err = percpu_counter_init_irq(&pd->pg[1].events, 0);
+	err = percpu_counter_init(&pd->pg[1].events, 0);
 	if (err)
 		percpu_counter_destroy(&pd->pg[0].events);
 
@@ -147,6 +147,7 @@ out:
  * this is used to track the active references.
  */
 static struct prop_global *prop_get_global(struct prop_descriptor *pd)
+__acquires(RCU)
 {
 	int index;
 
@@ -160,6 +161,7 @@ static struct prop_global *prop_get_global(struct prop_descriptor *pd)
 }
 
 static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
+__releases(RCU)
 {
 	rcu_read_unlock();
 }
@@ -191,7 +193,7 @@ int prop_local_init_percpu(struct prop_local_percpu *pl)
 	spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
-	return percpu_counter_init_irq(&pl->events, 0);
+	return percpu_counter_init(&pl->events, 0);
 }
 
 void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index be86b32bc874..4bb42a0344ec 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -81,7 +81,7 @@ struct radix_tree_preload {
 	int nr;
 	struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
 };
-DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
@@ -640,13 +640,14 @@ EXPORT_SYMBOL(radix_tree_tag_get);
  *
  * Returns: the index of the hole if found, otherwise returns an index
  * outside of the set specified (in which case 'return - index >= max_scan'
- * will be true).
+ * will be true). In rare cases of index wrap-around, 0 will be returned.
  *
  * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of the
- * tree at a single point in time. For example, if a hole is created at index
- * 5, then subsequently a hole is created at index 10, radix_tree_next_hole
- * covering both indexes may return 10 if called under rcu_read_lock.
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 5, then subsequently a hole is created at index 10,
+ * radix_tree_next_hole covering both indexes may return 10 if called
+ * under rcu_read_lock.
  */
 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan)
diff --git a/lib/sort.c b/lib/sort.c
index 6abbaf3d5858..926d00429ed2 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -32,11 +32,11 @@ static void generic_swap(void *a, void *b, int size)
  * @base: pointer to data to sort
  * @num: number of elements
  * @size: size of each element
- * @cmp: pointer to comparison function
- * @swap: pointer to swap function or NULL
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
  *
  * This function does a heapsort on the given array. You may provide a
- * swap function optimized to your element type.
+ * swap_func function optimized to your element type.
  *
  * Sorting time is O(n log n) both on average and worst-case. While
  * qsort is about 20% faster on average, it suffers from exploitable
@@ -45,37 +45,39 @@ static void generic_swap(void *a, void *b, int size)
  */
 
 void sort(void *base, size_t num, size_t size,
-	  int (*cmp)(const void *, const void *),
-	  void (*swap)(void *, void *, int size))
+	  int (*cmp_func)(const void *, const void *),
+	  void (*swap_func)(void *, void *, int size))
 {
 	/* pre-scale counters for performance */
 	int i = (num/2 - 1) * size, n = num * size, c, r;
 
-	if (!swap)
-		swap = (size == 4 ? u32_swap : generic_swap);
+	if (!swap_func)
+		swap_func = (size == 4 ? u32_swap : generic_swap);
 
 	/* heapify */
 	for ( ; i >= 0; i -= size) {
 		for (r = i; r * 2 + size < n; r = c) {
 			c = r * 2 + size;
-			if (c < n - size && cmp(base + c, base + c + size) < 0)
+			if (c < n - size &&
+					cmp_func(base + c, base + c + size) < 0)
 				c += size;
-			if (cmp(base + r, base + c) >= 0)
+			if (cmp_func(base + r, base + c) >= 0)
 				break;
-			swap(base + r, base + c, size);
+			swap_func(base + r, base + c, size);
 		}
 	}
 
 	/* sort */
 	for (i = n - size; i > 0; i -= size) {
-		swap(base, base + i, size);
+		swap_func(base, base + i, size);
 		for (r = 0; r * 2 + size < i; r = c) {
 			c = r * 2 + size;
-			if (c < i - size && cmp(base + c, base + c + size) < 0)
+			if (c < i - size &&
+					cmp_func(base + c, base + c + size) < 0)
 				c += size;
-			if (cmp(base + r, base + c) >= 0)
+			if (cmp_func(base + r, base + c) >= 0)
 				break;
-			swap(base + r, base + c, size);
+			swap_func(base + r, base + c, size);
 		}
 	}
 }
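
After the rename, the comparison callback keeps the familiar qsort() shape
and swap_func may be NULL to get the built-in swap. A userspace stand-in
showing the comparator a caller would pass (libc qsort used here, since
lib/sort.c itself is kernel-only):

#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;
	return (x > y) - (x < y);
}

int main(void)
{
	int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };

	/* In the kernel this would be: sort(v, 8, sizeof(int), cmp_int, NULL); */
	qsort(v, 8, sizeof(int), cmp_int);

	for (size_t i = 0; i < 8; i++)
		printf("%d ", v[i]);
	putchar('\n');
	return 0;
}
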
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index fa2dc4e5f9ba..1f991acc2a05 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -14,6 +14,7 @@
  * 04/07/.. ak		Better overflow handling. Assorted fixes.
  * 05/09/10 linville	Add support for syncing ranges, support syncing for
  *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ * 08/12/11 beckyb	Add highmem support
  */
 
 #include <linux/cache.h>
@@ -21,9 +22,9 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <linux/swiotlb.h>
 #include <linux/string.h>
 #include <linux/swiotlb.h>
+#include <linux/pfn.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/highmem.h>
@@ -89,10 +90,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-static struct swiotlb_phys_addr {
-	struct page *page;
-	unsigned int offset;
-} *io_tlb_orig_addr;
+static phys_addr_t *io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -116,7 +114,7 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
 {
 	return alloc_bootmem_low_pages(size);
 }
@@ -126,7 +124,7 @@ void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
 	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
-dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
 {
 	return paddr;
 }
@@ -136,9 +134,10 @@ phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
 	return baddr;
 }
 
-static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
+				      volatile void *address)
 {
-	return swiotlb_phys_to_bus(virt_to_phys(address));
+	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
 static void *swiotlb_bus_to_virt(dma_addr_t address)
@@ -151,35 +150,18 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }
 
-static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
-{
-	return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
-}
-
 static void swiotlb_print_info(unsigned long bytes)
 {
 	phys_addr_t pstart, pend;
-	dma_addr_t bstart, bend;
 
 	pstart = virt_to_phys(io_tlb_start);
 	pend = virt_to_phys(io_tlb_end);
 
-	bstart = swiotlb_phys_to_bus(pstart);
-	bend = swiotlb_phys_to_bus(pend);
-
 	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
 	       bytes >> 20, io_tlb_start, io_tlb_end);
-	if (pstart != bstart || pend != bend)
-		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
-		       " bus %#llx - %#llx\n",
-		       (unsigned long long)pstart,
-		       (unsigned long long)pend,
-		       (unsigned long long)bstart,
-		       (unsigned long long)bend);
-	else
-		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
-		       (unsigned long long)pstart,
-		       (unsigned long long)pend);
+	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
+	       (unsigned long long)pstart,
+	       (unsigned long long)pend);
 }
 
 /*
@@ -215,7 +197,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -289,12 +271,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 
-	io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
-	           get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
+	io_tlb_orig_addr = (phys_addr_t *)
+		__get_free_pages(GFP_KERNEL,
+				 get_order(io_tlb_nslabs *
+					   sizeof(phys_addr_t)));
 	if (!io_tlb_orig_addr)
 		goto cleanup3;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -309,8 +293,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
-	           sizeof(char *)));
+	free_pages((unsigned long)io_tlb_orig_addr,
+		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 	io_tlb_orig_addr = NULL;
 cleanup3:
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -341,51 +325,44 @@ static int is_swiotlb_buffer(char *addr)
 	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
-static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
-{
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
-	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
-	buffer.page += buffer.offset >> PAGE_SHIFT;
-	buffer.offset &= PAGE_SIZE - 1;
-	return buffer;
-}
-
-static void
-__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
-{
-	if (PageHighMem(buffer.page)) {
-		size_t len, bytes;
-		char *dev, *host, *kmp;
-
-		len = size;
-		while (len != 0) {
-			unsigned long flags;
-
-			bytes = len;
-			if ((bytes + buffer.offset) > PAGE_SIZE)
-				bytes = PAGE_SIZE - buffer.offset;
-			local_irq_save(flags); /* protects KM_BOUNCE_READ */
-			kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
-			dev = dma_addr + size - len;
-			host = kmp + buffer.offset;
-			if (dir == DMA_FROM_DEVICE)
-				memcpy(host, dev, bytes);
+/*
+ * Bounce: copy the swiotlb buffer back to the original dma location
+ */
+static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+			   enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(phys);
+
+	if (PageHighMem(pfn_to_page(pfn))) {
+		/* The buffer does not have a mapping.  Map it in and copy */
+		unsigned int offset = phys & ~PAGE_MASK;
+		char *buffer;
+		unsigned int sz = 0;
+		unsigned long flags;
+
+		while (size) {
+			sz = min(PAGE_SIZE - offset, size);
+
+			local_irq_save(flags);
+			buffer = kmap_atomic(pfn_to_page(pfn),
+					     KM_BOUNCE_READ);
+			if (dir == DMA_TO_DEVICE)
+				memcpy(dma_addr, buffer + offset, sz);
 			else
-				memcpy(dev, host, bytes);
-			kunmap_atomic(kmp, KM_BOUNCE_READ);
+				memcpy(buffer + offset, dma_addr, sz);
+			kunmap_atomic(buffer, KM_BOUNCE_READ);
 			local_irq_restore(flags);
-			len -= bytes;
-			buffer.page++;
-			buffer.offset = 0;
+
+			size -= sz;
+			pfn++;
+			dma_addr += sz;
+			offset = 0;
 		}
 	} else {
-		void *v = page_address(buffer.page) + buffer.offset;
-
 		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, v, size);
+			memcpy(dma_addr, phys_to_virt(phys), size);
 		else
-			memcpy(v, dma_addr, size);
+			memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
 
@@ -393,7 +370,7 @@ __sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
+map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
@@ -403,10 +380,9 @@ map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, i
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
-	struct swiotlb_phys_addr slot_buf;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
+	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -488,15 +464,10 @@ found:
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
-	slot_buf = buffer;
-	for (i = 0; i < nslots; i++) {
-		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
-		slot_buf.offset &= PAGE_SIZE - 1;
-		io_tlb_orig_addr[index+i] = slot_buf;
-		slot_buf.offset += 1 << IO_TLB_SHIFT;
-	}
+	for (i = 0; i < nslots; i++)
+		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 
 	return dma_addr;
 }
@@ -510,17 +481,13 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
+	phys_addr_t phys = io_tlb_orig_addr[index];
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-		/*
-		 * bounce... copy the data back into the original buffer * and
-		 * delete the bounce buffer.
-		 */
-		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -552,18 +519,21 @@ static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	    int dir, int target)
 {
-	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t phys = io_tlb_orig_addr[index];
+
+	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
@@ -585,7 +555,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
+	if (ret &&
+	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
+				   size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -600,16 +572,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		struct swiotlb_phys_addr buffer;
-		buffer.page = virt_to_page(NULL);
-		buffer.offset = 0;
-		ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
+		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
 		if (!ret)
 			return NULL;
 	}
 
 	memset(ret, 0, size);
-	dev_addr = swiotlb_virt_to_bus(ret);
+	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -624,6 +593,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	*dma_handle = dev_addr;
 	return ret;
 }
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -636,6 +606,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 	unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -648,7 +619,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	 * the damage, or panic when the transfer is too big.
 	 */
 	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
-	       "device %s\n", size, dev ? dev->bus_id : "?");
+	       "device %s\n", size, dev ? dev_name(dev) : "?");
 
 	if (size > io_tlb_overflow && do_panic) {
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
@@ -669,9 +640,8 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 			 int dir, struct dma_attrs *attrs)
 {
-	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
+	dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
 	void *map;
-	struct swiotlb_phys_addr buffer;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -686,15 +656,13 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	buffer.page = virt_to_page(ptr);
-	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-	map = map_single(hwdev, buffer, size, dir);
+	map = map_single(hwdev, virt_to_phys(ptr), size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = swiotlb_virt_to_bus(map);
+	dev_addr = swiotlb_virt_to_bus(hwdev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -711,6 +679,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
 	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_map_single);
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -740,6 +709,8 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single);
+
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -769,6 +740,7 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -776,6 +748,7 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 
 /*
  * Same as above, but for a sub-range of the mapping.
@@ -801,6 +774,7 @@ swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
 
 void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -809,9 +783,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_DEVICE);
 }
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 
-void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
-			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -833,20 +806,18 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
-	struct swiotlb_phys_addr buffer;
-	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		dev_addr = swiotlb_sg_to_bus(sg);
-		if (range_needs_mapping(sg_virt(sg), sg->length) ||
+		void *addr = sg_virt(sg);
+		dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr);
+
+		if (range_needs_mapping(addr, sg->length) ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
-			void *map;
-			buffer.page = sg_page(sg);
-			buffer.offset = sg->offset;
-			map = map_single(hwdev, buffer, sg->length, dir);
+			void *map = map_single(hwdev, sg_phys(sg),
+					       sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -856,7 +827,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_virt_to_bus(map);
+			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -871,6 +842,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_map_sg);
 
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -886,11 +858,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(sg))
+		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
 			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
+			dma_mark_clean(sg_virt(sg), sg->dma_length);
 	}
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -901,6 +873,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg);
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
@@ -919,11 +892,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(sg))
+		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
 			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
+			dma_mark_clean(sg_virt(sg), sg->dma_length);
 	}
 }
 
@@ -933,6 +906,7 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -940,12 +914,14 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
 }
+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -956,20 +932,6 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
+	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
 }
-
-EXPORT_SYMBOL(swiotlb_map_single);
-EXPORT_SYMBOL(swiotlb_unmap_single);
-EXPORT_SYMBOL(swiotlb_map_sg);
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
-EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);
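
The new swiotlb_bounce() walks a physical range one page at a time so that
highmem pages can be kmapped individually. The chunking arithmetic in
isolation, as plain userspace C (PAGE_SIZE assumed 4096 and the example
address made up for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long phys = 0x12345ABCUL;        /* not page aligned */
	unsigned long size = 10000;
	unsigned long pfn = phys / PAGE_SIZE;     /* like PFN_DOWN(phys) */
	unsigned long offset = phys & ~PAGE_MASK;

	while (size) {
		unsigned long sz = PAGE_SIZE - offset;
		if (sz > size)
			sz = size;        /* min(PAGE_SIZE - offset, size) */
		printf("copy %4lu bytes from pfn %lu offset %lu\n",
		       sz, pfn, offset);
		size -= sz;
		pfn++;
		offset = 0;               /* later pages start at byte 0 */
	}
	return 0;
}
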
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 98d632277ca8..0fbd0121d91d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -170,6 +170,8 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
 		return -EINVAL;
 
 	val = simple_strtoul(cp, &tail, base);
+	if (tail == cp)
+		return -EINVAL;
 	if ((*tail == '\0') ||
 	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
 		*res = val;
@@ -241,6 +243,8 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
 		return -EINVAL;
 
 	val = simple_strtoull(cp, &tail, base);
+	if (tail == cp)
+		return -EINVAL;
 	if ((*tail == '\0') ||
 	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
 		*res = val;
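
The added tail == cp test rejects input that contains no digits at all, which
simple_strtoul() would otherwise silently parse as 0. A userspace analogue of
strict_strtoul() built on libc strtoul() (illustrative; the kernel helper
differs in details):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int strict_parse_ul(const char *cp, int base, unsigned long *res)
{
	char *tail;
	unsigned long val;

	errno = 0;
	val = strtoul(cp, &tail, base);
	if (tail == cp)               /* no digits consumed: reject */
		return -EINVAL;
	if (errno || (*tail != '\0' && !(*tail == '\n' && tail[1] == '\0')))
		return -EINVAL;
	*res = val;
	return 0;
}

int main(void)
{
	unsigned long v;
	printf("\"42\"  -> %d\n", strict_parse_ul("42", 10, &v));   /* 0 */
	printf("\"abc\" -> %d\n", strict_parse_ul("abc", 10, &v));  /* -EINVAL */
	return 0;
}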