Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |   9
-rw-r--r--  lib/Makefile         |   2
-rw-r--r--  lib/cpumask.c        |  12
-rw-r--r--  lib/debug_locks.c    |   2
-rw-r--r--  lib/dma-debug.c      | 387
-rw-r--r--  lib/kobject.c        |   3
-rw-r--r--  lib/kobject_uevent.c |   4
-rw-r--r--  lib/scatterlist.c    |   9
-rw-r--r--  lib/swiotlb.c        | 119

9 files changed, 430 insertions(+), 117 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c6e854f215fa..6cdcf38f2da9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -440,7 +440,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND
+	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -620,7 +620,7 @@ config ARCH_WANT_FRAME_POINTERS
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
+		(CRIS || M68K || M68KNOMMU || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
@@ -809,13 +809,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC
+	select FRAME_POINTER if !PPC && !S390
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC
+	select FRAME_POINTER if !MIPS && !PPC && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
@@ -891,7 +891,6 @@ config DYNAMIC_DEBUG
 	default n
 	depends on PRINTK
 	depends on DEBUG_FS
-	select PRINTK_DEBUG
 	help
 
 	  Compiles debug level messages into the kernel, which would not
diff --git a/lib/Makefile b/lib/Makefile
index d6edd6753f40..33a40e40e3ee 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 1f71b97de0f9..eb23aaa0c7b8 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -119,6 +119,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(alloc_cpumask_var_node);
 
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var_node);
+
 /**
  * alloc_cpumask_var - allocate a struct cpumask
  * @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -135,6 +141,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpumask_var);
 
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var);
+
 /**
  * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
  * @mask: pointer to cpumask_var_t where the cpumask is returned
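A quick usage sketch for the new zalloc variants (illustrative only, not part of
this patch): they simply OR in __GFP_ZERO, so the caller gets a cleared mask and
can drop its explicit cpumask_clear():

	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* mask starts out empty; set only the bits we need */
	cpumask_set_cpu(smp_processor_id(), mask);

	free_cpumask_var(mask);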
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0218b4693dd8..bc3b11731b9c 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ int debug_locks_silent;
  */
 int debug_locks_off(void)
 {
-	if (xchg(&debug_locks, 0)) {
+	if (__debug_locks_off()) {
 		if (!debug_locks_silent) {
 			oops_in_progress = 1;
 			console_verbose();
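For context, __debug_locks_off() is presumably the old xchg moved into a header
helper so other call sites can share it; a sketch of what that helper would look
like (assumed, since the header change is outside this lib/ diff):

	/* assumed definition in include/linux/debug_locks.h */
	static inline int __debug_locks_off(void)
	{
		return xchg(&debug_locks, 0);
	}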
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d3da7edc034f..ad65fc0317d9 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
 #include <linux/dma-debug.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent __read_mostly;
 static struct dentry *show_num_errors_dent __read_mostly;
 static struct dentry *num_free_entries_dent __read_mostly;
 static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN 64
+
+static char current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
 
 static const char *type2name[4] = { "single", "page",
 				    "scather-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -121,15 +139,54 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 {
 #ifdef CONFIG_STACKTRACE
 	if (entry) {
-		printk(KERN_WARNING "Mapped at:\n");
+		pr_warning("Mapped at:\n");
 		print_stack_trace(&entry->stacktrace, 0);
 	}
 #endif
 }
 
+static bool driver_filter(struct device *dev)
+{
+	struct device_driver *drv;
+	unsigned long flags;
+	bool ret;
+
+	/* driver filter off */
+	if (likely(!current_driver_name[0]))
+		return true;
+
+	/* driver filter on and initialized */
+	if (current_driver && dev->driver == current_driver)
+		return true;
+
+	if (current_driver || !current_driver_name[0])
+		return false;
+
+	/* driver filter on but not yet initialized */
+	drv = get_driver(dev->driver);
+	if (!drv)
+		return false;
+
+	/* lock to protect against change of current_driver_name */
+	read_lock_irqsave(&driver_name_lock, flags);
+
+	ret = false;
+	if (drv->name &&
+	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+		current_driver = drv;
+		ret = true;
+	}
+
+	read_unlock_irqrestore(&driver_name_lock, flags);
+	put_driver(drv);
+
+	return ret;
+}
+
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
-		if (show_all_errors || show_num_errors > 0) {	\
+		if (driver_filter(dev) &&			\
+		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
 			     dev_driver_string(dev),		\
 			     dev_name(dev) , ## arg);		\
@@ -185,15 +242,50 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 						struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry;
+	struct dma_debug_entry *entry, *ret = NULL;
+	int matches = 0, match_lvl, last_lvl = 0;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr == ref->dev_addr) &&
-		    (entry->dev == ref->dev))
+		if ((entry->dev_addr != ref->dev_addr) ||
+		    (entry->dev != ref->dev))
+			continue;
+
+		/*
+		 * Some drivers map the same physical address multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which returns the entry from
+		 * the hash which fits best to the reference value
+		 * instead of the first-fit.
+		 */
+		matches += 1;
+		match_lvl = 0;
+		entry->size      == ref->size      ? ++match_lvl : match_lvl;
+		entry->type      == ref->type      ? ++match_lvl : match_lvl;
+		entry->direction == ref->direction ? ++match_lvl : match_lvl;
+
+		if (match_lvl == 3) {
+			/* perfect-fit - return the result */
 			return entry;
+		} else if (match_lvl > last_lvl) {
+			/*
+			 * We found an entry that fits better than the
+			 * previous one
+			 */
+			last_lvl = match_lvl;
+			ret      = entry;
+		}
 	}
 
-	return NULL;
+	/*
+	 * If we have multiple matches but no perfect-fit, just return
+	 * NULL.
+	 */
+	ret = (matches == 1) ? ret : NULL;
+
+	return ret;
 }
 
 /*
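To make the false-positive scenario concrete, here is a hypothetical driver
pattern (illustrative only, not part of this patch) where first-fit matching
could pick the wrong hash entry:

	/* without an IOMMU, both mappings yield the same dev_addr */
	dma_addr_t a = dma_map_single(dev, buf, 512, DMA_TO_DEVICE);
	dma_addr_t b = dma_map_single(dev, buf, 1024, DMA_TO_DEVICE);

	/* on unmap, only size/type/direction can tell the two hash
	 * entries apart - hence the match_lvl scoring above */
	dma_unmap_single(dev, b, 1024, DMA_TO_DEVICE);
	dma_unmap_single(dev, a, 512, DMA_TO_DEVICE);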
@@ -257,6 +349,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+	struct dma_debug_entry *entry;
+
+	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+	list_del(&entry->list);
+	memset(entry, 0, sizeof(*entry));
+
+	num_free_entries -= 1;
+	if (num_free_entries < min_free_entries)
+		min_free_entries = num_free_entries;
+
+	return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
@@ -270,15 +377,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		printk(KERN_ERR "DMA-API: debugging out of memory "
-				"- disabling\n");
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		goto out;
 	}
 
-	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-	list_del(&entry->list);
-	memset(entry, 0, sizeof(*entry));
+	entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +390,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	entry->stacktrace.skip = 2;
 	save_stack_trace(&entry->stacktrace);
 #endif
-	num_free_entries -= 1;
-	if (num_free_entries < min_free_entries)
-		min_free_entries = num_free_entries;
 
 out:
 	spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +411,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+	int i, delta, ret = 0;
+	unsigned long flags;
+	struct dma_debug_entry *entry;
+	LIST_HEAD(tmp);
+
+	spin_lock_irqsave(&free_entries_lock, flags);
+
+	if (nr_total_entries < num_entries) {
+		delta = num_entries - nr_total_entries;
+
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+
+		for (i = 0; i < delta; i++) {
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry)
+				break;
+
+			list_add_tail(&entry->list, &tmp);
+		}
+
+		spin_lock_irqsave(&free_entries_lock, flags);
+
+		list_splice(&tmp, &free_entries);
+		nr_total_entries += i;
+		num_free_entries += i;
+	} else {
+		delta = nr_total_entries - num_entries;
+
+		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+			entry = __dma_entry_alloc();
+			kfree(entry);
+		}
+
+		nr_total_entries -= i;
+	}
+
+	if (nr_total_entries != num_entries)
+		ret = 1;
+
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
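dma_debug_resize_entries() lets a caller grow or shrink the preallocated entry
pool after boot; it returns 0 on success and 1 if the pool could not be resized
all the way. A hedged caller sketch (the function placement and sizing variable
below are hypothetical, not from this patch):

	/* e.g. from an IOMMU driver's init path, once the real number of
	 * in-flight mappings it must track is known */
	if (dma_debug_resize_entries(my_iommu_num_entries))
		pr_warning("could not resize dma-debug entry pool\n");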
@@ -334,8 +482,7 @@ static int prealloc_memory(u32 num_entries)
 	num_free_entries = num_entries;
 	min_free_entries = num_entries;
 
-	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
-			num_entries);
+	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
 
 	return 0;
 
@@ -349,11 +496,102 @@ out_err:
 	return -ENOMEM;
 }
 
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN + 1];
+	unsigned long flags;
+	int len;
+
+	if (!current_driver_name[0])
+		return 0;
+
+	/*
+	 * We can't copy to userspace directly because current_driver_name can
+	 * only be read under the driver_name_lock with irqs disabled. So
+	 * create a temporary copy first.
+	 */
+	read_lock_irqsave(&driver_name_lock, flags);
+	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+	read_unlock_irqrestore(&driver_name_lock, flags);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+			    size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN];
+	unsigned long flags;
+	size_t len;
+	int i;
+
+	/*
+	 * We can't copy from userspace directly. Access to
+	 * current_driver_name is protected with a write_lock with irqs
+	 * disabled. Since copy_from_user can fault and may sleep we
+	 * need to copy to a temporary buffer first
+	 */
+	len = min(count, (size_t)(NAME_MAX_LEN - 1));
+	if (copy_from_user(buf, userbuf, len))
+		return -EFAULT;
+
+	buf[len] = 0;
+
+	write_lock_irqsave(&driver_name_lock, flags);
+
+	/*
+	 * Now handle the string we got from userspace very carefully.
+	 * The rules are:
+	 * - only use the first token we got
+	 * - token delimiter is everything looking like a space
+	 *   character (' ', '\n', '\t' ...)
+	 */
+	if (!isalnum(buf[0])) {
+		/*
+		 * If the first character userspace gave us is not
+		 * alphanumerical then assume the filter should be
+		 * switched off.
+		 */
+		if (current_driver_name[0])
+			pr_info("DMA-API: switching off dma-debug driver filter\n");
+		current_driver_name[0] = 0;
+		current_driver = NULL;
+		goto out_unlock;
+	}
+
+	/*
+	 * Now parse out the first token and use it as the name for the
+	 * driver to filter for.
+	 */
+	for (i = 0; i < NAME_MAX_LEN; ++i) {
+		current_driver_name[i] = buf[i];
+		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+			break;
+	}
+	current_driver_name[i] = 0;
+	current_driver = NULL;
+
+	pr_info("DMA-API: enable driver filter for driver [%s]\n",
+		current_driver_name);
+
+out_unlock:
+	write_unlock_irqrestore(&driver_name_lock, flags);
+
+	return count;
+}
+
+const struct file_operations filter_fops = {
+	.read  = filter_read,
+	.write = filter_write,
+};
+
 static int dma_debug_fs_init(void)
 {
 	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
 	if (!dma_debug_dent) {
-		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
+		pr_err("DMA-API: can not create debugfs directory\n");
 		return -ENOMEM;
 	}
 
@@ -392,6 +630,11 @@ static int dma_debug_fs_init(void)
 	if (!min_free_entries_dent)
 		goto out_err;
 
+	filter_dent = debugfs_create_file("driver_filter", 0644,
+					  dma_debug_dent, NULL, &filter_fops);
+	if (!filter_dent)
+		goto out_err;
+
 	return 0;
 
 out_err:
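With this file in place, the filter can be driven from userspace at runtime; for
example (assuming debugfs is mounted at /sys/kernel/debug, with e1000e as a
stand-in driver name):

	echo e1000e > /sys/kernel/debug/dma-api/driver_filter    (filter on)
	echo ""     > /sys/kernel/debug/dma-api/driver_filter    (filter off)

The second write works because a leading newline is not alphanumeric, which
filter_write() above treats as a request to switch the filter off.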
@@ -406,15 +649,19 @@ static int device_dma_allocations(struct device *dev)
 	unsigned long flags;
 	int count = 0, i;
 
+	local_irq_save(flags);
+
 	for (i = 0; i < HASH_SIZE; ++i) {
-		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
+		spin_lock(&dma_entry_hash[i].lock);
 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
 			if (entry->dev == dev)
 				count += 1;
 		}
-		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
+		spin_unlock(&dma_entry_hash[i].lock);
 	}
 
+	local_irq_restore(flags);
+
 	return count;
 }
 
@@ -426,7 +673,7 @@ static int dma_debug_device_change(struct notifier_block *nb,
 
 
 	switch (action) {
-	case BUS_NOTIFY_UNBIND_DRIVER:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
 		count = device_dma_allocations(dev);
 		if (count == 0)
 			break;
@@ -447,7 +694,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
 	if (nb == NULL) {
-		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
+		pr_err("dma_debug_add_bus: out of memory\n");
 		return;
 	}
 
@@ -472,8 +719,7 @@ void dma_debug_init(u32 num_entries)
 	}
 
 	if (dma_debug_fs_init() != 0) {
-		printk(KERN_ERR "DMA-API: error creating debugfs entries "
-				"- disabling\n");
+		pr_err("DMA-API: error creating debugfs entries - disabling\n");
 		global_disable = true;
 
 		return;
@@ -483,14 +729,15 @@ void dma_debug_init(u32 num_entries)
 		num_entries = req_entries;
 
 	if (prealloc_memory(num_entries) != 0) {
-		printk(KERN_ERR "DMA-API: debugging out of memory error "
-				"- disabled\n");
+		pr_err("DMA-API: debugging out of memory error - disabled\n");
 		global_disable = true;
 
 		return;
 	}
 
-	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
+	nr_total_entries = num_free_entries;
+
+	pr_info("DMA-API: debugging enabled by kernel config\n");
 }
 
 static __init int dma_debug_cmdline(char *str)
@@ -499,8 +746,7 @@ static __init int dma_debug_cmdline(char *str)
 		return -EINVAL;
 
 	if (strncmp(str, "off", 3) == 0) {
-		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
-				"command line\n");
+		pr_info("DMA-API: debugging disabled on kernel command line\n");
 		global_disable = true;
 	}
 
@@ -774,15 +1020,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		entry->type = dma_debug_sg;
 		entry->dev = dev;
 		entry->paddr = sg_phys(s);
-		entry->size = s->length;
-		entry->dev_addr = s->dma_address;
+		entry->size = sg_dma_len(s);
+		entry->dev_addr = sg_dma_address(s);
 		entry->direction = direction;
 		entry->sg_call_ents = nents;
 		entry->sg_mapped_ents = mapped_ents;
 
 		if (!PageHighMem(sg_page(s))) {
 			check_for_stack(dev, sg_virt(s));
-			check_for_illegal_area(dev, sg_virt(s), s->length);
+			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
 		}
 
 		add_dma_entry(entry);
@@ -790,13 +1036,33 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
+static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+{
+	struct dma_debug_entry *entry, ref;
+	struct hash_bucket *bucket;
+	unsigned long flags;
+	int mapped_ents;
+
+	ref.dev = dev;
+	ref.dev_addr = sg_dma_address(s);
+	ref.size = sg_dma_len(s);
+
+	bucket = get_hash_bucket(&ref, &flags);
+	entry = hash_bucket_find(bucket, &ref);
+	mapped_ents = 0;
+
+	if (entry)
+		mapped_ents = entry->sg_mapped_ents;
+	put_hash_bucket(bucket, &flags);
+
+	return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			int nelems, int dir)
 {
-	struct dma_debug_entry *entry;
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
-	unsigned long flags;
 
 	if (unlikely(global_disable))
 		return;
@@ -807,8 +1073,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.type = dma_debug_sg,
 			.dev = dev,
 			.paddr = sg_phys(s),
-			.dev_addr = s->dma_address,
-			.size = s->length,
+			.dev_addr = sg_dma_address(s),
+			.size = sg_dma_len(s),
 			.direction = dir,
 			.sg_call_ents = 0,
 		};
@@ -816,14 +1082,9 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (mapped_ents == 0) {
-			struct hash_bucket *bucket;
+		if (!i) {
 			ref.sg_call_ents = nelems;
-			bucket = get_hash_bucket(&ref, &flags);
-			entry = hash_bucket_find(bucket, &ref);
-			if (entry)
-				mapped_ents = entry->sg_mapped_ents;
-			put_hash_bucket(bucket, &flags);
+			mapped_ents = get_nr_mapped_entries(dev, s);
 		}
 
 		check_unmap(&ref);
@@ -925,14 +1186,20 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			       int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, true);
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, s);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
+			   direction, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -941,15 +1208,39 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				  int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, false);
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, s);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
+			   direction, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
 
+static int __init dma_debug_driver_setup(char *str)
+{
+	int i;
+
+	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+		current_driver_name[i] = *str;
+		if (*str == 0)
+			break;
+	}
+
+	if (current_driver_name[0])
+		pr_info("DMA-API: enable driver filter for driver [%s]\n",
+			current_driver_name);
+
+	return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
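The same filter can thus be armed before any driver binds by putting it on the
kernel command line, e.g. dma_debug_driver=e1000e (the driver name is only an
example). Unlike the debugfs write path, this early parser appears to copy the
string as-is rather than tokenizing it, since the command line is already split
on spaces.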
diff --git a/lib/kobject.c b/lib/kobject.c
index a6dec32f2ddd..bacf6fe4f7a0 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -218,6 +218,9 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 	const char *old_name = kobj->name;
 	char *s;
 
+	if (kobj->name && !fmt)
+		return 0;
+
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
 	if (!kobj->name)
 		return -ENOMEM;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 97a777ad4f59..920a3ca6e259 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -258,7 +258,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 			goto exit;
 
 		retval = call_usermodehelper(argv[0], argv,
-					     env->envp, UMH_NO_WAIT);
+					     env->envp, UMH_WAIT_EXEC);
 	}
 
 exit:
@@ -328,7 +328,7 @@ static int __init kobject_uevent_init(void)
 		       "kobject_uevent: unable to create netlink socket!\n");
 		return -ENODEV;
 	}
-
+	netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
 	return 0;
 }
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b7b449dafbe5..a295e404e908 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -347,9 +347,12 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
 	sg_miter_stop(miter);
 
 	/* get to the next sg if necessary.  __offset is adjusted by stop */
-	if (miter->__offset == miter->__sg->length && --miter->__nents) {
-		miter->__sg = sg_next(miter->__sg);
-		miter->__offset = 0;
+	while (miter->__offset == miter->__sg->length) {
+		if (--miter->__nents) {
+			miter->__sg = sg_next(miter->__sg);
+			miter->__offset = 0;
+		} else
+			return false;
 	}
 
 	/* map the next page */
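The if-to-while change lets the iterator step over fully consumed (including
zero-length) entries instead of running past the end of the list. For
reference, a minimal consumer of this API (a sketch assuming an already-built
table sgl/nents; not part of this patch):

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (sg_miter_next(&miter)) {
		/* miter.addr maps the current page; miter.length bytes valid */
		memset(miter.addr, 0, miter.length);
	}
	sg_miter_stop(&miter);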
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7d2ced..bffe6d7ef9d9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in unmap_single and
+ * sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
 	return paddr;
 }
 
-phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
 {
 	return baddr;
 }
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
-static void *swiotlb_bus_to_virt(dma_addr_t address)
+void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
 {
-	return phys_to_virt(swiotlb_bus_to_phys(address));
+	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
+}
+
+int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
+					      dma_addr_t addr, size_t size)
+{
+	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
 }
 
 int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
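These __weak definitions are per-architecture override points: an arch whose
bus addresses are not identity-mapped to physical addresses can supply strong
versions. A hedged sketch of such an override (my_arch_dma_offset() is
hypothetical):

	/* strong per-arch version overriding the __weak default above */
	phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
	{
		return baddr - my_arch_dma_offset(hwdev);
	}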
@@ -309,10 +315,10 @@ cleanup1:
 	return -ENOMEM;
 }
 
-static int
+static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
 }
 
 static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		unsigned long flags;
 
 		while (size) {
-			sz = min(PAGE_SIZE - offset, size);
+			sz = min_t(size_t, PAGE_SIZE - offset, size);
 
 			local_irq_save(flags);
 			buffer = kmap_atomic(pfn_to_page(pfn),
@@ -476,7 +482,7 @@ found:
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 				   size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
-		 * Fall back on swiotlb_map_single().
 		 */
 		free_pages((unsigned long) ret, order);
 		ret = NULL;
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (!ret) {
 		/*
 		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on
-		 * swiotlb_map_single(), which will grab memory from
-		 * the lowest available address range.
+		 * to GFP_DMA memory; fall back on map_single(), which
+		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
 		if (!ret)
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    unsigned long offset, size_t size,
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    struct dma_attrs *attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
-	void *ptr = page_address(page) + offset;
 	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
-	 * If the pointer passed in happens to be in the device's DMA window,
+	 * If the address happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
 	if (!address_needs_mapping(dev, dev_addr, size) &&
-	    !range_needs_mapping(virt_to_phys(ptr), size))
+	    !range_needs_mapping(phys, size))
 		return dev_addr;
 
 	/*
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call.  All
+ * match what was provided for in a previous swiotlb_map_page call.  All
  * other usages are undefined.
  *
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+			 size_t size, int dir)
+{
+	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+
+	BUG_ON(dir == DMA_NONE);
+
+	if (is_swiotlb_buffer(dma_addr)) {
+		do_unmap_single(hwdev, dma_addr, size, dir);
+		return;
+	}
+
+	if (dir != DMA_FROM_DEVICE)
+		return;
+
+	dma_mark_clean(dma_addr, size);
+}
+
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			size_t size, enum dma_data_direction dir,
 			struct dma_attrs *attrs)
 {
-	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
-
-	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(dma_addr))
-		unmap_single(hwdev, dma_addr, size, dir);
-	else if (dir == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
+	unmap_single(hwdev, dev_addr, size, dir);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
@@ -713,13 +728,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(dma_addr))
+
+	if (is_swiotlb_buffer(dma_addr)) {
 		sync_single(hwdev, dma_addr, size, dir, target);
-	else if (dir == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
+		return;
+	}
+
+	if (dir != DMA_FROM_DEVICE)
+		return;
+
+	dma_mark_clean(dma_addr, size);
 }
 
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
-
-	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(dma_addr))
-		sync_single(hwdev, dma_addr, size, dir, target);
-	else if (dir == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
+	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
 }
 
 void
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
+ * This is the scatter-gather version of the above swiotlb_map_page
  * interface.  Here the scatter gather list elements are each tagged with the
  * appropriate dma address and length.  They are obtained via
  * sg_dma_{address,length}(SG).
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
- * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
 	BUG_ON(dir == DMA_NONE);
 
-	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
-			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
-				     sg->dma_length, dir);
-		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
-	}
+	for_each_sg(sgl, sg, nelems, i)
+		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int i;
 
-	BUG_ON(dir == DMA_NONE);
-
-	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
-			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
-				    sg->dma_length, dir, target);
-		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
-	}
+	for_each_sg(sgl, sg, nelems, i)
+		swiotlb_sync_single(hwdev, sg->dma_address,
+				    sg->dma_length, dir, target);
 }
 
 void