path: root/lib/dma-debug.c
Diffstat (limited to 'lib/dma-debug.c')
-rw-r--r--  lib/dma-debug.c | 496
1 file changed, 417 insertions(+), 79 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d3da7edc034f..3b93129a968c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
 #include <linux/dma-debug.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent  __read_mostly;
 static struct dentry *show_num_errors_dent  __read_mostly;
 static struct dentry *num_free_entries_dent __read_mostly;
 static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent           __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN	64
+
+static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver                    __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
 
 static const char *type2name[4] = { "single", "page",
 				    "scatter-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -121,15 +139,54 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 {
 #ifdef CONFIG_STACKTRACE
 	if (entry) {
-		printk(KERN_WARNING "Mapped at:\n");
+		pr_warning("Mapped at:\n");
 		print_stack_trace(&entry->stacktrace, 0);
 	}
 #endif
 }
 
+static bool driver_filter(struct device *dev)
+{
+	struct device_driver *drv;
+	unsigned long flags;
+	bool ret;
+
+	/* driver filter off */
+	if (likely(!current_driver_name[0]))
+		return true;
+
+	/* driver filter on and initialized */
+	if (current_driver && dev->driver == current_driver)
+		return true;
+
+	if (current_driver || !current_driver_name[0])
+		return false;
+
+	/* driver filter on but not yet initialized */
+	drv = get_driver(dev->driver);
+	if (!drv)
+		return false;
+
+	/* lock to protect against change of current_driver_name */
+	read_lock_irqsave(&driver_name_lock, flags);
+
+	ret = false;
+	if (drv->name &&
+	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+		current_driver = drv;
+		ret = true;
+	}
+
+	read_unlock_irqrestore(&driver_name_lock, flags);
+	put_driver(drv);
+
+	return ret;
+}
+
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
-		if (show_all_errors || show_num_errors > 0) {	\
+		if (driver_filter(dev) &&			\
+		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
 			     dev_driver_string(dev),		\
 			     dev_name(dev) , ## arg);		\
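
A note on the gating above: every report site goes through err_printk(), so a single driver_filter() check is enough to scope all dma-debug warnings to one driver. A minimal sketch of the resulting control flow (hypothetical call site; the show_num_errors bookkeeping at the end of the macro lies outside this hunk):

	/* Sketch: what one report site does, conceptually */
	error_count += 1;
	if (driver_filter(dev) &&                      /* filter matches?  */
	    (show_all_errors || show_num_errors > 0))  /* report budget?   */
		WARN(1, "%s %s: <message>",
		     dev_driver_string(dev), dev_name(dev));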
@@ -185,15 +242,51 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 						struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry;
+	struct dma_debug_entry *entry, *ret = NULL;
+	int matches = 0, match_lvl, last_lvl = 0;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr == ref->dev_addr) &&
-		    (entry->dev == ref->dev))
+		if ((entry->dev_addr != ref->dev_addr) ||
+		    (entry->dev != ref->dev))
+			continue;
+
+		/*
+		 * Some drivers map the same physical address multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which returns the entry from
+		 * the hash which fits best to the reference value
+		 * instead of the first-fit.
+		 */
+		matches += 1;
+		match_lvl = 0;
+		entry->size      == ref->size      ? ++match_lvl : 0;
+		entry->type      == ref->type      ? ++match_lvl : 0;
+		entry->direction == ref->direction ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
+
+		if (match_lvl == 4) {
+			/* perfect-fit - return the result */
 			return entry;
+		} else if (match_lvl > last_lvl) {
+			/*
+			 * We found an entry that fits better than the
+			 * previous one
+			 */
+			last_lvl = match_lvl;
+			ret      = entry;
+		}
 	}
 
-	return NULL;
+	/*
+	 * If we have multiple matches but no perfect-fit, just return
+	 * NULL.
+	 */
+	ret = (matches == 1) ? ret : NULL;
+
+	return ret;
 }
 
 /*
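
To see why the best-fit lookup matters, consider a hypothetical pair of live entries that share dev and dev_addr (for example, the same page mapped twice without an IOMMU):

	/*
	 * entry A: size=4096, type=single, direction=DMA_TO_DEVICE
	 * entry B: size=512,  type=single, direction=DMA_FROM_DEVICE
	 *
	 * A sync reference {size=512, type=single, dir=DMA_FROM_DEVICE,
	 * sg_call_ents=0} scores match_lvl=2 against A (type and
	 * sg_call_ents) but match_lvl=4 against B, so B is returned.
	 * The old first-fit walk could hand back A and trigger a false
	 * "syncs DMA memory with different direction" warning.
	 */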
@@ -257,6 +350,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+	struct dma_debug_entry *entry;
+
+	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+	list_del(&entry->list);
+	memset(entry, 0, sizeof(*entry));
+
+	num_free_entries -= 1;
+	if (num_free_entries < min_free_entries)
+		min_free_entries = num_free_entries;
+
+	return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
@@ -270,15 +378,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		printk(KERN_ERR "DMA-API: debugging out of memory "
-				"- disabling\n");
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		goto out;
 	}
 
-	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-	list_del(&entry->list);
-	memset(entry, 0, sizeof(*entry));
+	entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +391,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	entry->stacktrace.skip = 2;
 	save_stack_trace(&entry->stacktrace);
 #endif
-	num_free_entries -= 1;
-	if (num_free_entries < min_free_entries)
-		min_free_entries = num_free_entries;
 
 out:
 	spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +412,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+	int i, delta, ret = 0;
+	unsigned long flags;
+	struct dma_debug_entry *entry;
+	LIST_HEAD(tmp);
+
+	spin_lock_irqsave(&free_entries_lock, flags);
+
+	if (nr_total_entries < num_entries) {
+		delta = num_entries - nr_total_entries;
+
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+
+		for (i = 0; i < delta; i++) {
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry)
+				break;
+
+			list_add_tail(&entry->list, &tmp);
+		}
+
+		spin_lock_irqsave(&free_entries_lock, flags);
+
+		list_splice(&tmp, &free_entries);
+		nr_total_entries += i;
+		num_free_entries += i;
+	} else {
+		delta = nr_total_entries - num_entries;
+
+		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+			entry = __dma_entry_alloc();
+			kfree(entry);
+		}
+
+		nr_total_entries -= i;
+	}
+
+	if (nr_total_entries != num_entries)
+		ret = 1;
+
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
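
dma_debug_resize_entries() lets code outside this file adjust the preallocated pool after boot; it returns nonzero when the requested size could not be reached. A hedged usage sketch (the caller and nr_mappable are hypothetical; an IOMMU driver that knows how many mappings it can actually sustain is the intended kind of user):

	/* Sketch: shrink or grow the dma-debug pool to match hardware limits */
	if (dma_debug_resize_entries(nr_mappable) != 0)
		pr_warning("could not allocate all requested dma-debug entries\n");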
@@ -334,8 +483,7 @@ static int prealloc_memory(u32 num_entries)
 	num_free_entries = num_entries;
 	min_free_entries = num_entries;
 
-	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
-			num_entries);
+	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
 
 	return 0;
 
@@ -349,11 +497,102 @@ out_err:
 	return -ENOMEM;
 }
 
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN + 1];
+	unsigned long flags;
+	int len;
+
+	if (!current_driver_name[0])
+		return 0;
+
+	/*
+	 * We can't copy to userspace directly because current_driver_name can
+	 * only be read under the driver_name_lock with irqs disabled. So
+	 * create a temporary copy first.
+	 */
+	read_lock_irqsave(&driver_name_lock, flags);
+	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+	read_unlock_irqrestore(&driver_name_lock, flags);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+			    size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN];
+	unsigned long flags;
+	size_t len;
+	int i;
+
+	/*
+	 * We can't copy from userspace directly. Access to
+	 * current_driver_name is protected with a write_lock with irqs
+	 * disabled. Since copy_from_user can fault and may sleep we
+	 * need to copy to a temporary buffer first.
+	 */
+	len = min(count, (size_t)(NAME_MAX_LEN - 1));
+	if (copy_from_user(buf, userbuf, len))
+		return -EFAULT;
+
+	buf[len] = 0;
+
+	write_lock_irqsave(&driver_name_lock, flags);
+
+	/*
+	 * Now handle the string we got from userspace very carefully.
+	 * The rules are:
+	 * - only use the first token we got
+	 * - token delimiter is everything looking like a space
+	 *   character (' ', '\n', '\t' ...)
+	 *
+	 */
+	if (!isalnum(buf[0])) {
+		/*
+		 * If the first character userspace gave us is not
+		 * alphanumerical then assume the filter should be
+		 * switched off.
+		 */
+		if (current_driver_name[0])
+			pr_info("DMA-API: switching off dma-debug driver filter\n");
+		current_driver_name[0] = 0;
+		current_driver = NULL;
+		goto out_unlock;
+	}
+
+	/*
+	 * Now parse out the first token and use it as the name for the
+	 * driver to filter for.
+	 */
+	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
+		current_driver_name[i] = buf[i];
+		if (isspace(buf[i]) || buf[i] == 0)
+			break;
+	}
+	current_driver_name[i] = 0;
+	current_driver = NULL;
+
+	pr_info("DMA-API: enable driver filter for driver [%s]\n",
+		current_driver_name);
+
+out_unlock:
+	write_unlock_irqrestore(&driver_name_lock, flags);
+
+	return count;
+}
+
+static const struct file_operations filter_fops = {
+	.read  = filter_read,
+	.write = filter_write,
+};
+
 static int dma_debug_fs_init(void)
 {
 	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
 	if (!dma_debug_dent) {
-		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
+		pr_err("DMA-API: can not create debugfs directory\n");
 		return -ENOMEM;
 	}
 
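
The new debugfs file appears as dma-api/driver_filter once dma_debug_fs_init() below registers it. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug (the helper and its name are illustrative, not part of this patch):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Restrict dma-debug reports to one driver. Per filter_write()
	 * above, writing a non-alphanumeric first byte (e.g. "\n")
	 * switches the filter off again. */
	static int set_dma_debug_filter(const char *drv)
	{
		int fd = open("/sys/kernel/debug/dma-api/driver_filter",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, drv, strlen(drv));
		close(fd);
		return n < 0 ? -1 : 0;
	}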
@@ -392,6 +631,11 @@ static int dma_debug_fs_init(void)
 	if (!min_free_entries_dent)
 		goto out_err;
 
+	filter_dent = debugfs_create_file("driver_filter", 0644,
+					  dma_debug_dent, NULL, &filter_fops);
+	if (!filter_dent)
+		goto out_err;
+
 	return 0;
 
 out_err:
@@ -406,15 +650,19 @@ static int device_dma_allocations(struct device *dev)
 	unsigned long flags;
 	int count = 0, i;
 
+	local_irq_save(flags);
+
 	for (i = 0; i < HASH_SIZE; ++i) {
-		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
+		spin_lock(&dma_entry_hash[i].lock);
 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
 			if (entry->dev == dev)
 				count += 1;
 		}
-		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
+		spin_unlock(&dma_entry_hash[i].lock);
 	}
 
+	local_irq_restore(flags);
+
 	return count;
 }
 
@@ -426,7 +674,7 @@ static int dma_debug_device_change(struct notifier_block *nb,
 
 
 	switch (action) {
-	case BUS_NOTIFY_UNBIND_DRIVER:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
 		count = device_dma_allocations(dev);
 		if (count == 0)
 			break;
@@ -447,7 +695,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
 	if (nb == NULL) {
-		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
+		pr_err("dma_debug_add_bus: out of memory\n");
 		return;
 	}
 
@@ -472,8 +720,7 @@ void dma_debug_init(u32 num_entries)
 	}
 
 	if (dma_debug_fs_init() != 0) {
-		printk(KERN_ERR "DMA-API: error creating debugfs entries "
-				"- disabling\n");
+		pr_err("DMA-API: error creating debugfs entries - disabling\n");
 		global_disable = true;
 
 		return;
@@ -483,14 +730,15 @@ void dma_debug_init(u32 num_entries)
 		num_entries = req_entries;
 
 	if (prealloc_memory(num_entries) != 0) {
-		printk(KERN_ERR "DMA-API: debugging out of memory error "
-				"- disabled\n");
+		pr_err("DMA-API: debugging out of memory error - disabled\n");
 		global_disable = true;
 
 		return;
 	}
 
-	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
+	nr_total_entries = num_free_entries;
+
+	pr_info("DMA-API: debugging enabled by kernel config\n");
 }
 
 static __init int dma_debug_cmdline(char *str)
@@ -499,8 +747,7 @@ static __init int dma_debug_cmdline(char *str)
 		return -EINVAL;
 
 	if (strncmp(str, "off", 3) == 0) {
-		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
-				"command line\n");
+		pr_info("DMA-API: debugging disabled on kernel command line\n");
 		global_disable = true;
 	}
 
@@ -627,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
 			"[addr=%p] [size=%llu]\n", addr, size);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-		       u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+		       struct dma_debug_entry *ref,
+		       bool to_cpu)
 {
-	struct dma_debug_entry ref = {
-		.dev            = dev,
-		.dev_addr       = addr,
-		.size           = size,
-		.direction      = direction,
-	};
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	bucket = get_hash_bucket(&ref, &flags);
+	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, &ref);
+	entry = hash_bucket_find(bucket, ref);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
 			   "to sync DMA memory it has not allocated "
 			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   (unsigned long long)addr, size);
+			   (unsigned long long)ref->dev_addr, ref->size);
 		goto out;
 	}
 
-	if ((offset + size) > entry->size) {
+	if (ref->size > entry->size) {
 		err_printk(dev, entry, "DMA-API: device driver syncs"
 			   " DMA memory outside allocated range "
 			   "[device address=0x%016llx] "
-			   "[allocation size=%llu bytes] [sync offset=%llu] "
-			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
-			   offset, size);
+			   "[allocation size=%llu bytes] "
+			   "[sync offset+size=%llu]\n",
+			   entry->dev_addr, entry->size,
+			   ref->size);
 	}
 
-	if (direction != entry->direction) {
+	if (ref->direction != entry->direction) {
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "DMA memory with different direction "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 	}
 
 	if (entry->direction == DMA_BIDIRECTIONAL)
 		goto out;
 
 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-		      !(direction == DMA_TO_DEVICE))
+		      !(ref->direction == DMA_TO_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "device read-only DMA memory for cpu "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-		       !(direction == DMA_FROM_DEVICE))
+		       !(ref->direction == DMA_FROM_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "device write-only DMA memory to device "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 out:
 	put_hash_bucket(bucket, &flags);
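
For reference, the kind of driver bug the direction checks above catch, as a hypothetical call sequence (dev, buf and len are assumed context):

	/* Buffer mapped for device reads only ... */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... but later synced as if the device had written to it.
	 * check_sync() flags this twice: once as "syncs DMA memory
	 * with different direction" and once as "syncs device
	 * read-only DMA memory for cpu". */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);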
@@ -774,15 +1017,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		entry->type           = dma_debug_sg;
 		entry->dev            = dev;
 		entry->paddr          = sg_phys(s);
-		entry->size           = s->length;
-		entry->dev_addr       = s->dma_address;
+		entry->size           = sg_dma_len(s);
+		entry->dev_addr       = sg_dma_address(s);
 		entry->direction      = direction;
 		entry->sg_call_ents   = nents;
 		entry->sg_mapped_ents = mapped_ents;
 
 		if (!PageHighMem(sg_page(s))) {
 			check_for_stack(dev, sg_virt(s));
-			check_for_illegal_area(dev, sg_virt(s), s->length);
+			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
 		}
 
 		add_dma_entry(entry);
@@ -790,13 +1033,30 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
+{
+	struct dma_debug_entry *entry;
+	struct hash_bucket *bucket;
+	unsigned long flags;
+	int mapped_ents;
+
+	bucket = get_hash_bucket(ref, &flags);
+	entry  = hash_bucket_find(bucket, ref);
+	mapped_ents = 0;
+
+	if (entry)
+		mapped_ents = entry->sg_mapped_ents;
+	put_hash_bucket(bucket, &flags);
+
+	return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			int nelems, int dir)
 {
-	struct dma_debug_entry *entry;
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
-	unsigned long flags;
 
 	if (unlikely(global_disable))
 		return;
@@ -807,24 +1067,17 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.type           = dma_debug_sg,
 			.dev            = dev,
 			.paddr          = sg_phys(s),
-			.dev_addr       = s->dma_address,
-			.size           = s->length,
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
 			.direction      = dir,
-			.sg_call_ents   = 0,
+			.sg_call_ents   = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (mapped_ents == 0) {
-			struct hash_bucket *bucket;
-			ref.sg_call_ents = nelems;
-			bucket = get_hash_bucket(&ref, &flags);
-			entry = hash_bucket_find(bucket, &ref);
-			if (entry)
-				mapped_ents = entry->sg_mapped_ents;
-			put_hash_bucket(bucket, &flags);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -879,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -890,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 				      dma_addr_t dma_handle, size_t size,
 				      int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -902,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					 unsigned long offset, size_t size,
 					 int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -914,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 					    unsigned long offset,
 					    size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
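
Worth noting for the two range variants above: the old offset argument is folded into the reference as ref.size = offset + size, so the range check in check_sync() remains equivalent, at the cost of the report no longer separating offset from size:

	/*
	 * Illustration: the old and new range checks are equivalent.
	 *
	 *   old: (offset + size) > entry->size
	 *   new: ref->size > entry->size, with ref.size = offset + size
	 */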
@@ -925,14 +1214,30 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			       int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, true);
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -941,15 +1246,48 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			       int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, false);
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
 
+static int __init dma_debug_driver_setup(char *str)
+{
+	int i;
+
+	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+		current_driver_name[i] = *str;
+		if (*str == 0)
+			break;
+	}
+
+	if (current_driver_name[0])
+		pr_info("DMA-API: enable driver filter for driver [%s]\n",
+			current_driver_name);
+
+	return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
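
With this setup hook, the filter can also be armed at boot time, e.g. by appending (the driver name is purely illustrative):

	dma_debug_driver=e1000e

to the kernel command line; the driver_filter debugfs file above can still change or clear it at runtime.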