path: root/lib/dma-debug.c
Diffstat (limited to 'lib/dma-debug.c')
-rw-r--r--  lib/dma-debug.c  589
1 file changed, 491 insertions(+), 98 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 69da09a085a1..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
 #include <linux/dma-debug.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent __read_mostly;
 static struct dentry *show_num_errors_dent  __read_mostly;
 static struct dentry *num_free_entries_dent __read_mostly;
 static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent           __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN    64
+
+static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver                    __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
 
 static const char *type2name[4] = { "single", "page",
                                     "scather-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                    "DMA_FROM_DEVICE", "DMA_NONE" };
 
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -121,22 +139,65 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 {
 #ifdef CONFIG_STACKTRACE
         if (entry) {
-                printk(KERN_WARNING "Mapped at:\n");
+                pr_warning("Mapped at:\n");
                 print_stack_trace(&entry->stacktrace, 0);
         }
 #endif
 }
 
-#define err_printk(dev, entry, format, arg...) do {            \
-                error_count += 1;                               \
-                if (show_all_errors || show_num_errors > 0) {   \
-                        WARN(1, "%s %s: " format,               \
-                             dev_driver_string(dev),            \
-                             dev_name(dev) , ## arg);           \
-                        dump_entry_trace(entry);                \
-                }                                               \
-                if (!show_all_errors && show_num_errors > 0)    \
-                        show_num_errors -= 1;                   \
+static bool driver_filter(struct device *dev)
+{
+        struct device_driver *drv;
+        unsigned long flags;
+        bool ret;
+
+        /* driver filter off */
+        if (likely(!current_driver_name[0]))
+                return true;
+
+        /* driver filter on and initialized */
+        if (current_driver && dev && dev->driver == current_driver)
+                return true;
+
+        /* driver filter on, but we can't filter on a NULL device... */
+        if (!dev)
+                return false;
+
+        if (current_driver || !current_driver_name[0])
+                return false;
+
+        /* driver filter on but not yet initialized */
+        drv = get_driver(dev->driver);
+        if (!drv)
+                return false;
+
+        /* lock to protect against change of current_driver_name */
+        read_lock_irqsave(&driver_name_lock, flags);
+
+        ret = false;
+        if (drv->name &&
+            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+                current_driver = drv;
+                ret = true;
+        }
+
+        read_unlock_irqrestore(&driver_name_lock, flags);
+        put_driver(drv);
+
+        return ret;
+}
+
+#define err_printk(dev, entry, format, arg...) do {                     \
+                error_count += 1;                                       \
+                if (driver_filter(dev) &&                               \
+                    (show_all_errors || show_num_errors > 0)) {         \
+                        WARN(1, "%s %s: " format,                       \
+                             dev ? dev_driver_string(dev) : "NULL",     \
+                             dev ? dev_name(dev) : "NULL", ## arg);     \
+                        dump_entry_trace(entry);                        \
+                }                                                       \
+                if (!show_all_errors && show_num_errors > 0)            \
+                        show_num_errors -= 1;                           \
         } while (0);
 
 /*
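
Note that driver_filter() is deliberately lazy: the filter is armed by name only (via the debugfs file or the kernel parameter added further down), and current_driver is resolved to a concrete struct device_driver the first time an event from a matching device passes through err_printk(). A standalone userspace model of the decision order, for illustration only (the types and names below are hypothetical, not kernel API):

/* model_filter.c - userspace illustration of driver_filter()'s logic */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct drv { const char *name; };
struct dev { struct drv *driver; };

static char filter_name[64];   /* plays the role of current_driver_name */
static struct drv *filter_drv; /* plays the role of current_driver */

static bool model_filter(struct dev *dev)
{
        if (!filter_name[0])
                return true;            /* filter off */
        if (filter_drv && dev && dev->driver == filter_drv)
                return true;            /* cached match */
        if (!dev || filter_drv)
                return false;           /* no device, or cached driver differs */
        if (dev->driver && dev->driver->name &&
            strcmp(filter_name, dev->driver->name) == 0) {
                filter_drv = dev->driver;       /* lazy initialization */
                return true;
        }
        return false;
}

int main(void)
{
        struct drv e1000e = { "e1000e" }, r8169 = { "r8169" };
        struct dev a = { &e1000e }, b = { &r8169 };

        strcpy(filter_name, "e1000e");
        /* the first call resolves the name, later calls hit the cache: 1 0 1 */
        printf("%d %d %d\n", model_filter(&a), model_filter(&b), model_filter(&a));
        return 0;
}
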
@@ -185,15 +246,51 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
 {
-        struct dma_debug_entry *entry;
+        struct dma_debug_entry *entry, *ret = NULL;
+        int matches = 0, match_lvl, last_lvl = 0;
 
         list_for_each_entry(entry, &bucket->list, list) {
-                if ((entry->dev_addr == ref->dev_addr) &&
-                    (entry->dev == ref->dev))
+                if ((entry->dev_addr != ref->dev_addr) ||
+                    (entry->dev != ref->dev))
+                        continue;
+
+                /*
+                 * Some drivers map the same physical address multiple
+                 * times. Without a hardware IOMMU this results in the
+                 * same device addresses being put into the dma-debug
+                 * hash multiple times too. This can result in false
+                 * positives being reported. Therefore we implement a
+                 * best-fit algorithm here which returns the entry from
+                 * the hash which fits best to the reference value
+                 * instead of the first-fit.
+                 */
+                matches += 1;
+                match_lvl = 0;
+                entry->size         == ref->size         ? ++match_lvl : 0;
+                entry->type         == ref->type         ? ++match_lvl : 0;
+                entry->direction    == ref->direction    ? ++match_lvl : 0;
+                entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
+
+                if (match_lvl == 4) {
+                        /* perfect-fit - return the result */
                         return entry;
+                } else if (match_lvl > last_lvl) {
+                        /*
+                         * We found an entry that fits better than the
+                         * previous one
+                         */
+                        last_lvl = match_lvl;
+                        ret      = entry;
+                }
         }
 
-        return NULL;
+        /*
+         * If we have multiple matches but no perfect-fit, just return
+         * NULL.
+         */
+        ret = (matches == 1) ? ret : NULL;
+
+        return ret;
 }
 
 /*
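
The scoring loop above simply counts how many of the four secondary fields agree with the reference; equality on dev and dev_addr is already guaranteed by the continue at the top. Worked example: if the same dev_addr sits in the hash twice, once as (size 4096, DMA_TO_DEVICE) and once as (size 512, DMA_FROM_DEVICE), a reference of (512, DMA_FROM_DEVICE) scores 2 against the first entry but 4 against the second, so the second is returned immediately as a perfect fit. A minimal userspace sketch of the scoring step (illustrative only):

/* match_lvl.c - userspace illustration of hash_bucket_find()'s scoring */
#include <stdio.h>

struct ent { unsigned long size; int type, direction, sg_call_ents; };

static int match_lvl(const struct ent *e, const struct ent *ref)
{
        int lvl = 0;

        lvl += e->size == ref->size;
        lvl += e->type == ref->type;
        lvl += e->direction == ref->direction;
        lvl += e->sg_call_ents == ref->sg_call_ents;

        return lvl;     /* 4 means perfect fit */
}

int main(void)
{
        struct ent to_dev   = { 4096, 0, 1, 0 };  /* e.g. DMA_TO_DEVICE   */
        struct ent from_dev = {  512, 0, 2, 0 };  /* e.g. DMA_FROM_DEVICE */
        struct ent ref      = {  512, 0, 2, 0 };

        printf("%d %d\n", match_lvl(&to_dev, &ref),
               match_lvl(&from_dev, &ref));     /* prints: 2 4 */
        return 0;
}
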
@@ -257,6 +354,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
         put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+        struct dma_debug_entry *entry;
+
+        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+        list_del(&entry->list);
+        memset(entry, 0, sizeof(*entry));
+
+        num_free_entries -= 1;
+        if (num_free_entries < min_free_entries)
+                min_free_entries = num_free_entries;
+
+        return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
@@ -270,15 +382,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
         spin_lock_irqsave(&free_entries_lock, flags);
 
         if (list_empty(&free_entries)) {
-                printk(KERN_ERR "DMA-API: debugging out of memory "
-                                "- disabling\n");
+                pr_err("DMA-API: debugging out of memory - disabling\n");
                 global_disable = true;
                 goto out;
         }
 
-        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-        list_del(&entry->list);
-        memset(entry, 0, sizeof(*entry));
+        entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
         entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +395,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
         entry->stacktrace.skip = 2;
         save_stack_trace(&entry->stacktrace);
 #endif
-        num_free_entries -= 1;
-        if (num_free_entries < min_free_entries)
-                min_free_entries = num_free_entries;
 
 out:
         spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +416,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
         spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+        int i, delta, ret = 0;
+        unsigned long flags;
+        struct dma_debug_entry *entry;
+        LIST_HEAD(tmp);
+
+        spin_lock_irqsave(&free_entries_lock, flags);
+
+        if (nr_total_entries < num_entries) {
+                delta = num_entries - nr_total_entries;
+
+                spin_unlock_irqrestore(&free_entries_lock, flags);
+
+                for (i = 0; i < delta; i++) {
+                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+                        if (!entry)
+                                break;
+
+                        list_add_tail(&entry->list, &tmp);
+                }
+
+                spin_lock_irqsave(&free_entries_lock, flags);
+
+                list_splice(&tmp, &free_entries);
+                nr_total_entries += i;
+                num_free_entries += i;
+        } else {
+                delta = nr_total_entries - num_entries;
+
+                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+                        entry = __dma_entry_alloc();
+                        kfree(entry);
+                }
+
+                nr_total_entries -= i;
+        }
+
+        if (nr_total_entries != num_entries)
+                ret = 1;
+
+        spin_unlock_irqrestore(&free_entries_lock, flags);
+
+        return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
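
dma_debug_resize_entries() returns 0 when the pool could be brought to exactly num_entries and 1 otherwise (for example when kzalloc() fails part way through growing it). A hedged sketch of a caller, say an IOMMU init path that expects many concurrent mappings; the function name and the 1 << 16 target are made-up example values:

/* sketch only - not part of this patch */
#include <linux/dma-debug.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void __init example_grow_dma_debug_pool(void)
{
        if (dma_debug_resize_entries(1 << 16) != 0)
                pr_warning("DMA-API: debug entry pool could not be resized\n");
}
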
@@ -334,8 +487,7 @@ static int prealloc_memory(u32 num_entries)
         num_free_entries = num_entries;
         min_free_entries = num_entries;
 
-        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
-               num_entries);
+        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
 
         return 0;
 
@@ -349,11 +501,102 @@ out_err:
         return -ENOMEM;
 }
 
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+                           size_t count, loff_t *ppos)
+{
+        char buf[NAME_MAX_LEN + 1];
+        unsigned long flags;
+        int len;
+
+        if (!current_driver_name[0])
+                return 0;
+
+        /*
+         * We can't copy to userspace directly because current_driver_name can
+         * only be read under the driver_name_lock with irqs disabled. So
+         * create a temporary copy first.
+         */
+        read_lock_irqsave(&driver_name_lock, flags);
+        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+        read_unlock_irqrestore(&driver_name_lock, flags);
+
+        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+                            size_t count, loff_t *ppos)
+{
+        char buf[NAME_MAX_LEN];
+        unsigned long flags;
+        size_t len;
+        int i;
+
+        /*
+         * We can't copy from userspace directly. Access to
+         * current_driver_name is protected with a write_lock with irqs
+         * disabled. Since copy_from_user can fault and may sleep we
+         * need to copy to a temporary buffer first
+         */
+        len = min(count, (size_t)(NAME_MAX_LEN - 1));
+        if (copy_from_user(buf, userbuf, len))
+                return -EFAULT;
+
+        buf[len] = 0;
+
+        write_lock_irqsave(&driver_name_lock, flags);
+
+        /*
+         * Now handle the string we got from userspace very carefully.
+         * The rules are:
+         * - only use the first token we got
+         * - token delimiter is everything looking like a space
+         *   character (' ', '\n', '\t' ...)
+         */
+        if (!isalnum(buf[0])) {
+                /*
+                 * If the first character userspace gave us is not
+                 * alphanumerical then assume the filter should be
+                 * switched off.
+                 */
+                if (current_driver_name[0])
+                        pr_info("DMA-API: switching off dma-debug driver filter\n");
+                current_driver_name[0] = 0;
+                current_driver = NULL;
+                goto out_unlock;
+        }
+
+        /*
+         * Now parse out the first token and use it as the name for the
+         * driver to filter for.
+         */
+        for (i = 0; i < NAME_MAX_LEN; ++i) {
+                current_driver_name[i] = buf[i];
+                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+                        break;
+        }
+        current_driver_name[i] = 0;
+        current_driver = NULL;
+
+        pr_info("DMA-API: enable driver filter for driver [%s]\n",
+                current_driver_name);
+
+out_unlock:
+        write_unlock_irqrestore(&driver_name_lock, flags);
+
+        return count;
+}
+
+const struct file_operations filter_fops = {
+        .read  = filter_read,
+        .write = filter_write,
+};
+
 static int dma_debug_fs_init(void)
 {
         dma_debug_dent = debugfs_create_dir("dma-api", NULL);
         if (!dma_debug_dent) {
-                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
+                pr_err("DMA-API: can not create debugfs directory\n");
                 return -ENOMEM;
         }
 
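
With filter_fops wired up below, the filter is driven from userspace: writing a driver name arms it (echo e1000e > /sys/kernel/debug/dma-api/driver_filter), and writing anything whose first byte is not alphanumeric, e.g. a bare newline, disarms it again, exactly as filter_write() above implements. A small userspace helper doing the same, assuming debugfs is mounted at /sys/kernel/debug:

/* set_dma_filter.c - userspace sketch; the mount point is an assumption */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *name = (argc > 1) ? argv[1] : "\n"; /* "\n" disarms */
        int fd = open("/sys/kernel/debug/dma-api/driver_filter", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, name, strlen(name)) < 0)
                perror("write");
        close(fd);
        return 0;
}
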
@@ -392,6 +635,11 @@ static int dma_debug_fs_init(void)
         if (!min_free_entries_dent)
                 goto out_err;
 
+        filter_dent = debugfs_create_file("driver_filter", 0644,
+                                          dma_debug_dent, NULL, &filter_fops);
+        if (!filter_dent)
+                goto out_err;
+
         return 0;
 
 out_err:
@@ -400,9 +648,64 @@ out_err:
         return -ENOMEM;
 }
 
+static int device_dma_allocations(struct device *dev)
+{
+        struct dma_debug_entry *entry;
+        unsigned long flags;
+        int count = 0, i;
+
+        local_irq_save(flags);
+
+        for (i = 0; i < HASH_SIZE; ++i) {
+                spin_lock(&dma_entry_hash[i].lock);
+                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
+                        if (entry->dev == dev)
+                                count += 1;
+                }
+                spin_unlock(&dma_entry_hash[i].lock);
+        }
+
+        local_irq_restore(flags);
+
+        return count;
+}
+
+static int dma_debug_device_change(struct notifier_block *nb,
+                                   unsigned long action, void *data)
+{
+        struct device *dev = data;
+        int count;
+
+        switch (action) {
+        case BUS_NOTIFY_UNBOUND_DRIVER:
+                count = device_dma_allocations(dev);
+                if (count == 0)
+                        break;
+                err_printk(dev, NULL, "DMA-API: device driver has pending "
+                                "DMA allocations while released from device "
+                                "[count=%d]\n", count);
+                break;
+        default:
+                break;
+        }
+
+        return 0;
+}
+
 void dma_debug_add_bus(struct bus_type *bus)
 {
-        /* FIXME: register notifier */
+        struct notifier_block *nb;
+
+        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+        if (nb == NULL) {
+                pr_err("dma_debug_add_bus: out of memory\n");
+                return;
+        }
+
+        nb->notifier_call = dma_debug_device_change;
+
+        bus_register_notifier(bus, nb);
 }
 
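
dma_debug_add_bus() is the opt-in hook for a bus core: once the notifier is registered, every BUS_NOTIFY_UNBOUND_DRIVER event runs device_dma_allocations() over the hash and warns if the departing driver still owns mappings. A sketch of how a bus might wire this up at init time; whether and where a particular bus actually does so is outside this patch:

/* sketch only - the registration site is illustrative */
#include <linux/dma-debug.h>
#include <linux/init.h>
#include <linux/pci.h>

static int __init example_bus_dma_debug_init(void)
{
        dma_debug_add_bus(&pci_bus_type);
        return 0;
}
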
408/* 711/*
@@ -417,12 +720,11 @@ void dma_debug_init(u32 num_entries)
417 720
418 for (i = 0; i < HASH_SIZE; ++i) { 721 for (i = 0; i < HASH_SIZE; ++i) {
419 INIT_LIST_HEAD(&dma_entry_hash[i].list); 722 INIT_LIST_HEAD(&dma_entry_hash[i].list);
420 dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; 723 spin_lock_init(&dma_entry_hash[i].lock);
421 } 724 }
422 725
423 if (dma_debug_fs_init() != 0) { 726 if (dma_debug_fs_init() != 0) {
424 printk(KERN_ERR "DMA-API: error creating debugfs entries " 727 pr_err("DMA-API: error creating debugfs entries - disabling\n");
425 "- disabling\n");
426 global_disable = true; 728 global_disable = true;
427 729
428 return; 730 return;
@@ -432,14 +734,15 @@ void dma_debug_init(u32 num_entries)
                 num_entries = req_entries;
 
         if (prealloc_memory(num_entries) != 0) {
-                printk(KERN_ERR "DMA-API: debugging out of memory error "
-                                "- disabled\n");
+                pr_err("DMA-API: debugging out of memory error - disabled\n");
                 global_disable = true;
 
                 return;
         }
 
-        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
+        nr_total_entries = num_free_entries;
+
+        pr_info("DMA-API: debugging enabled by kernel config\n");
 }
 
 static __init int dma_debug_cmdline(char *str)
@@ -448,8 +751,7 @@ static __init int dma_debug_cmdline(char *str)
                 return -EINVAL;
 
         if (strncmp(str, "off", 3) == 0) {
-                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
-                                 "command line\n");
+                pr_info("DMA-API: debugging disabled on kernel command line\n");
                 global_disable = true;
         }
 
@@ -558,90 +860,85 @@ static void check_for_stack(struct device *dev, void *addr)
558 "stack [addr=%p]\n", addr); 860 "stack [addr=%p]\n", addr);
559} 861}
560 862
561static inline bool overlap(void *addr, u64 size, void *start, void *end) 863static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
562{ 864{
563 void *addr2 = (char *)addr + size; 865 unsigned long a1 = (unsigned long)addr;
866 unsigned long b1 = a1 + len;
867 unsigned long a2 = (unsigned long)start;
868 unsigned long b2 = (unsigned long)end;
564 869
565 return ((addr >= start && addr < end) || 870 return !(b1 <= a2 || a1 >= b2);
566 (addr2 >= start && addr2 < end) ||
567 ((addr < start) && (addr2 >= end)));
568} 871}
569 872
570static void check_for_illegal_area(struct device *dev, void *addr, u64 size) 873static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
571{ 874{
572 if (overlap(addr, size, _text, _etext) || 875 if (overlap(addr, len, _text, _etext) ||
573 overlap(addr, size, __start_rodata, __end_rodata)) 876 overlap(addr, len, __start_rodata, __end_rodata))
574 err_printk(dev, NULL, "DMA-API: device driver maps " 877 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
575 "memory from kernel text or rodata "
576 "[addr=%p] [size=%llu]\n", addr, size);
577} 878}
578 879
579static void check_sync(struct device *dev, dma_addr_t addr, 880static void check_sync(struct device *dev,
580 u64 size, u64 offset, int direction, bool to_cpu) 881 struct dma_debug_entry *ref,
882 bool to_cpu)
581{ 883{
582 struct dma_debug_entry ref = {
583 .dev = dev,
584 .dev_addr = addr,
585 .size = size,
586 .direction = direction,
587 };
588 struct dma_debug_entry *entry; 884 struct dma_debug_entry *entry;
589 struct hash_bucket *bucket; 885 struct hash_bucket *bucket;
590 unsigned long flags; 886 unsigned long flags;
591 887
592 bucket = get_hash_bucket(&ref, &flags); 888 bucket = get_hash_bucket(ref, &flags);
593 889
594 entry = hash_bucket_find(bucket, &ref); 890 entry = hash_bucket_find(bucket, ref);
595 891
596 if (!entry) { 892 if (!entry) {
597 err_printk(dev, NULL, "DMA-API: device driver tries " 893 err_printk(dev, NULL, "DMA-API: device driver tries "
598 "to sync DMA memory it has not allocated " 894 "to sync DMA memory it has not allocated "
599 "[device address=0x%016llx] [size=%llu bytes]\n", 895 "[device address=0x%016llx] [size=%llu bytes]\n",
600 (unsigned long long)addr, size); 896 (unsigned long long)ref->dev_addr, ref->size);
601 goto out; 897 goto out;
602 } 898 }
603 899
604 if ((offset + size) > entry->size) { 900 if (ref->size > entry->size) {
605 err_printk(dev, entry, "DMA-API: device driver syncs" 901 err_printk(dev, entry, "DMA-API: device driver syncs"
606 " DMA memory outside allocated range " 902 " DMA memory outside allocated range "
607 "[device address=0x%016llx] " 903 "[device address=0x%016llx] "
608 "[allocation size=%llu bytes] [sync offset=%llu] " 904 "[allocation size=%llu bytes] "
609 "[sync size=%llu]\n", entry->dev_addr, entry->size, 905 "[sync offset+size=%llu]\n",
610 offset, size); 906 entry->dev_addr, entry->size,
907 ref->size);
611 } 908 }
612 909
613 if (direction != entry->direction) { 910 if (ref->direction != entry->direction) {
614 err_printk(dev, entry, "DMA-API: device driver syncs " 911 err_printk(dev, entry, "DMA-API: device driver syncs "
615 "DMA memory with different direction " 912 "DMA memory with different direction "
616 "[device address=0x%016llx] [size=%llu bytes] " 913 "[device address=0x%016llx] [size=%llu bytes] "
617 "[mapped with %s] [synced with %s]\n", 914 "[mapped with %s] [synced with %s]\n",
618 (unsigned long long)addr, entry->size, 915 (unsigned long long)ref->dev_addr, entry->size,
619 dir2name[entry->direction], 916 dir2name[entry->direction],
620 dir2name[direction]); 917 dir2name[ref->direction]);
621 } 918 }
622 919
623 if (entry->direction == DMA_BIDIRECTIONAL) 920 if (entry->direction == DMA_BIDIRECTIONAL)
624 goto out; 921 goto out;
625 922
626 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
627 !(direction == DMA_TO_DEVICE)) 924 !(ref->direction == DMA_TO_DEVICE))
628 err_printk(dev, entry, "DMA-API: device driver syncs " 925 err_printk(dev, entry, "DMA-API: device driver syncs "
629 "device read-only DMA memory for cpu " 926 "device read-only DMA memory for cpu "
630 "[device address=0x%016llx] [size=%llu bytes] " 927 "[device address=0x%016llx] [size=%llu bytes] "
631 "[mapped with %s] [synced with %s]\n", 928 "[mapped with %s] [synced with %s]\n",
632 (unsigned long long)addr, entry->size, 929 (unsigned long long)ref->dev_addr, entry->size,
633 dir2name[entry->direction], 930 dir2name[entry->direction],
634 dir2name[direction]); 931 dir2name[ref->direction]);
635 932
636 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 933 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
637 !(direction == DMA_FROM_DEVICE)) 934 !(ref->direction == DMA_FROM_DEVICE))
638 err_printk(dev, entry, "DMA-API: device driver syncs " 935 err_printk(dev, entry, "DMA-API: device driver syncs "
639 "device write-only DMA memory to device " 936 "device write-only DMA memory to device "
640 "[device address=0x%016llx] [size=%llu bytes] " 937 "[device address=0x%016llx] [size=%llu bytes] "
641 "[mapped with %s] [synced with %s]\n", 938 "[mapped with %s] [synced with %s]\n",
642 (unsigned long long)addr, entry->size, 939 (unsigned long long)ref->dev_addr, entry->size,
643 dir2name[entry->direction], 940 dir2name[entry->direction],
644 dir2name[direction]); 941 dir2name[ref->direction]);
645 942
646out: 943out:
647 put_hash_bucket(bucket, &flags); 944 put_hash_bucket(bucket, &flags);
@@ -675,7 +972,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
         entry->type = dma_debug_single;
 
         if (!PageHighMem(page)) {
-                void *addr = ((char *)page_address(page)) + offset;
+                void *addr = page_address(page) + offset;
+
                 check_for_stack(dev, addr);
                 check_for_illegal_area(dev, addr, size);
         }
@@ -723,15 +1021,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                 entry->type = dma_debug_sg;
                 entry->dev = dev;
                 entry->paddr = sg_phys(s);
-                entry->size = s->length;
-                entry->dev_addr = s->dma_address;
+                entry->size = sg_dma_len(s);
+                entry->dev_addr = sg_dma_address(s);
                 entry->direction = direction;
                 entry->sg_call_ents = nents;
                 entry->sg_mapped_ents = mapped_ents;
 
                 if (!PageHighMem(sg_page(s))) {
                         check_for_stack(dev, sg_virt(s));
-                        check_for_illegal_area(dev, sg_virt(s), s->length);
+                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                 }
 
                 add_dma_entry(entry);
@@ -739,13 +1037,30 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
+static int get_nr_mapped_entries(struct device *dev,
+                                 struct dma_debug_entry *ref)
+{
+        struct dma_debug_entry *entry;
+        struct hash_bucket *bucket;
+        unsigned long flags;
+        int mapped_ents;
+
+        bucket = get_hash_bucket(ref, &flags);
+        entry = hash_bucket_find(bucket, ref);
+        mapped_ents = 0;
+
+        if (entry)
+                mapped_ents = entry->sg_mapped_ents;
+        put_hash_bucket(bucket, &flags);
+
+        return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, int dir)
 {
-        struct dma_debug_entry *entry;
         struct scatterlist *s;
         int mapped_ents = 0, i;
-        unsigned long flags;
 
         if (unlikely(global_disable))
                 return;
@@ -756,24 +1071,17 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                         .type = dma_debug_sg,
                         .dev = dev,
                         .paddr = sg_phys(s),
-                        .dev_addr = s->dma_address,
-                        .size = s->length,
+                        .dev_addr = sg_dma_address(s),
+                        .size = sg_dma_len(s),
                         .direction = dir,
-                        .sg_call_ents = 0,
+                        .sg_call_ents = nelems,
                 };
 
                 if (mapped_ents && i >= mapped_ents)
                         break;
 
-                if (mapped_ents == 0) {
-                        struct hash_bucket *bucket;
-                        ref.sg_call_ents = nelems;
-                        bucket = get_hash_bucket(&ref, &flags);
-                        entry = hash_bucket_find(bucket, &ref);
-                        if (entry)
-                                mapped_ents = entry->sg_mapped_ents;
-                        put_hash_bucket(bucket, &flags);
-                }
+                if (!i)
+                        mapped_ents = get_nr_mapped_entries(dev, &ref);
 
                 check_unmap(&ref);
         }
@@ -828,10 +1136,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                    size_t size, int direction)
 {
+        struct dma_debug_entry ref;
+
         if (unlikely(global_disable))
                 return;
 
-        check_sync(dev, dma_handle, size, 0, direction, true);
+        ref.type = dma_debug_single;
+        ref.dev = dev;
+        ref.dev_addr = dma_handle;
+        ref.size = size;
+        ref.direction = direction;
+        ref.sg_call_ents = 0;
+
+        check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -839,10 +1156,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction)
 {
+        struct dma_debug_entry ref;
+
         if (unlikely(global_disable))
                 return;
 
-        check_sync(dev, dma_handle, size, 0, direction, false);
+        ref.type = dma_debug_single;
+        ref.dev = dev;
+        ref.dev_addr = dma_handle;
+        ref.size = size;
+        ref.direction = direction;
+        ref.sg_call_ents = 0;
+
+        check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -851,10 +1177,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                           unsigned long offset, size_t size,
                                           int direction)
 {
+        struct dma_debug_entry ref;
+
         if (unlikely(global_disable))
                 return;
 
-        check_sync(dev, dma_handle, size, offset, direction, true);
+        ref.type = dma_debug_single;
+        ref.dev = dev;
+        ref.dev_addr = dma_handle;
+        ref.size = offset + size;
+        ref.direction = direction;
+        ref.sg_call_ents = 0;
+
+        check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -863,10 +1198,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
                                              unsigned long offset,
                                              size_t size, int direction)
 {
+        struct dma_debug_entry ref;
+
         if (unlikely(global_disable))
                 return;
 
-        check_sync(dev, dma_handle, size, offset, direction, false);
+        ref.type = dma_debug_single;
+        ref.dev = dev;
+        ref.dev_addr = dma_handle;
+        ref.size = offset + size;
+        ref.direction = direction;
+        ref.sg_call_ents = 0;
+
+        check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
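
Note how both range variants fold the offset into the reference: ref.size is set to offset + size, so check_sync()'s single ref->size > entry->size test subsumes the old explicit offset check. For example, syncing a 4096-byte mapping at offset 3840 with size 512 yields ref.size = 4352, and 4352 > 4096 triggers the same out-of-range warning as before.
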
@@ -874,14 +1218,30 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                int nelems, int direction)
 {
         struct scatterlist *s;
-        int i;
+        int mapped_ents = 0, i;
 
         if (unlikely(global_disable))
                 return;
 
         for_each_sg(sg, s, nelems, i) {
-                check_sync(dev, s->dma_address, s->dma_length, 0,
-                                direction, true);
+
+                struct dma_debug_entry ref = {
+                        .type = dma_debug_sg,
+                        .dev = dev,
+                        .paddr = sg_phys(s),
+                        .dev_addr = sg_dma_address(s),
+                        .size = sg_dma_len(s),
+                        .direction = direction,
+                        .sg_call_ents = nelems,
+                };
+
+                if (!i)
+                        mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+                if (i >= mapped_ents)
+                        break;
+
+                check_sync(dev, &ref, true);
         }
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -890,15 +1250,48 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
 {
         struct scatterlist *s;
-        int i;
+        int mapped_ents = 0, i;
 
         if (unlikely(global_disable))
                 return;
 
         for_each_sg(sg, s, nelems, i) {
-                check_sync(dev, s->dma_address, s->dma_length, 0,
-                                direction, false);
+
+                struct dma_debug_entry ref = {
+                        .type = dma_debug_sg,
+                        .dev = dev,
+                        .paddr = sg_phys(s),
+                        .dev_addr = sg_dma_address(s),
+                        .size = sg_dma_len(s),
+                        .direction = direction,
+                        .sg_call_ents = nelems,
+                };
+
+                if (!i)
+                        mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+                if (i >= mapped_ents)
+                        break;
+
+                check_sync(dev, &ref, false);
         }
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
 
+static int __init dma_debug_driver_setup(char *str)
+{
+        int i;
+
+        for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+                current_driver_name[i] = *str;
+                if (*str == 0)
+                        break;
+        }
+
+        if (current_driver_name[0])
+                pr_info("DMA-API: enable driver filter for driver [%s]\n",
+                        current_driver_name);
+
+        return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
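
The __setup() handler gives the filter a second entry point: booting with dma_debug_driver=<drivername> on the kernel command line fills current_driver_name before any device is bound, with the same effect as writing the name to the debugfs file later on.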