path: root/lib/dma-debug.c
Diffstat (limited to 'lib/dma-debug.c')
 lib/dma-debug.c | 541 +++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 465 insertions(+), 76 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 69da09a085a1..3b93129a968c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
 #include <linux/dma-debug.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent __read_mostly;
 static struct dentry *show_num_errors_dent  __read_mostly;
 static struct dentry *num_free_entries_dent __read_mostly;
 static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent           __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN	64
+
+static char current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver   __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
 
 static const char *type2name[4] = { "single", "page",
 				    "scather-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -121,15 +139,54 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 {
 #ifdef CONFIG_STACKTRACE
 	if (entry) {
-		printk(KERN_WARNING "Mapped at:\n");
+		pr_warning("Mapped at:\n");
 		print_stack_trace(&entry->stacktrace, 0);
 	}
 #endif
 }
 
+static bool driver_filter(struct device *dev)
+{
+	struct device_driver *drv;
+	unsigned long flags;
+	bool ret;
+
+	/* driver filter off */
+	if (likely(!current_driver_name[0]))
+		return true;
+
+	/* driver filter on and initialized */
+	if (current_driver && dev->driver == current_driver)
+		return true;
+
+	if (current_driver || !current_driver_name[0])
+		return false;
+
+	/* driver filter on but not yet initialized */
+	drv = get_driver(dev->driver);
+	if (!drv)
+		return false;
+
+	/* lock to protect against change of current_driver_name */
+	read_lock_irqsave(&driver_name_lock, flags);
+
+	ret = false;
+	if (drv->name &&
+	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+		current_driver = drv;
+		ret = true;
+	}
+
+	read_unlock_irqrestore(&driver_name_lock, flags);
+	put_driver(drv);
+
+	return ret;
+}
+
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
-		if (show_all_errors || show_num_errors > 0) {	\
+		if (driver_filter(dev) &&			\
+		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
 			     dev_driver_string(dev),		\
 			     dev_name(dev) , ## arg);		\
@@ -185,15 +242,51 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 						struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry;
+	struct dma_debug_entry *entry, *ret = NULL;
+	int matches = 0, match_lvl, last_lvl = 0;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr == ref->dev_addr) &&
-		    (entry->dev == ref->dev))
+		if ((entry->dev_addr != ref->dev_addr) ||
+		    (entry->dev != ref->dev))
+			continue;
+
+		/*
+		 * Some drivers map the same physical address multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which returns the entry from
+		 * the hash which fits best to the reference value
+		 * instead of the first-fit.
+		 */
+		matches += 1;
+		match_lvl = 0;
+		entry->size         == ref->size         ? ++match_lvl : 0;
+		entry->type         == ref->type         ? ++match_lvl : 0;
+		entry->direction    == ref->direction    ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
+
+		if (match_lvl == 4) {
+			/* perfect-fit - return the result */
 			return entry;
+		} else if (match_lvl > last_lvl) {
+			/*
+			 * We found an entry that fits better than the
+			 * previous one
+			 */
+			last_lvl = match_lvl;
+			ret      = entry;
+		}
 	}
 
-	return NULL;
+	/*
+	 * If we have multiple matches but no perfect-fit, just return
+	 * NULL.
+	 */
+	ret = (matches == 1) ? ret : NULL;
+
+	return ret;
 }
 
 /*
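The best-fit lookup above can be read as a small scoring function. The helper below is an illustration only, not part of this patch; it restates the four match criteria used by hash_bucket_find(), where a score of 4 means a perfect fit:

/*
 * Illustration only - restates the scoring from hash_bucket_find():
 * one point per matching attribute; four points is a perfect fit
 * and ends the search immediately.
 */
static int match_score(struct dma_debug_entry *entry,
		       struct dma_debug_entry *ref)
{
	int score = 0;

	if (entry->size == ref->size)
		score += 1;
	if (entry->type == ref->type)
		score += 1;
	if (entry->direction == ref->direction)
		score += 1;
	if (entry->sg_call_ents == ref->sg_call_ents)
		score += 1;

	return score;
}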
@@ -257,6 +350,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+	struct dma_debug_entry *entry;
+
+	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+	list_del(&entry->list);
+	memset(entry, 0, sizeof(*entry));
+
+	num_free_entries -= 1;
+	if (num_free_entries < min_free_entries)
+		min_free_entries = num_free_entries;
+
+	return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
@@ -270,15 +378,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		printk(KERN_ERR "DMA-API: debugging out of memory "
-				"- disabling\n");
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		goto out;
 	}
 
-	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-	list_del(&entry->list);
-	memset(entry, 0, sizeof(*entry));
+	entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +391,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	entry->stacktrace.skip = 2;
 	save_stack_trace(&entry->stacktrace);
 #endif
-	num_free_entries -= 1;
-	if (num_free_entries < min_free_entries)
-		min_free_entries = num_free_entries;
 
 out:
 	spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +412,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+	int i, delta, ret = 0;
+	unsigned long flags;
+	struct dma_debug_entry *entry;
+	LIST_HEAD(tmp);
+
+	spin_lock_irqsave(&free_entries_lock, flags);
+
+	if (nr_total_entries < num_entries) {
+		delta = num_entries - nr_total_entries;
+
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+
+		for (i = 0; i < delta; i++) {
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry)
+				break;
+
+			list_add_tail(&entry->list, &tmp);
+		}
+
+		spin_lock_irqsave(&free_entries_lock, flags);
+
+		list_splice(&tmp, &free_entries);
+		nr_total_entries += i;
+		num_free_entries += i;
+	} else {
+		delta = nr_total_entries - num_entries;
+
+		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+			entry = __dma_entry_alloc();
+			kfree(entry);
+		}
+
+		nr_total_entries -= i;
+	}
+
+	if (nr_total_entries != num_entries)
+		ret = 1;
+
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
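A sketch of how the new interface might be called; my_iommu_init() is hypothetical, only dma_debug_resize_entries() comes from this patch. Per the code above, it returns 0 on success and 1 if the pool could not be resized to exactly num_entries:

/*
 * Hypothetical caller - e.g. an IOMMU driver that expects to track
 * more mappings than the preallocated dma-debug pool can hold.
 */
static int __init my_iommu_init(void)
{
	/* grow the pool to 65536 entries; non-zero means it failed */
	if (dma_debug_resize_entries(65536) != 0)
		pr_warning("my_iommu: dma-debug pool resize failed\n");

	return 0;
}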
@@ -334,8 +483,7 @@ static int prealloc_memory(u32 num_entries)
 	num_free_entries = num_entries;
 	min_free_entries = num_entries;
 
-	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
-			num_entries);
+	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
 
 	return 0;
 
@@ -349,11 +497,102 @@ out_err:
 	return -ENOMEM;
 }
 
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN + 1];
+	unsigned long flags;
+	int len;
+
+	if (!current_driver_name[0])
+		return 0;
+
+	/*
+	 * We can't copy to userspace directly because current_driver_name can
+	 * only be read under the driver_name_lock with irqs disabled. So
+	 * create a temporary copy first.
+	 */
+	read_lock_irqsave(&driver_name_lock, flags);
+	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+	read_unlock_irqrestore(&driver_name_lock, flags);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+			    size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN];
+	unsigned long flags;
+	size_t len;
+	int i;
+
+	/*
+	 * We can't copy from userspace directly. Access to
+	 * current_driver_name is protected with a write_lock with irqs
+	 * disabled. Since copy_from_user can fault and may sleep we
+	 * need to copy to a temporary buffer first.
+	 */
+	len = min(count, (size_t)(NAME_MAX_LEN - 1));
+	if (copy_from_user(buf, userbuf, len))
+		return -EFAULT;
+
+	buf[len] = 0;
+
+	write_lock_irqsave(&driver_name_lock, flags);
+
+	/*
+	 * Now handle the string we got from userspace very carefully.
+	 * The rules are:
+	 * - only use the first token we got
+	 * - token delimiter is everything looking like a space
+	 *   character (' ', '\n', '\t' ...)
+	 */
+	if (!isalnum(buf[0])) {
+		/*
+		 * If the first character userspace gave us is not
+		 * alphanumerical then assume the filter should be
+		 * switched off.
+		 */
+		if (current_driver_name[0])
+			pr_info("DMA-API: switching off dma-debug driver filter\n");
+		current_driver_name[0] = 0;
+		current_driver = NULL;
+		goto out_unlock;
+	}
+
+	/*
+	 * Now parse out the first token and use it as the name for the
+	 * driver to filter for.
+	 */
+	for (i = 0; i < NAME_MAX_LEN; ++i) {
+		current_driver_name[i] = buf[i];
+		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+			break;
+	}
+	current_driver_name[i] = 0;
+	current_driver = NULL;
+
+	pr_info("DMA-API: enable driver filter for driver [%s]\n",
+		current_driver_name);
+
+out_unlock:
+	write_unlock_irqrestore(&driver_name_lock, flags);
+
+	return count;
+}
+
+const struct file_operations filter_fops = {
+	.read  = filter_read,
+	.write = filter_write,
+};
+
 static int dma_debug_fs_init(void)
 {
 	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
 	if (!dma_debug_dent) {
-		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
+		pr_err("DMA-API: can not create debugfs directory\n");
 		return -ENOMEM;
 	}
 
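A minimal userspace sketch of driving the new debugfs file. It assumes debugfs is mounted at /sys/kernel/debug, and "e1000e" is only an example driver name:

#include <stdio.h>

/* assumes debugfs is mounted at /sys/kernel/debug */
#define FILTER "/sys/kernel/debug/dma-api/driver_filter"

int main(void)
{
	FILE *f = fopen(FILTER, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* "e1000e" is an example; per filter_write() only the first
	 * token is used, and a leading non-alphanumeric character
	 * switches the filter off again. */
	fputs("e1000e\n", f);

	return fclose(f) ? 1 : 0;
}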
@@ -392,6 +631,11 @@ static int dma_debug_fs_init(void)
 	if (!min_free_entries_dent)
 		goto out_err;
 
+	filter_dent = debugfs_create_file("driver_filter", 0644,
+					  dma_debug_dent, NULL, &filter_fops);
+	if (!filter_dent)
+		goto out_err;
+
 	return 0;
 
 out_err:
@@ -400,9 +644,64 @@ out_err:
 	return -ENOMEM;
 }
 
+static int device_dma_allocations(struct device *dev)
+{
+	struct dma_debug_entry *entry;
+	unsigned long flags;
+	int count = 0, i;
+
+	local_irq_save(flags);
+
+	for (i = 0; i < HASH_SIZE; ++i) {
+		spin_lock(&dma_entry_hash[i].lock);
+		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
+			if (entry->dev == dev)
+				count += 1;
+		}
+		spin_unlock(&dma_entry_hash[i].lock);
+	}
+
+	local_irq_restore(flags);
+
+	return count;
+}
+
+static int dma_debug_device_change(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct device *dev = data;
+	int count;
+
+	switch (action) {
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		count = device_dma_allocations(dev);
+		if (count == 0)
+			break;
+		err_printk(dev, NULL, "DMA-API: device driver has pending "
+				"DMA allocations while released from device "
+				"[count=%d]\n", count);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 void dma_debug_add_bus(struct bus_type *bus)
 {
-	/* FIXME: register notifier */
+	struct notifier_block *nb;
+
+	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+	if (nb == NULL) {
+		pr_err("dma_debug_add_bus: out of memory\n");
+		return;
+	}
+
+	nb->notifier_call = dma_debug_device_change;
+
+	bus_register_notifier(bus, nb);
 }
 
 /*
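For context, a hedged sketch of how a bus could hook itself into dma-debug; the initcall below is hypothetical, though arch code typically registers pci_bus_type this way:

/*
 * Hypothetical arch init snippet: registering a bus with dma-debug so
 * that the BUS_NOTIFY_UNBOUND_DRIVER check above fires when a driver
 * is unbound from a device on that bus.
 */
static int __init my_arch_dma_debug_init(void)
{
	dma_debug_add_bus(&pci_bus_type);

	return 0;
}
fs_initcall(my_arch_dma_debug_init);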
@@ -421,8 +720,7 @@ void dma_debug_init(u32 num_entries)
 	}
 
 	if (dma_debug_fs_init() != 0) {
-		printk(KERN_ERR "DMA-API: error creating debugfs entries "
-				"- disabling\n");
+		pr_err("DMA-API: error creating debugfs entries - disabling\n");
 		global_disable = true;
 
 		return;
@@ -432,14 +730,15 @@ void dma_debug_init(u32 num_entries)
 		num_entries = req_entries;
 
 	if (prealloc_memory(num_entries) != 0) {
-		printk(KERN_ERR "DMA-API: debugging out of memory error "
-				"- disabled\n");
+		pr_err("DMA-API: debugging out of memory error - disabled\n");
 		global_disable = true;
 
 		return;
 	}
 
-	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
+	nr_total_entries = num_free_entries;
+
+	pr_info("DMA-API: debugging enabled by kernel config\n");
 }
 
 static __init int dma_debug_cmdline(char *str)
@@ -448,8 +747,7 @@ static __init int dma_debug_cmdline(char *str)
 		return -EINVAL;
 
 	if (strncmp(str, "off", 3) == 0) {
-		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
-				"command line\n");
+		pr_info("DMA-API: debugging disabled on kernel command line\n");
 		global_disable = true;
 	}
 
@@ -576,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
576 "[addr=%p] [size=%llu]\n", addr, size); 874 "[addr=%p] [size=%llu]\n", addr, size);
577} 875}
578 876
579static void check_sync(struct device *dev, dma_addr_t addr, 877static void check_sync(struct device *dev,
580 u64 size, u64 offset, int direction, bool to_cpu) 878 struct dma_debug_entry *ref,
879 bool to_cpu)
581{ 880{
582 struct dma_debug_entry ref = {
583 .dev = dev,
584 .dev_addr = addr,
585 .size = size,
586 .direction = direction,
587 };
588 struct dma_debug_entry *entry; 881 struct dma_debug_entry *entry;
589 struct hash_bucket *bucket; 882 struct hash_bucket *bucket;
590 unsigned long flags; 883 unsigned long flags;
591 884
592 bucket = get_hash_bucket(&ref, &flags); 885 bucket = get_hash_bucket(ref, &flags);
593 886
594 entry = hash_bucket_find(bucket, &ref); 887 entry = hash_bucket_find(bucket, ref);
595 888
596 if (!entry) { 889 if (!entry) {
597 err_printk(dev, NULL, "DMA-API: device driver tries " 890 err_printk(dev, NULL, "DMA-API: device driver tries "
598 "to sync DMA memory it has not allocated " 891 "to sync DMA memory it has not allocated "
599 "[device address=0x%016llx] [size=%llu bytes]\n", 892 "[device address=0x%016llx] [size=%llu bytes]\n",
600 (unsigned long long)addr, size); 893 (unsigned long long)ref->dev_addr, ref->size);
601 goto out; 894 goto out;
602 } 895 }
603 896
604 if ((offset + size) > entry->size) { 897 if (ref->size > entry->size) {
605 err_printk(dev, entry, "DMA-API: device driver syncs" 898 err_printk(dev, entry, "DMA-API: device driver syncs"
606 " DMA memory outside allocated range " 899 " DMA memory outside allocated range "
607 "[device address=0x%016llx] " 900 "[device address=0x%016llx] "
608 "[allocation size=%llu bytes] [sync offset=%llu] " 901 "[allocation size=%llu bytes] "
609 "[sync size=%llu]\n", entry->dev_addr, entry->size, 902 "[sync offset+size=%llu]\n",
610 offset, size); 903 entry->dev_addr, entry->size,
904 ref->size);
611 } 905 }
612 906
613 if (direction != entry->direction) { 907 if (ref->direction != entry->direction) {
614 err_printk(dev, entry, "DMA-API: device driver syncs " 908 err_printk(dev, entry, "DMA-API: device driver syncs "
615 "DMA memory with different direction " 909 "DMA memory with different direction "
616 "[device address=0x%016llx] [size=%llu bytes] " 910 "[device address=0x%016llx] [size=%llu bytes] "
617 "[mapped with %s] [synced with %s]\n", 911 "[mapped with %s] [synced with %s]\n",
618 (unsigned long long)addr, entry->size, 912 (unsigned long long)ref->dev_addr, entry->size,
619 dir2name[entry->direction], 913 dir2name[entry->direction],
620 dir2name[direction]); 914 dir2name[ref->direction]);
621 } 915 }
622 916
623 if (entry->direction == DMA_BIDIRECTIONAL) 917 if (entry->direction == DMA_BIDIRECTIONAL)
624 goto out; 918 goto out;
625 919
626 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 920 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
627 !(direction == DMA_TO_DEVICE)) 921 !(ref->direction == DMA_TO_DEVICE))
628 err_printk(dev, entry, "DMA-API: device driver syncs " 922 err_printk(dev, entry, "DMA-API: device driver syncs "
629 "device read-only DMA memory for cpu " 923 "device read-only DMA memory for cpu "
630 "[device address=0x%016llx] [size=%llu bytes] " 924 "[device address=0x%016llx] [size=%llu bytes] "
631 "[mapped with %s] [synced with %s]\n", 925 "[mapped with %s] [synced with %s]\n",
632 (unsigned long long)addr, entry->size, 926 (unsigned long long)ref->dev_addr, entry->size,
633 dir2name[entry->direction], 927 dir2name[entry->direction],
634 dir2name[direction]); 928 dir2name[ref->direction]);
635 929
636 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 930 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
637 !(direction == DMA_FROM_DEVICE)) 931 !(ref->direction == DMA_FROM_DEVICE))
638 err_printk(dev, entry, "DMA-API: device driver syncs " 932 err_printk(dev, entry, "DMA-API: device driver syncs "
639 "device write-only DMA memory to device " 933 "device write-only DMA memory to device "
640 "[device address=0x%016llx] [size=%llu bytes] " 934 "[device address=0x%016llx] [size=%llu bytes] "
641 "[mapped with %s] [synced with %s]\n", 935 "[mapped with %s] [synced with %s]\n",
642 (unsigned long long)addr, entry->size, 936 (unsigned long long)ref->dev_addr, entry->size,
643 dir2name[entry->direction], 937 dir2name[entry->direction],
644 dir2name[direction]); 938 dir2name[ref->direction]);
645 939
646out: 940out:
647 put_hash_bucket(bucket, &flags); 941 put_hash_bucket(bucket, &flags);
@@ -723,15 +1017,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		entry->type           = dma_debug_sg;
 		entry->dev            = dev;
 		entry->paddr          = sg_phys(s);
-		entry->size           = s->length;
-		entry->dev_addr       = s->dma_address;
+		entry->size           = sg_dma_len(s);
+		entry->dev_addr       = sg_dma_address(s);
 		entry->direction      = direction;
 		entry->sg_call_ents   = nents;
 		entry->sg_mapped_ents = mapped_ents;
 
 		if (!PageHighMem(sg_page(s))) {
 			check_for_stack(dev, sg_virt(s));
-			check_for_illegal_area(dev, sg_virt(s), s->length);
+			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
 		}
 
 		add_dma_entry(entry);
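The switch from s->length / s->dma_address to the sg_dma_len() / sg_dma_address() accessors matters because, after dma_map_sg(), the DMA address and length of an entry can differ from the CPU-side length (for instance when an IOMMU merges entries). A hedged sketch of the accessor semantics as commonly defined; the exact macros live in the arch scatterlist headers:

/* Typical definitions (arch-dependent; shown for illustration only):
 *
 *   sg_dma_address(sg)  ->  (sg)->dma_address
 *   sg_dma_len(sg)      ->  (sg)->dma_length, or (sg)->length on
 *                           architectures without a separate DMA length
 */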
@@ -739,13 +1033,30 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
+{
+	struct dma_debug_entry *entry;
+	struct hash_bucket *bucket;
+	unsigned long flags;
+	int mapped_ents;
+
+	bucket = get_hash_bucket(ref, &flags);
+	entry = hash_bucket_find(bucket, ref);
+	mapped_ents = 0;
+
+	if (entry)
+		mapped_ents = entry->sg_mapped_ents;
+	put_hash_bucket(bucket, &flags);
+
+	return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			int nelems, int dir)
 {
-	struct dma_debug_entry *entry;
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
-	unsigned long flags;
 
 	if (unlikely(global_disable))
 		return;
@@ -756,24 +1067,17 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.type           = dma_debug_sg,
 			.dev            = dev,
 			.paddr          = sg_phys(s),
-			.dev_addr       = s->dma_address,
-			.size           = s->length,
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
 			.direction      = dir,
-			.sg_call_ents   = 0,
+			.sg_call_ents   = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (mapped_ents == 0) {
-			struct hash_bucket *bucket;
-			ref.sg_call_ents = nelems;
-			bucket = get_hash_bucket(&ref, &flags);
-			entry = hash_bucket_find(bucket, &ref);
-			if (entry)
-				mapped_ents = entry->sg_mapped_ents;
-			put_hash_bucket(bucket, &flags);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -828,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -839,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 				      dma_addr_t dma_handle, size_t size,
 				      int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -851,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					  unsigned long offset, size_t size,
 					  int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -863,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 					     unsigned long offset,
 					     size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
@@ -874,14 +1214,30 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			       int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, true);
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -890,15 +1246,48 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				  int nelems, int direction)
 {
 	struct scatterlist *s;
-	int i;
+	int mapped_ents = 0, i;
 
 	if (unlikely(global_disable))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
-		check_sync(dev, s->dma_address, s->dma_length, 0,
-			   direction, false);
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
 
+static int __init dma_debug_driver_setup(char *str)
+{
+	int i;
+
+	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+		current_driver_name[i] = *str;
+		if (*str == 0)
+			break;
+	}
+
+	if (current_driver_name[0])
+		pr_info("DMA-API: enable driver filter for driver [%s]\n",
+			current_driver_name);
+
+	return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
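The __setup() handler makes the filter available at boot time as well: passing dma_debug_driver=<name> on the kernel command line pre-loads current_driver_name before any device is bound. For example (the driver name is illustrative):

	dma_debug_driver=e1000e

This complements the existing dma_debug=off option handled by dma_debug_cmdline() and the debugfs file above, which can change the filter at runtime.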