Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug    11
-rw-r--r--   lib/Makefile          2
-rw-r--r--   lib/dma-debug.c     949
3 files changed, 962 insertions, 0 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1bcf9cd4baa0..d9cbada7e2f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -902,6 +902,17 @@ config DYNAMIC_PRINTK_DEBUG
           debugging for all modules. This mode can be turned off via the above
           disable command.
 
+config DMA_API_DEBUG
+        bool "Enable debugging of DMA-API usage"
+        depends on HAVE_DMA_API_DEBUG
+        help
+          Enable this option to debug the use of the DMA API by device drivers.
+          With this option you will be able to detect common bugs in device
+          drivers like double-freeing of DMA mappings or freeing mappings that
+          were never allocated.
+          This option causes a performance degradation. Use only if you want
+          to debug device drivers. If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
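
The help text above names double frees as the canonical bug class. As a concrete illustration (a hypothetical driver fragment, not part of this patch; dev, buf and len are assumed driver-local), the second unmap below is the kind of error the new checks report:

        /* Streaming mapping followed by an erroneous double unmap */
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE); /* bug: already freed */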
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..50b48cf63e4a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -84,6 +84,8 @@ obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o
 
+obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
new file mode 100644
index 000000000000..9a350b414a50
--- /dev/null
+++ b/lib/dma-debug.c
@@ -0,0 +1,949 @@
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                    "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                printk(KERN_WARNING "Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

/* Note: no trailing semicolon after while (0), so calls behave as a single statement */
#define err_printk(dev, entry, format, arg...) do {             \
                error_count += 1;                               \
                if (show_all_errors || show_num_errors > 0) {   \
                        WARN(1, "%s %s: " format,               \
                             dev_driver_string(dev),            \
                             dev_name(dev) , ## arg);           \
                        dump_entry_trace(entry);                \
                }                                               \
                if (!show_all_errors && show_num_errors > 0)    \
                        show_num_errors -= 1;                   \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored in a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * With HASH_FN_SHIFT == 13 and a 10-bit mask, bits 13-22 of the
         * dma address form the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
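
As a sanity check of the bucket computation, the same arithmetic can be run in userspace (a standalone sketch reusing the constants above; not part of the patch):

        #include <stdio.h>
        #include <stdint.h>

        #define HASH_SIZE     1024ULL
        #define HASH_FN_SHIFT 13
        #define HASH_FN_MASK  (HASH_SIZE - 1)

        int main(void)
        {
                uint64_t dev_addr = 0x12345678ULL;

                /* Same computation as hash_fn(): prints "bucket 418" */
                printf("bucket %llu\n",
                       (unsigned long long)((dev_addr >> HASH_FN_SHIFT) &
                                            HASH_FN_MASK));
                return 0;
        }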

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif
        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
               num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

static int device_dma_allocations(struct device *dev)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev)
                                count += 1;
                }
                spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
        }

        return count;
}

static int dma_debug_device_change(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct device *dev = data;
        int count;

        switch (action) {
        case BUS_NOTIFY_UNBIND_DRIVER:
                count = device_dma_allocations(dev);
                if (count == 0)
                        break;
                err_printk(dev, NULL, "DMA-API: device driver has pending "
                           "DMA allocations while released from device "
                           "[count=%d]\n", count);
                break;
        default:
                break;
        }

        return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
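
Since the architecture chooses both the preallocation count and the call site, a minimal sketch of the expected wiring looks like this (the initcall level, the entry count, PREALLOC_DMA_DEBUG_ENTRIES and arch_dma_debug_init are illustrative assumptions, not mandated by this patch):

        /* Illustrative per-architecture init code */
        #define PREALLOC_DMA_DEBUG_ENTRIES      4096

        static int __init arch_dma_debug_init(void)
        {
                dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
                /* warn when a PCI driver unbinds with live mappings */
                dma_debug_add_bus(&pci_bus_type);
                return 0;
        }
        fs_initcall(arch_dma_debug_init);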

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
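
With these handlers in place, the feature can be tuned from the kernel command line, for example:

        dma_debug=off           (boot with DMA-API debugging disabled)
        dma_debug_entries=8192  (override the number of preallocated entries)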

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr))
                return;

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory "
                           "from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
        void *addr2 = (char *)addr + size;

        return ((addr >= start && addr < end) ||
                (addr2 >= start && addr2 < end) ||
                ((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
        if (overlap(addr, size, _text, _etext) ||
            overlap(addr, size, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps "
                           "memory from kernel text or rodata "
                           "[addr=%p] [size=%llu]\n", addr, size);
}

static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] [sync offset=%llu] "
                           "[sync size=%llu]\n", entry->dev_addr, entry->size,
                           offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
            !(direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
            !(direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);
}
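
For illustration, the range check above fires on a pattern like the following (hypothetical driver fragment; dev and buf are assumed driver-local):

        dma_addr_t handle = dma_map_single(dev, buf, 512, DMA_FROM_DEVICE);

        /* Bug: the mapping is 512 bytes, but 4096 bytes are synced */
        dma_sync_single_for_cpu(dev, handle, 4096, DMA_FROM_DEVICE);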

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single) {
                void *addr = ((char *)page_address(page)) + offset;

                entry->type = dma_debug_single;
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
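
How this hook gets called is up to each architecture's DMA mapping implementation. A sketch of the intended call pattern (arch_dma_map_page and arch_do_map_page are hypothetical stand-ins for the architecture's real mapping routines):

        static inline dma_addr_t arch_dma_map_page(struct device *dev,
                                                   struct page *page,
                                                   size_t offset, size_t size,
                                                   int direction)
        {
                dma_addr_t addr = arch_do_map_page(dev, page, offset, size,
                                                   direction);

                /* record the mapping; map_single=false: a page mapping */
                debug_dma_map_page(dev, page, offset, size, direction,
                                   addr, false);
                return addr;
        }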

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_page,
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                check_for_stack(dev, sg_virt(s));
                check_for_illegal_area(dev, sg_virt(s), s->length);

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type         = dma_debug_sg,
                        .dev          = dev,
                        .paddr        = sg_phys(s),
                        .dev_addr     = s->dma_address,
                        .size         = s->length,
                        .direction    = dir,
                        .sg_call_ents = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_coherent,
                .dev       = dev,
                .paddr     = virt_to_phys(virt),
                .dev_addr  = addr,
                .size      = size,
                .direction = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
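
For illustration, a size-mismatched coherent free of the kind check_unmap() flags (hypothetical driver fragment; dev is assumed driver-local):

        dma_addr_t handle;
        void *cpu = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);

        /* Bug: allocated 4096 bytes, freed as 2048 */
        dma_free_coherent(dev, 2048, cpu, handle);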

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);