Diffstat (limited to 'lib/dma-debug.c')
-rw-r--r--	lib/dma-debug.c	955
1 file changed, 955 insertions(+), 0 deletions(-)

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
new file mode 100644
index 000000000000..d3da7edc034f
--- /dev/null
+++ b/lib/dma-debug.c
@@ -0,0 +1,955 @@
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long      st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global flag to enable showing all errors */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentries for the variables above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs, and some of them
 * are even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen
 *                is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    the worst case this will result in one warning more in
 *                    the system log than the user configured. This variable
 *                    is writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		printk(KERN_WARNING "Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev), ##arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * With HASH_FN_SHIFT == 13 and a 10-bit mask we use bits 13-22
	 * of the dma address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
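
/*
 * A worked example of the index computation (the address is illustrative
 * only): for dev_addr == 0x12345678,
 *
 *   (0x12345678 >> 13) & 0x3ff  ==  0x91a2 & 0x3ff  ==  0x1a2  ==  418
 *
 * so the entry lands in bucket 418 of the 1024-bucket table.
 */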
156 | |||
157 | /* | ||
158 | * Request exclusive access to a hash bucket for a given dma_debug_entry. | ||
159 | */ | ||
160 | static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, | ||
161 | unsigned long *flags) | ||
162 | { | ||
163 | int idx = hash_fn(entry); | ||
164 | unsigned long __flags; | ||
165 | |||
166 | spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); | ||
167 | *flags = __flags; | ||
168 | return &dma_entry_hash[idx]; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Give up exclusive access to the hash bucket | ||
173 | */ | ||
174 | static void put_hash_bucket(struct hash_bucket *bucket, | ||
175 | unsigned long *flags) | ||
176 | { | ||
177 | unsigned long __flags = *flags; | ||
178 | |||
179 | spin_unlock_irqrestore(&bucket->lock, __flags); | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Search a given entry in the hash bucket list | ||
184 | */ | ||
185 | static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | ||
186 | struct dma_debug_entry *ref) | ||
187 | { | ||
188 | struct dma_debug_entry *entry; | ||
189 | |||
190 | list_for_each_entry(entry, &bucket->list, list) { | ||
191 | if ((entry->dev_addr == ref->dev_addr) && | ||
192 | (entry->dev == ref->dev)) | ||
193 | return entry; | ||
194 | } | ||
195 | |||
196 | return NULL; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Add an entry to a hash bucket | ||
201 | */ | ||
202 | static void hash_bucket_add(struct hash_bucket *bucket, | ||
203 | struct dma_debug_entry *entry) | ||
204 | { | ||
205 | list_add_tail(&entry->list, &bucket->list); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * Remove entry from a hash bucket list | ||
210 | */ | ||
211 | static void hash_bucket_del(struct dma_debug_entry *entry) | ||
212 | { | ||
213 | list_del(&entry->list); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Dump mapping entries for debugging purposes | ||
218 | */ | ||
219 | void debug_dma_dump_mappings(struct device *dev) | ||
220 | { | ||
221 | int idx; | ||
222 | |||
223 | for (idx = 0; idx < HASH_SIZE; idx++) { | ||
224 | struct hash_bucket *bucket = &dma_entry_hash[idx]; | ||
225 | struct dma_debug_entry *entry; | ||
226 | unsigned long flags; | ||
227 | |||
228 | spin_lock_irqsave(&bucket->lock, flags); | ||
229 | |||
230 | list_for_each_entry(entry, &bucket->list, list) { | ||
231 | if (!dev || dev == entry->dev) { | ||
232 | dev_info(entry->dev, | ||
233 | "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", | ||
234 | type2name[entry->type], idx, | ||
235 | (unsigned long long)entry->paddr, | ||
236 | entry->dev_addr, entry->size, | ||
237 | dir2name[entry->direction]); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | spin_unlock_irqrestore(&bucket->lock, flags); | ||
242 | } | ||
243 | } | ||
244 | EXPORT_SYMBOL(debug_dma_dump_mappings); | ||
245 | |||
246 | /* | ||
247 | * Wrapper function for adding an entry to the hash. | ||
248 | * This function takes care of locking itself. | ||
249 | */ | ||
250 | static void add_dma_entry(struct dma_debug_entry *entry) | ||
251 | { | ||
252 | struct hash_bucket *bucket; | ||
253 | unsigned long flags; | ||
254 | |||
255 | bucket = get_hash_bucket(entry, &flags); | ||
256 | hash_bucket_add(bucket, entry); | ||
257 | put_hash_bucket(bucket, &flags); | ||
258 | } | ||
259 | |||
260 | /* struct dma_entry allocator | ||
261 | * | ||
262 | * The next two functions implement the allocator for | ||
263 | * struct dma_debug_entries. | ||
264 | */ | ||
265 | static struct dma_debug_entry *dma_entry_alloc(void) | ||
266 | { | ||
267 | struct dma_debug_entry *entry = NULL; | ||
268 | unsigned long flags; | ||
269 | |||
270 | spin_lock_irqsave(&free_entries_lock, flags); | ||
271 | |||
272 | if (list_empty(&free_entries)) { | ||
273 | printk(KERN_ERR "DMA-API: debugging out of memory " | ||
274 | "- disabling\n"); | ||
275 | global_disable = true; | ||
276 | goto out; | ||
277 | } | ||
278 | |||
279 | entry = list_entry(free_entries.next, struct dma_debug_entry, list); | ||
280 | list_del(&entry->list); | ||
281 | memset(entry, 0, sizeof(*entry)); | ||
282 | |||
283 | #ifdef CONFIG_STACKTRACE | ||
284 | entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; | ||
285 | entry->stacktrace.entries = entry->st_entries; | ||
286 | entry->stacktrace.skip = 2; | ||
287 | save_stack_trace(&entry->stacktrace); | ||
288 | #endif | ||
289 | num_free_entries -= 1; | ||
290 | if (num_free_entries < min_free_entries) | ||
291 | min_free_entries = num_free_entries; | ||
292 | |||
293 | out: | ||
294 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
295 | |||
296 | return entry; | ||
297 | } | ||
298 | |||
299 | static void dma_entry_free(struct dma_debug_entry *entry) | ||
300 | { | ||
301 | unsigned long flags; | ||
302 | |||
303 | /* | ||
304 | * add to beginning of the list - this way the entries are | ||
305 | * more likely cache hot when they are reallocated. | ||
306 | */ | ||
307 | spin_lock_irqsave(&free_entries_lock, flags); | ||
308 | list_add(&entry->list, &free_entries); | ||
309 | num_free_entries += 1; | ||
310 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
311 | } | ||
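
/*
 * How the allocator is used by the checks below, condensed into a minimal
 * sketch (the call chain is the one implemented in this file):
 *
 *   map path:    entry = dma_entry_alloc();    // take from free_entries
 *                ...fill in dev, dev_addr, size, direction...
 *                add_dma_entry(entry);         // hash it by dev_addr
 *
 *   unmap path:  check_unmap(&ref);            // looks up the hash,
 *                                              // validates, then calls
 *                                              // hash_bucket_del() and
 *                                              // dma_entry_free()
 */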
312 | |||
313 | /* | ||
314 | * DMA-API debugging init code | ||
315 | * | ||
316 | * The init code does two things: | ||
317 | * 1. Initialize core data structures | ||
318 | * 2. Preallocate a given number of dma_debug_entry structs | ||
319 | */ | ||
320 | |||
321 | static int prealloc_memory(u32 num_entries) | ||
322 | { | ||
323 | struct dma_debug_entry *entry, *next_entry; | ||
324 | int i; | ||
325 | |||
326 | for (i = 0; i < num_entries; ++i) { | ||
327 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | ||
328 | if (!entry) | ||
329 | goto out_err; | ||
330 | |||
331 | list_add_tail(&entry->list, &free_entries); | ||
332 | } | ||
333 | |||
334 | num_free_entries = num_entries; | ||
335 | min_free_entries = num_entries; | ||
336 | |||
337 | printk(KERN_INFO "DMA-API: preallocated %d debug entries\n", | ||
338 | num_entries); | ||
339 | |||
340 | return 0; | ||
341 | |||
342 | out_err: | ||
343 | |||
344 | list_for_each_entry_safe(entry, next_entry, &free_entries, list) { | ||
345 | list_del(&entry->list); | ||
346 | kfree(entry); | ||
347 | } | ||
348 | |||
349 | return -ENOMEM; | ||
350 | } | ||
351 | |||
352 | static int dma_debug_fs_init(void) | ||
353 | { | ||
354 | dma_debug_dent = debugfs_create_dir("dma-api", NULL); | ||
355 | if (!dma_debug_dent) { | ||
356 | printk(KERN_ERR "DMA-API: can not create debugfs directory\n"); | ||
357 | return -ENOMEM; | ||
358 | } | ||
359 | |||
360 | global_disable_dent = debugfs_create_bool("disabled", 0444, | ||
361 | dma_debug_dent, | ||
362 | (u32 *)&global_disable); | ||
363 | if (!global_disable_dent) | ||
364 | goto out_err; | ||
365 | |||
366 | error_count_dent = debugfs_create_u32("error_count", 0444, | ||
367 | dma_debug_dent, &error_count); | ||
368 | if (!error_count_dent) | ||
369 | goto out_err; | ||
370 | |||
371 | show_all_errors_dent = debugfs_create_u32("all_errors", 0644, | ||
372 | dma_debug_dent, | ||
373 | &show_all_errors); | ||
374 | if (!show_all_errors_dent) | ||
375 | goto out_err; | ||
376 | |||
377 | show_num_errors_dent = debugfs_create_u32("num_errors", 0644, | ||
378 | dma_debug_dent, | ||
379 | &show_num_errors); | ||
380 | if (!show_num_errors_dent) | ||
381 | goto out_err; | ||
382 | |||
383 | num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, | ||
384 | dma_debug_dent, | ||
385 | &num_free_entries); | ||
386 | if (!num_free_entries_dent) | ||
387 | goto out_err; | ||
388 | |||
389 | min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, | ||
390 | dma_debug_dent, | ||
391 | &min_free_entries); | ||
392 | if (!min_free_entries_dent) | ||
393 | goto out_err; | ||
394 | |||
395 | return 0; | ||
396 | |||
397 | out_err: | ||
398 | debugfs_remove_recursive(dma_debug_dent); | ||
399 | |||
400 | return -ENOMEM; | ||
401 | } | ||
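
/*
 * With debugfs mounted at /sys/kernel/debug (the usual mount point), the
 * files created above can be inspected and tuned from user space, e.g.:
 *
 *   cat /sys/kernel/debug/dma-api/error_count
 *   echo 1   > /sys/kernel/debug/dma-api/all_errors   # warn on every error
 *   echo 100 > /sys/kernel/debug/dma-api/num_errors   # or just the next 100
 */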
402 | |||
403 | static int device_dma_allocations(struct device *dev) | ||
404 | { | ||
405 | struct dma_debug_entry *entry; | ||
406 | unsigned long flags; | ||
407 | int count = 0, i; | ||
408 | |||
409 | for (i = 0; i < HASH_SIZE; ++i) { | ||
410 | spin_lock_irqsave(&dma_entry_hash[i].lock, flags); | ||
411 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { | ||
412 | if (entry->dev == dev) | ||
413 | count += 1; | ||
414 | } | ||
415 | spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); | ||
416 | } | ||
417 | |||
418 | return count; | ||
419 | } | ||
420 | |||
421 | static int dma_debug_device_change(struct notifier_block *nb, | ||
422 | unsigned long action, void *data) | ||
423 | { | ||
424 | struct device *dev = data; | ||
425 | int count; | ||
426 | |||
427 | |||
428 | switch (action) { | ||
429 | case BUS_NOTIFY_UNBIND_DRIVER: | ||
430 | count = device_dma_allocations(dev); | ||
431 | if (count == 0) | ||
432 | break; | ||
433 | err_printk(dev, NULL, "DMA-API: device driver has pending " | ||
434 | "DMA allocations while released from device " | ||
435 | "[count=%d]\n", count); | ||
436 | break; | ||
437 | default: | ||
438 | break; | ||
439 | } | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | void dma_debug_add_bus(struct bus_type *bus) | ||
445 | { | ||
446 | struct notifier_block *nb; | ||
447 | |||
448 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); | ||
449 | if (nb == NULL) { | ||
450 | printk(KERN_ERR "dma_debug_add_bus: out of memory\n"); | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | nb->notifier_call = dma_debug_device_change; | ||
455 | |||
456 | bus_register_notifier(bus, nb); | ||
457 | } | ||
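
/*
 * A typical caller would be architecture init code registering the busses
 * it cares about, for example (hypothetical call site):
 *
 *   dma_debug_add_bus(&pci_bus_type);
 */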
458 | |||
459 | /* | ||
460 | * Let the architectures decide how many entries should be preallocated. | ||
461 | */ | ||
462 | void dma_debug_init(u32 num_entries) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | if (global_disable) | ||
467 | return; | ||
468 | |||
469 | for (i = 0; i < HASH_SIZE; ++i) { | ||
470 | INIT_LIST_HEAD(&dma_entry_hash[i].list); | ||
471 | dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; | ||
472 | } | ||
473 | |||
474 | if (dma_debug_fs_init() != 0) { | ||
475 | printk(KERN_ERR "DMA-API: error creating debugfs entries " | ||
476 | "- disabling\n"); | ||
477 | global_disable = true; | ||
478 | |||
479 | return; | ||
480 | } | ||
481 | |||
482 | if (req_entries) | ||
483 | num_entries = req_entries; | ||
484 | |||
485 | if (prealloc_memory(num_entries) != 0) { | ||
486 | printk(KERN_ERR "DMA-API: debugging out of memory error " | ||
487 | "- disabled\n"); | ||
488 | global_disable = true; | ||
489 | |||
490 | return; | ||
491 | } | ||
492 | |||
493 | printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); | ||
494 | } | ||
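
/*
 * Architectures pass in their preferred preallocation count; a sketch of
 * such a call site (the constant name and value are illustrative only):
 *
 *   #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
 *
 *   void __init arch_dma_init(void)
 *   {
 *           dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *   }
 */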
495 | |||
496 | static __init int dma_debug_cmdline(char *str) | ||
497 | { | ||
498 | if (!str) | ||
499 | return -EINVAL; | ||
500 | |||
501 | if (strncmp(str, "off", 3) == 0) { | ||
502 | printk(KERN_INFO "DMA-API: debugging disabled on kernel " | ||
503 | "command line\n"); | ||
504 | global_disable = true; | ||
505 | } | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static __init int dma_debug_entries_cmdline(char *str) | ||
511 | { | ||
512 | int res; | ||
513 | |||
514 | if (!str) | ||
515 | return -EINVAL; | ||
516 | |||
517 | res = get_option(&str, &req_entries); | ||
518 | |||
519 | if (!res) | ||
520 | req_entries = 0; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | __setup("dma_debug=", dma_debug_cmdline); | ||
526 | __setup("dma_debug_entries=", dma_debug_entries_cmdline); | ||
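
/*
 * Example kernel command lines accepted by the two handlers above:
 *
 *   dma_debug=off              disable the checks completely
 *   dma_debug_entries=16384    override the preallocation count
 *                              (16384 is just an example value)
 */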
527 | |||
528 | static void check_unmap(struct dma_debug_entry *ref) | ||
529 | { | ||
530 | struct dma_debug_entry *entry; | ||
531 | struct hash_bucket *bucket; | ||
532 | unsigned long flags; | ||
533 | |||
534 | if (dma_mapping_error(ref->dev, ref->dev_addr)) { | ||
535 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | ||
536 | "to free an invalid DMA memory address\n"); | ||
537 | return; | ||
538 | } | ||
539 | |||
540 | bucket = get_hash_bucket(ref, &flags); | ||
541 | entry = hash_bucket_find(bucket, ref); | ||
542 | |||
543 | if (!entry) { | ||
544 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | ||
545 | "to free DMA memory it has not allocated " | ||
546 | "[device address=0x%016llx] [size=%llu bytes]\n", | ||
547 | ref->dev_addr, ref->size); | ||
548 | goto out; | ||
549 | } | ||
550 | |||
551 | if (ref->size != entry->size) { | ||
552 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
553 | "DMA memory with different size " | ||
554 | "[device address=0x%016llx] [map size=%llu bytes] " | ||
555 | "[unmap size=%llu bytes]\n", | ||
556 | ref->dev_addr, entry->size, ref->size); | ||
557 | } | ||
558 | |||
559 | if (ref->type != entry->type) { | ||
560 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
561 | "DMA memory with wrong function " | ||
562 | "[device address=0x%016llx] [size=%llu bytes] " | ||
563 | "[mapped as %s] [unmapped as %s]\n", | ||
564 | ref->dev_addr, ref->size, | ||
565 | type2name[entry->type], type2name[ref->type]); | ||
566 | } else if ((entry->type == dma_debug_coherent) && | ||
567 | (ref->paddr != entry->paddr)) { | ||
568 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
569 | "DMA memory with different CPU address " | ||
570 | "[device address=0x%016llx] [size=%llu bytes] " | ||
571 | "[cpu alloc address=%p] [cpu free address=%p]", | ||
572 | ref->dev_addr, ref->size, | ||
573 | (void *)entry->paddr, (void *)ref->paddr); | ||
574 | } | ||
575 | |||
576 | if (ref->sg_call_ents && ref->type == dma_debug_sg && | ||
577 | ref->sg_call_ents != entry->sg_call_ents) { | ||
578 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
579 | "DMA sg list with different entry count " | ||
580 | "[map count=%d] [unmap count=%d]\n", | ||
581 | entry->sg_call_ents, ref->sg_call_ents); | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * This may be no bug in reality - but most implementations of the | ||
586 | * DMA API don't handle this properly, so check for it here | ||
587 | */ | ||
588 | if (ref->direction != entry->direction) { | ||
589 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
590 | "DMA memory with different direction " | ||
591 | "[device address=0x%016llx] [size=%llu bytes] " | ||
592 | "[mapped with %s] [unmapped with %s]\n", | ||
593 | ref->dev_addr, ref->size, | ||
594 | dir2name[entry->direction], | ||
595 | dir2name[ref->direction]); | ||
596 | } | ||
597 | |||
598 | hash_bucket_del(entry); | ||
599 | dma_entry_free(entry); | ||
600 | |||
601 | out: | ||
602 | put_hash_bucket(bucket, &flags); | ||
603 | } | ||
604 | |||
605 | static void check_for_stack(struct device *dev, void *addr) | ||
606 | { | ||
607 | if (object_is_on_stack(addr)) | ||
608 | err_printk(dev, NULL, "DMA-API: device driver maps memory from" | ||
609 | "stack [addr=%p]\n", addr); | ||
610 | } | ||
611 | |||
612 | static inline bool overlap(void *addr, u64 size, void *start, void *end) | ||
613 | { | ||
614 | void *addr2 = (char *)addr + size; | ||
615 | |||
616 | return ((addr >= start && addr < end) || | ||
617 | (addr2 >= start && addr2 < end) || | ||
618 | ((addr < start) && (addr2 >= end))); | ||
619 | } | ||
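
/*
 * The three clauses cover: the range begins inside [start, end), the range
 * ends inside it, or the range completely encloses it. A worked example
 * with illustrative addresses: addr=0x1000, size=0x400 gives
 * [0x1000, 0x1400); against start=0x1200, end=0x2000 the first clause is
 * false but the second is true (0x1400 >= 0x1200 && 0x1400 < 0x2000),
 * so the ranges overlap.
 */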
620 | |||
621 | static void check_for_illegal_area(struct device *dev, void *addr, u64 size) | ||
622 | { | ||
623 | if (overlap(addr, size, _text, _etext) || | ||
624 | overlap(addr, size, __start_rodata, __end_rodata)) | ||
625 | err_printk(dev, NULL, "DMA-API: device driver maps " | ||
626 | "memory from kernel text or rodata " | ||
627 | "[addr=%p] [size=%llu]\n", addr, size); | ||
628 | } | ||
629 | |||
630 | static void check_sync(struct device *dev, dma_addr_t addr, | ||
631 | u64 size, u64 offset, int direction, bool to_cpu) | ||
632 | { | ||
633 | struct dma_debug_entry ref = { | ||
634 | .dev = dev, | ||
635 | .dev_addr = addr, | ||
636 | .size = size, | ||
637 | .direction = direction, | ||
638 | }; | ||
639 | struct dma_debug_entry *entry; | ||
640 | struct hash_bucket *bucket; | ||
641 | unsigned long flags; | ||
642 | |||
643 | bucket = get_hash_bucket(&ref, &flags); | ||
644 | |||
645 | entry = hash_bucket_find(bucket, &ref); | ||
646 | |||
647 | if (!entry) { | ||
648 | err_printk(dev, NULL, "DMA-API: device driver tries " | ||
649 | "to sync DMA memory it has not allocated " | ||
650 | "[device address=0x%016llx] [size=%llu bytes]\n", | ||
651 | (unsigned long long)addr, size); | ||
652 | goto out; | ||
653 | } | ||
654 | |||
655 | if ((offset + size) > entry->size) { | ||
656 | err_printk(dev, entry, "DMA-API: device driver syncs" | ||
657 | " DMA memory outside allocated range " | ||
658 | "[device address=0x%016llx] " | ||
659 | "[allocation size=%llu bytes] [sync offset=%llu] " | ||
660 | "[sync size=%llu]\n", entry->dev_addr, entry->size, | ||
661 | offset, size); | ||
662 | } | ||
663 | |||
664 | if (direction != entry->direction) { | ||
665 | err_printk(dev, entry, "DMA-API: device driver syncs " | ||
666 | "DMA memory with different direction " | ||
667 | "[device address=0x%016llx] [size=%llu bytes] " | ||
668 | "[mapped with %s] [synced with %s]\n", | ||
669 | (unsigned long long)addr, entry->size, | ||
670 | dir2name[entry->direction], | ||
671 | dir2name[direction]); | ||
672 | } | ||
673 | |||
674 | if (entry->direction == DMA_BIDIRECTIONAL) | ||
675 | goto out; | ||
676 | |||
677 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && | ||
678 | !(direction == DMA_TO_DEVICE)) | ||
679 | err_printk(dev, entry, "DMA-API: device driver syncs " | ||
680 | "device read-only DMA memory for cpu " | ||
681 | "[device address=0x%016llx] [size=%llu bytes] " | ||
682 | "[mapped with %s] [synced with %s]\n", | ||
683 | (unsigned long long)addr, entry->size, | ||
684 | dir2name[entry->direction], | ||
685 | dir2name[direction]); | ||
686 | |||
687 | if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && | ||
688 | !(direction == DMA_FROM_DEVICE)) | ||
689 | err_printk(dev, entry, "DMA-API: device driver syncs " | ||
690 | "device write-only DMA memory to device " | ||
691 | "[device address=0x%016llx] [size=%llu bytes] " | ||
692 | "[mapped with %s] [synced with %s]\n", | ||
693 | (unsigned long long)addr, entry->size, | ||
694 | dir2name[entry->direction], | ||
695 | dir2name[direction]); | ||
696 | |||
697 | out: | ||
698 | put_hash_bucket(bucket, &flags); | ||
699 | |||
700 | } | ||
701 | |||
702 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | ||
703 | size_t size, int direction, dma_addr_t dma_addr, | ||
704 | bool map_single) | ||
705 | { | ||
706 | struct dma_debug_entry *entry; | ||
707 | |||
708 | if (unlikely(global_disable)) | ||
709 | return; | ||
710 | |||
711 | if (unlikely(dma_mapping_error(dev, dma_addr))) | ||
712 | return; | ||
713 | |||
714 | entry = dma_entry_alloc(); | ||
715 | if (!entry) | ||
716 | return; | ||
717 | |||
718 | entry->dev = dev; | ||
719 | entry->type = dma_debug_page; | ||
720 | entry->paddr = page_to_phys(page) + offset; | ||
721 | entry->dev_addr = dma_addr; | ||
722 | entry->size = size; | ||
723 | entry->direction = direction; | ||
724 | |||
725 | if (map_single) | ||
726 | entry->type = dma_debug_single; | ||
727 | |||
728 | if (!PageHighMem(page)) { | ||
729 | void *addr = ((char *)page_address(page)) + offset; | ||
730 | check_for_stack(dev, addr); | ||
731 | check_for_illegal_area(dev, addr, size); | ||
732 | } | ||
733 | |||
734 | add_dma_entry(entry); | ||
735 | } | ||
736 | EXPORT_SYMBOL(debug_dma_map_page); | ||
737 | |||
738 | void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
739 | size_t size, int direction, bool map_single) | ||
740 | { | ||
741 | struct dma_debug_entry ref = { | ||
742 | .type = dma_debug_page, | ||
743 | .dev = dev, | ||
744 | .dev_addr = addr, | ||
745 | .size = size, | ||
746 | .direction = direction, | ||
747 | }; | ||
748 | |||
749 | if (unlikely(global_disable)) | ||
750 | return; | ||
751 | |||
752 | if (map_single) | ||
753 | ref.type = dma_debug_single; | ||
754 | |||
755 | check_unmap(&ref); | ||
756 | } | ||
757 | EXPORT_SYMBOL(debug_dma_unmap_page); | ||
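
/*
 * These hooks are called from the architecture's DMA-API implementation.
 * From a driver's point of view the checked sequence is the usual one; a
 * minimal (hypothetical) driver sketch that this code would verify:
 *
 *   dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *   if (dma_mapping_error(dev, handle))
 *           return -ENOMEM;
 *   ...start the device...
 *   dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * Unmapping with a different size, direction, or address than the ones
 * used at map time triggers the err_printk() paths in check_unmap().
 */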
758 | |||
759 | void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
760 | int nents, int mapped_ents, int direction) | ||
761 | { | ||
762 | struct dma_debug_entry *entry; | ||
763 | struct scatterlist *s; | ||
764 | int i; | ||
765 | |||
766 | if (unlikely(global_disable)) | ||
767 | return; | ||
768 | |||
769 | for_each_sg(sg, s, mapped_ents, i) { | ||
770 | entry = dma_entry_alloc(); | ||
771 | if (!entry) | ||
772 | return; | ||
773 | |||
774 | entry->type = dma_debug_sg; | ||
775 | entry->dev = dev; | ||
776 | entry->paddr = sg_phys(s); | ||
777 | entry->size = s->length; | ||
778 | entry->dev_addr = s->dma_address; | ||
779 | entry->direction = direction; | ||
780 | entry->sg_call_ents = nents; | ||
781 | entry->sg_mapped_ents = mapped_ents; | ||
782 | |||
783 | if (!PageHighMem(sg_page(s))) { | ||
784 | check_for_stack(dev, sg_virt(s)); | ||
785 | check_for_illegal_area(dev, sg_virt(s), s->length); | ||
786 | } | ||
787 | |||
788 | add_dma_entry(entry); | ||
789 | } | ||
790 | } | ||
791 | EXPORT_SYMBOL(debug_dma_map_sg); | ||
792 | |||
793 | void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
794 | int nelems, int dir) | ||
795 | { | ||
796 | struct dma_debug_entry *entry; | ||
797 | struct scatterlist *s; | ||
798 | int mapped_ents = 0, i; | ||
799 | unsigned long flags; | ||
800 | |||
801 | if (unlikely(global_disable)) | ||
802 | return; | ||
803 | |||
804 | for_each_sg(sglist, s, nelems, i) { | ||
805 | |||
806 | struct dma_debug_entry ref = { | ||
807 | .type = dma_debug_sg, | ||
808 | .dev = dev, | ||
809 | .paddr = sg_phys(s), | ||
810 | .dev_addr = s->dma_address, | ||
811 | .size = s->length, | ||
812 | .direction = dir, | ||
813 | .sg_call_ents = 0, | ||
814 | }; | ||
815 | |||
816 | if (mapped_ents && i >= mapped_ents) | ||
817 | break; | ||
818 | |||
819 | if (mapped_ents == 0) { | ||
820 | struct hash_bucket *bucket; | ||
821 | ref.sg_call_ents = nelems; | ||
822 | bucket = get_hash_bucket(&ref, &flags); | ||
823 | entry = hash_bucket_find(bucket, &ref); | ||
824 | if (entry) | ||
825 | mapped_ents = entry->sg_mapped_ents; | ||
826 | put_hash_bucket(bucket, &flags); | ||
827 | } | ||
828 | |||
829 | check_unmap(&ref); | ||
830 | } | ||
831 | } | ||
832 | EXPORT_SYMBOL(debug_dma_unmap_sg); | ||
833 | |||
834 | void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
835 | dma_addr_t dma_addr, void *virt) | ||
836 | { | ||
837 | struct dma_debug_entry *entry; | ||
838 | |||
839 | if (unlikely(global_disable)) | ||
840 | return; | ||
841 | |||
842 | if (unlikely(virt == NULL)) | ||
843 | return; | ||
844 | |||
845 | entry = dma_entry_alloc(); | ||
846 | if (!entry) | ||
847 | return; | ||
848 | |||
849 | entry->type = dma_debug_coherent; | ||
850 | entry->dev = dev; | ||
851 | entry->paddr = virt_to_phys(virt); | ||
852 | entry->size = size; | ||
853 | entry->dev_addr = dma_addr; | ||
854 | entry->direction = DMA_BIDIRECTIONAL; | ||
855 | |||
856 | add_dma_entry(entry); | ||
857 | } | ||
858 | EXPORT_SYMBOL(debug_dma_alloc_coherent); | ||
859 | |||
860 | void debug_dma_free_coherent(struct device *dev, size_t size, | ||
861 | void *virt, dma_addr_t addr) | ||
862 | { | ||
863 | struct dma_debug_entry ref = { | ||
864 | .type = dma_debug_coherent, | ||
865 | .dev = dev, | ||
866 | .paddr = virt_to_phys(virt), | ||
867 | .dev_addr = addr, | ||
868 | .size = size, | ||
869 | .direction = DMA_BIDIRECTIONAL, | ||
870 | }; | ||
871 | |||
872 | if (unlikely(global_disable)) | ||
873 | return; | ||
874 | |||
875 | check_unmap(&ref); | ||
876 | } | ||
877 | EXPORT_SYMBOL(debug_dma_free_coherent); | ||
878 | |||
879 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
880 | size_t size, int direction) | ||
881 | { | ||
882 | if (unlikely(global_disable)) | ||
883 | return; | ||
884 | |||
885 | check_sync(dev, dma_handle, size, 0, direction, true); | ||
886 | } | ||
887 | EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); | ||
888 | |||
889 | void debug_dma_sync_single_for_device(struct device *dev, | ||
890 | dma_addr_t dma_handle, size_t size, | ||
891 | int direction) | ||
892 | { | ||
893 | if (unlikely(global_disable)) | ||
894 | return; | ||
895 | |||
896 | check_sync(dev, dma_handle, size, 0, direction, false); | ||
897 | } | ||
898 | EXPORT_SYMBOL(debug_dma_sync_single_for_device); | ||
899 | |||
900 | void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
901 | dma_addr_t dma_handle, | ||
902 | unsigned long offset, size_t size, | ||
903 | int direction) | ||
904 | { | ||
905 | if (unlikely(global_disable)) | ||
906 | return; | ||
907 | |||
908 | check_sync(dev, dma_handle, size, offset, direction, true); | ||
909 | } | ||
910 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); | ||
911 | |||
912 | void debug_dma_sync_single_range_for_device(struct device *dev, | ||
913 | dma_addr_t dma_handle, | ||
914 | unsigned long offset, | ||
915 | size_t size, int direction) | ||
916 | { | ||
917 | if (unlikely(global_disable)) | ||
918 | return; | ||
919 | |||
920 | check_sync(dev, dma_handle, size, offset, direction, false); | ||
921 | } | ||
922 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); | ||
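
/*
 * A driver-side view of what the sync checks verify, as a sketch of
 * streaming DMA with CPU accesses in between (names are illustrative):
 *
 *   dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *   ...device writes into the buffer...
 *   dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *   ...CPU reads buf...
 *   dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * Syncing with a direction that differs from the mapping, or outside the
 * mapped range, is reported by check_sync() above.
 */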
923 | |||
924 | void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
925 | int nelems, int direction) | ||
926 | { | ||
927 | struct scatterlist *s; | ||
928 | int i; | ||
929 | |||
930 | if (unlikely(global_disable)) | ||
931 | return; | ||
932 | |||
933 | for_each_sg(sg, s, nelems, i) { | ||
934 | check_sync(dev, s->dma_address, s->dma_length, 0, | ||
935 | direction, true); | ||
936 | } | ||
937 | } | ||
938 | EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); | ||
939 | |||
940 | void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
941 | int nelems, int direction) | ||
942 | { | ||
943 | struct scatterlist *s; | ||
944 | int i; | ||
945 | |||
946 | if (unlikely(global_disable)) | ||
947 | return; | ||
948 | |||
949 | for_each_sg(sg, s, nelems, i) { | ||
950 | check_sync(dev, s->dma_address, s->dma_length, 0, | ||
951 | direction, false); | ||
952 | } | ||
953 | } | ||
954 | EXPORT_SYMBOL(debug_dma_sync_sg_for_device); | ||
955 | |||