about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/dma
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/dma')
-rw-r--r--kernel/dma/debug.c14
-rw-r--r--kernel/dma/swiotlb.c6
2 files changed, 11 insertions, 9 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 45d51e8e26f6..badd77670d00 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -89,8 +89,8 @@ struct dma_debug_entry {
 	int			 sg_mapped_ents;
 	enum map_err_types	 map_err_type;
 #ifdef CONFIG_STACKTRACE
-	struct stack_trace	 stacktrace;
-	unsigned long		 st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
+	unsigned int		 stack_len;
+	unsigned long		 stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
 };
 
@@ -174,7 +174,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 #ifdef CONFIG_STACKTRACE
 	if (entry) {
 		pr_warning("Mapped at:\n");
-		print_stack_trace(&entry->stacktrace, 0);
+		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
 	}
 #endif
 }
@@ -704,12 +704,10 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 
 #ifdef CONFIG_STACKTRACE
-	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
-	entry->stacktrace.entries = entry->st_entries;
-	entry->stacktrace.skip = 2;
-	save_stack_trace(&entry->stacktrace);
+	entry->stack_len = stack_trace_save(entry->stack_entries,
+					    ARRAY_SIZE(entry->stack_entries),
+					    1);
 #endif
-
 	return entry;
 }
 
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 53012db1e53c..6f7619c1f877 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -452,6 +452,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
+	unsigned long tmp_io_tlb_used;
 
 	if (no_iotlb_memory)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
@@ -538,9 +539,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	} while (index != wrap);
 
 not_found:
+	tmp_io_tlb_used = io_tlb_used;
+
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
-		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
+		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
+			 size, io_tlb_nslabs, tmp_io_tlb_used);
 	return DMA_MAPPING_ERROR;
 found:
 	io_tlb_used += nslots;