author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2009-04-15 05:22:41 -0400
committer  Ingo Molnar <mingo@elte.hu>                       2009-04-15 06:22:37 -0400
commit     e6a1a89d572c31b62d6dcf11a371c7323852d9b2 (patch)
tree       bca6ff9d83ae6820c3dd4270e165705c12b66f56 /lib/dma-debug.c
parent     7e05575c422d45f393c2d9b5900e97a30bf69bea (diff)
dma-debug: add dma_debug_resize_entries() to adjust the number of dma_debug_entries
We use a static value for the number of dma_debug_entries. It can be
overwritten by a kernel command line option.

Some IOMMUs (e.g. GART) can't set an appropriate value via the kernel
command line because they don't know a suitable value until they have
finished initializing their hardware.

This patch adds dma_debug_resize_entries(), which enables IOMMUs to
adjust the number of dma_debug_entries at any time.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: fujita.tomonori@lab.ntt.co.jp
Cc: akpm@linux-foundation.org
LKML-Reference: <20090415182234R.fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
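As a usage illustration (not part of this patch), here is a minimal sketch of how an IOMMU driver might call the new interface once its hardware is initialized and the number of possible mappings is known. The function example_iommu_setup_dma_debug(), its iommu_pages parameter, and the warning message are hypothetical names chosen for this sketch, not code from any in-tree driver.

/*
 * Illustrative sketch only: after the IOMMU hardware is set up and the
 * number of possible mappings is known, grow (or shrink) the dma-debug
 * entry pool to match.
 */
#include <linux/kernel.h>
#include <linux/dma-debug.h>

static void example_iommu_setup_dma_debug(u32 iommu_pages)
{
#ifdef CONFIG_DMA_API_DEBUG
	/*
	 * dma_debug_resize_entries() returns 0 if the pool now holds the
	 * requested number of entries, and non-zero if that size could not
	 * be reached (for example because an allocation failed). Debugging
	 * keeps working either way, just with fewer entries.
	 */
	if (dma_debug_resize_entries(iommu_pages))
		printk(KERN_WARNING
		       "example-iommu: could not resize the dma-debug entry pool\n");
#endif
}

Note that when growing the pool the function drops and retakes free_entries_lock around GFP_KERNEL allocations, so callers are expected to invoke it from process context.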
Diffstat (limited to 'lib/dma-debug.c')
-rw-r--r--  lib/dma-debug.c | 72
1 file changed, 66 insertions(+), 6 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d3da7edc034f..5d61019330cd 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -85,6 +85,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -257,6 +258,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+	struct dma_debug_entry *entry;
+
+	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+	list_del(&entry->list);
+	memset(entry, 0, sizeof(*entry));
+
+	num_free_entries -= 1;
+	if (num_free_entries < min_free_entries)
+		min_free_entries = num_free_entries;
+
+	return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
@@ -276,9 +292,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 		goto out;
 	}
 
-	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-	list_del(&entry->list);
-	memset(entry, 0, sizeof(*entry));
+	entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +300,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	entry->stacktrace.skip = 2;
 	save_stack_trace(&entry->stacktrace);
 #endif
-	num_free_entries -= 1;
-	if (num_free_entries < min_free_entries)
-		min_free_entries = num_free_entries;
 
 out:
 	spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +321,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+	int i, delta, ret = 0;
+	unsigned long flags;
+	struct dma_debug_entry *entry;
+	LIST_HEAD(tmp);
+
+	spin_lock_irqsave(&free_entries_lock, flags);
+
+	if (nr_total_entries < num_entries) {
+		delta = num_entries - nr_total_entries;
+
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+
+		for (i = 0; i < delta; i++) {
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry)
+				break;
+
+			list_add_tail(&entry->list, &tmp);
+		}
+
+		spin_lock_irqsave(&free_entries_lock, flags);
+
+		list_splice(&tmp, &free_entries);
+		nr_total_entries += i;
+		num_free_entries += i;
+	} else {
+		delta = nr_total_entries - num_entries;
+
+		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+			entry = __dma_entry_alloc();
+			kfree(entry);
+		}
+
+		nr_total_entries -= i;
+	}
+
+	if (nr_total_entries != num_entries)
+		ret = 1;
+
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
@@ -490,6 +548,8 @@ void dma_debug_init(u32 num_entries)
 		return;
 	}
 
+	nr_total_entries = num_free_entries;
+
 	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
 }
 