about summary refs log tree commit diff stats
path: root/arch/blackfin/kernel/dma-mapping.c
diff options
context:
space:
mode:
authorMichael Hennerich <michael.hennerich@analog.com>2009-09-23 07:32:52 -0400
committerMike Frysinger <vapier@gentoo.org>2009-12-15 00:13:51 -0500
commitcb5ae60f7a0b21b936c022a2e7f9303c60665233 (patch)
tree6605a4dec5a881fa4436c36c18f985436b69bd0c /arch/blackfin/kernel/dma-mapping.c
parentddcd7cb857cdf2a29c30125f71cb5d4d6744c99b (diff)
Blackfin: convert DMA code to a proper bitmap
Rather than using our own data structures that basically boil down to a bitmap, use the standard bitmap functions. Reported-by: Paul Mundt <lethal@linux-sh.org> Signed-off-by: Michael Hennerich <michael.hennerich@analog.com> Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin/kernel/dma-mapping.c')
-rw-r--r--arch/blackfin/kernel/dma-mapping.c15
1 file changed, 8 insertions, 7 deletions
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index e74e74d7733f..be81572b89c2 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -19,7 +19,7 @@
19#include <asm/bfin-global.h> 19#include <asm/bfin-global.h>
20 20
21static spinlock_t dma_page_lock; 21static spinlock_t dma_page_lock;
22static unsigned int *dma_page; 22static unsigned long *dma_page;
23static unsigned int dma_pages; 23static unsigned int dma_pages;
24static unsigned long dma_base; 24static unsigned long dma_base;
25static unsigned long dma_size; 25static unsigned long dma_size;
@@ -30,7 +30,7 @@ void dma_alloc_init(unsigned long start, unsigned long end)
30 spin_lock_init(&dma_page_lock); 30 spin_lock_init(&dma_page_lock);
31 dma_initialized = 0; 31 dma_initialized = 0;
32 32
33 dma_page = (unsigned int *)__get_free_page(GFP_KERNEL); 33 dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
34 memset(dma_page, 0, PAGE_SIZE); 34 memset(dma_page, 0, PAGE_SIZE);
35 dma_base = PAGE_ALIGN(start); 35 dma_base = PAGE_ALIGN(start);
36 dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start); 36 dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
@@ -58,10 +58,11 @@ static unsigned long __alloc_dma_pages(unsigned int pages)
58 spin_lock_irqsave(&dma_page_lock, flags); 58 spin_lock_irqsave(&dma_page_lock, flags);
59 59
60 for (i = 0; i < dma_pages;) { 60 for (i = 0; i < dma_pages;) {
61 if (dma_page[i++] == 0) { 61 if (test_bit(i++, dma_page) == 0) {
62 if (++count == pages) { 62 if (++count == pages) {
63 while (count--) 63 while (count--)
64 dma_page[--i] = 1; 64 __set_bit(--i, dma_page);
65
65 ret = dma_base + (i << PAGE_SHIFT); 66 ret = dma_base + (i << PAGE_SHIFT);
66 break; 67 break;
67 } 68 }
@@ -84,9 +85,9 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
84 } 85 }
85 86
86 spin_lock_irqsave(&dma_page_lock, flags); 87 spin_lock_irqsave(&dma_page_lock, flags);
87 for (i = page; i < page + pages; i++) { 88 for (i = page; i < page + pages; i++)
88 dma_page[i] = 0; 89 __clear_bit(i, dma_page);
89 } 90
90 spin_unlock_irqrestore(&dma_page_lock, flags); 91 spin_unlock_irqrestore(&dma_page_lock, flags);
91} 92}
92 93