author    Akinobu Mita <akinobu.mita@gmail.com>  2012-11-25 02:48:23 -0500
committer Bob Liu <lliubbo@gmail.com>            2013-02-20 02:21:22 -0500
commit    c73bc7026d7785fc627a8da7bbdd9d172001a59f (patch)
tree      de8990ff59aef779890982a23c91d06be241d419 /arch/blackfin
parent    c428f8eb2f473963585b0c0a31cb33dccad62b9a (diff)
blackfin: use bitmap library functions
The bitmap library provides more efficient functions than accessing individual bits with bitops. This uses bitmap_find_next_zero_area() to find a contiguous zero area, and bitmap_set()/bitmap_clear() to set/clear the specified bit area.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: uclinux-dist-devel@blackfin.uclinux.org
Signed-off-by: Bob Liu <lliubbo@gmail.com>
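For context, a minimal sketch of the allocation pattern this patch adopts, using the same bitmap helpers against a hypothetical fixed-size pool (the names DEMO_SLOTS, demo_map, demo_alloc and demo_free are illustrative only, and the spinlock taken in dma-mapping.c is omitted for brevity):

#include <linux/bitmap.h>

#define DEMO_SLOTS 64				/* hypothetical pool size */
static DECLARE_BITMAP(demo_map, DEMO_SLOTS);	/* one bit per slot */

/* Find 'n' contiguous free slots and mark them allocated; returns the
 * first slot index, or DEMO_SLOTS if no run of 'n' free slots exists. */
static unsigned long demo_alloc(unsigned int n)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(demo_map, DEMO_SLOTS, 0, n, 0);
	if (start < DEMO_SLOTS)
		bitmap_set(demo_map, start, n);
	return start;
}

/* Release 'n' slots starting at 'start'. */
static void demo_free(unsigned long start, unsigned int n)
{
	bitmap_clear(demo_map, start, n);
}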
Diffstat (limited to 'arch/blackfin')
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c  23
1 file changed, 7 insertions(+), 16 deletions(-)
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index e7be6532d6a0..df437e52d9df 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -13,6 +13,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/export.h>
+#include <linux/bitmap.h>
 
 static spinlock_t dma_page_lock;
 static unsigned long *dma_page;
@@ -46,24 +47,17 @@ static inline unsigned int get_pages(size_t size)
 static unsigned long __alloc_dma_pages(unsigned int pages)
 {
 	unsigned long ret = 0, flags;
-	int i, count = 0;
+	unsigned long start;
 
 	if (dma_initialized == 0)
 		dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);
 
 	spin_lock_irqsave(&dma_page_lock, flags);
 
-	for (i = 0; i < dma_pages;) {
-		if (test_bit(i++, dma_page) == 0) {
-			if (++count == pages) {
-				while (count--)
-					__set_bit(--i, dma_page);
-
-				ret = dma_base + (i << PAGE_SHIFT);
-				break;
-			}
-		} else
-			count = 0;
-	}
+	start = bitmap_find_next_zero_area(dma_page, dma_pages, 0, pages, 0);
+	if (start < dma_pages) {
+		ret = dma_base + (start << PAGE_SHIFT);
+		bitmap_set(dma_page, start, pages);
+	}
 	spin_unlock_irqrestore(&dma_page_lock, flags);
 	return ret;
@@ -73,7 +67,6 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
 {
 	unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
 	unsigned long flags;
-	int i;
 
 	if ((page + pages) > dma_pages) {
 		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
@@ -81,9 +74,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
 	}
 
 	spin_lock_irqsave(&dma_page_lock, flags);
-	for (i = page; i < page + pages; i++)
-		__clear_bit(i, dma_page);
-
+	bitmap_clear(dma_page, page, pages);
 	spin_unlock_irqrestore(&dma_page_lock, flags);
 }
 