Diffstat (limited to 'arch/sh/mm/consistent.c')
-rw-r--r--  arch/sh/mm/consistent.c  174
1 file changed, 133 insertions(+), 41 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index e220c29a3c0..7b2131c9eed 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -1,7 +1,9 @@
 /*
  * arch/sh/mm/consistent.c
  *
- * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ *
+ * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -13,58 +15,152 @@
 #include <asm/addrspace.h>
 #include <asm/io.h>
 
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
+struct dma_coherent_mem {
+	void		*virt_base;
+	u32		device_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t gfp)
 {
-	struct page *page, *end, *free;
 	void *ret;
-	int order;
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
 
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
+	if (mem) {
+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
+						   order);
+		if (page >= 0) {
+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
+			ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(ret, 0, size);
+			return ret;
+		}
+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+			return NULL;
+	}
 
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
-	split_page(page, order);
+	ret = (void *)__get_free_pages(gfp, order);
 
-	ret = page_address(page);
-	memset(ret, 0, size);
-	*handle = virt_to_phys(ret);
-
-	/*
-	 * We must flush the cache before we pass it on to the device
-	 */
-	__flush_purge_region(ret, size);
-
-	page = virt_to_page(ret);
-	free = page + (size >> PAGE_SHIFT);
-	end = page + (1 << order);
-
-	while (++page < end) {
-		/* Free any unused pages */
-		if (page >= free) {
-			__free_page(page);
-		}
-	}
-
-	return P2SEGADDR(ret);
-}
-
-void consistent_free(void *vaddr, size_t size)
-{
-	unsigned long addr = P1SEGADDR((unsigned long)vaddr);
-	struct page *page=virt_to_page(addr);
-	int num_pages=(size+PAGE_SIZE-1) >> PAGE_SHIFT;
-	int i;
-
-	for(i=0;i<num_pages;i++) {
-		__free_page((page+i));
-	}
-}
-
-void consistent_sync(void *vaddr, size_t size, int direction)
-{
-	void * p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		/*
+		 * Pages from the page allocator may have data present in
+		 * cache. So flush the cache before using uncached memory.
+		 */
+		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
+		*dma_handle = virt_to_phys(ret);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+	} else {
+		WARN_ON(irqs_disabled());	/* for portability */
+		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
+		free_pages((unsigned long)vaddr, order);
+	}
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	void __iomem *mem_base = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
+
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	mem_base = ioremap_nocache(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dev->dma_mem)
+		goto out;
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem);
+ out:
+	if (mem_base)
+		iounmap(mem_base);
+	return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int pos, err;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction direction)
+{
+#ifdef CONFIG_CPU_SH5
+	void *p1addr = vaddr;
+#else
+	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
+#endif
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
@@ -80,8 +176,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 		BUG();
 	}
 }
-
-EXPORT_SYMBOL(consistent_alloc);
-EXPORT_SYMBOL(consistent_free);
-EXPORT_SYMBOL(consistent_sync);
-
+EXPORT_SYMBOL(dma_cache_sync);
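
For reference, a usage sketch of the declared-coherent-memory interface this patch brings over. The board function, SRAM address, and sizes below are hypothetical, chosen only to illustrate the call sequence; the functions themselves are the ones exported in the diff above:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical: 1 MiB of device-local SRAM at a made-up bus address */
#define BOARD_SRAM_BASE	0xa8000000UL
#define BOARD_SRAM_SIZE	0x100000

static int board_dma_example(struct device *dev)
{
	void *cpu_addr;
	dma_addr_t dma_handle;

	/*
	 * Attach the SRAM to the device as its coherent pool. With
	 * DMA_MEMORY_MAP, dma_alloc_coherent() hands back a directly
	 * usable virtual address inside this region.
	 */
	if (dma_declare_coherent_memory(dev, BOARD_SRAM_BASE,
					BOARD_SRAM_BASE, BOARD_SRAM_SIZE,
					DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
		return -ENXIO;

	/* Now served from the declared region via bitmap_find_free_region() */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		dma_release_declared_memory(dev);
		return -ENOMEM;
	}

	/* ... program the device with dma_handle, run the transfer ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	dma_release_declared_memory(dev);
	return 0;
}

Passing the same value for bus_addr and device_addr assumes the device sees the SRAM at the same address the CPU uses on the bus; a board where the device's view differs would pass distinct values, since device_base is what dma_alloc_coherent() folds into the returned handle.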