 arch/sh/mm/consistent.c      | 174 +++++++++++++++++++++++++++++++-----------
 include/asm-sh/dma-mapping.h |  56 ++++++----------
 2 files changed, 148 insertions(+), 82 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 65ad30031ad7..7b2131c9eeda 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -3,6 +3,8 @@
  *
  * Copyright (C) 2004 - 2007 Paul Mundt
  *
+ * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -13,66 +15,146 @@
 #include <asm/addrspace.h>
 #include <asm/io.h>
 
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
-{
-        struct page *page, *end, *free;
-        void *ret, *vp;
-        int order;
-
-        size = PAGE_ALIGN(size);
-        order = get_order(size);
+struct dma_coherent_mem {
+        void *virt_base;
+        u32 device_base;
+        int size;
+        int flags;
+        unsigned long *bitmap;
+};
 
-        page = alloc_pages(gfp, order);
-        if (!page)
-                return NULL;
-        split_page(page, order);
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                         dma_addr_t *dma_handle, gfp_t gfp)
+{
+        void *ret;
+        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+        int order = get_order(size);
+
+        if (mem) {
+                int page = bitmap_find_free_region(mem->bitmap, mem->size,
+                                                   order);
+                if (page >= 0) {
+                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+                        ret = mem->virt_base + (page << PAGE_SHIFT);
+                        memset(ret, 0, size);
+                        return ret;
+                }
+                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+                        return NULL;
+        }
 
-        ret = page_address(page);
-        *handle = virt_to_phys(ret);
+        ret = (void *)__get_free_pages(gfp, order);
 
-        vp = ioremap_nocache(*handle, size);
-        if (!vp) {
-                free_pages((unsigned long)ret, order);
-                return NULL;
+        if (ret != NULL) {
+                memset(ret, 0, size);
+                /*
+                 * Pages from the page allocator may have data present in
+                 * cache. So flush the cache before using uncached memory.
+                 */
+                dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
+                *dma_handle = virt_to_phys(ret);
         }
+        return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
 
-        memset(vp, 0, size);
-
-        /*
-         * We must flush the cache before we pass it on to the device
-         */
-        dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
+void dma_free_coherent(struct device *dev, size_t size,
+                       void *vaddr, dma_addr_t dma_handle)
+{
+        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+        int order = get_order(size);
 
-        page = virt_to_page(ret);
-        free = page + (size >> PAGE_SHIFT);
-        end = page + (1 << order);
+        if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
 
-        while (++page < end) {
-                /* Free any unused pages */
-                if (page >= free) {
-                        __free_page(page);
-                }
+                bitmap_release_region(mem->bitmap, page, order);
+        } else {
+                WARN_ON(irqs_disabled()); /* for portability */
+                BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
+                free_pages((unsigned long)vaddr, order);
         }
-
-        return vp;
 }
-EXPORT_SYMBOL(consistent_alloc);
+EXPORT_SYMBOL(dma_free_coherent);
 
-void consistent_free(void *vaddr, size_t size, dma_addr_t dma_handle)
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                                dma_addr_t device_addr, size_t size, int flags)
 {
-        struct page *page;
-        unsigned long addr;
-
-        addr = (unsigned long)phys_to_virt((unsigned long)dma_handle);
-        page = virt_to_page(addr);
+        void __iomem *mem_base = NULL;
+        int pages = size >> PAGE_SHIFT;
+        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+                goto out;
+        if (!size)
+                goto out;
+        if (dev->dma_mem)
+                goto out;
+
+        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+        mem_base = ioremap_nocache(bus_addr, size);
+        if (!mem_base)
+                goto out;
+
+        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+        if (!dev->dma_mem)
+                goto out;
+        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+        if (!dev->dma_mem->bitmap)
+                goto free1_out;
+
+        dev->dma_mem->virt_base = mem_base;
+        dev->dma_mem->device_base = device_addr;
+        dev->dma_mem->size = pages;
+        dev->dma_mem->flags = flags;
+
+        if (flags & DMA_MEMORY_MAP)
+                return DMA_MEMORY_MAP;
+
+        return DMA_MEMORY_IO;
+
+ free1_out:
+        kfree(dev->dma_mem);
+ out:
+        if (mem_base)
+                iounmap(mem_base);
+        return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
 
-        free_pages(addr, get_order(size));
+void dma_release_declared_memory(struct device *dev)
+{
+        struct dma_coherent_mem *mem = dev->dma_mem;
+
+        if (!mem)
+                return;
+        dev->dma_mem = NULL;
+        iounmap(mem->virt_base);
+        kfree(mem->bitmap);
+        kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
 
-        iounmap(vaddr);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                        dma_addr_t device_addr, size_t size)
+{
+        struct dma_coherent_mem *mem = dev->dma_mem;
+        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        int pos, err;
+
+        if (!mem)
+                return ERR_PTR(-EINVAL);
+
+        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+        if (err != 0)
+                return ERR_PTR(err);
+        return mem->virt_base + (pos << PAGE_SHIFT);
 }
-EXPORT_SYMBOL(consistent_free);
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-void consistent_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                    enum dma_data_direction direction)
 {
 #ifdef CONFIG_CPU_SH5
         void *p1addr = vaddr;
@@ -94,4 +176,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
                 BUG();
         }
 }
-EXPORT_SYMBOL(consistent_sync);
+EXPORT_SYMBOL(dma_cache_sync);
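
Aside (not part of the patch): the declared-memory path above lets a driver reserve a device-local window once, after which dma_alloc_coherent() carves allocations out of it with bitmap_find_free_region() instead of going to the page allocator. A minimal usage sketch against this version of the API follows; the probe function, addresses, and window size are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Hypothetical probe: back dev->dma_mem with a 64KiB on-chip memory
 * window so coherent allocations come from it rather than from
 * __get_free_pages(). Addresses and size are made up. */
static int example_probe(struct platform_device *pdev)
{
        dma_addr_t dma_handle;
        void *vaddr;
        int ret;

        ret = dma_declare_coherent_memory(&pdev->dev,
                                          0xa8000000,  /* bus address (hypothetical) */
                                          0xa8000000,  /* device's view of it */
                                          0x10000,     /* 64KiB */
                                          DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
        if (ret != DMA_MEMORY_MAP)
                return -ENOMEM;

        /* Served from the declared region via bitmap_find_free_region(). */
        vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma_handle,
                                   GFP_KERNEL);
        if (!vaddr) {
                dma_release_declared_memory(&pdev->dev);
                return -ENOMEM;
        }

        /* ... point the device at dma_handle ... */

        dma_free_coherent(&pdev->dev, PAGE_SIZE, vaddr, dma_handle);
        dma_release_declared_memory(&pdev->dev);
        return 0;
}

With DMA_MEMORY_EXCLUSIVE set, an exhausted window makes dma_alloc_coherent() return NULL outright rather than falling back to __get_free_pages(), matching the flag check in the patched allocator.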
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 20ae762e5258..22cc419389fe 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -8,11 +8,6 @@
 
 extern struct bus_type pci_bus_type;
 
-/* arch/sh/mm/consistent.c */
-extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
-extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
-extern void consistent_sync(void *vaddr, size_t size, int direction);
-
 #define dma_supported(dev, mask) (1)
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
@@ -25,44 +20,19 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
         return 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                                       dma_addr_t *dma_handle, gfp_t flag)
-{
-        if (sh_mv.mv_consistent_alloc) {
-                void *ret;
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                         dma_addr_t *dma_handle, gfp_t flag);
 
-                ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
-                if (ret != NULL)
-                        return ret;
-        }
-
-        return consistent_alloc(flag, size, dma_handle);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-                                     void *vaddr, dma_addr_t dma_handle)
-{
-        if (sh_mv.mv_consistent_free) {
-                int ret;
-
-                ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
-                if (ret == 0)
-                        return;
-        }
+void dma_free_coherent(struct device *dev, size_t size,
+                       void *vaddr, dma_addr_t dma_handle);
 
-        consistent_free(vaddr, size, dma_handle);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                    enum dma_data_direction dir);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h) (1)
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                  enum dma_data_direction dir)
-{
-        consistent_sync(vaddr, size, (int)dir);
-}
-
 static inline dma_addr_t dma_map_single(struct device *dev,
                                         void *ptr, size_t size,
                                         enum dma_data_direction dir)
@@ -205,4 +175,18 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
 {
         return dma_addr == 0;
 }
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                            dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+                                  dma_addr_t device_addr, size_t size);
+
 #endif /* __ASM_SH_DMA_MAPPING_H */
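
Aside (not part of the patch): the header change only moves the coherent API out of line; the streaming wrappers it keeps inline still reduce to dma_cache_sync() plus virt_to_phys() on sh. A minimal sketch of a driver transmit path against this era of the API follows; the function and buffer names are invented for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: hand 'buf' to a device for reading.
 * On sh, dma_map_single() flushes the buffer's cache lines (via
 * dma_cache_sync()) and returns virt_to_phys(buf) as the bus address. */
static void example_start_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dma)) /* single-argument form of this era */
                return;

        /* ... program the device's DMA engine with 'dma' and 'len' ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}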