author      Dmitry Baryshkov <dbaryshkov@gmail.com>    2008-07-18 05:30:31 -0400
committer   Ingo Molnar <mingo@elte.hu>                2008-07-18 15:14:02 -0400
commit      9de90ac27d752bc0177baf2699ab483888de0743 (patch)
tree        9b368663c57849aec3d182b662ff467956a83a77 /arch/sh/mm
parent      1fe532685a1984dc9f2603ed20bd5e630ba79709 (diff)
sh: use generic per-device coherent dma allocator
Signed-off-by: Dmitry Baryshkov <dbaryshkov@gmail.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
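
[Editor's note: for readers unfamiliar with the generic allocator, it centralizes the per-device coherent-pool bookkeeping that each arch previously open-coded, exposing dma_alloc_from_coherent() and dma_release_from_coherent() via <linux/dma-mapping.h>. Below is a minimal sketch of the call pattern this patch adopts; the function name arch_dma_alloc and the bare-pages fallback are illustrative only, not the full sh implementation, which also remaps the allocation uncached.]

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch only: the real sh dma_alloc_coherent() additionally creates
 * an uncached mapping of the returned pages; that step is omitted. */
static void *arch_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /* A nonzero return means the device's dedicated pool handled
         * the request: either *ret holds the zeroed allocation, or it
         * is NULL because DMA_MEMORY_EXCLUSIVE forbids falling back. */
        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        /* No per-device pool: fall back to the page allocator. */
        return (void *)__get_free_pages(gfp, get_order(size));
}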
Diffstat (limited to 'arch/sh/mm')
 -rw-r--r--  arch/sh/mm/consistent.c  98
 1 file changed, 3 insertions(+), 95 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index d3c33fc5b1c2..3095d9581475 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -27,21 +27,10 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret, *ret_nocache;
-       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);
 
-       if (mem) {
-               int page = bitmap_find_free_region(mem->bitmap, mem->size,
-                                                  order);
-               if (page >= 0) {
-                       *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-                       ret = mem->virt_base + (page << PAGE_SHIFT);
-                       memset(ret, 0, size);
-                       return ret;
-               }
-               if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-                       return NULL;
-       }
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+               return ret;
 
        ret = (void *)__get_free_pages(gfp, order);
        if (!ret)
@@ -71,11 +60,7 @@ void dma_free_coherent(struct device *dev, size_t size,
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);
 
-       if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-               bitmap_release_region(mem->bitmap, page, order);
-       } else {
+       if (!dma_release_from_coherent(dev, order, vaddr)) {
                WARN_ON(irqs_disabled());       /* for portability */
                BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
                free_pages((unsigned long)phys_to_virt(dma_handle), order);
@@ -84,83 +69,6 @@ void dma_free_coherent(struct device *dev, size_t size,
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                               dma_addr_t device_addr, size_t size, int flags)
-{
-       void __iomem *mem_base = NULL;
-       int pages = size >> PAGE_SHIFT;
-       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
-       if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-               goto out;
-       if (!size)
-               goto out;
-       if (dev->dma_mem)
-               goto out;
-
-       /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
-       mem_base = ioremap_nocache(bus_addr, size);
-       if (!mem_base)
-               goto out;
-
-       dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-       if (!dev->dma_mem)
-               goto out;
-       dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-       if (!dev->dma_mem->bitmap)
-               goto free1_out;
-
-       dev->dma_mem->virt_base = mem_base;
-       dev->dma_mem->device_base = device_addr;
-       dev->dma_mem->size = pages;
-       dev->dma_mem->flags = flags;
-
-       if (flags & DMA_MEMORY_MAP)
-               return DMA_MEMORY_MAP;
-
-       return DMA_MEMORY_IO;
-
- free1_out:
-       kfree(dev->dma_mem);
- out:
-       if (mem_base)
-               iounmap(mem_base);
-       return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-       struct dma_coherent_mem *mem = dev->dma_mem;
-
-       if (!mem)
-               return;
-       dev->dma_mem = NULL;
-       iounmap(mem->virt_base);
-       kfree(mem->bitmap);
-       kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                                       dma_addr_t device_addr, size_t size)
-{
-       struct dma_coherent_mem *mem = dev->dma_mem;
-       int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int pos, err;
-
-       if (!mem)
-               return ERR_PTR(-EINVAL);
-
-       pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-       err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-       if (err != 0)
-               return ERR_PTR(err);
-       return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
 {
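
[Editor's note: with this change, sh relies on the generic implementations of dma_declare_coherent_memory() and friends that the deleted code duplicated. A hypothetical platform-setup fragment might reserve a pool as below; the function name example_declare_pool, the addresses, and the size are made up for illustration, while the signature and DMA_MEMORY_* flags match the API the deleted code implemented.]

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical board code: give a device a dedicated coherent pool
 * so that dma_alloc_coherent() is satisfied from it through
 * dma_alloc_from_coherent().  Addresses and size are examples only. */
static int example_declare_pool(struct device *dev)
{
        int ret;

        ret = dma_declare_coherent_memory(dev,
                                          0xa8000000,   /* bus address */
                                          0xa8000000,   /* device-visible address */
                                          1 << 20,      /* 1 MiB pool */
                                          DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
        /* Returns DMA_MEMORY_MAP on success for a mappable pool, 0 on failure. */
        return (ret & DMA_MEMORY_MAP) ? 0 : -ENOMEM;
}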