Diffstat (limited to 'arch/sh/mm/consistent.c')

 -rw-r--r--  arch/sh/mm/consistent.c | 128
 1 files changed, 33 insertions, 95 deletions
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index d3c33fc5b1c2..b2ce014401b5 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -10,6 +10,7 @@
  * for more details.
  */
 #include <linux/mm.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <asm/cacheflush.h>
 #include <asm/addrspace.h>
@@ -27,21 +28,10 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
 {
         void *ret, *ret_nocache;
-        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
         int order = get_order(size);
 
-        if (mem) {
-                int page = bitmap_find_free_region(mem->bitmap, mem->size,
-                                                   order);
-                if (page >= 0) {
-                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-                        ret = mem->virt_base + (page << PAGE_SHIFT);
-                        memset(ret, 0, size);
-                        return ret;
-                }
-                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-                        return NULL;
-        }
+        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+                return ret;
 
         ret = (void *)__get_free_pages(gfp, order);
         if (!ret)
@@ -71,11 +61,7 @@ void dma_free_coherent(struct device *dev, size_t size,
         struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
         int order = get_order(size);
 
-        if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-                bitmap_release_region(mem->bitmap, page, order);
-        } else {
+        if (!dma_release_from_coherent(dev, order, vaddr)) {
                 WARN_ON(irqs_disabled());        /* for portability */
                 BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
                 free_pages((unsigned long)phys_to_virt(dma_handle), order);
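The two calls introduced in the hunks above, dma_alloc_from_coherent() and dma_release_from_coherent(), are the generic per-device coherent-pool helpers (kernel/dma-coherent.c in this era): a nonzero return means the request was handled from memory previously declared for the device, so the arch code only falls back to the ordinary page allocator when they return 0. As a minimal consumer-side sketch, assuming a made-up "foo" driver (names and sizes are illustrative, not from this patch), a buffer allocated like this ends up going through the routines patched here:

/* Hypothetical consumer of the sh dma_alloc_coherent()/dma_free_coherent()
 * shown above; the "foo" identifiers and ring size are illustrative only. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct foo_ring {
        void            *desc;          /* CPU (uncached) view of the ring */
        dma_addr_t      desc_dma;       /* bus address programmed into the device */
        size_t          size;
};

static int foo_ring_alloc(struct device *dev, struct foo_ring *ring)
{
        ring->size = PAGE_SIZE;
        /* Served from dev's declared coherent pool when one exists,
         * otherwise from __get_free_pages() as in the hunk above. */
        ring->desc = dma_alloc_coherent(dev, ring->size, &ring->desc_dma,
                                        GFP_KERNEL);
        return ring->desc ? 0 : -ENOMEM;
}

static void foo_ring_free(struct device *dev, struct foo_ring *ring)
{
        dma_free_coherent(dev, ring->size, ring->desc, ring->desc_dma);
}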
@@ -84,83 +70,6 @@ void dma_free_coherent(struct device *dev, size_t size,
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                                dma_addr_t device_addr, size_t size, int flags)
-{
-        void __iomem *mem_base = NULL;
-        int pages = size >> PAGE_SHIFT;
-        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
-        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-                goto out;
-        if (!size)
-                goto out;
-        if (dev->dma_mem)
-                goto out;
-
-        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
-        mem_base = ioremap_nocache(bus_addr, size);
-        if (!mem_base)
-                goto out;
-
-        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-        if (!dev->dma_mem)
-                goto out;
-        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-        if (!dev->dma_mem->bitmap)
-                goto free1_out;
-
-        dev->dma_mem->virt_base = mem_base;
-        dev->dma_mem->device_base = device_addr;
-        dev->dma_mem->size = pages;
-        dev->dma_mem->flags = flags;
-
-        if (flags & DMA_MEMORY_MAP)
-                return DMA_MEMORY_MAP;
-
-        return DMA_MEMORY_IO;
-
- free1_out:
-        kfree(dev->dma_mem);
- out:
-        if (mem_base)
-                iounmap(mem_base);
-        return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-        struct dma_coherent_mem *mem = dev->dma_mem;
-
-        if (!mem)
-                return;
-        dev->dma_mem = NULL;
-        iounmap(mem->virt_base);
-        kfree(mem->bitmap);
-        kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                                        dma_addr_t device_addr, size_t size)
-{
-        struct dma_coherent_mem *mem = dev->dma_mem;
-        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        int pos, err;
-
-        if (!mem)
-                return ERR_PTR(-EINVAL);
-
-        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-        if (err != 0)
-                return ERR_PTR(err);
-        return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                     enum dma_data_direction direction)
 {
@@ -185,3 +94,32 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
         }
 }
 EXPORT_SYMBOL(dma_cache_sync);
+
+int platform_resource_setup_memory(struct platform_device *pdev,
+                                   char *name, unsigned long memsize)
+{
+        struct resource *r;
+        dma_addr_t dma_handle;
+        void *buf;
+
+        r = pdev->resource + pdev->num_resources - 1;
+        if (r->flags) {
+                pr_warning("%s: unable to find empty space for resource\n",
+                        name);
+                return -EINVAL;
+        }
+
+        buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
+        if (!buf) {
+                pr_warning("%s: unable to allocate memory\n", name);
+                return -ENOMEM;
+        }
+
+        memset(buf, 0, memsize);
+
+        r->flags = IORESOURCE_MEM;
+        r->start = dma_handle;
+        r->end = r->start + memsize - 1;
+        r->name = name;
+        return 0;
+}
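The new platform_resource_setup_memory() expects the platform device to be registered with one extra, zero-initialised resource slot at the end of its resource array; it fills that slot with an IORESOURCE_MEM entry covering the coherent buffer it allocates. A minimal board-file sketch under that assumption (the "foo" device name, register window and buffer size are made up, not from this patch):

#include <linux/platform_device.h>
#include <linux/ioport.h>

static struct resource foo_resources[] = {
        [0] = {
                .start  = 0xfe200000,           /* illustrative register window */
                .end    = 0xfe2000ff,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
                /* left empty (flags == 0): filled in by
                 * platform_resource_setup_memory() below */
        },
};

static struct platform_device foo_device = {
        .name           = "foo",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(foo_resources),
        .resource       = foo_resources,
};

static int __init foo_board_setup(void)
{
        /* Allocate a 1 MiB coherent buffer and publish it as the last
         * (previously empty) MEM resource of the device. */
        platform_resource_setup_memory(&foo_device, "foo", 1 << 20);

        return platform_device_register(&foo_device);
}
device_initcall(foo_board_setup);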