author		Ming Lei <tom.leiming@gmail.com>	2009-07-12 09:44:55 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-15 12:50:47 -0400
commit		a56af87648054089d89874b52e3fc23ed4f274ad (patch)
tree		81714be5cc39f830b4acf55ec782b46f36e67ebe /drivers/base/dma-coherent.c
parent		162dd4212409fd2d36ff22547ea821bf3e86bcc9 (diff)
driver-core: move dma-coherent.c from kernel to drivers/base

Placing dma-coherent.c in drivers/base is better than keeping it in
kernel/, since it contains code to do per-device coherent DMA memory
handling.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/base/dma-coherent.c')
-rw-r--r--	drivers/base/dma-coherent.c	176
1 files changed, 176 insertions, 0 deletions
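For readers unfamiliar with the API this file provides: a platform or board
file first declares the per-device pool, after which ordinary
dma_alloc_coherent() calls on that device are served from it (assuming the
architecture wires these hooks into its DMA-coherent path). The routine below
is a minimal, illustrative sketch and not part of this commit; the function
name example_reserve_coherent_pool() and the address/size constants are made
up, while dma_declare_coherent_memory() and the DMA_MEMORY_* flags come from
the code added here.

/*
 * Minimal sketch (not part of this commit): reserving a chunk of
 * device-local memory as a per-device coherent pool. The address and
 * size below are hypothetical example values.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

#define EXAMPLE_POOL_BUS_ADDR	0x40000000UL	/* hypothetical bus address */
#define EXAMPLE_POOL_SIZE	0x10000		/* hypothetical 64 KiB pool */

static int example_reserve_coherent_pool(struct device *dev)
{
        int rc;

        /* Here the bus address and device-visible address are the same. */
        rc = dma_declare_coherent_memory(dev, EXAMPLE_POOL_BUS_ADDR,
                                         EXAMPLE_POOL_BUS_ADDR,
                                         EXAMPLE_POOL_SIZE, DMA_MEMORY_MAP);
        if (!rc)
                return -ENOMEM; /* 0 means the pool could not be set up */

        /* dma_alloc_coherent(dev, ...) will now try this pool first. */
        return 0;
}

On success the call returns DMA_MEMORY_MAP or DMA_MEMORY_IO depending on the
flags passed in, and 0 on failure, exactly as implemented in
dma_declare_coherent_memory() in the diff below.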
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
new file mode 100644
index 000000000000..962a3b574f21
--- /dev/null
+++ b/drivers/base/dma-coherent.c
@@ -0,0 +1,176 @@
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+	void		*virt_base;
+	u32		device_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	void __iomem *mem_base = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
+
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	mem_base = ioremap(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dev->dma_mem)
+		goto out;
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem);
+ out:
+	if (mem_base)
+		iounmap(mem_base);
+	return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pos, err;
+
+	size += device_addr & ~PAGE_MASK;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+/**
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
+ *
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+			    dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem;
+	int order = get_order(size);
+	int pageno;
+
+	if (!dev)
+		return 0;
+	mem = dev->dma_mem;
+	if (!mem)
+		return 0;
+
+	*ret = NULL;
+
+	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+		goto err;
+
+	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
+	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
+}
+EXPORT_SYMBOL(dma_alloc_from_coherent);
+
+/**
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
+ * @dev:	device from which the memory was allocated
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if
+ * dma_release_coherent() should proceed with releasing memory from
+ * generic pools.
+ */
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_release_from_coherent);
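As the kerneldoc for dma_alloc_from_coherent() notes, it is meant to be called
from each architecture's dma_alloc_coherent(). The following is a hedged
sketch of that call site, not part of this commit; arch_alloc_coherent_pages()
is a hypothetical stand-in for whatever generic allocator the architecture
normally uses.

/*
 * Illustrative sketch (not from this commit): an architecture's
 * dma_alloc_coherent() consulting the per-device pool first.
 * arch_alloc_coherent_pages() is a hypothetical fallback helper.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /*
         * A non-zero return means the per-device pool handled the request;
         * ret may still be NULL if an exclusive pool was exhausted.
         */
        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        /* Otherwise fall back to the generic allocator. */
        return arch_alloc_coherent_pages(dev, size, dma_handle, gfp);
}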
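The release side mirrors this: the architecture's dma_free_coherent() should
give dma_release_from_coherent() a chance to reclaim pool pages before freeing
generic memory. Again a sketch under the same assumptions, with
arch_free_coherent_pages() hypothetical.

/*
 * Illustrative sketch (not from this commit): the matching free path.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        int order = get_order(size);

        /* Returns 1 if vaddr came from (and was returned to) the pool. */
        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        arch_free_coherent_pages(dev, size, vaddr, dma_handle);
}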