path: root/drivers/base/dma-contiguous.c
author    Marek Szyprowski <m.szyprowski@samsung.com>    2013-07-29 08:31:45 -0400
committer Marek Szyprowski <m.szyprowski@samsung.com>    2013-08-27 03:18:29 -0400
commit    a2547380393ac82c659b40182b0da8d05a8365f3 (patch)
tree      45ff811ef870825743dd88eb14128003a9e3e375 /drivers/base/dma-contiguous.c
parent    f7d8f1e9cb44e6ee1602586dbf7f2bed637a2b4e (diff)
drivers: dma-contiguous: clean source code and prepare for device tree
This patch cleans up the initialization of the dma contiguous framework. The all-in-one dma_declare_contiguous() function is now separated into dma_contiguous_reserve_area(), which only steals the memory from the memblock allocator, and the dma_contiguous_add_device() function, which assigns a given device to the specified reserved memory area. This improves the flexibility in defining contiguous memory areas and assigning devices to them, because it is now possible to assign more than one device to a given contiguous memory area. Such a split of the initialization procedure is also required for the upcoming device tree support.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Tomasz Figa <t.figa@samsung.com>
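For illustration, a minimal sketch (not part of this patch) of how arch or board code might use the split API: the area is reserved early via dma_contiguous_reserve_area() while memblock is still active, and one or more devices can be attached to it later with dev_set_cma_area() from <linux/dma-contiguous.h>. The foo_* names and the 16 MiB size are hypothetical placeholders.

/* Hypothetical arch/board code showing the two-step flow after this patch. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

static struct cma *foo_cma;     /* handle filled in by the reserve step */

/* Step 1: run from the arch reserve hook, while memblock is still active. */
void __init foo_reserve_cma(void)
{
        if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &foo_cma))
                pr_warn("foo: CMA area reservation failed\n");
}

/* Step 2: once devices exist, several of them can share the same area. */
void foo_assign_cma(struct device *dev)
{
        dev_set_cma_area(dev, foo_cma);
}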
Diffstat (limited to 'drivers/base/dma-contiguous.c')
-rw-r--r--    drivers/base/dma-contiguous.c    119
1 file changed, 44 insertions(+), 75 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce97..99802d6f3c60 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 #endif
         }
 
-        if (selected_size) {
+        if (selected_size && !dma_contiguous_default_area) {
                 pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                          (unsigned long)selected_size / SZ_1M);
 
-                dma_declare_contiguous(NULL, selected_size, 0, limit);
+                dma_contiguous_reserve_area(selected_size, 0, limit,
+                                            &dma_contiguous_default_area);
         }
 };
 
 static DEFINE_MUTEX(cma_mutex);
 
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
 {
-        unsigned long pfn = base_pfn;
-        unsigned i = count >> pageblock_order;
+        int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+        unsigned i = cma->count >> pageblock_order;
         struct zone *zone;
 
+        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+        if (!cma->bitmap)
+                return -ENOMEM;
+
         WARN_ON_ONCE(!pfn_valid(pfn));
         zone = page_zone(pfn_to_page(pfn));
 
@@ -153,92 +160,53 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
                 }
                 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
         } while (--i);
-        return 0;
-}
-
-static __init struct cma *cma_create_area(unsigned long base_pfn,
-                                     unsigned long count)
-{
-        int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-        struct cma *cma;
-        int ret = -ENOMEM;
-
-        pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-        cma = kmalloc(sizeof *cma, GFP_KERNEL);
-        if (!cma)
-                return ERR_PTR(-ENOMEM);
-
-        cma->base_pfn = base_pfn;
-        cma->count = count;
-        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
-        if (!cma->bitmap)
-                goto no_mem;
-
-        ret = cma_activate_area(base_pfn, count);
-        if (ret)
-                goto error;
-
-        pr_debug("%s: returned %p\n", __func__, (void *)cma);
-        return cma;
-
-error:
-        kfree(cma->bitmap);
-no_mem:
-        kfree(cma);
-        return ERR_PTR(ret);
+        return 0;
 }
 
-static struct cma_reserved {
-        phys_addr_t start;
-        unsigned long size;
-        struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
 
 static int __init cma_init_reserved_areas(void)
 {
-        struct cma_reserved *r = cma_reserved;
-        unsigned i = cma_reserved_count;
-
-        pr_debug("%s()\n", __func__);
+        int i;
 
-        for (; i; --i, ++r) {
-                struct cma *cma;
-                cma = cma_create_area(PFN_DOWN(r->start),
-                                      r->size >> PAGE_SHIFT);
-                if (!IS_ERR(cma))
-                        dev_set_cma_area(r->dev, cma);
+        for (i = 0; i < cma_area_count; i++) {
+                int ret = cma_activate_area(&cma_areas[i]);
+                if (ret)
+                        return ret;
         }
+
         return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *                            for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-                                  phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                       phys_addr_t limit, struct cma **res_cma)
 {
-        struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+        struct cma *cma = &cma_areas[cma_area_count];
         phys_addr_t alignment;
+        int ret = 0;
 
         pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                  (unsigned long)size, (unsigned long)base,
                  (unsigned long)limit);
 
         /* Sanity checks */
-        if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                 pr_err("Not enough slots for CMA reserved regions!\n");
                 return -ENOSPC;
         }
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
         if (base) {
                 if (memblock_is_region_reserved(base, size) ||
                     memblock_reserve(base, size) < 0) {
-                        base = -EBUSY;
+                        ret = -EBUSY;
                         goto err;
                 }
         } else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
                  */
                 phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                 if (!addr) {
-                        base = -ENOMEM;
+                        ret = -ENOMEM;
                         goto err;
                 } else {
                         base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
          * Each reserved area must be initialised later, when more kernel
          * subsystems (like slab allocator) are available.
          */
-        r->start = base;
-        r->size = size;
-        r->dev = dev;
-        cma_reserved_count++;
+        cma->base_pfn = PFN_DOWN(base);
+        cma->count = size >> PAGE_SHIFT;
+        *res_cma = cma;
+        cma_area_count++;
+
         pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                 (unsigned long)base);
 
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
         return 0;
 err:
         pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-        return base;
+        return ret;
 }
 
 /**
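As a side note on the bitmap sizing now done in cma_activate_area() above: each area carries one allocation bit per page, rounded up to whole longs. A worked sketch of the arithmetic for a hypothetical 16 MiB area, assuming 4 KiB pages and 8-byte longs (both architecture-dependent assumptions):

/* Worked example of the bitmap_size computation in cma_activate_area().
 * Assumes PAGE_SHIFT == 12 (4 KiB pages) and sizeof(long) == 8.
 */
unsigned long count = SZ_16M >> PAGE_SHIFT;             /* 4096 pages */
int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);  /* 64 longs = 512 bytes */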