author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-09 13:26:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-09 13:26:33 -0400
commit		64c353864e3f7ccba0ade1bd6f562f9a3bc7e68d (patch)
tree		fdd4d4c0cc90ef920cd755b835f0acf1e6ef8fbf /drivers
parent		d8cacd3a259bf522ea5e6c4c60eba67ba22f599c (diff)
parent		10bcdfb8ba24760f715f0a700c3812747eddddf5 (diff)
Merge branch 'for-v3.12' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA mapping update from Marek Szyprowski:
 "This contains an addition of Device Tree support for reserved memory
  regions (Contiguous Memory Allocator is one of the drivers for it) and
  changes required by the KVM extensions for PowerPC architecture"

* 'for-v3.12' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: init: add support for reserved memory defined by device tree
  drivers: of: add initialization code for dma reserved memory
  drivers: of: add function to scan fdt nodes given by path
  drivers: dma-contiguous: clean source code and prepare for device tree
Diffstat (limited to 'drivers')
 drivers/base/dma-contiguous.c | 119
 drivers/of/Kconfig            |   6
 drivers/of/Makefile           |   1
 drivers/of/fdt.c              |  76
 drivers/of/of_reserved_mem.c  | 175
 drivers/of/platform.c         |   4
 6 files changed, 306 insertions(+), 75 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6c9cdaa9200d..99802d6f3c60 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 #endif
 	}
 
-	if (selected_size) {
+	if (selected_size && !dma_contiguous_default_area) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_declare_contiguous(NULL, selected_size, 0, limit);
+		dma_contiguous_reserve_area(selected_size, 0, limit,
+					    &dma_contiguous_default_area);
 	}
 };
 
 static DEFINE_MUTEX(cma_mutex);
 
-static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
 {
-	unsigned long pfn = base_pfn;
-	unsigned i = count >> pageblock_order;
+	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
 
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+	if (!cma->bitmap)
+		return -ENOMEM;
+
 	WARN_ON_ONCE(!pfn_valid(pfn));
 	zone = page_zone(pfn_to_page(pfn));
 
@@ -153,92 +160,53 @@ static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
 		}
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
-	return 0;
-}
-
-static struct cma * __init cma_create_area(unsigned long base_pfn,
-				     unsigned long count)
-{
-	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-	struct cma *cma;
-	int ret = -ENOMEM;
-
-	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-	cma = kmalloc(sizeof *cma, GFP_KERNEL);
-	if (!cma)
-		return ERR_PTR(-ENOMEM);
-
-	cma->base_pfn = base_pfn;
-	cma->count = count;
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
-	if (!cma->bitmap)
-		goto no_mem;
-
-	ret = cma_activate_area(base_pfn, count);
-	if (ret)
-		goto error;
-
-	pr_debug("%s: returned %p\n", __func__, (void *)cma);
-	return cma;
-
-error:
-	kfree(cma->bitmap);
-no_mem:
-	kfree(cma);
-	return ERR_PTR(ret);
+	return 0;
 }
 
-static struct cma_reserved {
-	phys_addr_t start;
-	unsigned long size;
-	struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
 
 static int __init cma_init_reserved_areas(void)
 {
-	struct cma_reserved *r = cma_reserved;
-	unsigned i = cma_reserved_count;
-
-	pr_debug("%s()\n", __func__);
+	int i;
 
-	for (; i; --i, ++r) {
-		struct cma *cma;
-		cma = cma_create_area(PFN_DOWN(r->start),
-				      r->size >> PAGE_SHIFT);
-		if (!IS_ERR(cma))
-			dev_set_cma_area(r->dev, cma);
+	for (i = 0; i < cma_area_count; i++) {
+		int ret = cma_activate_area(&cma_areas[i]);
+		if (ret)
+			return ret;
 	}
+
 	return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *			      for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows the creation of custom reserved areas for
+ * specific devices.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-				  phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma)
 {
-	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
+	int ret = 0;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
 		 (unsigned long)limit);
 
 	/* Sanity checks */
-	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
 	}
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	if (base) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
-			base = -EBUSY;
+			ret = -EBUSY;
 			goto err;
 		}
 	} else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 		 */
 		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
 		if (!addr) {
-			base = -ENOMEM;
+			ret = -ENOMEM;
 			goto err;
 		} else {
 			base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
 	 */
-	r->start = base;
-	r->size = size;
-	r->dev = dev;
-	cma_reserved_count++;
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	*res_cma = cma;
+	cma_area_count++;
 
 	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	return 0;
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return base;
+	return ret;
 }
 
 /**
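The conversion above replaces the per-device dma_declare_contiguous() with
dma_contiguous_reserve_area(), which hands back a struct cma pointer that the
caller attaches to devices itself. A minimal sketch of how arch setup code
might use the new interface; the area name, size, and later device hook are
illustrative assumptions, not part of this patch:

	#include <linux/init.h>
	#include <linux/sizes.h>
	#include <linux/dma-contiguous.h>

	static struct cma *example_cma;	/* hypothetical dedicated area */

	static void __init example_reserve(void)
	{
		/* carve out 64 MiB anywhere in memory (limit 0 = no bound) */
		if (dma_contiguous_reserve_area(SZ_64M, 0, 0, &example_cma))
			pr_warn("example CMA reservation failed\n");
	}

	/* later, once the consuming device is known:
	 *	dev_set_cma_area(dev, example_cma);
	 */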
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 78cc76053328..9d2009a9004d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,4 +74,10 @@ config OF_MTD
74 depends on MTD 74 depends on MTD
75 def_bool y 75 def_bool y
76 76
77config OF_RESERVED_MEM
78 depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
79 def_bool y
80 help
81 Initialization code for DMA reserved memory
82
77endmenu # OF 83endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index efd05102c405..ed9660adad77 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
 obj-$(CONFIG_OF_PCI)	+= of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
 obj-$(CONFIG_OF_MTD)	+= of_mtd.o
+obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b10ba00cc3e6..4fb06f3e7b3c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -545,6 +545,82 @@ int __init of_flat_dt_match(unsigned long node, const char *const *compat)
 	return of_fdt_match(initial_boot_params, node, compat);
 }
 
+struct fdt_scan_status {
+	const char *name;
+	int namelen;
+	int depth;
+	int found;
+	int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
+	void *data;
+};
+
+/**
+ * fdt_scan_node_by_path - iterator for of_scan_flat_dt_by_path function
+ */
+static int __init fdt_scan_node_by_path(unsigned long node, const char *uname,
+					int depth, void *data)
+{
+	struct fdt_scan_status *st = data;
+
+	/*
+	 * if scan at the requested fdt node has been completed,
+	 * return -ENXIO to abort further scanning
+	 */
+	if (depth <= st->depth)
+		return -ENXIO;
+
+	/* requested fdt node has been found, so call iterator function */
+	if (st->found)
+		return st->iterator(node, uname, depth, st->data);
+
+	/* check if scanning automata is entering next level of fdt nodes */
+	if (depth == st->depth + 1 &&
+	    strncmp(st->name, uname, st->namelen) == 0 &&
+	    uname[st->namelen] == 0) {
+		st->depth += 1;
+		if (st->name[st->namelen] == 0) {
+			st->found = 1;
+		} else {
+			const char *next = st->name + st->namelen + 1;
+			st->name = next;
+			st->namelen = strcspn(next, "/");
+		}
+		return 0;
+	}
+
+	/* scan next fdt node */
+	return 0;
+}
+
+/**
+ * of_scan_flat_dt_by_path - scan flattened tree blob and call callback on each
+ *			     child of the given path.
+ * @path: path to start searching for children
+ * @it: callback function
+ * @data: context data pointer
+ *
+ * This function is used to scan the flattened device-tree starting from the
+ * node given by path. It is used to extract information (like reserved
+ * memory), which is required on early boot before we can unflatten the tree.
+ */
+int __init of_scan_flat_dt_by_path(const char *path,
+	int (*it)(unsigned long node, const char *name, int depth, void *data),
+	void *data)
+{
+	struct fdt_scan_status st = {path, 0, -1, 0, it, data};
+	int ret = 0;
+
+	if (initial_boot_params)
+		ret = of_scan_flat_dt(fdt_scan_node_by_path, &st);
+
+	if (!st.found)
+		return -ENOENT;
+	else if (ret == -ENXIO)	/* scan has been completed */
+		return 0;
+	else
+		return ret;
+}
+
 #ifdef CONFIG_BLK_DEV_INITRD
 /**
  * early_init_dt_check_for_initrd - Decode initrd location from flat tree
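of_scan_flat_dt_by_path() walks the whole blob with of_scan_flat_dt() and
only forwards nodes below the requested path to the caller's iterator; a
non-zero return from the iterator aborts the scan. A short sketch of a
caller, with a made-up path and callback for illustration:

	#include <linux/init.h>
	#include <linux/of_fdt.h>

	static int __init example_count_nodes(unsigned long node,
					      const char *uname,
					      int depth, void *data)
	{
		int *count = data;

		(*count)++;	/* called for each node under the path */
		return 0;	/* 0 = keep scanning */
	}

	static void __init example_scan(void)
	{
		int count = 0;

		/* "/example-parent" is a hypothetical FDT path */
		if (of_scan_flat_dt_by_path("/example-parent",
					    example_count_nodes, &count) == 0)
			pr_info("found %d node(s)\n", count);
	}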
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
new file mode 100644
index 000000000000..a754b84ba016
--- /dev/null
+++ b/drivers/of/of_reserved_mem.c
@@ -0,0 +1,175 @@
+/*
+ * Device tree based initialization code for reserved memory.
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ */
+
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_reserved_mem.h>
+
+#define MAX_RESERVED_REGIONS	16
+struct reserved_mem {
+	phys_addr_t	base;
+	unsigned long	size;
+	struct cma	*cma;
+	char		name[32];
+};
+static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+static int reserved_mem_count;
+
+static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
+					int depth, void *data)
+{
+	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
+	phys_addr_t base, size;
+	int is_cma, is_reserved;
+	unsigned long len;
+	const char *status;
+	__be32 *prop;
+
+	is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
+	       of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
+	is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
+
+	if (!is_reserved && !is_cma) {
+		/* ignore node and scan next one */
+		return 0;
+	}
+
+	status = of_get_flat_dt_prop(node, "status", &len);
+	if (status && strcmp(status, "okay") != 0) {
+		/* ignore disabled node and scan next one */
+		return 0;
+	}
+
+	prop = of_get_flat_dt_prop(node, "reg", &len);
+	if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
+			     sizeof(__be32))) {
+		pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
+		       uname);
+		/* ignore node and scan next one */
+		return 0;
+	}
+	base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+	size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+	if (!size) {
+		/* ignore node and scan next one */
+		return 0;
+	}
+
+	pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
+		uname, (unsigned long)base, (unsigned long)size / SZ_1M);
+
+	if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
+		return -ENOSPC;
+
+	rmem->base = base;
+	rmem->size = size;
+	strlcpy(rmem->name, uname, sizeof(rmem->name));
+
+	if (is_cma) {
+		struct cma *cma;
+		if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
+			rmem->cma = cma;
+			reserved_mem_count++;
+			if (of_get_flat_dt_prop(node,
+						"linux,default-contiguous-region",
+						NULL))
+				dma_contiguous_set_default(cma);
+		}
+	} else if (is_reserved) {
+		if (memblock_remove(base, size) == 0)
+			reserved_mem_count++;
+		else
+			pr_err("Failed to reserve memory for %s\n", uname);
+	}
+
+	return 0;
+}
+
+static struct reserved_mem *get_dma_memory_region(struct device *dev)
+{
+	struct device_node *node;
+	const char *name;
+	int i;
+
+	node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (!node)
+		return NULL;
+
+	name = kbasename(node->full_name);
+	for (i = 0; i < reserved_mem_count; i++)
+		if (strcmp(name, reserved_mem[i].name) == 0)
+			return &reserved_mem[i];
+	return NULL;
+}
+
+/**
+ * of_reserved_mem_device_init() - assign reserved memory region to given device
+ *
+ * This function assigns the memory region pointed to by the "memory-region"
+ * device tree property to the given device.
+ */
+void of_reserved_mem_device_init(struct device *dev)
+{
+	struct reserved_mem *region = get_dma_memory_region(dev);
+	if (!region)
+		return;
+
+	if (region->cma) {
+		dev_set_cma_area(dev, region->cma);
+		pr_info("Assigned CMA %s to %s device\n", region->name,
+			dev_name(dev));
+	} else {
+		if (dma_declare_coherent_memory(dev, region->base, region->base,
+		    region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
+			pr_info("Declared reserved memory %s to %s device\n",
+				region->name, dev_name(dev));
+	}
+}
+
+/**
+ * of_reserved_mem_device_release() - release reserved memory device structures
+ *
+ * This function releases structures allocated for memory region handling for
+ * the given device.
+ */
+void of_reserved_mem_device_release(struct device *dev)
+{
+	struct reserved_mem *region = get_dma_memory_region(dev);
+	if (region && !region->cma)
+		dma_release_declared_memory(dev);
+}
+
+/**
+ * early_init_dt_scan_reserved_mem() - create reserved memory regions
+ *
+ * This function grabs memory from early allocator for device exclusive use
+ * defined in device tree structures. It should be called by arch specific code
+ * once the early allocator (memblock) has been activated and all other
+ * subsystems have already allocated/reserved memory.
+ */
+void __init early_init_dt_scan_reserved_mem(void)
+{
+	of_scan_flat_dt_by_path("/memory/reserved-memory",
+				fdt_scan_reserved_mem, NULL);
+}
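early_init_dt_scan_reserved_mem() is the entry point arch code is expected to
call once memblock is populated, before the kernel starts allocating from the
affected ranges. A minimal sketch of the expected call sequence; the
surrounding function is hypothetical, standing in for arch-specific setup
such as the ARM patch in this series:

	#include <linux/init.h>
	#include <linux/of_reserved_mem.h>
	#include <linux/dma-contiguous.h>

	/* hypothetical arch memory init, run after memblock is ready */
	static void __init example_arch_reserve(void)
	{
		/* claim the /memory/reserved-memory regions from the FDT */
		early_init_dt_scan_reserved_mem();

		/* then the usual command-line driven global CMA area */
		dma_contiguous_reserve(0);	/* limit 0 = no upper bound */
	}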
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index e0a6514ab46c..eeca8a596973 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,6 +21,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 
 const struct of_device_id of_default_bus_match_table[] = {
@@ -218,6 +219,8 @@ struct platform_device *of_platform_device_create_pdata(
 	dev->dev.bus = &platform_bus_type;
 	dev->dev.platform_data = platform_data;
 
+	of_reserved_mem_device_init(&dev->dev);
+
 	/* We do not fill the DMA ops for platform devices by default.
 	 * This is currently the responsibility of the platform code
 	 * to do such, possibly using a device notifier
@@ -225,6 +228,7 @@ struct platform_device *of_platform_device_create_pdata(
 
 	if (of_device_add(dev) != 0) {
 		platform_device_put(dev);
+		of_reserved_mem_device_release(&dev->dev);
 		return NULL;
 	}
 
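With the two platform.c hooks in place, a platform device picks up its
"memory-region" automatically at creation time, so a driver needs no special
handling: plain DMA API calls are served from the assigned CMA area or
declared coherent region. A sketch of such a driver's probe; the driver and
buffer size are invented for illustration:

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>

	static int example_probe(struct platform_device *pdev)
	{
		dma_addr_t dma_handle;
		void *buf;

		/* backed by the region named in the node's "memory-region" */
		buf = dma_alloc_coherent(&pdev->dev, SZ_1M,
					 &dma_handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* program the device with dma_handle here ... */

		dma_free_coherent(&pdev->dev, SZ_1M, buf, dma_handle);
		return 0;
	}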