 arch/arm/include/asm/dma-contiguous.h |   1 -
 arch/x86/include/asm/dma-contiguous.h |   1 -
 drivers/base/dma-contiguous.c         | 119 +++++++++++----------------
 include/asm-generic/dma-contiguous.h  |  28 -------
 include/linux/device.h                |   2 +-
 include/linux/dma-contiguous.h        |  62 ++++++++++++--
 6 files changed, 105 insertions(+), 108 deletions(-)
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index e072bb2ba1b1..4f8e9e5514b1 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -5,7 +5,6 @@
 #ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
 
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
index c09241659971..b4b38bacb404 100644
--- a/arch/x86/include/asm/dma-contiguous.h
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 static inline void
 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce97..99802d6f3c60 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 #endif
        }
 
-       if (selected_size) {
+       if (selected_size && !dma_contiguous_default_area) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
 
-               dma_declare_contiguous(NULL, selected_size, 0, limit);
+               dma_contiguous_reserve_area(selected_size, 0, limit,
+                                           &dma_contiguous_default_area);
        }
 };
 
 static DEFINE_MUTEX(cma_mutex);
 
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
 {
-       unsigned long pfn = base_pfn;
-       unsigned i = count >> pageblock_order;
+       int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+       unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+       unsigned i = cma->count >> pageblock_order;
        struct zone *zone;
 
+       cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+       if (!cma->bitmap)
+               return -ENOMEM;
+
        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));
 
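[Editor's note: the bitmap allocated in the hunk above tracks the area at single-page granularity, so its size follows directly from the page count. A worked sketch of that arithmetic, assuming 4 KiB pages; the example_* names are illustrative, not from the patch:]

#include <linux/bitops.h>      /* BITS_TO_LONGS() */
#include <linux/sizes.h>

/* A 16 MiB area covers SZ_16M / SZ_4K = 4096 pages, so the bitmap needs
 * BITS_TO_LONGS(4096) * sizeof(long) = 512 bytes (on 32- and 64-bit alike).
 */
static const unsigned long example_pages = SZ_16M / SZ_4K;
static const int example_bitmap_bytes =
        BITS_TO_LONGS(SZ_16M / SZ_4K) * sizeof(long);
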
@@ -153,92 +160,53 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
-       return 0;
-}
-
-static __init struct cma *cma_create_area(unsigned long base_pfn,
-                                    unsigned long count)
-{
-       int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-       struct cma *cma;
-       int ret = -ENOMEM;
-
-       pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-       cma = kmalloc(sizeof *cma, GFP_KERNEL);
-       if (!cma)
-               return ERR_PTR(-ENOMEM);
-
-       cma->base_pfn = base_pfn;
-       cma->count = count;
-       cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
-       if (!cma->bitmap)
-               goto no_mem;
-
-       ret = cma_activate_area(base_pfn, count);
-       if (ret)
-               goto error;
-
-       pr_debug("%s: returned %p\n", __func__, (void *)cma);
-       return cma;
-
-error:
-       kfree(cma->bitmap);
-no_mem:
-       kfree(cma);
-       return ERR_PTR(ret);
+       return 0;
 }
 
-static struct cma_reserved {
-       phys_addr_t     start;
-       unsigned long   size;
-       struct device   *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
 
 static int __init cma_init_reserved_areas(void)
 {
-       struct cma_reserved *r = cma_reserved;
-       unsigned i = cma_reserved_count;
-
-       pr_debug("%s()\n", __func__);
+       int i;
 
-       for (; i; --i, ++r) {
-               struct cma *cma;
-               cma = cma_create_area(PFN_DOWN(r->start),
-                                     r->size >> PAGE_SHIFT);
-               if (!IS_ERR(cma))
-                       dev_set_cma_area(r->dev, cma);
+       for (i = 0; i < cma_area_count; i++) {
+               int ret = cma_activate_area(&cma_areas[i]);
+               if (ret)
+                       return ret;
        }
+
        return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *                           for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It allows the creation of custom reserved areas for specific
+ * devices.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-                                 phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                      phys_addr_t limit, struct cma **res_cma)
 {
-       struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+       struct cma *cma = &cma_areas[cma_area_count];
        phys_addr_t alignment;
+       int ret = 0;
 
        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                 (unsigned long)size, (unsigned long)base,
                 (unsigned long)limit);
 
        /* Sanity checks */
-       if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+       if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
        if (base) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
-                       base = -EBUSY;
+                       ret = -EBUSY;
                        goto err;
                }
        } else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
                 */
                phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                if (!addr) {
-                       base = -ENOMEM;
+                       ret = -ENOMEM;
                        goto err;
                } else {
                        base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
-       r->start = base;
-       r->size = size;
-       r->dev = dev;
-       cma_reserved_count++;
+       cma->base_pfn = PFN_DOWN(base);
+       cma->count = size >> PAGE_SHIFT;
+       *res_cma = cma;
+       cma_area_count++;
+
        pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                (unsigned long)base);
 
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
        return 0;
 err:
        pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-       return base;
+       return ret;
 }
 
 /**
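[Editor's note: for orientation, a minimal caller-side sketch of the interface added above. dma_contiguous_reserve_area() and dma_contiguous_set_default() are the functions this patch introduces; the hook name, handle, and 16 MiB size are hypothetical:]

#include <linux/init.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

static struct cma *my_cma;     /* hypothetical handle for the new area */

/* Hypothetical arch reservation hook, run while memblock is still live. */
void __init my_arch_reserve_cma(void)
{
        /* base = 0 and limit = 0 let memblock place the area anywhere */
        if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &my_cma) == 0)
                dma_contiguous_set_default(my_cma);
}
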
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index 294b1e755ab2..000000000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef ASM_DMA_CONTIGUOUS_H
-#define ASM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_CMA
-
-#include <linux/device.h>
-#include <linux/dma-contiguous.h>
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
-       if (dev && dev->cma_area)
-               return dev->cma_area;
-       return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
-       if (dev)
-               dev->cma_area = cma;
-       if (!dev && !dma_contiguous_default_area)
-               dma_contiguous_default_area = cma;
-}
-
-#endif
-#endif
-
-#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index bcf8c0d4cd98..9200cfd75f15 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -711,7 +711,7 @@ struct device {
 
        struct dma_coherent_mem *dma_mem; /* internal for coherent mem
                                             override */
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
        struct cma *cma_area;           /* contiguous memory area for dma
                                           allocations */
 #endif
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 00141d3325fe..3b28f937d959 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -67,9 +67,53 @@ struct device;
 
 extern struct cma *dma_contiguous_default_area;
 
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+       if (dev && dev->cma_area)
+               return dev->cma_area;
+       return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+       if (dev)
+               dev->cma_area = cma;
+}
+
+static inline void dma_contiguous_set_default(struct cma *cma)
+{
+       dma_contiguous_default_area = cma;
+}
+
 void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
-                          phys_addr_t base, phys_addr_t limit);
+
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                      phys_addr_t limit, struct cma **res_cma);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *                           for particular device
+ * @dev:   Pointer to device structure.
+ * @size:  Size of the reserved memory.
+ * @base:  Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+                                        phys_addr_t base, phys_addr_t limit)
+{
+       struct cma *cma;
+       int ret;
+       ret = dma_contiguous_reserve_area(size, base, limit, &cma);
+       if (ret == 0)
+               dev_set_cma_area(dev, cma);
+
+       return ret;
+}
 
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int order);
@@ -80,8 +124,22 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 #define MAX_CMA_AREAS  (0)
 
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+       return NULL;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
+
+static inline void dma_contiguous_set_default(struct cma *cma) { }
+
 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
 
+static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                      phys_addr_t limit, struct cma **res_cma) {
+       return -ENOSYS;
+}
+
 static inline
 int dma_declare_contiguous(struct device *dev, phys_addr_t size,
                           phys_addr_t base, phys_addr_t limit)
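
[Editor's note: the static inline dma_declare_contiguous() above keeps the old per-device entry point source-compatible by forwarding to dma_contiguous_reserve_area() and binding the result with dev_set_cma_area(). A board-file sketch of that path; the device, hook name, and 8 MiB size are hypothetical:]

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

extern struct device my_camera_dev;    /* hypothetical platform device */

/* Hypothetical board hook: give one device a private 8 MiB CMA area. */
void __init my_board_reserve_camera(void)
{
        if (dma_declare_contiguous(&my_camera_dev, SZ_8M, 0, 0))
                pr_warn("CMA: camera area reservation failed\n");
}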