author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 00:14:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 00:14:42 -0400
commit    33caee39925b887a99a2400dc5c980097c3573f9
tree      8e68ad97e1fee88c4a3f31453041f8d139f2027e /drivers/base
parent    6456a0438b984186a0c9c8ecc9fe3d97b7ac3613
parent    f84223087402c45179be5e7060c5736c17a7b271
Merge branch 'akpm' (patchbomb from Andrew Morton)
Merge incoming from Andrew Morton:

 - Various misc things.
 - arch/sh updates.
 - Part of ocfs2. Review is slow.
 - Slab updates.
 - Most of -mm.
 - printk updates.
 - lib/ updates.
 - checkpatch updates.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (226 commits)
  checkpatch: update $declaration_macros, add uninitialized_var
  checkpatch: warn on missing spaces in broken up quoted
  checkpatch: fix false positives for --strict "space after cast" test
  checkpatch: fix false positive MISSING_BREAK warnings with --file
  checkpatch: add test for native c90 types in unusual order
  checkpatch: add signed generic types
  checkpatch: add short int to c variable types
  checkpatch: add for_each tests to indentation and brace tests
  checkpatch: fix brace style misuses of else and while
  checkpatch: add --fix option for a couple OPEN_BRACE misuses
  checkpatch: use the correct indentation for which()
  checkpatch: add fix_insert_line and fix_delete_line helpers
  checkpatch: add ability to insert and delete lines to patch/file
  checkpatch: add an index variable for fixed lines
  checkpatch: warn on break after goto or return with same tab indentation
  checkpatch: emit a warning on file add/move/delete
  checkpatch: add test for commit id formatting style in commit log
  checkpatch: emit fewer kmalloc_array/kcalloc conversion warnings
  checkpatch: improve "no space after cast" test
  checkpatch: allow multiple const * types
  ...
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig            10
-rw-r--r--  drivers/base/dma-contiguous.c  220
-rw-r--r--  drivers/base/memory.c           30
-rw-r--r--  drivers/base/node.c              2
4 files changed, 29 insertions(+), 233 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 88500fed3c7a..4e7f0ff83ae7 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -289,16 +289,6 @@ config CMA_ALIGNMENT
 
           If unsure, leave the default value "8".
 
-config CMA_AREAS
-        int "Maximum count of the CMA device-private areas"
-        default 7
-        help
-          CMA allows to create CMA areas for particular devices. This parameter
-          sets the maximum number of such device private CMA areas in the
-          system.
-
-          If unsure, leave the default value "7".
-
 endif
 
 endmenu
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6467c919c509..6606abdf880c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,23 +24,9 @@
 
 #include <linux/memblock.h>
 #include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
 #include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
-
-struct cma {
-        unsigned long base_pfn;
-        unsigned long count;
-        unsigned long *bitmap;
-        struct mutex lock;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area;
 #define CMA_SIZE_MBYTES 0
 #endif
 
+struct cma *dma_contiguous_default_area;
+
 /*
  * Default global CMA area size can be defined in kernel's .config.
  * This is useful mainly for distro maintainers to create a kernel
@@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
         }
 }
 
-static DEFINE_MUTEX(cma_mutex);
-
-static int __init cma_activate_area(struct cma *cma)
-{
-        int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
-        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
-        unsigned i = cma->count >> pageblock_order;
-        struct zone *zone;
-
-        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
-        if (!cma->bitmap)
-                return -ENOMEM;
-
-        WARN_ON_ONCE(!pfn_valid(pfn));
-        zone = page_zone(pfn_to_page(pfn));
-
-        do {
-                unsigned j;
-                base_pfn = pfn;
-                for (j = pageblock_nr_pages; j; --j, pfn++) {
-                        WARN_ON_ONCE(!pfn_valid(pfn));
-                        /*
-                         * alloc_contig_range requires the pfn range
-                         * specified to be in the same zone. Make this
-                         * simple by forcing the entire CMA resv range
-                         * to be in the same zone.
-                         */
-                        if (page_zone(pfn_to_page(pfn)) != zone)
-                                goto err;
-                }
-                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-        } while (--i);
-
-        mutex_init(&cma->lock);
-        return 0;
-
-err:
-        kfree(cma->bitmap);
-        return -EINVAL;
-}
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
-
-static int __init cma_init_reserved_areas(void)
-{
-        int i;
-
-        for (i = 0; i < cma_area_count; i++) {
-                int ret = cma_activate_area(&cma_areas[i]);
-                if (ret)
-                        return ret;
-        }
-
-        return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
 /**
  * dma_contiguous_reserve_area() - reserve custom contiguous area
  * @size: Size of the reserved area (in bytes),
@@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
                                phys_addr_t limit, struct cma **res_cma,
                                bool fixed)
 {
-        struct cma *cma = &cma_areas[cma_area_count];
-        phys_addr_t alignment;
-        int ret = 0;
-
-        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-                 (unsigned long)size, (unsigned long)base,
-                 (unsigned long)limit);
-
-        /* Sanity checks */
-        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-                pr_err("Not enough slots for CMA reserved regions!\n");
-                return -ENOSPC;
-        }
-
-        if (!size)
-                return -EINVAL;
-
-        /* Sanitise input arguments */
-        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-        base = ALIGN(base, alignment);
-        size = ALIGN(size, alignment);
-        limit &= ~(alignment - 1);
-
-        /* Reserve memory */
-        if (base && fixed) {
-                if (memblock_is_region_reserved(base, size) ||
-                    memblock_reserve(base, size) < 0) {
-                        ret = -EBUSY;
-                        goto err;
-                }
-        } else {
-                phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-                                                        limit);
-                if (!addr) {
-                        ret = -ENOMEM;
-                        goto err;
-                } else {
-                        base = addr;
-                }
-        }
-
-        /*
-         * Each reserved area must be initialised later, when more kernel
-         * subsystems (like slab allocator) are available.
-         */
-        cma->base_pfn = PFN_DOWN(base);
-        cma->count = size >> PAGE_SHIFT;
-        *res_cma = cma;
-        cma_area_count++;
+        int ret;
 
-        pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-                (unsigned long)base);
+        ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+        if (ret)
+                return ret;
 
         /* Architecture specific contiguous memory fixup. */
-        dma_contiguous_early_fixup(base, size);
-        return 0;
-err:
-        pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-        return ret;
-}
+        dma_contiguous_early_fixup(cma_get_base(*res_cma),
+                                   cma_get_size(*res_cma));
 
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-        mutex_lock(&cma->lock);
-        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-        mutex_unlock(&cma->lock);
-}
+        return 0;
+}
 
 /**
@@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                        unsigned int align)
 {
-        unsigned long mask, pfn, pageno, start = 0;
-        struct cma *cma = dev_get_cma_area(dev);
-        struct page *page = NULL;
-        int ret;
-
-        if (!cma || !cma->count)
-                return NULL;
-
         if (align > CONFIG_CMA_ALIGNMENT)
                 align = CONFIG_CMA_ALIGNMENT;
 
-        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
-                 count, align);
-
-        if (!count)
-                return NULL;
-
-        mask = (1 << align) - 1;
-
-
-        for (;;) {
-                mutex_lock(&cma->lock);
-                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-                                                    start, count, mask);
-                if (pageno >= cma->count) {
-                        mutex_unlock(&cma->lock);
-                        break;
-                }
-                bitmap_set(cma->bitmap, pageno, count);
-                /*
-                 * It's safe to drop the lock here. We've marked this region for
-                 * our exclusive use. If the migration fails we will take the
-                 * lock again and unmark it.
-                 */
-                mutex_unlock(&cma->lock);
-
-                pfn = cma->base_pfn + pageno;
-                mutex_lock(&cma_mutex);
-                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
-                mutex_unlock(&cma_mutex);
-                if (ret == 0) {
-                        page = pfn_to_page(pfn);
-                        break;
-                } else if (ret != -EBUSY) {
-                        clear_cma_bitmap(cma, pfn, count);
-                        break;
-                }
-                clear_cma_bitmap(cma, pfn, count);
-                pr_debug("%s(): memory range at %p is busy, retrying\n",
-                         __func__, pfn_to_page(pfn));
-                /* try again with a bit different memory target */
-                start = pageno + mask + 1;
-        }
-
-        pr_debug("%s(): returned %p\n", __func__, page);
-        return page;
+        return cma_alloc(dev_get_cma_area(dev), count, align);
 }
 
 /**
@@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                  int count)
 {
-        struct cma *cma = dev_get_cma_area(dev);
-        unsigned long pfn;
-
-        if (!cma || !pages)
-                return false;
-
-        pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
-        pfn = page_to_pfn(pages);
-
-        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-                return false;
-
-        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-
-        free_contig_range(pfn, count);
-        clear_cma_bitmap(cma, pfn, count);
-
-        return true;
+        return cma_release(dev_get_cma_area(dev), pages, count);
 }
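
With the per-area bookkeeping gone, all three entry points left in this file are thin shims over the generic CMA allocator (cma_declare_contiguous(), cma_alloc(), cma_release()). A hypothetical caller-side sketch of how a driver would use the wrappers after this change — the example_* helper names and buffer sizing are ours, not from the tree; only the dma_* calls and their signatures come from the hunks above:

        /*
         * Hypothetical usage sketch, not from the tree. Only
         * dma_alloc_from_contiguous()/dma_release_from_contiguous()
         * and their signatures are taken from the diff above.
         */
        #include <linux/bug.h>
        #include <linux/device.h>
        #include <linux/dma-contiguous.h>
        #include <linux/mm.h>

        static struct page *example_grab(struct device *dev, size_t size)
        {
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                /* align is a page order; it is clamped to CONFIG_CMA_ALIGNMENT */
                return dma_alloc_from_contiguous(dev, count, 0);
        }

        static void example_drop(struct device *dev, struct page *pages,
                                 size_t size)
        {
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                /* returns false if the pages did not come from the device's area */
                WARN_ON(!dma_release_from_contiguous(dev, pages, count));
        }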
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 89f752dd8465..a2e13e250bba 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -284,7 +284,7 @@ static int memory_subsys_online(struct device *dev)
          * attribute and need to set the online_type.
          */
         if (mem->online_type < 0)
-                mem->online_type = ONLINE_KEEP;
+                mem->online_type = MMOP_ONLINE_KEEP;
 
         ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
 
@@ -315,23 +315,23 @@ store_mem_state(struct device *dev,
         if (ret)
                 return ret;
 
-        if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
-                online_type = ONLINE_KERNEL;
-        else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
-                online_type = ONLINE_MOVABLE;
-        else if (!strncmp(buf, "online", min_t(int, count, 6)))
-                online_type = ONLINE_KEEP;
-        else if (!strncmp(buf, "offline", min_t(int, count, 7)))
-                online_type = -1;
+        if (sysfs_streq(buf, "online_kernel"))
+                online_type = MMOP_ONLINE_KERNEL;
+        else if (sysfs_streq(buf, "online_movable"))
+                online_type = MMOP_ONLINE_MOVABLE;
+        else if (sysfs_streq(buf, "online"))
+                online_type = MMOP_ONLINE_KEEP;
+        else if (sysfs_streq(buf, "offline"))
+                online_type = MMOP_OFFLINE;
         else {
                 ret = -EINVAL;
                 goto err;
         }
 
         switch (online_type) {
-        case ONLINE_KERNEL:
-        case ONLINE_MOVABLE:
-        case ONLINE_KEEP:
+        case MMOP_ONLINE_KERNEL:
+        case MMOP_ONLINE_MOVABLE:
+        case MMOP_ONLINE_KEEP:
                 /*
                  * mem->online_type is not protected so there can be a
                  * race here. However, when racing online, the first
@@ -342,7 +342,7 @@ store_mem_state(struct device *dev,
                 mem->online_type = online_type;
                 ret = device_online(&mem->dev);
                 break;
-        case -1:
+        case MMOP_OFFLINE:
                 ret = device_offline(&mem->dev);
                 break;
         default:
@@ -406,7 +406,9 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
         int i, ret;
         unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
 
-        phys_addr = simple_strtoull(buf, NULL, 0);
+        ret = kstrtoull(buf, 0, &phys_addr);
+        if (ret)
+                return ret;
 
         if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
                 return -EINVAL;
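
The strncmp(buf, ..., min_t(int, count, N)) tests accepted any write that merely began with a keyword ("online_bogus" passed as "online"), and needed the length clamp to cope with the newline that echo appends; sysfs_streq() requires an exact match modulo a single trailing newline. A minimal userspace model of its matching rules — the kernel's real implementation lives in lib/string.c:

        /* Userspace model of the kernel's sysfs_streq() matching rules. */
        #include <stdbool.h>
        #include <stdio.h>

        static bool sysfs_streq_model(const char *s1, const char *s2)
        {
                while (*s1 && *s1 == *s2) {
                        s1++;
                        s2++;
                }
                if (*s1 == *s2)
                        return true;
                /* a single trailing newline on either side is ignored */
                if (!*s1 && *s2 == '\n' && !s2[1])
                        return true;
                if (*s1 == '\n' && !s1[1] && !*s2)
                        return true;
                return false;
        }

        int main(void)
        {
                /* "echo online > state" writes a trailing newline: still a match */
                printf("%d\n", sysfs_streq_model("online\n", "online"));     /* 1 */
                /* with strncmp(buf, "online", 6) this prefix wrongly matched */
                printf("%d\n", sysfs_streq_model("online_bogus", "online")); /* 0 */
                return 0;
        }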
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 8f7ed9933a7c..c6d3ae05f1ca 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -126,7 +126,7 @@ static ssize_t node_read_meminfo(struct device *dev,
                        nid, K(node_page_state(nid, NR_FILE_PAGES)),
                        nid, K(node_page_state(nid, NR_FILE_MAPPED)),
                        nid, K(node_page_state(nid, NR_ANON_PAGES)),
-                       nid, K(node_page_state(nid, NR_SHMEM)),
+                       nid, K(i.sharedram),
                        nid, node_page_state(nid, NR_KERNEL_STACK) *
                                THREAD_SIZE / 1024,
                        nid, K(node_page_state(nid, NR_PAGETABLE)),
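
Per-node Shmem is now taken from the struct sysinfo (i) that node_read_meminfo() fills via si_meminfo_node() earlier in the function, rather than from the NR_SHMEM counter; a companion -mm patch in this patchbomb makes si_meminfo()/si_meminfo_node() report shmem through the sharedram field. A small userspace sketch of where this value surfaces — standard sysfs node layout, assuming node0 exists:

        /* Print the Shmem line from a node's meminfo (userspace sketch). */
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char line[256];
                FILE *f = fopen("/sys/devices/system/node/node0/meminfo", "r");

                if (!f) {
                        perror("fopen");
                        return 1;
                }
                while (fgets(line, sizeof(line), f)) {
                        if (strstr(line, "Shmem:"))
                                fputs(line, stdout);
                }
                fclose(f);
                return 0;
        }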