author     Linus Torvalds <torvalds@linux-foundation.org>   2014-12-13 16:00:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-13 16:00:36 -0500
commit     78a45c6f067824cf5d0a9fedea7339ac2e28603c (patch)
tree       b4f78c8b6b9059ddace0a18c11629b8d2045f793 /mm/cma.c
parent     f96fe225677b3efb74346ebd56fafe3997b02afa (diff)
parent     29d293b6007b91a4463f05bc8d0b26e0e65c5816 (diff)
Merge branch 'akpm' (second patch-bomb from Andrew)
Merge second patchbomb from Andrew Morton:
- the rest of MM
- misc fs fixes
- add execveat() syscall
- new ratelimit feature for fault-injection
- decompressor updates
- ipc/ updates
- fallocate feature creep
- fsnotify cleanups
- a few other misc things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (99 commits)
cgroups: Documentation: fix trivial typos and wrong paragraph numberings
parisc: percpu: update comments referring to __get_cpu_var
percpu: update local_ops.txt to reflect this_cpu operations
percpu: remove __get_cpu_var and __raw_get_cpu_var macros
fsnotify: remove destroy_list from fsnotify_mark
fsnotify: unify inode and mount marks handling
fallocate: create FAN_MODIFY and IN_MODIFY events
mm/cma: make kmemleak ignore CMA regions
slub: fix cpuset check in get_any_partial
slab: fix cpuset check in fallback_alloc
shmdt: use i_size_read() instead of ->i_size
ipc/shm.c: fix overly aggressive shmdt() when calls span multiple segments
ipc/msg: increase MSGMNI, remove scaling
ipc/sem.c: increase SEMMSL, SEMMNI, SEMOPM
ipc/sem.c: change memory barrier in sem_lock() to smp_rmb()
lib/decompress.c: consistency of compress formats for kernel image
decompress_bunzip2: off by one in get_next_block()
usr/Kconfig: make initrd compression algorithm selection not expert
fault-inject: add ratelimit option
ratelimit: add initialization macro
...
Diffstat (limited to 'mm/cma.c')
-rw-r--r--   mm/cma.c   25
1 file changed, 22 insertions(+), 3 deletions(-)
@@ -33,6 +33,7 @@
 #include <linux/log2.h>
 #include <linux/cma.h>
 #include <linux/highmem.h>
+#include <linux/io.h>
 
 struct cma {
         unsigned long base_pfn;
@@ -63,6 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
         return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
+static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+{
+        unsigned int alignment;
+
+        if (align_order <= cma->order_per_bit)
+                return 0;
+        alignment = 1UL << (align_order - cma->order_per_bit);
+        return ALIGN(cma->base_pfn, alignment) -
+                (cma->base_pfn >> cma->order_per_bit);
+}
+
 static unsigned long cma_bitmap_maxno(struct cma *cma)
 {
         return cma->count >> cma->order_per_bit;
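The bitmap helpers above all work in units of 2^order_per_bit pages per bit. Below is a minimal standalone sketch of the arithmetic in cma_bitmap_aligned_mask() and cma_bitmap_maxno(); the numbers are made-up illustrative values, not taken from any real CMA configuration.

#include <stdio.h>

int main(void)
{
        /* Illustrative values only; in the kernel these live in struct cma. */
        unsigned long count = 1024;   /* region size in pages */
        int order_per_bit = 2;        /* each bitmap bit covers 2^2 = 4 pages */
        int align_order = 5;          /* caller asks for 2^5-page alignment */

        /* Mirrors cma_bitmap_maxno(): number of bits in the allocation bitmap. */
        unsigned long bitmap_maxno = count >> order_per_bit;

        /* Mirrors cma_bitmap_aligned_mask(): the alignment expressed in bitmap bits. */
        unsigned long mask = (1UL << (align_order - order_per_bit)) - 1;

        printf("bitmap bits: %lu, mask: 0x%lx (aligned every %lu bits)\n",
               bitmap_maxno, mask, mask + 1);   /* 256 bits, mask 0x7 */
        return 0;
}

With these example values the 1024-page region is tracked by 256 bits, and a 32-page alignment request becomes an 8-bit alignment constraint on the bitmap search.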
@@ -313,6 +325,11 @@ int __init cma_declare_contiguous(phys_addr_t base,
                         }
                 }
 
+                /*
+                 * kmemleak scans/reads tracked objects for pointers to other
+                 * objects but this address isn't mapped and accessible
+                 */
+                kmemleak_ignore(phys_to_virt(addr));
                 base = addr;
         }
 
@@ -340,7 +357,7 @@ err:
  */
 struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 {
-        unsigned long mask, pfn, start = 0;
+        unsigned long mask, offset, pfn, start = 0;
         unsigned long bitmap_maxno, bitmap_no, bitmap_count;
         struct page *page = NULL;
         int ret;
@@ -355,13 +372,15 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
                 return NULL;
 
         mask = cma_bitmap_aligned_mask(cma, align);
+        offset = cma_bitmap_aligned_offset(cma, align);
         bitmap_maxno = cma_bitmap_maxno(cma);
         bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
         for (;;) {
                 mutex_lock(&cma->lock);
-                bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
-                                bitmap_maxno, start, bitmap_count, mask);
+                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
+                                bitmap_maxno, start, bitmap_count, mask,
+                                offset);
                 if (bitmap_no >= bitmap_maxno) {
                         mutex_unlock(&cma->lock);
                         break;
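cma_alloc() passes the new offset to bitmap_find_next_zero_area_off(), which (as implemented in lib/bitmap.c) applies the alignment mask to bit index + offset rather than to the bit index alone; later in cma_alloc(), outside this hunk, the chosen bit is converted back to a page frame relative to base_pfn. The net effect is that allocations are aligned to physical addresses rather than to the start of the CMA region. Below is a minimal userspace sketch of that alignment rule, assuming order_per_bit == 0 (one bit per page) and supplying base_pfn's misalignment directly as the offset; the ALIGN_MASK macro and all values are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Stand-in for the kernel's __ALIGN_MASK(); illustrative only. */
#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(unsigned long)(mask))

int main(void)
{
        /* Hypothetical region starting at a pfn that is not 1024-page aligned;
         * the caller asks for 2^10-page alignment (4 MiB with 4 KiB pages). */
        unsigned long base_pfn = 0x2f9c0;
        unsigned long mask = (1UL << 10) - 1;

        /* Offset: how far base_pfn is from the requested alignment, in bits
         * (here 1 bit == 1 page because order_per_bit is assumed to be 0). */
        unsigned long offset = base_pfn & mask;

        /* The _off search aligns (index + offset) instead of index itself.
         * Pretend the first free bit it found is 5. */
        unsigned long index = 5;
        index = ALIGN_MASK(index + offset, mask) - offset;

        /* Converting the bit back to a page frame yields an aligned pfn. */
        unsigned long pfn = base_pfn + index;
        printf("index = %lu, pfn = 0x%lx, misalignment = %lu\n",
               index, pfn, pfn & mask);   /* index = 576, pfn = 0x2fc00, 0 */
        return 0;
}

Without the offset, the old bitmap_find_next_zero_area() call would have aligned the bit index relative to the start of the region, so a region whose base_pfn is itself misaligned could hand out blocks that are not aligned in physical memory.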