diff options
author | Sasha Levin <sasha.levin@oracle.com> | 2015-04-14 18:47:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 19:49:04 -0400 |
commit | ac173824959adeb489f9fcf88858774c4535a241 (patch) | |
tree | 2c26317c0d3974bda7827118886cc08d71e8b356 | |
parent | 6e276d2a517fba71a2be2252fdffb237e746e784 (diff) |
mm: cma: constify and use correct signedness in mm/cma.c
Constify function parameters and use correct signedness where needed.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Acked-by: Gregory Fong <gregory.0xf0@gmail.com>
Cc: Pintu Kumar <pintu.k@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/cma.h | 12 | ||||
-rw-r--r-- | mm/cma.c | 24 |
2 files changed, 20 insertions, 16 deletions
diff --git a/include/linux/cma.h b/include/linux/cma.h index 9384ba66e975..f7ef093ec49a 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h | |||
@@ -16,16 +16,16 @@ | |||
16 | struct cma; | 16 | struct cma; |
17 | 17 | ||
18 | extern unsigned long totalcma_pages; | 18 | extern unsigned long totalcma_pages; |
19 | extern phys_addr_t cma_get_base(struct cma *cma); | 19 | extern phys_addr_t cma_get_base(const struct cma *cma); |
20 | extern unsigned long cma_get_size(struct cma *cma); | 20 | extern unsigned long cma_get_size(const struct cma *cma); |
21 | 21 | ||
22 | extern int __init cma_declare_contiguous(phys_addr_t base, | 22 | extern int __init cma_declare_contiguous(phys_addr_t base, |
23 | phys_addr_t size, phys_addr_t limit, | 23 | phys_addr_t size, phys_addr_t limit, |
24 | phys_addr_t alignment, unsigned int order_per_bit, | 24 | phys_addr_t alignment, unsigned int order_per_bit, |
25 | bool fixed, struct cma **res_cma); | 25 | bool fixed, struct cma **res_cma); |
26 | extern int cma_init_reserved_mem(phys_addr_t base, | 26 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, |
27 | phys_addr_t size, int order_per_bit, | 27 | unsigned int order_per_bit, |
28 | struct cma **res_cma); | 28 | struct cma **res_cma); |
29 | extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); | 29 | extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); |
30 | extern bool cma_release(struct cma *cma, struct page *pages, int count); | 30 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); |
31 | #endif | 31 | #endif |
@@ -41,17 +41,18 @@ struct cma cma_areas[MAX_CMA_AREAS]; | |||
41 | unsigned cma_area_count; | 41 | unsigned cma_area_count; |
42 | static DEFINE_MUTEX(cma_mutex); | 42 | static DEFINE_MUTEX(cma_mutex); |
43 | 43 | ||
44 | phys_addr_t cma_get_base(struct cma *cma) | 44 | phys_addr_t cma_get_base(const struct cma *cma) |
45 | { | 45 | { |
46 | return PFN_PHYS(cma->base_pfn); | 46 | return PFN_PHYS(cma->base_pfn); |
47 | } | 47 | } |
48 | 48 | ||
49 | unsigned long cma_get_size(struct cma *cma) | 49 | unsigned long cma_get_size(const struct cma *cma) |
50 | { | 50 | { |
51 | return cma->count << PAGE_SHIFT; | 51 | return cma->count << PAGE_SHIFT; |
52 | } | 52 | } |
53 | 53 | ||
54 | static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) | 54 | static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, |
55 | int align_order) | ||
55 | { | 56 | { |
56 | if (align_order <= cma->order_per_bit) | 57 | if (align_order <= cma->order_per_bit) |
57 | return 0; | 58 | return 0; |
@@ -62,7 +63,8 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) | |||
62 | * Find a PFN aligned to the specified order and return an offset represented in | 63 | * Find a PFN aligned to the specified order and return an offset represented in |
63 | * order_per_bits. | 64 | * order_per_bits. |
64 | */ | 65 | */ |
65 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | 66 | static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, |
67 | int align_order) | ||
66 | { | 68 | { |
67 | if (align_order <= cma->order_per_bit) | 69 | if (align_order <= cma->order_per_bit) |
68 | return 0; | 70 | return 0; |
@@ -71,13 +73,14 @@ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | |||
71 | - cma->base_pfn) >> cma->order_per_bit; | 73 | - cma->base_pfn) >> cma->order_per_bit; |
72 | } | 74 | } |
73 | 75 | ||
74 | static unsigned long cma_bitmap_pages_to_bits(struct cma *cma, | 76 | static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, |
75 | unsigned long pages) | 77 | unsigned long pages) |
76 | { | 78 | { |
77 | return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; | 79 | return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; |
78 | } | 80 | } |
79 | 81 | ||
80 | static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count) | 82 | static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, |
83 | unsigned int count) | ||
81 | { | 84 | { |
82 | unsigned long bitmap_no, bitmap_count; | 85 | unsigned long bitmap_no, bitmap_count; |
83 | 86 | ||
@@ -162,7 +165,8 @@ core_initcall(cma_init_reserved_areas); | |||
162 | * This function creates custom contiguous area from already reserved memory. | 165 | * This function creates custom contiguous area from already reserved memory. |
163 | */ | 166 | */ |
164 | int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, | 167 | int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, |
165 | int order_per_bit, struct cma **res_cma) | 168 | unsigned int order_per_bit, |
169 | struct cma **res_cma) | ||
166 | { | 170 | { |
167 | struct cma *cma; | 171 | struct cma *cma; |
168 | phys_addr_t alignment; | 172 | phys_addr_t alignment; |
@@ -353,7 +357,7 @@ err: | |||
353 | * This function allocates part of contiguous memory on specific | 357 | * This function allocates part of contiguous memory on specific |
354 | * contiguous memory area. | 358 | * contiguous memory area. |
355 | */ | 359 | */ |
356 | struct page *cma_alloc(struct cma *cma, int count, unsigned int align) | 360 | struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) |
357 | { | 361 | { |
358 | unsigned long mask, offset, pfn, start = 0; | 362 | unsigned long mask, offset, pfn, start = 0; |
359 | unsigned long bitmap_maxno, bitmap_no, bitmap_count; | 363 | unsigned long bitmap_maxno, bitmap_no, bitmap_count; |
@@ -424,7 +428,7 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align) | |||
424 | * It returns false when provided pages do not belong to contiguous area and | 428 | * It returns false when provided pages do not belong to contiguous area and |
425 | * true otherwise. | 429 | * true otherwise. |
426 | */ | 430 | */ |
427 | bool cma_release(struct cma *cma, struct page *pages, int count) | 431 | bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) |
428 | { | 432 | { |
429 | unsigned long pfn; | 433 | unsigned long pfn; |
430 | 434 | ||