Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 083e7c91e5f6..768419d44ad7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -80,13 +80,15 @@
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
+	(void __percpu *)((unsigned long)(addr) -			\
+			  (unsigned long)pcpu_base_addr +		\
+			  (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)					\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
+	(void __force *)((unsigned long)(ptr) +			\
+			 (unsigned long)pcpu_base_addr -		\
+			 (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
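
This first hunk changes only sparse annotations; the pointer arithmetic is untouched. `__percpu` tags the result of `__addr_to_pcpu_ptr()` as living in a separate address space, so direct dereferences of a percpu pointer get flagged, while `__force` on `__pcpu_ptr_to_addr()` tells sparse that the conversion back to a plain pointer is deliberate. Both annotations compile to nothing in a normal build; a rough sketch of how include/linux/compiler.h of this vintage defines them (a sketch, not a verbatim quote of the header):

	#ifdef __CHECKER__	/* defined only while sparse is checking */
	# define __percpu	__attribute__((noderef, address_space(3)))
	# define __force	__attribute__((force))
	#else			/* real compile: the annotations vanish */
	# define __percpu
	# define __force
	#endif
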
@@ -913,11 +915,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int rs, re;
 
 	/* quick path, check whether it's empty already */
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-		if (rs == page_start && re == page_end)
-			return;
-		break;
-	}
+	rs = page_start;
+	pcpu_next_unpop(chunk, &rs, &re, page_end);
+	if (rs == page_start && re == page_end)
+		return;
 
 	/* immutable chunks can't be depopulated */
 	WARN_ON(chunk->immutable);
@@ -968,11 +969,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int rs, re, rc;
 
 	/* quick path, check whether all pages are already there */
-	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
-		if (rs == page_start && re == page_end)
-			goto clear;
-		break;
-	}
+	rs = page_start;
+	pcpu_next_pop(chunk, &rs, &re, page_end);
+	if (rs == page_start && re == page_end)
+		goto clear;
 
 	/* need to allocate and map pages, this chunk can't be immutable */
 	WARN_ON(chunk->immutable);
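
These two hunks apply the same transformation to the depopulate and populate quick paths. The old `pcpu_for_each_*_region()` loops broke out unconditionally after the first iteration, so only the first region was ever examined; calling the underlying `pcpu_next_unpop()`/`pcpu_next_pop()` helper once is equivalent and reads more honestly. The helpers scan the chunk's `populated` page bitmap; roughly what they look like earlier in mm/percpu.c of this era (a sketch, not quoted verbatim from the tree):

	static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
	{
		/* [*rs, *re) becomes the first unpopulated region at or after *rs */
		*rs = find_next_zero_bit(chunk->populated, end, *rs);
		*re = find_next_bit(chunk->populated, end, *rs + 1);
	}

	static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
	{
		/* [*rs, *re) becomes the first populated region at or after *rs */
		*rs = find_next_bit(chunk->populated, end, *rs);
		*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
	}

So `rs == page_start && re == page_end` after a single call means the whole requested range is already in the desired state, and the quick path can return immediately.
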
@@ -1067,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
@@ -1196,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, false);
 }
@@ -1217,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true);
 }
@@ -1269,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
 	void *addr;
 	struct pcpu_chunk *chunk;
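
The remaining hunks propagate `__percpu` through the allocator's signatures, so sparse can check that callers only touch a percpu pointer through the sanctioned accessors. A hypothetical usage sketch; the variable and function names here are made up for illustration, only `alloc_percpu()`, `per_cpu_ptr()`, `for_each_possible_cpu()` and `free_percpu()` are real kernel APIs:

	#include <linux/percpu.h>

	static int __percpu *hitcount;	/* hypothetical percpu counter */

	static int example_sum(void)
	{
		int cpu, total = 0;

		hitcount = alloc_percpu(int);	/* thin wrapper around __alloc_percpu() */
		if (!hitcount)
			return -ENOMEM;

		for_each_possible_cpu(cpu)
			total += *per_cpu_ptr(hitcount, cpu);	/* sanctioned access */

		/* a bare '*hitcount' would now draw a sparse warning:
		 * dereference of noderef expression (wrong address space) */

		free_percpu(hitcount);	/* now takes void __percpu * directly */
		return total;
	}
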