Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	18
1 file changed, 10 insertions, 8 deletions
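The patch teaches sparse about the percpu address space at the allocator boundary: the public entry points now return and accept void __percpu * instead of plain void *. For reference, this is roughly how the annotations are defined in include/linux/compiler.h of this era when building with sparse (__CHECKER__); the exact address-space number is an assumption here, not something this diff shows:

	#ifdef __CHECKER__
	# define __percpu	__attribute__((noderef, address_space(3)))
	# define __force	__attribute__((force))
	#else
	# define __percpu
	# define __force
	#endif

That split explains the asymmetry in the first hunk: __addr_to_pcpu_ptr() casts into the percpu address space, so its result is annotated __percpu, while __pcpu_ptr_to_addr() casts back out of it and needs __force so sparse does not warn about the discarded address space.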
diff --git a/mm/percpu.c b/mm/percpu.c
index b336638d20e7..768419d44ad7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -80,13 +80,15 @@
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
+	(void __percpu *)((unsigned long)(addr) -			\
+			  (unsigned long)pcpu_base_addr +		\
+			  (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
+	(void __force *)((unsigned long)(ptr) +				\
+			 (unsigned long)pcpu_base_addr -		\
+			 (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
@@ -1065,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
@@ -1194,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, false);
 }
@@ -1215,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true);
 }
@@ -1267,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
 	void *addr;
 	struct pcpu_chunk *chunk;
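To show what the new prototypes buy at a call site, here is a hedged usage sketch (not part of this patch; the pkt_counter name and its helpers are invented for illustration). With __alloc_percpu() and free_percpu() annotated, a caller keeps the __percpu qualifier on the stored pointer and only converts it through the per_cpu accessors; dereferencing it directly or mixing it with plain pointers is what sparse can now flag.

	#include <linux/percpu.h>
	#include <linux/errno.h>

	/* Percpu pointer: not directly dereferenceable, per sparse's noderef. */
	static unsigned long __percpu *pkt_counter;

	static int pkt_counter_init(void)
	{
		/* alloc_percpu() wraps __alloc_percpu() and returns a __percpu pointer. */
		pkt_counter = alloc_percpu(unsigned long);
		if (!pkt_counter)
			return -ENOMEM;
		return 0;
	}

	static void pkt_counter_hit(void)
	{
		/* this_cpu_inc() handles the address-space conversion internally. */
		this_cpu_inc(*pkt_counter);
	}

	static unsigned long pkt_counter_total(void)
	{
		unsigned long sum = 0;
		int cpu;

		/* per_cpu_ptr() yields an ordinary pointer to one CPU's copy. */
		for_each_possible_cpu(cpu)
			sum += *per_cpu_ptr(pkt_counter, cpu);
		return sum;
	}

	static void pkt_counter_exit(void)
	{
		/* free_percpu() now takes void __percpu *, so no __force cast is needed. */
		free_percpu(pkt_counter);
	}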