Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	90
1 file changed, 69 insertions, 21 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b408..6e09741ddc62 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default
@@ -74,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
@@ -81,13 +80,15 @@
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
+	(void __percpu *)((unsigned long)(addr) -			\
+			  (unsigned long)pcpu_base_addr +		\
+			  (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
+	(void __force *)((unsigned long)(ptr) +				\
+			 (unsigned long)pcpu_base_addr -		\
+			 (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
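(Not part of the patch.) A minimal sketch of the property the default mapping above provides: the two macros are inverses, so translating a chunk address to a percpu pointer and back yields the original address. The helper name below is made up for illustration.

/* Illustration only: round-trip through the default macros above. */
static bool pcpu_ptr_roundtrip_ok(void *addr)
{
	void __percpu *ptr = __addr_to_pcpu_ptr(addr);

	return __pcpu_ptr_to_addr(ptr) == addr;
}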
@@ -914,11 +915,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int rs, re;
 
 	/* quick path, check whether it's empty already */
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-		if (rs == page_start && re == page_end)
-			return;
-		break;
-	}
+	rs = page_start;
+	pcpu_next_unpop(chunk, &rs, &re, page_end);
+	if (rs == page_start && re == page_end)
+		return;
 
 	/* immutable chunks can't be depopulated */
 	WARN_ON(chunk->immutable);
@@ -969,11 +969,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int rs, re, rc;
 
 	/* quick path, check whether all pages are already there */
-	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
-		if (rs == page_start && re == page_end)
-			goto clear;
-		break;
-	}
+	rs = page_start;
+	pcpu_next_pop(chunk, &rs, &re, page_end);
+	if (rs == page_start && re == page_end)
+		goto clear;
 
 	/* need to allocate and map pages, this chunk can't be immutable */
 	WARN_ON(chunk->immutable);
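(Not part of the patch.) The removed quick-path loops executed their body at most once because of the unconditional break, so each is equivalent to a single region lookup. A hedged sketch of that equivalence for the populate case; the helper name is made up:

/* Illustration only: true iff the first populated region starting at
 * page_start already covers the whole [page_start, page_end) range. */
static bool pcpu_range_fully_populated(struct pcpu_chunk *chunk,
				       int page_start, int page_end)
{
	int rs = page_start, re;

	pcpu_next_pop(chunk, &rs, &re, page_end);
	return rs == page_start && re == page_end;
}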
@@ -1068,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
@@ -1197,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, false);
 }
@@ -1218,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true);
 }
@@ -1270,9 +1269,9 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
-	void *addr = __pcpu_ptr_to_addr(ptr);
+	void *addr;
 	struct pcpu_chunk *chunk;
 	unsigned long flags;
 	int off;
@@ -1280,6 +1279,8 @@ void free_percpu(void *ptr)
 	if (!ptr)
 		return;
 
+	addr = __pcpu_ptr_to_addr(ptr);
+
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	chunk = pcpu_chunk_addr_search(addr);
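(Not part of the patch.) With the __percpu annotations added above, sparse can warn when a percpu pointer is dereferenced directly instead of being passed through the accessors. A hedged usage sketch of the annotated allocator API; struct foo and example_alloc() are made-up names:

struct foo { int val; };

static int example_alloc(void)
{
	/* the allocator now returns a sparse-annotated percpu pointer */
	struct foo __percpu *fp = __alloc_percpu(sizeof(struct foo),
						 __alignof__(struct foo));
	int cpu;

	if (!fp)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(fp, cpu)->val = 0;	/* accessor strips __percpu */

	free_percpu(fp);			/* takes void __percpu * */
	return 0;
}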
@@ -1302,6 +1303,53 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+/**
+ * is_kernel_percpu_address - test whether address is from static percpu area
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to in-kernel static percpu area.  Module
+ * static percpu areas are not considered.  For those, use
+ * is_module_percpu_address().
+ *
+ * RETURNS:
+ * %true if @addr is from in-kernel static percpu area, %false otherwise.
+ */
+bool is_kernel_percpu_address(unsigned long addr)
+{
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
+	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		void *start = per_cpu_ptr(base, cpu);
+
+		if ((void *)addr >= start && (void *)addr < start + static_size)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address.  The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	if ((unsigned long)addr < VMALLOC_START ||
+	    (unsigned long)addr >= VMALLOC_END)
+		return __pa(addr);
+	else
+		return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
 					size_t reserved_size,
 					ssize_t *dyn_sizep)
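(Not part of the patch.) A hedged sketch of how the two helpers added in the hunk above might be used together; my_counter and percpu_addr_example() are made-up names, and the physical address is only printed for illustration:

static DEFINE_PER_CPU(unsigned long, my_counter);

static void percpu_addr_example(int cpu)
{
	/* per_cpu_ptr() yields the dereferenceable address of @cpu's copy */
	unsigned long *p = per_cpu_ptr(&my_counter, cpu);
	phys_addr_t phys = per_cpu_ptr_to_phys(p);

	/* the copy lives inside the in-kernel static percpu area */
	WARN_ON(!is_kernel_percpu_address((unsigned long)p));

	pr_info("cpu%d my_counter at phys %llx\n",
		cpu, (unsigned long long)phys);
}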