Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 40 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index b70f2acd8853..b14984566f5a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -43,7 +43,7 @@
  *
  * To use this allocator, arch code should do the following.
  *
- * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
  *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
@@ -1275,3 +1275,41 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 					      reserved_size, dyn_size,
 					      pcpue_unit_size, pcpue_ptr, NULL);
 }
+
+/*
+ * Generic percpu area setup.
+ *
+ * The embedding helper is used because its behavior closely resembles
+ * the original non-dynamic generic percpu area setup.  This is
+ * important because many archs have addressing restrictions and might
+ * fail if the percpu area is located far away from the previous
+ * location.  As an added bonus, in non-NUMA cases, embedding is
+ * generally a good idea TLB-wise because percpu area can piggy back
+ * on the physical linear memory mapping which uses large page
+ * mappings on applicable archs.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+void __init setup_per_cpu_areas(void)
+{
+	size_t static_size = __per_cpu_end - __per_cpu_start;
+	ssize_t unit_size;
+	unsigned long delta;
+	unsigned int cpu;
+
+	/*
+	 * Always reserve area for module percpu variables.  That's
+	 * what the legacy allocator did.
+	 */
+	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
+					   PERCPU_DYNAMIC_RESERVE, -1);
+	if (unit_size < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + cpu * unit_size;
+}
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
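
For reference, the offset arithmetic in the new setup_per_cpu_areas() can be
modelled outside the kernel.  The sketch below is a minimal userspace model,
not kernel code: base, pcpu_start, unit, and var_link are made-up values
standing in for pcpu_base_addr, __per_cpu_start, the unit size returned by
pcpu_embed_first_chunk(), and the link address of some static percpu
variable.  It only illustrates how delta + cpu * unit_size turns a link-time
address into each CPU's private copy.

/*
 * Userspace model of the per-CPU offset arithmetic above.
 * All addresses and sizes are hypothetical stand-ins:
 *   base       ~ pcpu_base_addr (start of the embedded first chunk)
 *   pcpu_start ~ __per_cpu_start (link address of the percpu section)
 *   unit       ~ unit_size returned by pcpu_embed_first_chunk()
 */
#include <stdio.h>

int main(void)
{
	unsigned long base = 0xffff880001000000UL;	/* hypothetical */
	unsigned long pcpu_start = 0xffffffff80a00000UL;/* hypothetical */
	unsigned long unit = 0x8000;			/* hypothetical 32k */
	unsigned long var_link = pcpu_start + 0x40;	/* some percpu var */
	unsigned long offset[4];	/* models __per_cpu_offset[] */
	unsigned long delta = base - pcpu_start;
	unsigned int cpu;

	/* same computation as the loop in setup_per_cpu_areas() */
	for (cpu = 0; cpu < 4; cpu++)
		offset[cpu] = delta + cpu * unit;

	/* percpu accessors effectively add __per_cpu_offset[cpu] */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%u copy of var: %#lx\n", cpu, var_link + offset[cpu]);
	return 0;
}

Each CPU's copy lands unit_size bytes after the previous one inside the
embedded first chunk, which is why a single delta plus a per-CPU stride is
enough to relocate every static percpu variable.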