 include/linux/percpu.h | 13 +++++++++++++
 init/main.c            |  1 +
 mm/percpu.c            | 52 ++++++++++++++++++++++++++++++++++++++++------------
 3 files changed, 54 insertions(+), 12 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3ffd05e550de..b8b9084527b1 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -45,6 +45,16 @@
 #define PCPU_MIN_UNIT_SIZE	PFN_ALIGN(64 << 10)
 
 /*
+ * Percpu allocator can serve percpu allocations before slab is
+ * initialized which allows slab to depend on the percpu allocator.
+ * The following two parameters decide how much resource to
+ * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
+ * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ */
+#define PERCPU_DYNAMIC_EARLY_SLOTS	128
+#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
+
+/*
  * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
  * back on the first chunk for dynamic percpu allocation if arch is
  * manually allocating and mapping it for faster access (as a part of
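
The new comment pins an invariant (PERCPU_DYNAMIC_RESERVE must stay equal to or larger than PERCPU_DYNAMIC_EARLY_SIZE) that nothing in the patch enforces mechanically. A minimal sketch of a compile-time guard, hypothetical and not part of this patch:

	/*
	 * Hypothetical guard, not in this patch: breaks the build if the
	 * dynamic reserve ever shrinks below the early allocation area.
	 */
	static inline void pcpu_early_size_check(void)
	{
		BUILD_BUG_ON(PERCPU_DYNAMIC_RESERVE < PERCPU_DYNAMIC_EARLY_SIZE);
	}
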
@@ -135,6 +145,7 @@ extern bool is_kernel_percpu_address(unsigned long addr);
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
 #endif
+extern void __init percpu_init_late(void);
 
 #else /* CONFIG_SMP */
 
@@ -148,6 +159,8 @@ static inline bool is_kernel_percpu_address(unsigned long addr)
 
 static inline void __init setup_per_cpu_areas(void) { }
 
+static inline void __init percpu_init_late(void) { }
+
 static inline void *pcpu_lpage_remapped(void *kaddr)
 {
 	return NULL;
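
Pairing the SMP extern with an empty UP inline keeps callers #ifdef-free: generic init code can call percpu_init_late() unconditionally on both configurations, as mm_init() does below. A sketch of the caller-side effect, with a hypothetical function name:

	/* hypothetical caller: the call compiles away on !CONFIG_SMP */
	static void __init finish_allocator_bringup(void)
	{
		percpu_init_late();	/* no-op stub on UP, real work on SMP */
	}
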
diff --git a/init/main.c b/init/main.c
index 3bdb152f412f..3ff8dd0fb512 100644
--- a/init/main.c
+++ b/init/main.c
@@ -522,6 +522,7 @@ static void __init mm_init(void)
 	page_cgroup_init_flatmem();
 	mem_init();
 	kmem_cache_init();
+	percpu_init_late();
 	pgtable_cache_init();
 	vmalloc_init();
 }
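
percpu_init_late() slots in directly after kmem_cache_init() so the replacement maps can come from slab. The point of the whole patch, per the new header comment, is the reverse dependency: slab bring-up code may now itself use the percpu allocator, served from the first chunk's early area. A hypothetical illustration of such an early user:

	/*
	 * Hypothetical early user, assuming it runs after
	 * pcpu_setup_first_chunk() but before percpu_init_late(): the
	 * allocation is served from the first chunk's
	 * PERCPU_DYNAMIC_EARLY_SIZE area.
	 */
	static int __init early_counters_init(void)
	{
		int __percpu *counters = alloc_percpu(int);

		BUG_ON(!counters);
		return 0;
	}
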
diff --git a/mm/percpu.c b/mm/percpu.c
index c3e7010c6d71..e61dc2cc5873 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -282,6 +282,9 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
  */
 static void *pcpu_mem_alloc(size_t size)
 {
+	if (WARN_ON_ONCE(!slab_is_available()))
+		return NULL;
+
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
 	else {
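
Before slab is up, pcpu_mem_alloc() now warns once and returns NULL instead of crashing inside kzalloc()/vmalloc(), so allocation paths are expected to fail gracefully rather than oops. A sketch of the intended caller-side handling, with a hypothetical helper name:

	/* hypothetical caller showing the intended failure mode */
	static int try_grow_map(size_t size)
	{
		void *new = pcpu_mem_alloc(size);

		if (!new)
			return -ENOMEM;	/* too early, or genuinely out of memory */
		/* ... install the new buffer ... */
		return 0;
	}
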
@@ -392,13 +395,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 	memcpy(new, chunk->map, old_size);
 
-	/*
-	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
-	 * one of the first chunks and still using static map.
-	 */
-	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-		old = chunk->map;
-
 	chunk->map_alloc = new_alloc;
 	chunk->map = new;
 	new = NULL;
@@ -604,7 +600,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 {
 	struct pcpu_chunk *chunk;
 
-	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
 	if (!chunk)
 		return NULL;
 
@@ -1109,7 +1105,9 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	memset(group_map, 0, sizeof(group_map));
 	memset(group_cnt, 0, sizeof(group_cnt));
 
-	size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
+	/* calculate size_sum and ensure dyn_size is enough for early alloc */
+	size_sum = PFN_ALIGN(static_size + reserved_size +
+			     max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
 	dyn_size = size_sum - static_size - reserved_size;
 
 	/*
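
A worked example with hypothetical numbers on 4K pages: for static_size = 64K, reserved_size = 8K and a requested dyn_size of 4K, the max_t() clamp raises the dynamic part to PERCPU_DYNAMIC_EARLY_SIZE:

	size_sum = PFN_ALIGN((64 << 10) + (8 << 10) +
			     max_t(size_t, 4 << 10, 12 << 10));	/* = 84K */
	dyn_size = size_sum - (64 << 10) - (8 << 10);		/* = 12K, not 4K */

so every first chunk ends up with at least 12K of dynamic space available for pre-slab allocations.
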
@@ -1338,7 +1336,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 				  void *base_addr)
 {
 	static char cpus_buf[4096] __initdata;
-	static int smap[2], dmap[2];
+	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
+	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
 	struct pcpu_chunk *schunk, *dchunk = NULL;
@@ -1361,14 +1360,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	} while (0)
 
 	/* sanity checks */
-	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
-		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
 	PCPU_SETUP_BUG_ON(!ai->static_size);
 	PCPU_SETUP_BUG_ON(!base_addr);
 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
+	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
 	/* process group information and build config tables accordingly */
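
Each bootstrap map grows from two slots to PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(int) = 128 * 4 = 512 bytes, and marking them __initdata is safe because percpu_init_late() (added below) copies them into properly allocated maps long before initmem is freed. The dropped BUILD_BUG_ON enforced the old assumption that these arrays stayed smaller than PCPU_DFL_MAP_ALLOC; the new PCPU_SETUP_BUG_ON instead checks that the first chunk carries at least PERCPU_DYNAMIC_EARLY_SIZE of dynamic space for early allocations.
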
@@ -1806,3 +1804,33 @@ void __init setup_per_cpu_areas(void)
 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
+
+/*
+ * First and reserved chunks are initialized with temporary allocation
+ * map in initdata so that they can be used before slab is online.
+ * This function is called after slab is brought up and replaces those
+ * with properly allocated maps.
+ */
+void __init percpu_init_late(void)
+{
+	struct pcpu_chunk *target_chunks[] =
+		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
+	struct pcpu_chunk *chunk;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; (chunk = target_chunks[i]); i++) {
+		int *map;
+		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
+
+		BUILD_BUG_ON(size > PAGE_SIZE);
+
+		map = pcpu_mem_alloc(size);
+		BUG_ON(!map);
+
+		spin_lock_irqsave(&pcpu_lock, flags);
+		memcpy(map, chunk->map, size);
+		chunk->map = map;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}
+}
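
percpu_init_late() is the classic bootstrap-then-replace idiom: early users run against static storage, and once the real allocator is up the storage is swapped under the same lock that readers take. The same pattern in miniature, standalone and with hypothetical names:

	static int bootstrap_buf[128] __initdata;
	static int *cur_buf = bootstrap_buf;
	static DEFINE_SPINLOCK(buf_lock);

	/* hypothetical: called once slab is available, cf. percpu_init_late() */
	static void __init late_swap(void)
	{
		int *heap = kmalloc(sizeof(bootstrap_buf), GFP_KERNEL);
		unsigned long flags;

		BUG_ON(!heap);
		spin_lock_irqsave(&buf_lock, flags);
		memcpy(heap, cur_buf, sizeof(bootstrap_buf));
		cur_buf = heap;
		spin_unlock_irqrestore(&buf_lock, flags);
	}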
