Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 52 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 40 insertions(+), 12 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index c3e7010c6d71..e61dc2cc5873 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -282,6 +282,9 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
  */
 static void *pcpu_mem_alloc(size_t size)
 {
+	if (WARN_ON_ONCE(!slab_is_available()))
+		return NULL;
+
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
 	else {
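
Note (not part of the patch): WARN_ON_ONCE() evaluates its condition on every call but prints the backtrace only the first time it is true, so an allocation attempted before slab is up now yields one warning and a NULL return instead of an oops inside kzalloc(). Callers see it as an ordinary allocation failure; a minimal sketch, illustrative only since pcpu_mem_alloc() is static to this file:

	static int grow_area_map(struct pcpu_chunk *chunk, int new_alloc)
	{
		int *new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));

		if (!new)		/* OOM, or slab not online yet */
			return -ENOMEM;
		/* ... install the larger map under pcpu_lock ... */
		return 0;
	}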
@@ -392,13 +395,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 	memcpy(new, chunk->map, old_size);
 
-	/*
-	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
-	 * one of the first chunks and still using static map.
-	 */
-	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-		old = chunk->map;
-
 	chunk->map_alloc = new_alloc;
 	chunk->map = new;
 	new = NULL;
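
Note: the special case dropped here existed because the first chunks' area maps lived in static storage and must never be handed back to the allocator. With percpu_init_late() (added at the bottom of this patch) replacing those initdata maps with properly allocated ones once slab is up, the distinction is no longer needed here.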
@@ -604,7 +600,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 {
 	struct pcpu_chunk *chunk;
 
-	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
 	if (!chunk)
 		return NULL;
 
@@ -1109,7 +1105,9 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	memset(group_map, 0, sizeof(group_map));
 	memset(group_cnt, 0, sizeof(group_cnt));
 
-	size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
+	/* calculate size_sum and ensure dyn_size is enough for early alloc */
+	size_sum = PFN_ALIGN(static_size + reserved_size +
+			     max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
 	dyn_size = size_sum - static_size - reserved_size;
 
 	/*
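
Note (worked example, not part of the patch): take illustrative values of static_size = 128 << 10, reserved_size = 8 << 10, and a requested dyn_size of only 4 << 10, and assume PERCPU_DYNAMIC_EARLY_SIZE is 12 << 10 as in the companion include/linux/percpu.h change (not shown by this diffstat):

	size_sum = PFN_ALIGN(128K + 8K + max(4K, 12K)) = 148K;	/* already page aligned */
	dyn_size = 148K - 128K - 8K = 12K;	/* raised from the requested 4K */

So the first chunk always carries at least PERCPU_DYNAMIC_EARLY_SIZE of dynamic space for allocations made before slab comes online.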
@@ -1338,7 +1336,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 				  void *base_addr)
 {
 	static char cpus_buf[4096] __initdata;
-	static int smap[2], dmap[2];
+	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
+	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
 	struct pcpu_chunk *schunk, *dchunk = NULL;
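
Note: PERCPU_DYNAMIC_EARLY_SLOTS and PERCPU_DYNAMIC_EARLY_SIZE are defined outside this diffstat, in include/linux/percpu.h. As best I recall the companion patch in this series, the definitions are (verify against the actual tree):

	#define PERCPU_DYNAMIC_EARLY_SLOTS	128
	#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

The arrays can also become __initdata now because percpu_init_late() (added at the end of this patch) copies them into slab-allocated maps before init memory is discarded.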
@@ -1361,14 +1360,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 } while (0)
 
 	/* sanity checks */
-	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
-		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
 	PCPU_SETUP_BUG_ON(!ai->static_size);
 	PCPU_SETUP_BUG_ON(!base_addr);
 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
+	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
 	/* process group information and build config tables accordingly */
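
Note: the "} while (0)" context line above is the tail of the PCPU_SETUP_BUG_ON() helper defined just before this hunk. Reconstructed from memory (verify against the full file), it is roughly:

	#define PCPU_SETUP_BUG_ON(cond)	do {				\
		if (unlikely(cond)) {					\
			pr_emerg("PERCPU: failed to initialize, %s", #cond); \
			pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
			pcpu_dump_alloc_info(KERN_EMERG, ai);		\
			BUG();						\
		}							\
	} while (0)

The new check pairs with the max_t() clamp in pcpu_build_alloc_info(): any alloc_info handed in must leave at least PERCPU_DYNAMIC_EARLY_SIZE of dynamic space, since smap[] and dmap[] are sized for exactly that much early usage.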
@@ -1806,3 +1804,33 @@ void __init setup_per_cpu_areas(void)
 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
 #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
+
+/*
+ * First and reserved chunks are initialized with temporary allocation
+ * map in initdata so that they can be used before slab is online.
+ * This function is called after slab is brought up and replaces those
+ * with properly allocated maps.
+ */
+void __init percpu_init_late(void)
+{
+	struct pcpu_chunk *target_chunks[] =
+		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
+	struct pcpu_chunk *chunk;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; (chunk = target_chunks[i]); i++) {
+		int *map;
+		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
+
+		BUILD_BUG_ON(size > PAGE_SIZE);
+
+		map = pcpu_mem_alloc(size);
+		BUG_ON(!map);
+
+		spin_lock_irqsave(&pcpu_lock, flags);
+		memcpy(map, chunk->map, size);
+		chunk->map = map;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}
+}
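
Note: the call site is outside this diffstat; as far as I recall, the same series hooks percpu_init_late() into mm_init() in init/main.c, right after slab comes up. A sketch under that assumption:

	/* init/main.c (assumed hookup, not shown by this diff) */
	static void __init mm_init(void)
	{
		mem_init();
		kmem_cache_init();
		percpu_init_late();	/* swap initdata maps for slab-backed ones */
		pgtable_cache_init();
		vmalloc_init();
	}

Running it before free_initmem() matters: once init memory is reclaimed, the initdata smap[]/dmap[] still backing the first and reserved chunks would be gone.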