Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  217
1 file changed, 157 insertions(+), 60 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 43d8cacfdaa5..5adfc268b408 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -153,7 +153,10 @@ static int pcpu_reserved_chunk_limit;
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary. All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released. In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
  *
  * Free path accesses and alters only the index data structures, so it
  * can be safely called from atomic context. When memory needs to be
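
The comment change above tracks the locking switch made throughout this patch: spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on unlock, which is wrong if the caller was already running with IRQs disabled, as sched_init() is during early boot. The irqsave variants save and restore the caller's IRQ state instead. A minimal sketch of the difference (illustrative only; the critical sections are placeholders):

	unsigned long flags;

	spin_lock_irq(&pcpu_lock);		/* assumes IRQs were enabled */
	/* ... critical section ... */
	spin_unlock_irq(&pcpu_lock);		/* unconditionally re-enables IRQs */

	spin_lock_irqsave(&pcpu_lock, flags);	/* records current IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&pcpu_lock, flags); /* restores exactly that state */
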
@@ -352,62 +355,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 }
 
 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accommodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accommodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
 	int new_alloc;
-	int *new;
-	size_t size;
 
-	/* has enough? */
 	if (chunk->map_alloc >= chunk->map_used + 2)
 		return 0;
 
-	spin_unlock_irq(&pcpu_lock);
-
 	new_alloc = PCPU_DFL_MAP_ALLOC;
 	while (new_alloc < chunk->map_used + 2)
 		new_alloc *= 2;
 
-	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-	if (!new) {
-		spin_lock_irq(&pcpu_lock);
+	return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+	int *old = NULL, *new = NULL;
+	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+	unsigned long flags;
+
+	new = pcpu_mem_alloc(new_size);
+	if (!new)
 		return -ENOMEM;
-	}
 
-	/*
-	 * Acquire pcpu_lock and switch to new area map. Only free
-	 * could have happened in between, so map_used couldn't have
-	 * grown.
-	 */
-	spin_lock_irq(&pcpu_lock);
-	BUG_ON(new_alloc < chunk->map_used + 2);
+	/* acquire pcpu_lock and switch to new area map */
+	spin_lock_irqsave(&pcpu_lock, flags);
+
+	if (new_alloc <= chunk->map_alloc)
+		goto out_unlock;
 
-	size = chunk->map_alloc * sizeof(chunk->map[0]);
-	memcpy(new, chunk->map, size);
+	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+	memcpy(new, chunk->map, old_size);
 
 	/*
 	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
 	 * one of the first chunks and still using static map.
 	 */
 	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-		pcpu_mem_free(chunk->map, size);
+		old = chunk->map;
 
 	chunk->map_alloc = new_alloc;
 	chunk->map = new;
+	new = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&pcpu_lock, flags);
+
+	/*
+	 * pcpu_mem_free() might end up calling vfree() which uses
+	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+	 */
+	pcpu_mem_free(old, old_size);
+	pcpu_mem_free(new, new_size);
+
 	return 0;
 }
 
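
The out_unlock tail above is the defer-free idiom: pcpu_mem_free() may end up in vfree(), which takes an IRQ-unsafe lock, so nothing can be freed while pcpu_lock is held. The losing pointer (the old map, or the unused new buffer if the map was already large enough) is only unhooked under the lock and freed afterwards. A stripped-down sketch of the idiom, with some_lock, shared_buf and replacement as stand-in names:

	int *victim;
	unsigned long flags;

	spin_lock_irqsave(&some_lock, flags);
	victim = shared_buf;			/* unhook under the lock */
	shared_buf = replacement;
	spin_unlock_irqrestore(&some_lock, flags);

	vfree(victim);				/* safe: no spinlock held */
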
@@ -1043,8 +1070,11 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  */
 static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
+	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
-	int slot, off;
+	const char *err;
+	int slot, off, new_alloc;
+	unsigned long flags;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1053,17 +1083,31 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	}
 
 	mutex_lock(&pcpu_alloc_mutex);
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 
 	/* serve reserved allocations from the reserved chunk if available */
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
-		if (size > chunk->contig_hint ||
-		    pcpu_extend_area_map(chunk) < 0)
+
+		if (size > chunk->contig_hint) {
+			err = "alloc from reserved chunk failed";
 			goto fail_unlock;
+		}
+
+		while ((new_alloc = pcpu_need_to_extend(chunk))) {
+			spin_unlock_irqrestore(&pcpu_lock, flags);
+			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+				err = "failed to extend area map of reserved chunk";
+				goto fail_unlock_mutex;
+			}
+			spin_lock_irqsave(&pcpu_lock, flags);
+		}
+
 		off = pcpu_alloc_area(chunk, size, align);
 		if (off >= 0)
 			goto area_found;
+
+		err = "alloc from reserved chunk failed";
 		goto fail_unlock;
 	}
 
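
The while loop re-checks pcpu_need_to_extend() every time pcpu_lock is reacquired, since the map may have changed while the lock was dropped. The target length it returns doubles from PCPU_DFL_MAP_ALLOC until it covers map_used plus the two spare slots a worst-case split needs (a single allocation can split one free area into three). A worked example, assuming PCPU_DFL_MAP_ALLOC is 16, its value in this file:

	/* chunk->map_used == 61, so at least 63 entries are needed */
	int new_alloc = 16;		/* PCPU_DFL_MAP_ALLOC */

	while (new_alloc < 61 + 2)
		new_alloc *= 2;		/* 16 -> 32 -> 64 */
	/* pcpu_need_to_extend() returns 64 */
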
@@ -1074,13 +1118,20 @@ restart:
 		if (size > chunk->contig_hint)
 			continue;
 
-		switch (pcpu_extend_area_map(chunk)) {
-		case 0:
-			break;
-		case 1:
-			goto restart;	/* pcpu_lock dropped, restart */
-		default:
-			goto fail_unlock;
+		new_alloc = pcpu_need_to_extend(chunk);
+		if (new_alloc) {
+			spin_unlock_irqrestore(&pcpu_lock, flags);
+			if (pcpu_extend_area_map(chunk,
+						 new_alloc) < 0) {
+				err = "failed to extend area map";
+				goto fail_unlock_mutex;
+			}
+			spin_lock_irqsave(&pcpu_lock, flags);
+			/*
+			 * pcpu_lock has been dropped, need to
+			 * restart cpu_slot list walking.
+			 */
+			goto restart;
 		}
 
 		off = pcpu_alloc_area(chunk, size, align);
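
The goto restart replaces the old switch on a tri-state return code (0 = no-op, 1 = extended with the lock dropped, negative = error). Because pcpu_extend_area_map() now always drops pcpu_lock, any position in the pcpu_slot list walk is stale afterwards and the walk must begin again. The general shape of the idiom, sketched with placeholder names (needs_slow_work()/do_slow_work() are hypothetical):

restart:
	spin_lock_irqsave(&lock, flags);
	list_for_each_entry(pos, &head, node) {
		if (!needs_slow_work(pos))
			continue;
		spin_unlock_irqrestore(&lock, flags);
		do_slow_work(pos);	/* may sleep, list can change */
		goto restart;		/* iterator is stale, re-walk */
	}
	spin_unlock_irqrestore(&lock, flags);
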
@@ -1090,23 +1141,26 @@ restart:
 	}
 
 	/* hmmm... no space left, create a new chunk */
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	chunk = alloc_pcpu_chunk();
-	if (!chunk)
+	if (!chunk) {
+		err = "failed to allocate new chunk";
 		goto fail_unlock_mutex;
+	}
 
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 	pcpu_chunk_relocate(chunk, -1);
 	goto restart;
 
 area_found:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	/* populate, map and clear the area */
 	if (pcpu_populate_chunk(chunk, off, size)) {
-		spin_lock_irq(&pcpu_lock);
+		spin_lock_irqsave(&pcpu_lock, flags);
 		pcpu_free_area(chunk, off);
+		err = "failed to populate";
 		goto fail_unlock;
 	}
 
@@ -1116,9 +1170,16 @@ area_found:
 	return __addr_to_pcpu_ptr(chunk->base_addr + off);
 
 fail_unlock:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 fail_unlock_mutex:
 	mutex_unlock(&pcpu_alloc_mutex);
+	if (warn_limit) {
+		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
+			   "%s\n", size, align, err);
+		dump_stack();
+		if (!--warn_limit)
+			pr_info("PERCPU: limit reached, disable warning\n");
+	}
 	return NULL;
 }
 
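
The failure path now reports what failed: err carries a string set at each goto site, and warn_limit caps output at ten stack-dumping warnings for the lifetime of the system, since it is function-static. The decrement runs after pcpu_alloc_mutex is released and is unlocked; a race can at worst print a few warnings more or fewer, which seems acceptable for best-effort diagnostics. A sketch of the counter semantics, with report_failure() as a hypothetical stand-in for the pr_warning()/dump_stack() pair:

	static int warn_limit = 10;

	if (warn_limit) {			/* calls 1..10 */
		report_failure();		/* hypothetical stand-in */
		if (!--warn_limit)		/* the 10th call hits zero */
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	/* calls 11 and later skip the block entirely */
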
@@ -1347,6 +1408,10 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	struct pcpu_alloc_info *ai;
 	unsigned int *cpu_map;
 
+	/* this function may be called multiple times */
+	memset(group_map, 0, sizeof(group_map));
+	memset(group_cnt, 0, sizeof(group_cnt));
+
 	/*
 	 * Determine min_unit_size, alloc_size and max_upa such that
 	 * alloc_size is multiple of atom_size and is the smallest
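
These memsets matter because pcpu_build_alloc_info() keeps group_map and group_cnt in static storage and can run more than once, for example when a first-chunk strategy fails and a fallback retries; stale counts from a previous run would corrupt the grouping. A note on the sizeof idiom: sizeof applied to a true array yields the whole array's byte size, so each memset should name the array it actually clears. Sketch under the declarations this version uses near the top of the function (assumed NR_CPUS-sized):

	static int group_map[NR_CPUS] __initdata;	/* cpu -> group */
	static int group_cnt[NR_CPUS] __initdata;	/* units per group */

	/* sizeof(group_map) == NR_CPUS * sizeof(int): clears the whole array */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));
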
@@ -1574,6 +1639,7 @@ static void pcpu_dump_alloc_info(const char *lvl,
 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 				  void *base_addr)
 {
+	static char cpus_buf[4096] __initdata;
 	static int smap[2], dmap[2];
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
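
cpus_buf is __initdata, so the 4 KB buffer is reclaimed with the rest of init memory once boot finishes, which is fine because pcpu_setup_first_chunk() is itself __init. It is rendered once up front (next hunk) so the mask is already printable if any later sanity check fires. A sketch of the rendering; the printed form is a hex bitmap, e.g. something like "f" when CPUs 0-3 are possible:

	static char cpus_buf[4096] __initdata;

	/* fill cpus_buf with a hex rendering of cpu_possible_mask */
	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
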
@@ -1585,17 +1651,26 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	int *unit_map;
 	int group, unit, i;
 
+	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
+
+#define PCPU_SETUP_BUG_ON(cond)	do {					\
+	if (unlikely(cond)) {						\
+		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
+		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
+		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
+		BUG();							\
+	}								\
+} while (0)
+
 	/* sanity checks */
 	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
 		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
-	BUG_ON(ai->nr_groups <= 0);
-	BUG_ON(!ai->static_size);
-	BUG_ON(!base_addr);
-	BUG_ON(ai->unit_size < size_sum);
-	BUG_ON(ai->unit_size & ~PAGE_MASK);
-	BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
-
-	pcpu_dump_alloc_info(KERN_DEBUG, ai);
+	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
+	PCPU_SETUP_BUG_ON(!ai->static_size);
+	PCPU_SETUP_BUG_ON(!base_addr);
+	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
+	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
+	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
 
 	/* process group information and build config tables accordingly */
 	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
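
PCPU_SETUP_BUG_ON() is the classic do { ... } while (0) statement macro with # stringification: #cond pastes the literal check into the message, so each converted BUG_ON() now reports exactly which invariant failed and dumps the whole alloc_info before dying. For instance, PCPU_SETUP_BUG_ON(ai->unit_size < size_sum) expands to roughly:

	do {
		if (unlikely(ai->unit_size < size_sum)) {
			pr_emerg("PERCPU: failed to initialize, %s",
				 "ai->unit_size < size_sum");	/* from #cond */
			pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);
			pcpu_dump_alloc_info(KERN_EMERG, ai);
			BUG();
		}
	} while (0);
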
@@ -1604,7 +1679,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		unit_map[cpu] = NR_CPUS;
+		unit_map[cpu] = UINT_MAX;
 	pcpu_first_unit_cpu = NR_CPUS;
 
 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
@@ -1618,8 +1693,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		if (cpu == NR_CPUS)
 			continue;
 
-		BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu));
-		BUG_ON(unit_map[cpu] != NR_CPUS);
+		PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+		PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
+		PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
 
 		unit_map[cpu] = unit + i;
 		unit_off[cpu] = gi->base_offset + i * ai->unit_size;
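
Splitting the old compound BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu)) into two checks is not cosmetic: with the stringifying macro, each predicate produces its own message, so the report distinguishes an out-of-range cpu number from a non-possible one:

	PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);	/* reports "cpu > nr_cpu_ids" */
	PCPU_SETUP_BUG_ON(!cpu_possible(cpu));	/* reports "!cpu_possible(cpu)" */
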
@@ -1632,7 +1708,11 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_nr_units = unit;
 
 	for_each_possible_cpu(cpu)
-		BUG_ON(unit_map[cpu] == NR_CPUS);
+		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
+
+	/* we're done parsing the input, undefine BUG macro and dump config */
+#undef PCPU_SETUP_BUG_ON
+	pcpu_dump_alloc_info(KERN_INFO, ai);
 
 	pcpu_nr_groups = ai->nr_groups;
 	pcpu_group_offsets = group_offsets;
@@ -1782,7 +1862,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	void *base = (void *)ULONG_MAX;
 	void **areas = NULL;
 	struct pcpu_alloc_info *ai;
-	size_t size_sum, areas_size;
+	size_t size_sum, areas_size, max_distance;
 	int group, i, rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
@@ -1832,8 +1912,25 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	}
 
 	/* base address is now known, determine group base offsets */
-	for (group = 0; group < ai->nr_groups; group++)
+	max_distance = 0;
+	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
+		max_distance = max_t(size_t, max_distance,
+				     ai->groups[group].base_offset);
+	}
+	max_distance += ai->unit_size;
+
+	/* warn if maximum distance is further than 75% of vmalloc space */
+	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
+			   "space 0x%lx\n",
+			   max_distance, VMALLOC_END - VMALLOC_START);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		/* and fail if we have fallback */
+		rc = -EINVAL;
+		goto out_free;
+#endif
+	}
 
 	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
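
max_distance is the span from the lowest group base to the end of the highest group's unit, i.e. how much contiguous vmalloc address range the embedded first chunk effectively claims; later dynamic chunks need vmalloc areas at the same group offsets, so a sparse NUMA layout can exhaust a small vmalloc window. A worked example under assumed numbers (a 128 MB vmalloc window, two groups whose bases sit 96 MB apart, 1 MB unit_size):

	size_t span = 128 << 20;			/* VMALLOC_END - VMALLOC_START */
	size_t max_distance = (96 << 20) + (1 << 20);	/* farthest base + unit size = 97 MB */
	bool too_sparse = max_distance > span * 3 / 4;	/* 97 MB > 96 MB -> warn */

With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK available, the embed allocator can afford to fail here and let the page-at-a-time first-chunk allocator take over; without a fallback it only warns.
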