Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 67 insertions(+), 16 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 43d8cacfdaa5..4a048abad043 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1043,7 +1043,9 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  */
 static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
+	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
+	const char *err;
 	int slot, off;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1059,11 +1061,14 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
 		if (size > chunk->contig_hint ||
-		    pcpu_extend_area_map(chunk) < 0)
+		    pcpu_extend_area_map(chunk) < 0) {
+			err = "failed to extend area map of reserved chunk";
 			goto fail_unlock;
+		}
 		off = pcpu_alloc_area(chunk, size, align);
 		if (off >= 0)
 			goto area_found;
+		err = "alloc from reserved chunk failed";
 		goto fail_unlock;
 	}
 
@@ -1080,6 +1085,7 @@ restart:
 	case 1:
 		goto restart;	/* pcpu_lock dropped, restart */
 	default:
+		err = "failed to extend area map";
 		goto fail_unlock;
 	}
 
@@ -1093,8 +1099,10 @@ restart:
 	spin_unlock_irq(&pcpu_lock);
 
 	chunk = alloc_pcpu_chunk();
-	if (!chunk)
+	if (!chunk) {
+		err = "failed to allocate new chunk";
 		goto fail_unlock_mutex;
+	}
 
 	spin_lock_irq(&pcpu_lock);
 	pcpu_chunk_relocate(chunk, -1);
@@ -1107,6 +1115,7 @@ area_found:
 	if (pcpu_populate_chunk(chunk, off, size)) {
 		spin_lock_irq(&pcpu_lock);
 		pcpu_free_area(chunk, off);
+		err = "failed to populate";
 		goto fail_unlock;
 	}
 
@@ -1119,6 +1128,13 @@ fail_unlock:
 	spin_unlock_irq(&pcpu_lock);
 fail_unlock_mutex:
 	mutex_unlock(&pcpu_alloc_mutex);
+	if (warn_limit) {
+		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
+			   "%s\n", size, align, err);
+		dump_stack();
+		if (!--warn_limit)
+			pr_info("PERCPU: limit reached, disable warning\n");
+	}
 	return NULL;
 }
 
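Everything in the pcpu_alloc() hunks above follows one pattern: each failure site records a human-readable reason in err, and the common exit path turns that into a rate-limited warning with a backtrace, going quiet after ten reports so a persistent failure cannot flood the log. Reduced to a standalone sketch (report_alloc_failure() is an illustrative name, not something the patch adds):

static void report_alloc_failure(const char *err, size_t size, size_t align)
{
	static int warn_limit = 10;	/* stop after the first 10 reports */

	if (!warn_limit)
		return;

	pr_warning("PERCPU: allocation failed, size=%zu align=%zu, %s\n",
		   size, align, err);
	dump_stack();			/* show who asked for the allocation */
	if (!--warn_limit)
		pr_info("PERCPU: limit reached, disable warning\n");
}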
@@ -1347,6 +1363,10 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	struct pcpu_alloc_info *ai;
 	unsigned int *cpu_map;
 
+	/* this function may be called multiple times */
+	memset(group_map, 0, sizeof(group_map));
+	memset(group_cnt, 0, sizeof(group_cnt));
+
 	/*
 	 * Determine min_unit_size, alloc_size and max_upa such that
 	 * alloc_size is multiple of atom_size and is the smallest
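pcpu_build_alloc_info() keeps its scratch arrays in static __initdata storage to stay off the limited early-boot stack, and statics keep their contents across calls, so the new memsets are what make a second invocation safe. In miniature, with the array roles and rebuild_groups() assumed for illustration:

static int group_map[NR_CPUS] __initdata;	/* cpu -> group index */
static int group_cnt[NR_CPUS] __initdata;	/* cpus per group */

static void __init rebuild_groups(void)
{
	/* statics persist across calls; drop stale data from a prior run */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* ... repopulate both arrays from the current cpu topology ... */
}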
@@ -1574,6 +1594,7 @@ static void pcpu_dump_alloc_info(const char *lvl,
 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 				  void *base_addr)
 {
+	static char cpus_buf[4096] __initdata;
 	static int smap[2], dmap[2];
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
@@ -1585,17 +1606,26 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	int *unit_map;
 	int group, unit, i;
 
+	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
+
+#define PCPU_SETUP_BUG_ON(cond)	do {					\
+	if (unlikely(cond)) {						\
+		pr_emerg("PERCPU: failed to initialize, %s\n", #cond);	\
+		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
+		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
+		BUG();							\
+	}								\
+} while (0)
+
 	/* sanity checks */
 	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
 		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
-	BUG_ON(ai->nr_groups <= 0);
-	BUG_ON(!ai->static_size);
-	BUG_ON(!base_addr);
-	BUG_ON(ai->unit_size < size_sum);
-	BUG_ON(ai->unit_size & ~PAGE_MASK);
-	BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
-
-	pcpu_dump_alloc_info(KERN_DEBUG, ai);
+	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
+	PCPU_SETUP_BUG_ON(!ai->static_size);
+	PCPU_SETUP_BUG_ON(!base_addr);
+	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
+	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
+	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
 
 	/* process group information and build config tables accordingly */
 	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
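PCPU_SETUP_BUG_ON() is a locally scoped assert: it exists only while the function parses its input, prints the failed expression plus enough context to debug an early-boot crash, and is #undef'ed as soon as validation ends (see the @@ -1632 hunk below). The skeleton of the idiom, with illustrative names (dump_state() stands in for whatever context dump fits):

#define VALIDATE_OR_BUG(cond) do {					\
	if (unlikely(cond)) {						\
		pr_emerg("setup failed, %s\n", #cond);			\
		dump_state();		/* hypothetical context dump */	\
		BUG();							\
	}								\
} while (0)

	/* ... a run of input checks inside the function ... */
	VALIDATE_OR_BUG(nr_groups <= 0);
	VALIDATE_OR_BUG(unit_size & ~PAGE_MASK);

#undef VALIDATE_OR_BUG	/* input parsed; the macro goes out of scope */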
@@ -1604,7 +1634,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		unit_map[cpu] = NR_CPUS;
+		unit_map[cpu] = UINT_MAX;
 	pcpu_first_unit_cpu = NR_CPUS;
 
 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
@@ -1618,8 +1648,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		if (cpu == NR_CPUS)
 			continue;
 
-		BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu));
-		BUG_ON(unit_map[cpu] != NR_CPUS);
+		PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+		PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
+		PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
 
 		unit_map[cpu] = unit + i;
 		unit_off[cpu] = gi->base_offset + i * ai->unit_size;
@@ -1632,7 +1663,11 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_nr_units = unit;
 
 	for_each_possible_cpu(cpu)
-		BUG_ON(unit_map[cpu] == NR_CPUS);
+		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
+
+	/* we're done parsing the input, undefine BUG macro and dump config */
+#undef PCPU_SETUP_BUG_ON
+	pcpu_dump_alloc_info(KERN_INFO, ai);
 
 	pcpu_nr_groups = ai->nr_groups;
 	pcpu_group_offsets = group_offsets;
@@ -1782,7 +1817,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	void *base = (void *)ULONG_MAX;
 	void **areas = NULL;
 	struct pcpu_alloc_info *ai;
-	size_t size_sum, areas_size;
+	size_t size_sum, areas_size, max_distance;
 	int group, i, rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
@@ -1832,8 +1867,24 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	}
 
 	/* base address is now known, determine group base offsets */
-	for (group = 0; group < ai->nr_groups; group++)
+	max_distance = 0;
+	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
+		max_distance = max(max_distance, ai->groups[group].base_offset);
+	}
+	max_distance += ai->unit_size;
+
+	/* warn if maximum distance is further than 75% of vmalloc space */
+	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
+			   "space 0x%lx\n",
+			   max_distance, VMALLOC_END - VMALLOC_START);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		/* and fail if we have fallback */
+		rc = -EINVAL;
+		goto out_free;
+#endif
+	}
 
 	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
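The last hunk guards the embed allocator against NUMA layouts whose group base addresses sit far apart: later dynamic chunks must map a congruent span of vmalloc space, so the patch measures the distance from the lowest group to one unit past the highest and complains once that exceeds 75% of the vmalloc range, failing outright only when the page-mapped fallback is built in. The computation in isolation (vmalloc_span_ok() is an illustrative helper, not kernel API):

static bool __init vmalloc_span_ok(const struct pcpu_alloc_info *ai,
				   void **areas, void *base)
{
	size_t max_distance = 0;
	int group;

	/* furthest group start, relative to the lowest mapped area ... */
	for (group = 0; group < ai->nr_groups; group++)
		max_distance = max(max_distance,
				   (size_t)(areas[group] - base));
	/* ... plus one unit covers the whole congruent span */
	max_distance += ai->unit_size;

	/* keep at least 25% of vmalloc space free for other users */
	return max_distance <= (VMALLOC_END - VMALLOC_START) * 3 / 4;
}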