aboutsummaryrefslogtreecommitdiffstats
path: root/mm/percpu.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-09-24 05:46:01 -0400
committerTejun Heo <tj@kernel.org>2009-09-28 20:17:57 -0400
commit6ea529a2037ce662fc6bfa572b46d47407d08805 (patch)
treeb9e8721eb4edc563ae0e40500a58dfa053ceed39 /mm/percpu.c
parenta70c691376c7c7f94af41395848066f59501fffd (diff)
percpu: make embedding first chunk allocator check vmalloc space size
Embedding first chunk allocator maintains the distances between units in the vmalloc area and thus needs vmalloc space to be larger than the maximum distances between units; otherwise, it wouldn't be able to create any dynamic chunks. This patch makes the embedding first chunk allocator check vmalloc space size and if the maximum distance between units is larger than 75% of it, print warning and, if page mapping allocator is available, fail initialization so that the system falls back onto it. This should work around percpu allocation failure problems on certain sparc64 configurations where distances between NUMA nodes are larger than the vmalloc area and makes percpu allocator more robust for future configurations. Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--mm/percpu.c20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index a64133f8af45..c43da8c024d1 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1786,7 +1786,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	void *base = (void *)ULONG_MAX;
 	void **areas = NULL;
 	struct pcpu_alloc_info *ai;
-	size_t size_sum, areas_size;
+	size_t size_sum, areas_size, max_distance;
 	int group, i, rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
@@ -1836,8 +1836,24 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	}
 
 	/* base address is now known, determine group base offsets */
-	for (group = 0; group < ai->nr_groups; group++)
+	max_distance = 0;
+	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
+		max_distance = max(max_distance, ai->groups[group].base_offset);
+	}
+	max_distance += ai->unit_size;
+
+	/* warn if maximum distance is further than 75% of vmalloc space */
+	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+		pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc "
+			   "space 0x%lx\n",
+			   max_distance, VMALLOC_END - VMALLOC_START);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		/* and fail if we have fallback */
+		rc = -EINVAL;
+		goto out_free;
+#endif
+	}
 
 	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,