author	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-14 14:46:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-14 14:46:25 -0400
commit	b6daa51b9a6a02a644dcf6b880fd50c1f70ec07f (patch)
tree	745ebeefb8f4225460774ef0037ca2f9022d4a4a /mm
parent	f96ed2612260a8a415512eed4fe3f5c77247d4a1 (diff)
parent	9b7396624a7b503220d85428654634b60762f2b0 (diff)
Merge branch 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:

 - Nick improved generic implementations of percpu operations which
   modify the variable and return so that they calculate the physical
   address only once.

 - percpu_ref percpu <-> atomic mode switching improvements. The
   patchset was originally posted about a year ago but fell through the
   cracks.

 - misc non-critical fixes.

* 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  mm/percpu.c: fix potential memory leakage for pcpu_embed_first_chunk()
  mm/percpu.c: correct max_distance calculation for pcpu_embed_first_chunk()
  percpu: eliminate two sparse warnings
  percpu: improve generic percpu modify-return implementation
  percpu-refcount: init ->confirm_switch member properly
  percpu_ref: allow operation mode switching operations to be called concurrently
  percpu_ref: restructure operation mode switching
  percpu_ref: unify staggered atomic switching wait behavior
  percpu_ref: reorganize __percpu_ref_switch_to_atomic() and relocate percpu_ref_switch_to_atomic()
  percpu_ref: remove unnecessary RCU grace period for staggered atomic switching confirmation
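As a rough sketch of the modify-return improvement described above: the old
generic implementation issued two percpu accessor calls, each of which
recomputed the percpu address, while the new one takes the pointer once and
does the read-modify-write through it. The macros below are illustrative
only; the _old/_new suffixes are ours and the bodies are simplified, not
the verbatim asm-generic code.

/* Before: each raw_cpu_* accessor recomputes the percpu address. */
#define this_cpu_generic_add_return_old(pcp, val)			\
({									\
	typeof(pcp) __ret;						\
	unsigned long __flags;						\
	raw_local_irq_save(__flags);					\
	raw_cpu_add(pcp, val);		/* address computed here... */	\
	__ret = raw_cpu_read(pcp);	/* ...and recomputed here */	\
	raw_local_irq_restore(__flags);					\
	__ret;								\
})

/* After: take the pointer once, then modify and return through it. */
#define this_cpu_generic_add_return_new(pcp, val)			\
({									\
	typeof(pcp) *__p;						\
	typeof(pcp) __ret;						\
	unsigned long __flags;						\
	raw_local_irq_save(__flags);					\
	__p = raw_cpu_ptr(&(pcp));	/* address computed once */	\
	__ret = (*__p += val);						\
	raw_local_irq_restore(__flags);					\
	__ret;								\
})

The percpu_ref half of the series reworks the internals of the existing
switching API (percpu_ref_switch_to_atomic() and
percpu_ref_switch_to_percpu()) so that concurrent mode-switch calls are
safe; callers invoke the functions as before.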
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 9903830aaebb..255714302394 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1961,8 +1961,9 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	void *base = (void *)ULONG_MAX;
 	void **areas = NULL;
 	struct pcpu_alloc_info *ai;
-	size_t size_sum, areas_size, max_distance;
-	int group, i, rc;
+	size_t size_sum, areas_size;
+	unsigned long max_distance;
+	int group, i, highest_group, rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
 				   cpu_distance_fn);
@@ -1978,7 +1979,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		goto out_free;
 	}
 
-	/* allocate, copy and determine base address */
+	/* allocate, copy and determine base address & max_distance */
+	highest_group = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		struct pcpu_group_info *gi = &ai->groups[group];
 		unsigned int cpu = NR_CPUS;
@@ -1999,6 +2001,21 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+		if (ptr > areas[highest_group])
+			highest_group = group;
+	}
+	max_distance = areas[highest_group] - base;
+	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
+
+	/* warn if maximum distance is further than 75% of vmalloc space */
+	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
+		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
+				max_distance, VMALLOC_TOTAL);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		/* and fail if we have fallback */
+		rc = -EINVAL;
+		goto out_free_areas;
+#endif
 	}
 
 	/*
@@ -2023,23 +2040,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	}
 
 	/* base address is now known, determine group base offsets */
-	max_distance = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
-		max_distance = max_t(size_t, max_distance,
-				     ai->groups[group].base_offset);
-	}
-	max_distance += ai->unit_size;
-
-	/* warn if maximum distance is further than 75% of vmalloc space */
-	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
-		pr_warn("max_distance=0x%zx too large for vmalloc space 0x%lx\n",
-			max_distance, VMALLOC_TOTAL);
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-		/* and fail if we have fallback */
-		rc = -EINVAL;
-		goto out_free;
-#endif
 	}
 
 	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
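Taken together, the two halves of the diff move the max_distance check up to
allocation time and fix the arithmetic: the old code added a single
unit_size past the largest base_offset, while a group may contain several
units. A standalone sketch of the corrected calculation is below;
pcpu_max_distance() is a hypothetical helper for illustration, as
mm/percpu.c performs this inline rather than via such a function.

/*
 * Hypothetical helper mirroring the corrected logic above
 * (illustrative only; not a function in mm/percpu.c).
 */
static unsigned long pcpu_max_distance(void **areas, void *base,
				       const struct pcpu_alloc_info *ai,
				       int highest_group)
{
	/* offset of the highest-addressed group's start from the lowest base */
	unsigned long dist = (char *)areas[highest_group] - (char *)base;

	/*
	 * The removed code added only one unit_size beyond the largest
	 * base_offset; a group holds nr_units units, so the true end of
	 * the span is unit_size * nr_units past the group's start.
	 */
	dist += ai->unit_size * ai->groups[highest_group].nr_units;
	return dist;
}

The switch from size_t to unsigned long for max_distance matches the %lx
format in the new pr_warn(), and the new goto out_free_areas error path
frees the already-allocated group areas instead of leaking them, which is
the "fix potential memory leakage" commit in the series.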