author    Tejun Heo <tj@kernel.org>    2011-11-18 13:55:35 -0500
committer Tejun Heo <tj@kernel.org>    2011-11-22 11:09:46 -0500
commit    a855b84c3d8c73220d4d3cd392a7bee7c83de70e (patch)
tree      3134cd884a2c625cf72172c9cb4e4a5e68d749f2 /mm
parent    90459ce06f410b983540be56209c0abcbce23944 (diff)
percpu: fix chunk range calculation
The percpu allocator recorded the cpus which map to the first and last
units in pcpu_first/last_unit_cpu respectively and used them to
determine the address range of a chunk - e.g. it assumed that the
first unit has the lowest address in a chunk while the last unit has
the highest address.

This simply isn't true. Groups in a chunk can have arbitrary positive
or negative offsets from the previous one and there is no guarantee
that the first unit occupies the lowest offset while the last one
occupies the highest. Fix it by actually comparing unit offsets to
determine the cpus occupying the lowest and highest offsets. Also,
rename pcpu_first/last_unit_cpu to pcpu_low/high_unit_cpu to avoid
confusion.

The chunk address range is used to flush cache on vmalloc area
map/unmap and to decide whether a given address is in the first chunk
by per_cpu_ptr_to_phys(); the bug was discovered by an invalid
per_cpu_ptr_to_phys() translation for crash_note. Kudos to Dave Young
for tracking down the problem.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: WANG Cong <xiyou.wangcong@gmail.com>
Reported-by: Dave Young <dyoung@redhat.com>
Tested-by: Dave Young <dyoung@redhat.com>
LKML-Reference: <4EC21F67.10905@redhat.com>
Cc: stable@kernel.org
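As a standalone illustration (not part of the patch), here is a minimal
userspace C sketch of the min/max scan the fix performs; the unit_off[]
table below is made up for the example and merely stands in for the
kernel's per-cpu offset array:

#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	/*
	 * Made-up unit offsets: groups may sit at arbitrary positive
	 * or negative offsets, so visiting order says nothing about
	 * address order.
	 */
	long unit_off[NR_CPUS] = { 0, 8192, -8192, 4096 };
	unsigned int low = NR_CPUS, high = NR_CPUS;	/* NR_CPUS == unset */
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (low == NR_CPUS || unit_off[cpu] < unit_off[low])
			low = cpu;
		if (high == NR_CPUS || unit_off[cpu] > unit_off[high])
			high = cpu;
	}

	/*
	 * The first unit visited is cpu 0 and the last is cpu 3, but
	 * the lowest offset belongs to cpu 2 and the highest to cpu 1,
	 * exactly the case the old first/last bookkeeping got wrong.
	 */
	printf("low: cpu %u (off %ld), high: cpu %u (off %ld)\n",
	       low, unit_off[low], high, unit_off[high]);
	return 0;
}

Running it prints "low: cpu 2 (off -8192), high: cpu 1 (off 8192)",
whereas first/last bookkeeping would have reported cpus 0 and 3.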
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu-vm.c  12
-rw-r--r--  mm/percpu.c     34
2 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 29e3730d2ffd..12a48a88c0d8 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -142,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
 				 int page_start, int page_end)
 {
 	flush_cache_vunmap(
-		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -205,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
 				      int page_start, int page_end)
 {
 	flush_tlb_kernel_range(
-		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -283,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
 				int page_start, int page_end)
 {
 	flush_cache_vmap(
-		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 /**
diff --git a/mm/percpu.c b/mm/percpu.c
index 28c37a2e2de2..2473ff06dc76 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
 static int pcpu_nr_slots __read_mostly;
 static size_t pcpu_chunk_struct_size __read_mostly;
 
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
 void *pcpu_base_addr __read_mostly;
@@ -985,19 +985,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 	bool in_first_chunk = false;
-	unsigned long first_start, first_end;
+	unsigned long first_low, first_high;
 	unsigned int cpu;
 
 	/*
-	 * The following test on first_start/end isn't strictly
+	 * The following test on unit_low/high isn't strictly
 	 * necessary but will speed up lookups of addresses which
 	 * aren't in the first chunk.
 	 */
-	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
-	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
-				    pcpu_unit_pages);
-	if ((unsigned long)addr >= first_start &&
-	    (unsigned long)addr < first_end) {
+	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+				     pcpu_unit_pages);
+	if ((unsigned long)addr >= first_low &&
+	    (unsigned long)addr < first_high) {
 		for_each_possible_cpu(cpu) {
 			void *start = per_cpu_ptr(base, cpu);
 
@@ -1234,7 +1234,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		unit_map[cpu] = UINT_MAX;
-	pcpu_first_unit_cpu = NR_CPUS;
+
+	pcpu_low_unit_cpu = NR_CPUS;
+	pcpu_high_unit_cpu = NR_CPUS;
 
 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
 		const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1254,9 +1256,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 			unit_map[cpu] = unit + i;
 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
 
-			if (pcpu_first_unit_cpu == NR_CPUS)
-				pcpu_first_unit_cpu = cpu;
-			pcpu_last_unit_cpu = cpu;
+			/* determine low/high unit_cpu */
+			if (pcpu_low_unit_cpu == NR_CPUS ||
+			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+				pcpu_low_unit_cpu = cpu;
+			if (pcpu_high_unit_cpu == NR_CPUS ||
+			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+				pcpu_high_unit_cpu = cpu;
 		}
 	}
 	pcpu_nr_units = unit;