about · summary · refs · log · tree · commit · diff · stats
path: root/mm/percpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  |  34 +++++++++++++++++---------------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 28c37a2e2de2..2473ff06dc76 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
 static int pcpu_nr_slots __read_mostly;
 static size_t pcpu_chunk_struct_size __read_mostly;
 
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
 void *pcpu_base_addr __read_mostly;
@@ -985,19 +985,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 	bool in_first_chunk = false;
-	unsigned long first_start, first_end;
+	unsigned long first_low, first_high;
 	unsigned int cpu;
 
 	/*
-	 * The following test on first_start/end isn't strictly
+	 * The following test on unit_low/high isn't strictly
 	 * necessary but will speed up lookups of addresses which
 	 * aren't in the first chunk.
 	 */
-	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
-	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
-				    pcpu_unit_pages);
-	if ((unsigned long)addr >= first_start &&
-	    (unsigned long)addr < first_end) {
+	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+				     pcpu_unit_pages);
+	if ((unsigned long)addr >= first_low &&
+	    (unsigned long)addr < first_high) {
 		for_each_possible_cpu(cpu) {
 			void *start = per_cpu_ptr(base, cpu);
 
@@ -1234,7 +1234,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		unit_map[cpu] = UINT_MAX;
-	pcpu_first_unit_cpu = NR_CPUS;
+
+	pcpu_low_unit_cpu = NR_CPUS;
+	pcpu_high_unit_cpu = NR_CPUS;
 
 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
 		const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1254,9 +1256,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		unit_map[cpu] = unit + i;
 		unit_off[cpu] = gi->base_offset + i * ai->unit_size;
 
-		if (pcpu_first_unit_cpu == NR_CPUS)
-			pcpu_first_unit_cpu = cpu;
-		pcpu_last_unit_cpu = cpu;
+		/* determine low/high unit_cpu */
+		if (pcpu_low_unit_cpu == NR_CPUS ||
+		    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+			pcpu_low_unit_cpu = cpu;
+		if (pcpu_high_unit_cpu == NR_CPUS ||
+		    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+			pcpu_high_unit_cpu = cpu;
 		}
 	}
 	pcpu_nr_units = unit;