Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c | 432
1 file changed, 247 insertions, 185 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index c76ef3891e0d..bf80e55dbed7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -31,7 +31,7 @@ | |||
31 | * as small as 4 bytes. The allocator organizes chunks into lists | 31 | * as small as 4 bytes. The allocator organizes chunks into lists |
32 | * according to free size and tries to allocate from the fullest one. | 32 | * according to free size and tries to allocate from the fullest one. |
33 | * Each chunk keeps the maximum contiguous area size hint which is | 33 | * Each chunk keeps the maximum contiguous area size hint which is |
34 | * guaranteed to be eqaul to or larger than the maximum contiguous | 34 | * guaranteed to be equal to or larger than the maximum contiguous |
35 | * area in the chunk. This helps the allocator not to iterate the | 35 | * area in the chunk. This helps the allocator not to iterate the |
36 | * chunk maps unnecessarily. | 36 | * chunk maps unnecessarily. |
37 | * | 37 | * |
@@ -76,6 +76,7 @@ | |||
76 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ | 76 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ |
77 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ | 77 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ |
78 | 78 | ||
79 | #ifdef CONFIG_SMP | ||
79 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ | 80 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ |
80 | #ifndef __addr_to_pcpu_ptr | 81 | #ifndef __addr_to_pcpu_ptr |
81 | #define __addr_to_pcpu_ptr(addr) \ | 82 | #define __addr_to_pcpu_ptr(addr) \ |
@@ -89,6 +90,11 @@ | |||
89 | (unsigned long)pcpu_base_addr - \ | 90 | (unsigned long)pcpu_base_addr - \ |
90 | (unsigned long)__per_cpu_start) | 91 | (unsigned long)__per_cpu_start) |
91 | #endif | 92 | #endif |
93 | #else /* CONFIG_SMP */ | ||
94 | /* on UP, it's always identity mapped */ | ||
95 | #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) | ||
96 | #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) | ||
97 | #endif /* CONFIG_SMP */ | ||
92 | 98 | ||
93 | struct pcpu_chunk { | 99 | struct pcpu_chunk { |
94 | struct list_head list; /* linked to pcpu_slot lists */ | 100 | struct list_head list; /* linked to pcpu_slot lists */ |
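On UP there is exactly one copy of every percpu variable, so a pcpu pointer and the underlying address are the same bits; the new UP macros only add or strip the __percpu address-space annotation for sparse. A minimal sketch of the round trip (illustrative, not from the patch):

    void __percpu *p = __addr_to_pcpu_ptr(addr);  /* same address, annotated */
    void *a = __pcpu_ptr_to_addr(p);              /* annotation stripped */
    /* on UP a == addr; on SMP the default macros offset by
       pcpu_base_addr - __per_cpu_start instead */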
@@ -252,7 +258,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, | |||
252 | 258 | ||
253 | /* | 259 | /* |
254 | * (Un)populated page region iterators. Iterate over (un)populated | 260 | * (Un)populated page region iterators. Iterate over (un)populated |
255 | * page regions betwen @start and @end in @chunk. @rs and @re should | 261 | * page regions between @start and @end in @chunk. @rs and @re should |
256 | * be integer variables and will be set to start and end page index of | 262 | * be integer variables and will be set to start and end page index of |
257 | * the current region. | 263 | * the current region. |
258 | */ | 264 | */ |
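The fixed comment documents the pcpu_for_each_pop_region()/pcpu_for_each_unpop_region() iterators defined just below it. A hedged usage sketch, assuming they keep the (chunk, rs, re, start, end) signature this comment describes:

    int rs, re;
    pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
            /* pages [rs, re) of @chunk are currently unpopulated */
    }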
@@ -287,12 +293,8 @@ static void *pcpu_mem_alloc(size_t size) | |||
287 | 293 | ||
288 | if (size <= PAGE_SIZE) | 294 | if (size <= PAGE_SIZE) |
289 | return kzalloc(size, GFP_KERNEL); | 295 | return kzalloc(size, GFP_KERNEL); |
290 | else { | 296 | else |
291 | void *ptr = vmalloc(size); | 297 | return vzalloc(size); |
292 | if (ptr) | ||
293 | memset(ptr, 0, size); | ||
294 | return ptr; | ||
295 | } | ||
296 | } | 298 | } |
297 | 299 | ||
298 | /** | 300 | /** |
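vzalloc() returns zero-filled memory, so the open-coded vmalloc()+memset() pair collapses into a single call with the same behavior, including returning NULL on failure. Roughly (sketch):

    void *a = vzalloc(size);
    /* behaves like: */
    void *b = vmalloc(size);
    if (b)
            memset(b, 0, size);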
@@ -340,7 +342,7 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) | |||
340 | * @chunk: chunk of interest | 342 | * @chunk: chunk of interest |
341 | * | 343 | * |
342 | * Determine whether area map of @chunk needs to be extended to | 344 | * Determine whether area map of @chunk needs to be extended to |
343 | * accomodate a new allocation. | 345 | * accommodate a new allocation. |
344 | * | 346 | * |
345 | * CONTEXT: | 347 | * CONTEXT: |
346 | * pcpu_lock. | 348 | * pcpu_lock. |
@@ -429,7 +431,7 @@ out_unlock: | |||
429 | * depending on @head, is reduced by @tail bytes and @tail byte block | 431 | * depending on @head, is reduced by @tail bytes and @tail byte block |
430 | * is inserted after the target block. | 432 | * is inserted after the target block. |
431 | * | 433 | * |
432 | * @chunk->map must have enough free slots to accomodate the split. | 434 | * @chunk->map must have enough free slots to accommodate the split. |
433 | * | 435 | * |
434 | * CONTEXT: | 436 | * CONTEXT: |
435 | * pcpu_lock. | 437 | * pcpu_lock. |
@@ -820,8 +822,8 @@ fail_unlock_mutex: | |||
820 | * @size: size of area to allocate in bytes | 822 | * @size: size of area to allocate in bytes |
821 | * @align: alignment of area (max PAGE_SIZE) | 823 | * @align: alignment of area (max PAGE_SIZE) |
822 | * | 824 | * |
823 | * Allocate percpu area of @size bytes aligned at @align. Might | 825 | * Allocate zero-filled percpu area of @size bytes aligned at @align. |
824 | * sleep. Might trigger writeouts. | 826 | * Might sleep. Might trigger writeouts. |
825 | * | 827 | * |
826 | * CONTEXT: | 828 | * CONTEXT: |
827 | * Does GFP_KERNEL allocation. | 829 | * Does GFP_KERNEL allocation. |
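The docstring now states explicitly that returned areas are zero-filled. A hedged usage sketch of the API (illustrative caller, not from the patch):

    int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
    if (cnt) {
            this_cpu_inc(*cnt);     /* every CPU's slot starts at 0 */
            free_percpu(cnt);
    }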
@@ -840,9 +842,10 @@ EXPORT_SYMBOL_GPL(__alloc_percpu); | |||
840 | * @size: size of area to allocate in bytes | 842 | * @size: size of area to allocate in bytes |
841 | * @align: alignment of area (max PAGE_SIZE) | 843 | * @align: alignment of area (max PAGE_SIZE) |
842 | * | 844 | * |
843 | * Allocate percpu area of @size bytes aligned at @align from reserved | 845 | * Allocate zero-filled percpu area of @size bytes aligned at @align |
844 | * percpu area if arch has set it up; otherwise, allocation is served | 846 | * from reserved percpu area if arch has set it up; otherwise, |
845 | * from the same dynamic area. Might sleep. Might trigger writeouts. | 847 | * allocation is served from the same dynamic area. Might sleep. |
848 | * Might trigger writeouts. | ||
846 | * | 849 | * |
847 | * CONTEXT: | 850 | * CONTEXT: |
848 | * Does GFP_KERNEL allocation. | 851 | * Does GFP_KERNEL allocation. |
@@ -949,6 +952,7 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
949 | */ | 952 | */ |
950 | bool is_kernel_percpu_address(unsigned long addr) | 953 | bool is_kernel_percpu_address(unsigned long addr) |
951 | { | 954 | { |
955 | #ifdef CONFIG_SMP | ||
952 | const size_t static_size = __per_cpu_end - __per_cpu_start; | 956 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
953 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | 957 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
954 | unsigned int cpu; | 958 | unsigned int cpu; |
@@ -959,6 +963,8 @@ bool is_kernel_percpu_address(unsigned long addr) | |||
959 | if ((void *)addr >= start && (void *)addr < start + static_size) | 963 | if ((void *)addr >= start && (void *)addr < start + static_size) |
960 | return true; | 964 | return true; |
961 | } | 965 | } |
966 | #endif | ||
967 | /* on UP, can't distinguish from other static vars, always false */ | ||
962 | return false; | 968 | return false; |
963 | } | 969 | } |
964 | 970 | ||
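On SMP the function walks every possible CPU's static percpu region, which starts at per_cpu_ptr(base, cpu) and spans static_size bytes; on UP static percpu variables are indistinguishable from ordinary statics, so the test cannot be made and the function conservatively returns false. The guarded loop, abbreviated from the function above:

    for_each_possible_cpu(cpu) {
            void *start = per_cpu_ptr(base, cpu);
            if ((void *)addr >= start && (void *)addr < start + static_size)
                    return true;
    }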
@@ -1002,8 +1008,7 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr) | |||
1002 | } | 1008 | } |
1003 | 1009 | ||
1004 | if (in_first_chunk) { | 1010 | if (in_first_chunk) { |
1005 | if ((unsigned long)addr < VMALLOC_START || | 1011 | if (!is_vmalloc_addr(addr)) |
1006 | (unsigned long)addr >= VMALLOC_END) | ||
1007 | return __pa(addr); | 1012 | return __pa(addr); |
1008 | else | 1013 | else |
1009 | return page_to_phys(vmalloc_to_page(addr)); | 1014 | return page_to_phys(vmalloc_to_page(addr)); |
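is_vmalloc_addr() encapsulates the same range test the old code open-coded. Roughly equivalent, simplified sketch (the real helper in linux/mm.h also covers configurations without a vmalloc range):

    static inline int is_vmalloc_addr(const void *x)
    {
            unsigned long addr = (unsigned long)x;
            return addr >= VMALLOC_START && addr < VMALLOC_END;
    }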
@@ -1067,161 +1072,6 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) | |||
1067 | } | 1072 | } |
1068 | 1073 | ||
1069 | /** | 1074 | /** |
1070 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | ||
1071 | * @reserved_size: the size of reserved percpu area in bytes | ||
1072 | * @dyn_size: minimum free size for dynamic allocation in bytes | ||
1073 | * @atom_size: allocation atom size | ||
1074 | * @cpu_distance_fn: callback to determine distance between cpus, optional | ||
1075 | * | ||
1076 | * This function determines grouping of units, their mappings to cpus | ||
1077 | * and other parameters considering needed percpu size, allocation | ||
1078 | * atom size and distances between CPUs. | ||
1079 | * | ||
1080 | * Groups are always mutliples of atom size and CPUs which are of | ||
1081 | * LOCAL_DISTANCE both ways are grouped together and share space for | ||
1082 | * units in the same group. The returned configuration is guaranteed | ||
1083 | * to have CPUs on different nodes on different groups and >=75% usage | ||
1084 | * of allocated virtual address space. | ||
1085 | * | ||
1086 | * RETURNS: | ||
1087 | * On success, pointer to the new allocation_info is returned. On | ||
1088 | * failure, ERR_PTR value is returned. | ||
1089 | */ | ||
1090 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | ||
1091 | size_t reserved_size, size_t dyn_size, | ||
1092 | size_t atom_size, | ||
1093 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | ||
1094 | { | ||
1095 | static int group_map[NR_CPUS] __initdata; | ||
1096 | static int group_cnt[NR_CPUS] __initdata; | ||
1097 | const size_t static_size = __per_cpu_end - __per_cpu_start; | ||
1098 | int nr_groups = 1, nr_units = 0; | ||
1099 | size_t size_sum, min_unit_size, alloc_size; | ||
1100 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | ||
1101 | int last_allocs, group, unit; | ||
1102 | unsigned int cpu, tcpu; | ||
1103 | struct pcpu_alloc_info *ai; | ||
1104 | unsigned int *cpu_map; | ||
1105 | |||
1106 | /* this function may be called multiple times */ | ||
1107 | memset(group_map, 0, sizeof(group_map)); | ||
1108 | memset(group_cnt, 0, sizeof(group_cnt)); | ||
1109 | |||
1110 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | ||
1111 | size_sum = PFN_ALIGN(static_size + reserved_size + | ||
1112 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | ||
1113 | dyn_size = size_sum - static_size - reserved_size; | ||
1114 | |||
1115 | /* | ||
1116 | * Determine min_unit_size, alloc_size and max_upa such that | ||
1117 | * alloc_size is multiple of atom_size and is the smallest | ||
1118 | * which can accomodate 4k aligned segments which are equal to | ||
1119 | * or larger than min_unit_size. | ||
1120 | */ | ||
1121 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | ||
1122 | |||
1123 | alloc_size = roundup(min_unit_size, atom_size); | ||
1124 | upa = alloc_size / min_unit_size; | ||
1125 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1126 | upa--; | ||
1127 | max_upa = upa; | ||
1128 | |||
1129 | /* group cpus according to their proximity */ | ||
1130 | for_each_possible_cpu(cpu) { | ||
1131 | group = 0; | ||
1132 | next_group: | ||
1133 | for_each_possible_cpu(tcpu) { | ||
1134 | if (cpu == tcpu) | ||
1135 | break; | ||
1136 | if (group_map[tcpu] == group && cpu_distance_fn && | ||
1137 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | ||
1138 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | ||
1139 | group++; | ||
1140 | nr_groups = max(nr_groups, group + 1); | ||
1141 | goto next_group; | ||
1142 | } | ||
1143 | } | ||
1144 | group_map[cpu] = group; | ||
1145 | group_cnt[group]++; | ||
1146 | } | ||
1147 | |||
1148 | /* | ||
1149 | * Expand unit size until address space usage goes over 75% | ||
1150 | * and then as much as possible without using more address | ||
1151 | * space. | ||
1152 | */ | ||
1153 | last_allocs = INT_MAX; | ||
1154 | for (upa = max_upa; upa; upa--) { | ||
1155 | int allocs = 0, wasted = 0; | ||
1156 | |||
1157 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1158 | continue; | ||
1159 | |||
1160 | for (group = 0; group < nr_groups; group++) { | ||
1161 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | ||
1162 | allocs += this_allocs; | ||
1163 | wasted += this_allocs * upa - group_cnt[group]; | ||
1164 | } | ||
1165 | |||
1166 | /* | ||
1167 | * Don't accept if wastage is over 1/3. The | ||
1168 | * greater-than comparison ensures upa==1 always | ||
1169 | * passes the following check. | ||
1170 | */ | ||
1171 | if (wasted > num_possible_cpus() / 3) | ||
1172 | continue; | ||
1173 | |||
1174 | /* and then don't consume more memory */ | ||
1175 | if (allocs > last_allocs) | ||
1176 | break; | ||
1177 | last_allocs = allocs; | ||
1178 | best_upa = upa; | ||
1179 | } | ||
1180 | upa = best_upa; | ||
1181 | |||
1182 | /* allocate and fill alloc_info */ | ||
1183 | for (group = 0; group < nr_groups; group++) | ||
1184 | nr_units += roundup(group_cnt[group], upa); | ||
1185 | |||
1186 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | ||
1187 | if (!ai) | ||
1188 | return ERR_PTR(-ENOMEM); | ||
1189 | cpu_map = ai->groups[0].cpu_map; | ||
1190 | |||
1191 | for (group = 0; group < nr_groups; group++) { | ||
1192 | ai->groups[group].cpu_map = cpu_map; | ||
1193 | cpu_map += roundup(group_cnt[group], upa); | ||
1194 | } | ||
1195 | |||
1196 | ai->static_size = static_size; | ||
1197 | ai->reserved_size = reserved_size; | ||
1198 | ai->dyn_size = dyn_size; | ||
1199 | ai->unit_size = alloc_size / upa; | ||
1200 | ai->atom_size = atom_size; | ||
1201 | ai->alloc_size = alloc_size; | ||
1202 | |||
1203 | for (group = 0, unit = 0; group_cnt[group]; group++) { | ||
1204 | struct pcpu_group_info *gi = &ai->groups[group]; | ||
1205 | |||
1206 | /* | ||
1207 | * Initialize base_offset as if all groups are located | ||
1208 | * back-to-back. The caller should update this to | ||
1209 | * reflect actual allocation. | ||
1210 | */ | ||
1211 | gi->base_offset = unit * ai->unit_size; | ||
1212 | |||
1213 | for_each_possible_cpu(cpu) | ||
1214 | if (group_map[cpu] == group) | ||
1215 | gi->cpu_map[gi->nr_units++] = cpu; | ||
1216 | gi->nr_units = roundup(gi->nr_units, upa); | ||
1217 | unit += gi->nr_units; | ||
1218 | } | ||
1219 | BUG_ON(unit != nr_units); | ||
1220 | |||
1221 | return ai; | ||
1222 | } | ||
1223 | |||
1224 | /** | ||
1225 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info | 1075 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info |
1226 | * @lvl: loglevel | 1076 | * @lvl: loglevel |
1227 | * @ai: allocation info to dump | 1077 | * @ai: allocation info to dump |
@@ -1363,8 +1213,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1363 | 1213 | ||
1364 | /* sanity checks */ | 1214 | /* sanity checks */ |
1365 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); | 1215 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
1216 | #ifdef CONFIG_SMP | ||
1366 | PCPU_SETUP_BUG_ON(!ai->static_size); | 1217 | PCPU_SETUP_BUG_ON(!ai->static_size); |
1218 | PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK); | ||
1219 | #endif | ||
1367 | PCPU_SETUP_BUG_ON(!base_addr); | 1220 | PCPU_SETUP_BUG_ON(!base_addr); |
1221 | PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK); | ||
1368 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); | 1222 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
1369 | PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); | 1223 | PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); |
1370 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); | 1224 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); |
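The added BUG_ONs use the usual page-alignment idiom: PAGE_MASK clears the low PAGE_SHIFT bits, so addr & ~PAGE_MASK isolates the offset within a page and must be zero for a page-aligned address. With illustrative numbers (4KB pages):

    /* PAGE_SIZE = 4096, PAGE_MASK = ~0xfffUL                        */
    /* 0x12345678 & ~PAGE_MASK == 0x678 -> misaligned, check fires   */
    /* 0x12345000 & ~PAGE_MASK == 0x000 -> aligned, check passes     */
    PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);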
@@ -1411,7 +1265,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1411 | 1265 | ||
1412 | /* we're done parsing the input, undefine BUG macro and dump config */ | 1266 | /* we're done parsing the input, undefine BUG macro and dump config */ |
1413 | #undef PCPU_SETUP_BUG_ON | 1267 | #undef PCPU_SETUP_BUG_ON |
1414 | pcpu_dump_alloc_info(KERN_INFO, ai); | 1268 | pcpu_dump_alloc_info(KERN_DEBUG, ai); |
1415 | 1269 | ||
1416 | pcpu_nr_groups = ai->nr_groups; | 1270 | pcpu_nr_groups = ai->nr_groups; |
1417 | pcpu_group_offsets = group_offsets; | 1271 | pcpu_group_offsets = group_offsets; |
@@ -1488,6 +1342,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1488 | return 0; | 1342 | return 0; |
1489 | } | 1343 | } |
1490 | 1344 | ||
1345 | #ifdef CONFIG_SMP | ||
1346 | |||
1491 | const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { | 1347 | const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { |
1492 | [PCPU_FC_AUTO] = "auto", | 1348 | [PCPU_FC_AUTO] = "auto", |
1493 | [PCPU_FC_EMBED] = "embed", | 1349 | [PCPU_FC_EMBED] = "embed", |
@@ -1515,8 +1371,180 @@ static int __init percpu_alloc_setup(char *str) | |||
1515 | } | 1371 | } |
1516 | early_param("percpu_alloc", percpu_alloc_setup); | 1372 | early_param("percpu_alloc", percpu_alloc_setup); |
1517 | 1373 | ||
1374 | /* | ||
1375 | * pcpu_embed_first_chunk() is used by the generic percpu setup. | ||
1376 | * Build it if needed by the arch config or the generic setup is going | ||
1377 | * to be used. | ||
1378 | */ | ||
1518 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ | 1379 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1519 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | 1380 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
1381 | #define BUILD_EMBED_FIRST_CHUNK | ||
1382 | #endif | ||
1383 | |||
1384 | /* build pcpu_page_first_chunk() iff needed by the arch config */ | ||
1385 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) | ||
1386 | #define BUILD_PAGE_FIRST_CHUNK | ||
1387 | #endif | ||
1388 | |||
1389 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ | ||
1390 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) | ||
1391 | /** | ||
1392 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | ||
1393 | * @reserved_size: the size of reserved percpu area in bytes | ||
1394 | * @dyn_size: minimum free size for dynamic allocation in bytes | ||
1395 | * @atom_size: allocation atom size | ||
1396 | * @cpu_distance_fn: callback to determine distance between cpus, optional | ||
1397 | * | ||
1398 | * This function determines grouping of units, their mappings to cpus | ||
1399 | * and other parameters considering needed percpu size, allocation | ||
1400 | * atom size and distances between CPUs. | ||
1401 | * | ||
1402 | * Groups are always mutliples of atom size and CPUs which are of | ||
1403 | * LOCAL_DISTANCE both ways are grouped together and share space for | ||
1404 | * units in the same group. The returned configuration is guaranteed | ||
1405 | * to have CPUs on different nodes on different groups and >=75% usage | ||
1406 | * of allocated virtual address space. | ||
1407 | * | ||
1408 | * RETURNS: | ||
1409 | * On success, pointer to the new allocation_info is returned. On | ||
1410 | * failure, ERR_PTR value is returned. | ||
1411 | */ | ||
1412 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | ||
1413 | size_t reserved_size, size_t dyn_size, | ||
1414 | size_t atom_size, | ||
1415 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | ||
1416 | { | ||
1417 | static int group_map[NR_CPUS] __initdata; | ||
1418 | static int group_cnt[NR_CPUS] __initdata; | ||
1419 | const size_t static_size = __per_cpu_end - __per_cpu_start; | ||
1420 | int nr_groups = 1, nr_units = 0; | ||
1421 | size_t size_sum, min_unit_size, alloc_size; | ||
1422 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | ||
1423 | int last_allocs, group, unit; | ||
1424 | unsigned int cpu, tcpu; | ||
1425 | struct pcpu_alloc_info *ai; | ||
1426 | unsigned int *cpu_map; | ||
1427 | |||
1428 | /* this function may be called multiple times */ | ||
1429 | memset(group_map, 0, sizeof(group_map)); | ||
1430 | memset(group_cnt, 0, sizeof(group_cnt)); | ||
1431 | |||
1432 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | ||
1433 | size_sum = PFN_ALIGN(static_size + reserved_size + | ||
1434 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | ||
1435 | dyn_size = size_sum - static_size - reserved_size; | ||
1436 | |||
1437 | /* | ||
1438 | * Determine min_unit_size, alloc_size and max_upa such that | ||
1439 | * alloc_size is multiple of atom_size and is the smallest | ||
1440 | * which can accommodate 4k aligned segments which are equal to | ||
1441 | * or larger than min_unit_size. | ||
1442 | */ | ||
1443 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | ||
1444 | |||
1445 | alloc_size = roundup(min_unit_size, atom_size); | ||
1446 | upa = alloc_size / min_unit_size; | ||
1447 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1448 | upa--; | ||
1449 | max_upa = upa; | ||
1450 | |||
1451 | /* group cpus according to their proximity */ | ||
1452 | for_each_possible_cpu(cpu) { | ||
1453 | group = 0; | ||
1454 | next_group: | ||
1455 | for_each_possible_cpu(tcpu) { | ||
1456 | if (cpu == tcpu) | ||
1457 | break; | ||
1458 | if (group_map[tcpu] == group && cpu_distance_fn && | ||
1459 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | ||
1460 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | ||
1461 | group++; | ||
1462 | nr_groups = max(nr_groups, group + 1); | ||
1463 | goto next_group; | ||
1464 | } | ||
1465 | } | ||
1466 | group_map[cpu] = group; | ||
1467 | group_cnt[group]++; | ||
1468 | } | ||
1469 | |||
1470 | /* | ||
1471 | * Expand unit size until address space usage goes over 75% | ||
1472 | * and then as much as possible without using more address | ||
1473 | * space. | ||
1474 | */ | ||
1475 | last_allocs = INT_MAX; | ||
1476 | for (upa = max_upa; upa; upa--) { | ||
1477 | int allocs = 0, wasted = 0; | ||
1478 | |||
1479 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1480 | continue; | ||
1481 | |||
1482 | for (group = 0; group < nr_groups; group++) { | ||
1483 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | ||
1484 | allocs += this_allocs; | ||
1485 | wasted += this_allocs * upa - group_cnt[group]; | ||
1486 | } | ||
1487 | |||
1488 | /* | ||
1489 | * Don't accept if wastage is over 1/3. The | ||
1490 | * greater-than comparison ensures upa==1 always | ||
1491 | * passes the following check. | ||
1492 | */ | ||
1493 | if (wasted > num_possible_cpus() / 3) | ||
1494 | continue; | ||
1495 | |||
1496 | /* and then don't consume more memory */ | ||
1497 | if (allocs > last_allocs) | ||
1498 | break; | ||
1499 | last_allocs = allocs; | ||
1500 | best_upa = upa; | ||
1501 | } | ||
1502 | upa = best_upa; | ||
1503 | |||
1504 | /* allocate and fill alloc_info */ | ||
1505 | for (group = 0; group < nr_groups; group++) | ||
1506 | nr_units += roundup(group_cnt[group], upa); | ||
1507 | |||
1508 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | ||
1509 | if (!ai) | ||
1510 | return ERR_PTR(-ENOMEM); | ||
1511 | cpu_map = ai->groups[0].cpu_map; | ||
1512 | |||
1513 | for (group = 0; group < nr_groups; group++) { | ||
1514 | ai->groups[group].cpu_map = cpu_map; | ||
1515 | cpu_map += roundup(group_cnt[group], upa); | ||
1516 | } | ||
1517 | |||
1518 | ai->static_size = static_size; | ||
1519 | ai->reserved_size = reserved_size; | ||
1520 | ai->dyn_size = dyn_size; | ||
1521 | ai->unit_size = alloc_size / upa; | ||
1522 | ai->atom_size = atom_size; | ||
1523 | ai->alloc_size = alloc_size; | ||
1524 | |||
1525 | for (group = 0, unit = 0; group_cnt[group]; group++) { | ||
1526 | struct pcpu_group_info *gi = &ai->groups[group]; | ||
1527 | |||
1528 | /* | ||
1529 | * Initialize base_offset as if all groups are located | ||
1530 | * back-to-back. The caller should update this to | ||
1531 | * reflect actual allocation. | ||
1532 | */ | ||
1533 | gi->base_offset = unit * ai->unit_size; | ||
1534 | |||
1535 | for_each_possible_cpu(cpu) | ||
1536 | if (group_map[cpu] == group) | ||
1537 | gi->cpu_map[gi->nr_units++] = cpu; | ||
1538 | gi->nr_units = roundup(gi->nr_units, upa); | ||
1539 | unit += gi->nr_units; | ||
1540 | } | ||
1541 | BUG_ON(unit != nr_units); | ||
1542 | |||
1543 | return ai; | ||
1544 | } | ||
1545 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ | ||
1546 | |||
1547 | #if defined(BUILD_EMBED_FIRST_CHUNK) | ||
1520 | /** | 1548 | /** |
1521 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | 1549 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem |
1522 | * @reserved_size: the size of reserved percpu area in bytes | 1550 | * @reserved_size: the size of reserved percpu area in bytes |
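To make the grouping and upa (units per alloc) selection in pcpu_build_alloc_info() above concrete, a worked example with invented numbers: two NUMA groups of 3 CPUs each and alloc_size = 2MB, so upa must split 2MB into page-aligned units and only power-of-two candidates survive the divisibility check:

    /* wastage limit: num_possible_cpus() / 3 = 2 units                  */
    /* upa=32..8: wasted = 2*(upa-3) > 2           -> rejected           */
    /* upa=4:     wasted = 2*(4-3) = 2, allocs=2   -> accepted, best_upa */
    /* upa=2:     2 allocs per group, allocs=4 > 2 -> loop stops         */
    /* result: unit_size = 2MB/4 = 512KB, nr_units = 2*roundup(3,4) = 8  */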
@@ -1524,7 +1552,7 @@ early_param("percpu_alloc", percpu_alloc_setup); | |||
1524 | * @atom_size: allocation atom size | 1552 | * @atom_size: allocation atom size |
1525 | * @cpu_distance_fn: callback to determine distance between cpus, optional | 1553 | * @cpu_distance_fn: callback to determine distance between cpus, optional |
1526 | * @alloc_fn: function to allocate percpu page | 1554 | * @alloc_fn: function to allocate percpu page |
1527 | * @free_fn: funtion to free percpu page | 1555 | * @free_fn: function to free percpu page |
1528 | * | 1556 | * |
1529 | * This is a helper to ease setting up embedded first percpu chunk and | 1557 | * This is a helper to ease setting up embedded first percpu chunk and |
1530 | * can be called where pcpu_setup_first_chunk() is expected. | 1558 | * can be called where pcpu_setup_first_chunk() is expected. |
@@ -1619,8 +1647,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, | |||
1619 | /* warn if maximum distance is further than 75% of vmalloc space */ | 1647 | /* warn if maximum distance is further than 75% of vmalloc space */ |
1620 | if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { | 1648 | if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { |
1621 | pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " | 1649 | pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " |
1622 | "space 0x%lx\n", | 1650 | "space 0x%lx\n", max_distance, |
1623 | max_distance, VMALLOC_END - VMALLOC_START); | 1651 | (unsigned long)(VMALLOC_END - VMALLOC_START)); |
1624 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | 1652 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
1625 | /* and fail if we have fallback */ | 1653 | /* and fail if we have fallback */ |
1626 | rc = -EINVAL; | 1654 | rc = -EINVAL; |
@@ -1645,15 +1673,14 @@ out_free: | |||
1645 | free_bootmem(__pa(areas), areas_size); | 1673 | free_bootmem(__pa(areas), areas_size); |
1646 | return rc; | 1674 | return rc; |
1647 | } | 1675 | } |
1648 | #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || | 1676 | #endif /* BUILD_EMBED_FIRST_CHUNK */ |
1649 | !CONFIG_HAVE_SETUP_PER_CPU_AREA */ | ||
1650 | 1677 | ||
1651 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | 1678 | #ifdef BUILD_PAGE_FIRST_CHUNK |
1652 | /** | 1679 | /** |
1653 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages | 1680 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
1654 | * @reserved_size: the size of reserved percpu area in bytes | 1681 | * @reserved_size: the size of reserved percpu area in bytes |
1655 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | 1682 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE |
1656 | * @free_fn: funtion to free percpu page, always called with PAGE_SIZE | 1683 | * @free_fn: function to free percpu page, always called with PAGE_SIZE |
1657 | * @populate_pte_fn: function to populate pte | 1684 | * @populate_pte_fn: function to populate pte |
1658 | * | 1685 | * |
1659 | * This is a helper to ease setting up page-remapped first percpu | 1686 | * This is a helper to ease setting up page-remapped first percpu |
@@ -1756,10 +1783,11 @@ out_free_ar: | |||
1756 | pcpu_free_alloc_info(ai); | 1783 | pcpu_free_alloc_info(ai); |
1757 | return rc; | 1784 | return rc; |
1758 | } | 1785 | } |
1759 | #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ | 1786 | #endif /* BUILD_PAGE_FIRST_CHUNK */ |
1760 | 1787 | ||
1788 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
1761 | /* | 1789 | /* |
1762 | * Generic percpu area setup. | 1790 | * Generic SMP percpu area setup. |
1763 | * | 1791 | * |
1764 | * The embedding helper is used because its behavior closely resembles | 1792 | * The embedding helper is used because its behavior closely resembles |
1765 | * the original non-dynamic generic percpu area setup. This is | 1793 | * the original non-dynamic generic percpu area setup. This is |
@@ -1770,7 +1798,6 @@ out_free_ar: | |||
1770 | * on the physical linear memory mapping which uses large page | 1798 | * on the physical linear memory mapping which uses large page |
1771 | * mappings on applicable archs. | 1799 | * mappings on applicable archs. |
1772 | */ | 1800 | */ |
1773 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
1774 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | 1801 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
1775 | EXPORT_SYMBOL(__per_cpu_offset); | 1802 | EXPORT_SYMBOL(__per_cpu_offset); |
1776 | 1803 | ||
@@ -1799,13 +1826,48 @@ void __init setup_per_cpu_areas(void) | |||
1799 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, | 1826 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
1800 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); | 1827 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); |
1801 | if (rc < 0) | 1828 | if (rc < 0) |
1802 | panic("Failed to initialized percpu areas."); | 1829 | panic("Failed to initialize percpu areas."); |
1803 | 1830 | ||
1804 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | 1831 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
1805 | for_each_possible_cpu(cpu) | 1832 | for_each_possible_cpu(cpu) |
1806 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; | 1833 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
1807 | } | 1834 | } |
1808 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ | 1835 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
1836 | |||
1837 | #else /* CONFIG_SMP */ | ||
1838 | |||
1839 | /* | ||
1840 | * UP percpu area setup. | ||
1841 | * | ||
1842 | * UP always uses km-based percpu allocator with identity mapping. | ||
1843 | * Static percpu variables are indistinguishable from the usual static | ||
1844 | * variables and don't require any special preparation. | ||
1845 | */ | ||
1846 | void __init setup_per_cpu_areas(void) | ||
1847 | { | ||
1848 | const size_t unit_size = | ||
1849 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, | ||
1850 | PERCPU_DYNAMIC_RESERVE)); | ||
1851 | struct pcpu_alloc_info *ai; | ||
1852 | void *fc; | ||
1853 | |||
1854 | ai = pcpu_alloc_alloc_info(1, 1); | ||
1855 | fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | ||
1856 | if (!ai || !fc) | ||
1857 | panic("Failed to allocate memory for percpu areas."); | ||
1858 | |||
1859 | ai->dyn_size = unit_size; | ||
1860 | ai->unit_size = unit_size; | ||
1861 | ai->atom_size = unit_size; | ||
1862 | ai->alloc_size = unit_size; | ||
1863 | ai->groups[0].nr_units = 1; | ||
1864 | ai->groups[0].cpu_map[0] = 0; | ||
1865 | |||
1866 | if (pcpu_setup_first_chunk(ai, fc) < 0) | ||
1867 | panic("Failed to initialize percpu areas."); | ||
1868 | } | ||
1869 | |||
1870 | #endif /* CONFIG_SMP */ | ||
1809 | 1871 | ||
1810 | /* | 1872 | /* |
1811 | * First and reserved chunks are initialized with temporary allocation | 1873 | * First and reserved chunks are initialized with temporary allocation |
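On UP the single unit only has to cover the allocator minimum and the default dynamic reserve, rounded up to a power of two. With invented example values the arithmetic is (sketch):

    /* e.g. PCPU_MIN_UNIT_SIZE = 32KB, PERCPU_DYNAMIC_RESERVE = 20KB */
    size_t unit_size = roundup_pow_of_two(max_t(size_t, 32 << 10, 20 << 10));
    /* max is 32KB, already a power of two -> unit_size = 32KB */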