 arch/arm/kernel/vmlinux.lds.S     |   1 +
 arch/ia64/kernel/vmlinux.lds.S    |  12 +--
 arch/powerpc/kernel/vmlinux.lds.S |   9 +-
 arch/x86/include/asm/percpu.h     |   8 --
 arch/x86/kernel/setup_percpu.c    |  63 ++-----------
 include/linux/percpu.h            |   6 +-
 mm/allocpercpu.c                  |   2 +-
 mm/percpu.c                       | 130 ++++++++++++++++++++++---
 8 files changed, 135 insertions(+), 96 deletions(-)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 85598f7da407..1602373e539c 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -64,6 +64,7 @@ SECTIONS
 		__initramfs_end = .;
 #endif
 		. = ALIGN(4096);
+		__per_cpu_load = .;
 		__per_cpu_start = .;
 			*(.data.percpu.page_aligned)
 			*(.data.percpu)
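The new __per_cpu_load symbol marks the load address of the .data.percpu section so that generic code can copy the initial static percpu data into each cpu's unit, which is exactly what the memcpy() in pcpu_embed_first_chunk() below relies on. A minimal sketch of that pattern, assuming the usual linker-provided section symbols (the helper itself is illustrative, not part of the patch):

extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];

/* Illustrative helper: copy the section's initial data, which lives at
 * the load address __per_cpu_load, into the unit belonging to @cpu. */
static void __init copy_static_percpu(void *base, unsigned int cpu,
				      size_t unit_size)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;

	memcpy(base + cpu * unit_size, __per_cpu_load, static_size);
}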
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index f45e4e508eca..3765efc5f963 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -213,17 +213,9 @@ SECTIONS
 	{ *(.data.cacheline_aligned) }
 
 	/* Per-cpu data: */
-	percpu : { } :percpu
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	__phys_per_cpu_start = .;
-	.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
-	{
-		__per_cpu_start = .;
-		*(.data.percpu.page_aligned)
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	__phys_per_cpu_start = __per_cpu_load;
 	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
 							 * into percpu page size
 							 */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 295ccc5e86b1..67f07f453385 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -181,14 +181,7 @@ SECTIONS
 		__initramfs_end = .;
 	}
 #endif
-	. = ALIGN(PAGE_SIZE);
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
-		__per_cpu_start = .;
-		*(.data.percpu.page_aligned)
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+	PERCPU(PAGE_SIZE)
 
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8f1d2fbec1d4..aee103b26d01 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -43,14 +43,6 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
-#include <asm/sections.h>
-
-#define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
-#define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
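These definitions do not disappear: identical ones become the generic default in mm/percpu.c (see that file's hunks below), with any arch still free to override them in asm/percpu.h. As a sketch of what the translation does — both macros offset an address by the delta between the percpu area base and the linker's __per_cpu_start, and they are exact inverses — here they are spelled out as functions (the function form is illustrative only; pcpu_base_addr and __per_cpu_start are the kernel's own symbols):

/* Sketch of the default translation now living in mm/percpu.c.
 * Invariant: pcpu_ptr_to_addr(addr_to_pcpu_ptr(a)) == a. */
static void *addr_to_pcpu_ptr(void *addr)
{
	return (void *)((unsigned long)addr
			- (unsigned long)pcpu_base_addr
			+ (unsigned long)__per_cpu_start);
}

static void *pcpu_ptr_to_addr(void *ptr)
{
	return (void *)((unsigned long)ptr
			+ (unsigned long)pcpu_base_addr
			- (unsigned long)__per_cpu_start);
}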
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index efa615f2bf43..400331b50a53 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -233,8 +233,8 @@ proceed:
233 "%zu bytes\n", vm.addr, static_size); 233 "%zu bytes\n", vm.addr, static_size);
234 234
235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
236 PERCPU_FIRST_CHUNK_RESERVE, 236 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
237 PMD_SIZE, dyn_size, vm.addr, NULL); 237 PMD_SIZE, vm.addr, NULL);
238 goto out_free_ar; 238 goto out_free_ar;
239 239
240enomem: 240enomem:
@@ -257,31 +257,13 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * Embedding allocator
  *
  * The first chunk is sized to just contain the static area plus
- * module and dynamic reserves, and allocated as a contiguous area
- * using bootmem allocator and used as-is without being mapped into
- * vmalloc area.  This enables the first chunk to piggy back on the
- * linear physical PMD mapping and doesn't add any additional pressure
- * to TLB.  Note that if the needed size is smaller than the minimum
- * unit size, the leftover is returned to the bootmem allocator.
+ * module and dynamic reserves and embedded into linear physical
+ * mapping so that it can use PMD mapping without additional TLB
+ * pressure.
  */
-static void *pcpue_ptr __initdata;
-static size_t pcpue_size __initdata;
-static size_t pcpue_unit_size __initdata;
-
-static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
-{
-	size_t off = (size_t)pageno << PAGE_SHIFT;
-
-	if (off >= pcpue_size)
-		return NULL;
-
-	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
-}
-
 static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
-	unsigned int cpu;
-	size_t dyn_size;
+	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
 	/*
 	 * If large page isn't supported, there's no benefit in doing
@@ -291,33 +273,8 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	if (!cpu_has_pse || pcpu_need_numa())
 		return -EINVAL;
 
-	/* allocate and copy */
-	pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-			       PERCPU_DYNAMIC_RESERVE);
-	pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-	dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
-
-	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
-				       PAGE_SIZE);
-	if (!pcpue_ptr)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu) {
-		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
-
-		free_bootmem(__pa(ptr + pcpue_size),
-			     pcpue_unit_size - pcpue_size);
-		memcpy(ptr, __per_cpu_load, static_size);
-	}
-
-	/* we're ready, commit */
-	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
-
-	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-				      PERCPU_FIRST_CHUNK_RESERVE,
-				      pcpue_unit_size, dyn_size,
-				      pcpue_ptr, NULL);
+	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
 }
 
 /*
@@ -375,8 +332,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 	       pcpu4k_nr_static_pages, static_size);
 
 	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
-				     PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL,
-				     pcpu4k_populate_pte);
+				     PERCPU_FIRST_CHUNK_RESERVE, -1,
+				     -1, NULL, pcpu4k_populate_pte);
 	goto out_free_ar;
 
 enomem:
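All three callers in this file change only because pcpu_setup_first_chunk() now takes @dyn_size before @unit_size. For clarity, the remap caller again with each argument annotated (the comments are added here for exposition, they are not in the patch):

	ret = pcpu_setup_first_chunk(pcpur_get_page,	/* get_page_fn */
				     static_size,	/* static area size */
				     PERCPU_FIRST_CHUNK_RESERVE, /* reserved */
				     dyn_size,		/* dyn_size, -1 = auto */
				     PMD_SIZE,		/* unit_size, -1 = auto */
				     vm.addr,		/* base_addr, NULL = auto */
				     NULL);		/* populate_pte_fn */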
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 54a968b4b924..ee5615d65211 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -107,10 +107,14 @@ typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
 
 extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 				size_t static_size, size_t reserved_size,
-				ssize_t unit_size, ssize_t dyn_size,
+				ssize_t dyn_size, ssize_t unit_size,
 				void *base_addr,
 				pcpu_populate_pte_fn_t populate_pte_fn);
 
+extern ssize_t __init pcpu_embed_first_chunk(
+				size_t static_size, size_t reserved_size,
+				ssize_t dyn_size, ssize_t unit_size);
+
 /*
  * Use this to get to a cpu's version of the per-cpu object
  * dynamically allocated.  Non-atomic access to the current CPU's
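A usage sketch of the new helper, modeled on the updated x86 setup_pcpu_embed() above (the wrapper itself is illustrative; the reserve constants are the ones x86 uses):

static ssize_t __init example_setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/* reserved area first, remainder as dynamic, unit size auto */
	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE,
				      -1);
}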
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 3653c570232b..1882923bc706 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -120,7 +120,7 @@ void *__alloc_percpu(size_t size, size_t align)
 	 * on it.  Larger alignment should only be used for module
 	 * percpu sections on SMP for which this path isn't used.
 	 */
-	WARN_ON_ONCE(align > __alignof__(unsigned long long));
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
 
 	if (unlikely(!pdata))
 		return NULL;
diff --git a/mm/percpu.c b/mm/percpu.c
index bfe6a3afaf45..1aa5d8fbca12 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,7 +46,8 @@
  * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
  *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
- *   regular address to percpu pointer and back
+ *   regular address to percpu pointer and back if they need to be
+ *   different from the default
  *
  * - use pcpu_setup_first_chunk() during percpu area initialization to
  *   setup the first chunk containing the kernel static percpu area
@@ -67,11 +68,24 @@
 #include <linux/workqueue.h>
 
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
 
+/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
+#ifndef __addr_to_pcpu_ptr
+#define __addr_to_pcpu_ptr(addr)					\
+	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
+		 + (unsigned long)__per_cpu_start)
+#endif
+#ifndef __pcpu_ptr_to_addr
+#define __pcpu_ptr_to_addr(ptr)						\
+	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
+		 - (unsigned long)__per_cpu_start)
+#endif
+
 struct pcpu_chunk {
 	struct list_head	list;	/* linked to pcpu_slot lists */
 	struct rb_node		rb_node;	/* key is chunk->vm->addr */
@@ -1013,8 +1027,8 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * @get_page_fn: callback to fetch page pointer
  * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
  * @base_addr: mapped address, NULL for auto
  * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
  *
@@ -1039,14 +1053,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * limited offset range for symbol relocations to guarantee module
  * percpu symbols fall inside the relocatable range.
  *
+ * @dyn_size, if non-negative, determines the number of bytes
+ * available for dynamic allocation in the first chunk.  Specifying
+ * non-negative value makes percpu leave alone the area beyond
+ * @static_size + @reserved_size + @dyn_size.
+ *
  * @unit_size, if non-negative, specifies unit size and must be
  * aligned to PAGE_SIZE and equal to or larger than @static_size +
- * @reserved_size + @dyn_size.
- *
- * @dyn_size, if non-negative, limits the number of bytes available
- * for dynamic allocation in the first chunk.  Specifying non-negative
- * value make percpu leave alone the area beyond @static_size +
- * @reserved_size + @dyn_size.
+ * @reserved_size + if non-negative, @dyn_size.
  *
  * Non-null @base_addr means that the caller already allocated virtual
  * region for the first chunk and mapped it.  percpu must not mess
@@ -1069,12 +1083,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
  */
 size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 				     size_t static_size, size_t reserved_size,
-				     ssize_t unit_size, ssize_t dyn_size,
+				     ssize_t dyn_size, ssize_t unit_size,
 				     void *base_addr,
 				     pcpu_populate_pte_fn_t populate_pte_fn)
 {
 	static struct vm_struct first_vm;
 	static int smap[2], dmap[2];
+	size_t size_sum = static_size + reserved_size +
+			  (dyn_size >= 0 ? dyn_size : 0);
 	struct pcpu_chunk *schunk, *dchunk = NULL;
 	unsigned int cpu;
 	int nr_pages;
@@ -1085,20 +1101,18 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
 	BUG_ON(!static_size);
 	if (unit_size >= 0) {
-		BUG_ON(unit_size < static_size + reserved_size +
-		       (dyn_size >= 0 ? dyn_size : 0));
+		BUG_ON(unit_size < size_sum);
 		BUG_ON(unit_size & ~PAGE_MASK);
-	} else {
-		BUG_ON(dyn_size >= 0);
+		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
+	} else
 		BUG_ON(base_addr);
-	}
 	BUG_ON(base_addr && populate_pte_fn);
 
 	if (unit_size >= 0)
 		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
 	else
 		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
-					PFN_UP(static_size + reserved_size));
+					PFN_UP(size_sum));
 
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
@@ -1224,3 +1238,89 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
 	return pcpu_unit_size;
 }
+
+/*
+ * Embedding first chunk setup helper.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+	size_t off = (size_t)pageno << PAGE_SHIFT;
+
+	if (off >= pcpue_size)
+		return NULL;
+
+	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+}
+
+/**
+ * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ *
+ * This is a helper to ease setting up embedded first percpu chunk and
+ * can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * If this function is used to setup the first chunk, it is allocated
+ * as a contiguous area using bootmem allocator and used as-is without
+ * being mapped into vmalloc area.  This enables the first chunk to
+ * piggy back on the linear physical mapping which often uses larger
+ * page size.
+ *
+ * When @dyn_size is positive, dynamic area might be larger than
+ * specified to fill page alignment.  Also, when @dyn_size is auto,
+ * @dyn_size does not fill the whole first chunk but only what's
+ * necessary for page alignment after static and reserved areas.
+ *
+ * If the needed size is smaller than the minimum or specified unit
+ * size, the leftover is returned to the bootmem allocator.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
+				      ssize_t dyn_size, ssize_t unit_size)
+{
+	unsigned int cpu;
+
+	/* determine parameters and allocate */
+	pcpue_size = PFN_ALIGN(static_size + reserved_size +
+			       (dyn_size >= 0 ? dyn_size : 0));
+	if (dyn_size != 0)
+		dyn_size = pcpue_size - static_size - reserved_size;
+
+	if (unit_size >= 0) {
+		BUG_ON(unit_size < pcpue_size);
+		pcpue_unit_size = unit_size;
+	} else
+		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+
+	pcpue_ptr = __alloc_bootmem_nopanic(
+					num_possible_cpus() * pcpue_unit_size,
+					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!pcpue_ptr)
+		return -ENOMEM;
+
+	/* return the leftover and copy */
+	for_each_possible_cpu(cpu) {
+		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+		free_bootmem(__pa(ptr + pcpue_size),
+			     pcpue_unit_size - pcpue_size);
+		memcpy(ptr, __per_cpu_load, static_size);
+	}
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+				      reserved_size, dyn_size,
+				      pcpue_unit_size, pcpue_ptr, NULL);
+}
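To make the sizing in pcpu_embed_first_chunk() concrete, a worked example with illustrative numbers (assuming 4 KB pages and a PCPU_MIN_UNIT_SIZE of 64 KB; none of these figures come from the patch):

/*
 * static_size = 98K, reserved_size = 8K, dyn_size = -1, unit_size = -1:
 *
 *   pcpue_size      = PFN_ALIGN(98K + 8K + 0) = 108K  (27 pages)
 *   dyn_size        = 108K - 98K - 8K         = 2K    (page-align leftover)
 *   pcpue_unit_size = max(108K, 64K)          = 108K
 *
 * Each possible cpu gets one 108K unit from bootmem.  Here pcpue_size
 * equals pcpue_unit_size, so free_bootmem() returns nothing; with an
 * explicit unit_size of 128K, the trailing 20K of each unit would go
 * back to the bootmem allocator instead.
 */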