 arch/x86/kernel/setup_percpu.c |   34 +++++++++++++++-------------------
 include/linux/percpu.h         |   18 ++++++++----------
 mm/percpu.c                    |   29 +++++++++++++----------------
 3 files changed, 36 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index b961d99e6416..8aad486c688f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -157,7 +157,7 @@ static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
 		return REMOTE_DISTANCE;
 }
 
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
+static ssize_t __init setup_pcpu_lpage(bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
@@ -184,8 +184,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 		return -ENOMEM;
 	}
 
-	ret = pcpu_lpage_build_unit_map(static_size,
-					PERCPU_FIRST_CHUNK_RESERVE,
+	ret = pcpu_lpage_build_unit_map(PERCPU_FIRST_CHUNK_RESERVE,
 					&dyn_size, &unit_size, PMD_SIZE,
 					unit_map, pcpu_lpage_cpu_distance);
 	if (ret < 0) {
@@ -208,9 +207,8 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 		}
 	}
 
-	ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
-				     dyn_size, unit_size, PMD_SIZE,
-				     unit_map, nr_units,
+	ret = pcpu_lpage_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
+				     unit_size, PMD_SIZE, unit_map, nr_units,
 				     pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
 out_free:
 	if (ret < 0)
@@ -218,7 +216,7 @@ out_free:
 	return ret;
 }
 #else
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
+static ssize_t __init setup_pcpu_lpage(bool chosen)
 {
 	return -EINVAL;
 }
@@ -232,7 +230,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
  * mapping so that it can use PMD mapping without additional TLB
  * pressure.
  */
-static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
+static ssize_t __init setup_pcpu_embed(bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
@@ -244,7 +242,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
 	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
 		return -EINVAL;
 
-	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+	return pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
 }
 
@@ -260,9 +258,9 @@ static void __init pcpup_populate_pte(unsigned long addr)
 	populate_extra_pte(addr);
 }
 
-static ssize_t __init setup_pcpu_page(size_t static_size)
+static ssize_t __init setup_pcpu_page(void)
 {
-	return pcpu_page_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+	return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 				     pcpu_fc_alloc, pcpu_fc_free,
 				     pcpup_populate_pte);
 }
@@ -282,7 +280,6 @@ static inline void setup_percpu_segment(int cpu)
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t static_size = __per_cpu_end - __per_cpu_start;
 	unsigned int cpu;
 	unsigned long delta;
 	size_t pcpu_unit_size;
@@ -300,9 +297,9 @@ void __init setup_per_cpu_areas(void)
 	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
 		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
 			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
-				ret = setup_pcpu_lpage(static_size, true);
+				ret = setup_pcpu_lpage(true);
 			else
-				ret = setup_pcpu_embed(static_size, true);
+				ret = setup_pcpu_embed(true);
 
 			if (ret < 0)
 				pr_warning("PERCPU: %s allocator failed (%zd), "
@@ -310,15 +307,14 @@ void __init setup_per_cpu_areas(void)
 				   pcpu_fc_names[pcpu_chosen_fc], ret);
 		}
 	} else {
-		ret = setup_pcpu_lpage(static_size, false);
+		ret = setup_pcpu_lpage(false);
 		if (ret < 0)
-			ret = setup_pcpu_embed(static_size, false);
+			ret = setup_pcpu_embed(false);
 	}
 	if (ret < 0)
-		ret = setup_pcpu_page(static_size);
+		ret = setup_pcpu_page();
 	if (ret < 0)
-		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
-		      static_size, ret);
+		panic("cannot initialize percpu area (err=%zd)", ret);
 
 	pcpu_unit_size = ret;
 
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9be05cbe5ee0..be2fc8fb9b6f 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -84,13 +84,12 @@ extern size_t __init pcpu_setup_first_chunk(
 
 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
 extern ssize_t __init pcpu_embed_first_chunk(
-				size_t static_size, size_t reserved_size,
-				ssize_t dyn_size);
+				size_t reserved_size, ssize_t dyn_size);
 #endif
 
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 extern ssize_t __init pcpu_page_first_chunk(
-				size_t static_size, size_t reserved_size,
+				size_t reserved_size,
 				pcpu_fc_alloc_fn_t alloc_fn,
 				pcpu_fc_free_fn_t free_fn,
 				pcpu_fc_populate_pte_fn_t populate_pte_fn);
@@ -98,16 +97,15 @@ extern ssize_t __init pcpu_page_first_chunk(
 
 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
 extern int __init pcpu_lpage_build_unit_map(
-				size_t static_size, size_t reserved_size,
-				ssize_t *dyn_sizep, size_t *unit_sizep,
-				size_t lpage_size, int *unit_map,
+				size_t reserved_size, ssize_t *dyn_sizep,
+				size_t *unit_sizep, size_t lpage_size,
+				int *unit_map,
 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
 
 extern ssize_t __init pcpu_lpage_first_chunk(
-				size_t static_size, size_t reserved_size,
-				size_t dyn_size, size_t unit_size,
-				size_t lpage_size, const int *unit_map,
-				int nr_units,
+				size_t reserved_size, size_t dyn_size,
+				size_t unit_size, size_t lpage_size,
+				const int *unit_map, int nr_units,
 				pcpu_fc_alloc_fn_t alloc_fn,
 				pcpu_fc_free_fn_t free_fn,
 				pcpu_fc_map_fn_t map_fn);
diff --git a/mm/percpu.c b/mm/percpu.c
index 7fb40bb1555a..e2ac58a39bb2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1464,7 +1464,6 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 /**
  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
  *
@@ -1489,9 +1488,9 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
-				      ssize_t dyn_size)
+ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 {
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	size_t size_sum, unit_size, chunk_size;
 	void *base;
 	unsigned int cpu;
@@ -1536,7 +1535,6 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 /**
  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
@@ -1552,12 +1550,13 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
+ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
 				     pcpu_fc_alloc_fn_t alloc_fn,
 				     pcpu_fc_free_fn_t free_fn,
 				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
 {
 	static struct vm_struct vm;
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	char psize_str[16];
 	int unit_pages;
 	size_t pages_size;
@@ -1641,7 +1640,6 @@ out_free_ar:
 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
 /**
  * pcpu_lpage_build_unit_map - build unit_map for large page remapping
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_sizep: in/out parameter for dynamic size, -1 for auto
  * @unit_sizep: out parameter for unit size
@@ -1661,13 +1659,14 @@ out_free_ar:
  * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and
  * returns the number of units to be allocated. -errno on failure.
  */
-int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size,
-				     ssize_t *dyn_sizep, size_t *unit_sizep,
-				     size_t lpage_size, int *unit_map,
+int __init pcpu_lpage_build_unit_map(size_t reserved_size, ssize_t *dyn_sizep,
+				     size_t *unit_sizep, size_t lpage_size,
+				     int *unit_map,
 				     pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
 {
 	static int group_map[NR_CPUS] __initdata;
 	static int group_cnt[NR_CPUS] __initdata;
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	int group_cnt_max = 0;
 	size_t size_sum, min_unit_size, alloc_size;
 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
@@ -1819,7 +1818,6 @@ static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
 
 /**
  * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes
  * @unit_size: unit size in bytes
@@ -1850,15 +1848,15 @@ static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
-				      size_t dyn_size, size_t unit_size,
-				      size_t lpage_size, const int *unit_map,
-				      int nr_units,
+ssize_t __init pcpu_lpage_first_chunk(size_t reserved_size, size_t dyn_size,
+				      size_t unit_size, size_t lpage_size,
+				      const int *unit_map, int nr_units,
 				      pcpu_fc_alloc_fn_t alloc_fn,
 				      pcpu_fc_free_fn_t free_fn,
 				      pcpu_fc_map_fn_t map_fn)
 {
 	static struct vm_struct vm;
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	size_t chunk_size = unit_size * nr_units;
 	size_t map_size;
 	unsigned int cpu;
@@ -2037,7 +2035,6 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t static_size = __per_cpu_end - __per_cpu_start;
 	ssize_t unit_size;
 	unsigned long delta;
 	unsigned int cpu;
@@ -2046,7 +2043,7 @@ void __init setup_per_cpu_areas(void)
 	 * Always reserve area for module percpu variables. That's
 	 * what the legacy allocator did.
 	 */
-	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
+	unit_size = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 					   PERCPU_DYNAMIC_RESERVE);
 	if (unit_size < 0)
 		panic("Failed to initialized percpu areas.");