author    Tejun Heo <tj@kernel.org>    2009-08-14 02:00:51 -0400
committer Tejun Heo <tj@kernel.org>    2009-08-14 02:00:51 -0400
commit    fb435d5233f8b6f9b93c11d6304d8e98fed03234 (patch)
tree      76a210c3895b9db5dc7e1f185ee0a60744fef99a
parent    fd1e8a1fe2b54df6c185b4fa65f181f50b9c4d4e (diff)
percpu: add pcpu_unit_offsets[]
Currently units are mapped sequentially into address space.  This
patch adds pcpu_unit_offsets[] which allows units to be mapped to
arbitrary offsets from the chunk base address.  This is necessary to
allow sparse embedding, which would need to allocate address ranges
and memory areas which aren't aligned to the unit size but to the
allocation atom size (page or large page size).  This also simplifies
things a bit by removing the need to calculate offset from unit
number.

With this change, there's no need for the arch code to know
pcpu_unit_size.  Update pcpu_setup_first_chunk() and first chunk
allocators to return a regular 0 or -errno return code instead of
unit size or -errno.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
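To illustrate the change, here is a minimal sketch (not part of the patch;
delta and cpu stand in for the locals of the arch setup paths below):

	/*
	 * Before: every unit sits at cpu * pcpu_unit_size from the chunk
	 * base, so the arch code has to know pcpu_unit_size.
	 */
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * pcpu_unit_size;

	/*
	 * After: mm/percpu.c records each unit's offset from the chunk
	 * base (gi->base_offset + i * ai->unit_size) in pcpu_unit_offsets[],
	 * so units may be placed sparsely and the arch code just indexes
	 * the array.
	 */
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];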
-rw-r--r--    arch/sparc/kernel/smp_64.c        12
-rw-r--r--    arch/x86/kernel/setup_percpu.c    51
-rw-r--r--    include/linux/percpu.h            16
-rw-r--r--    mm/percpu.c                       95
4 files changed, 84 insertions, 90 deletions
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a42a4a744d14..b03fd362c629 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1478,9 +1478,10 @@ void __init setup_per_cpu_areas(void)
 	static struct vm_struct vm;
 	struct pcpu_alloc_info *ai;
 	unsigned long delta, cpu;
-	size_t size_sum, pcpu_unit_size;
+	size_t size_sum;
 	size_t ptrs_size;
 	void **ptrs;
+	int rc;
 
 	ai = pcpu_alloc_alloc_info(1, nr_cpu_ids);
 
@@ -1526,14 +1527,15 @@ void __init setup_per_cpu_areas(void)
 		pcpu_map_range(start, end, virt_to_page(ptrs[cpu]));
 	}
 
-	pcpu_unit_size = pcpu_setup_first_chunk(ai, vm.addr);
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
+	if (rc)
+		panic("failed to setup percpu first chunk (%d)", rc);
 
 	free_bootmem(__pa(ptrs), ptrs_size);
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
-	for_each_possible_cpu(cpu) {
-		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
-	}
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 
 	/* Setup %g5 for the boot cpu. */
 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index db5f9c49fec5..9becc5d4b518 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -157,12 +157,12 @@ static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
 		return REMOTE_DISTANCE;
 }
 
-static ssize_t __init setup_pcpu_lpage(bool chosen)
+static int __init setup_pcpu_lpage(bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
 	struct pcpu_alloc_info *ai;
-	ssize_t ret;
+	int rc;
 
 	/* on non-NUMA, embedding is better */
 	if (!chosen && !pcpu_need_numa())
@@ -196,19 +196,18 @@ static ssize_t __init setup_pcpu_lpage(bool chosen)
 		if (tot_size > vm_size / 5) {
 			pr_info("PERCPU: too large chunk size %zuMB for "
 				"large page remap\n", tot_size >> 20);
-			ret = -EINVAL;
+			rc = -EINVAL;
 			goto out_free;
 		}
 	}
 
-	ret = pcpu_lpage_first_chunk(ai, pcpu_fc_alloc, pcpu_fc_free,
-				     pcpul_map);
+	rc = pcpu_lpage_first_chunk(ai, pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
 out_free:
 	pcpu_free_alloc_info(ai);
-	return ret;
+	return rc;
 }
 #else
-static ssize_t __init setup_pcpu_lpage(bool chosen)
+static int __init setup_pcpu_lpage(bool chosen)
 {
 	return -EINVAL;
 }
@@ -222,7 +221,7 @@ static ssize_t __init setup_pcpu_lpage(bool chosen)
  * mapping so that it can use PMD mapping without additional TLB
  * pressure.
  */
-static ssize_t __init setup_pcpu_embed(bool chosen)
+static int __init setup_pcpu_embed(bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
@@ -250,7 +249,7 @@ static void __init pcpup_populate_pte(unsigned long addr)
 	populate_extra_pte(addr);
 }
 
-static ssize_t __init setup_pcpu_page(void)
+static int __init setup_pcpu_page(void)
 {
 	return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 				     pcpu_fc_alloc, pcpu_fc_free,
@@ -274,8 +273,7 @@ void __init setup_per_cpu_areas(void)
 {
 	unsigned int cpu;
 	unsigned long delta;
-	size_t pcpu_unit_size;
-	ssize_t ret;
+	int rc;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
@@ -285,36 +283,33 @@ void __init setup_per_cpu_areas(void)
 	 * of large page mappings.  Please read comments on top of
 	 * each allocator for details.
 	 */
-	ret = -EINVAL;
+	rc = -EINVAL;
 	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
 		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
 			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
-				ret = setup_pcpu_lpage(true);
+				rc = setup_pcpu_lpage(true);
 			else
-				ret = setup_pcpu_embed(true);
+				rc = setup_pcpu_embed(true);
 
-			if (ret < 0)
-				pr_warning("PERCPU: %s allocator failed (%zd), "
-					   "falling back to page size\n",
-					   pcpu_fc_names[pcpu_chosen_fc], ret);
+			if (rc < 0)
+				pr_warning("PERCPU: %s allocator failed (%d), "
+					   "falling back to page size\n",
+					   pcpu_fc_names[pcpu_chosen_fc], rc);
 		}
 	} else {
-		ret = setup_pcpu_lpage(false);
-		if (ret < 0)
-			ret = setup_pcpu_embed(false);
+		rc = setup_pcpu_lpage(false);
+		if (rc < 0)
+			rc = setup_pcpu_embed(false);
 	}
-	if (ret < 0)
-		ret = setup_pcpu_page();
-	if (ret < 0)
-		panic("cannot initialize percpu area (err=%zd)", ret);
-
-	pcpu_unit_size = ret;
+	if (rc < 0)
+		rc = setup_pcpu_page();
+	if (rc < 0)
+		panic("cannot initialize percpu area (err=%d)", rc);
 
 	/* alrighty, percpu areas up and running */
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
-		per_cpu_offset(cpu) =
-			delta + pcpu_unit_map[cpu] * pcpu_unit_size;
+		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 77b86be8ce4f..a7ec840f596c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -57,7 +57,7 @@
 #endif
 
 extern void *pcpu_base_addr;
-extern const int *pcpu_unit_map;
+extern const unsigned long *pcpu_unit_offsets;
 
 struct pcpu_group_info {
 	int nr_units;		/* aligned # of units */
@@ -106,25 +106,23 @@ extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 				size_t atom_size,
 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
 
-extern size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
-					    void *base_addr);
+extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+					 void *base_addr);
 
 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
-extern ssize_t __init pcpu_embed_first_chunk(
-				size_t reserved_size, ssize_t dyn_size);
+extern int __init pcpu_embed_first_chunk(size_t reserved_size,
+					 ssize_t dyn_size);
 #endif
 
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-extern ssize_t __init pcpu_page_first_chunk(
-				size_t reserved_size,
+extern int __init pcpu_page_first_chunk(size_t reserved_size,
 				pcpu_fc_alloc_fn_t alloc_fn,
 				pcpu_fc_free_fn_t free_fn,
 				pcpu_fc_populate_pte_fn_t populate_pte_fn);
 #endif
 
 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
-extern ssize_t __init pcpu_lpage_first_chunk(
-				const struct pcpu_alloc_info *ai,
+extern int __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
 				pcpu_fc_alloc_fn_t alloc_fn,
 				pcpu_fc_free_fn_t free_fn,
 				pcpu_fc_map_fn_t map_fn);
diff --git a/mm/percpu.c b/mm/percpu.c
index 99f7fa682722..653b02c40200 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -117,8 +117,8 @@ static unsigned int pcpu_last_unit_cpu __read_mostly;
 void *pcpu_base_addr __read_mostly;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
-/* cpu -> unit map */
-const int *pcpu_unit_map __read_mostly;
+static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
 
 /*
  * The first chunk which always exists.  Note that unlike other
@@ -196,8 +196,8 @@ static int pcpu_page_idx(unsigned int cpu, int page_idx)
 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 				     unsigned int cpu, int page_idx)
 {
-	return (unsigned long)chunk->vm->addr +
-		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+	return (unsigned long)chunk->vm->addr + pcpu_unit_offsets[cpu] +
+		(page_idx << PAGE_SHIFT);
 }
 
 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
@@ -341,7 +341,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 	 * space.  Note that any possible cpu id can be used here, so
 	 * there's no need to worry about preemption or cpu hotplug.
 	 */
-	addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size;
+	addr += pcpu_unit_offsets[smp_processor_id()];
 	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
@@ -1560,17 +1560,17 @@ static void pcpu_dump_alloc_info(const char *lvl,
  * and available for dynamic allocation like any other chunks.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access.
+ * 0 on success, -errno on failure.
  */
-size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
-				     void *base_addr)
+int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+				  void *base_addr)
 {
 	static struct vm_struct first_vm;
 	static int smap[2], dmap[2];
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
 	struct pcpu_chunk *schunk, *dchunk = NULL;
+	unsigned long *unit_off;
 	unsigned int cpu;
 	int *unit_map;
 	int group, unit, i;
@@ -1587,8 +1587,9 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
 
-	/* determine number of units and verify and initialize pcpu_unit_map */
+	/* determine number of units and initialize unit_map and base */
 	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
+	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		unit_map[cpu] = NR_CPUS;
@@ -1606,6 +1607,8 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 			BUG_ON(unit_map[cpu] != NR_CPUS);
 
 			unit_map[cpu] = unit + i;
+			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
+
 			if (pcpu_first_unit_cpu == NR_CPUS)
 				pcpu_first_unit_cpu = cpu;
 		}
@@ -1617,6 +1620,7 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		BUG_ON(unit_map[cpu] == NR_CPUS);
 
 	pcpu_unit_map = unit_map;
+	pcpu_unit_offsets = unit_off;
 
 	/* determine basic parameters */
 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
@@ -1688,7 +1692,7 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	/* we're done */
 	pcpu_base_addr = schunk->vm->addr;
-	return pcpu_unit_size;
+	return 0;
 }
 
 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
@@ -1748,16 +1752,15 @@ early_param("percpu_alloc", percpu_alloc_setup);
  * size, the leftover is returned to the bootmem allocator.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
+int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 {
 	struct pcpu_alloc_info *ai;
 	size_t size_sum, chunk_size;
 	void *base;
 	int unit;
-	ssize_t ret;
+	int rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, PAGE_SIZE, NULL);
 	if (IS_ERR(ai))
@@ -1773,7 +1776,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 	if (!base) {
 		pr_warning("PERCPU: failed to allocate %zu bytes for "
 			   "embedding\n", chunk_size);
-		ret = -ENOMEM;
+		rc = -ENOMEM;
 		goto out_free_ai;
 	}
 
@@ -1790,10 +1793,10 @@ ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
 		ai->dyn_size, ai->unit_size);
 
-	ret = pcpu_setup_first_chunk(ai, base);
+	rc = pcpu_setup_first_chunk(ai, base);
 out_free_ai:
 	pcpu_free_alloc_info(ai);
-	return ret;
+	return rc;
 }
 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
 	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
@@ -1813,13 +1816,12 @@ out_free_ai:
  * page-by-page into vmalloc area.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
-				     pcpu_fc_alloc_fn_t alloc_fn,
-				     pcpu_fc_free_fn_t free_fn,
-				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
+int __init pcpu_page_first_chunk(size_t reserved_size,
+				 pcpu_fc_alloc_fn_t alloc_fn,
+				 pcpu_fc_free_fn_t free_fn,
+				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
 {
 	static struct vm_struct vm;
 	struct pcpu_alloc_info *ai;
@@ -1827,8 +1829,7 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
 	int unit_pages;
 	size_t pages_size;
 	struct page **pages;
-	int unit, i, j;
-	ssize_t ret;
+	int unit, i, j, rc;
 
 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
 
@@ -1874,10 +1875,10 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
 
 		/* pte already populated, the following shouldn't fail */
-		ret = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
-				       unit_pages);
-		if (ret < 0)
-			panic("failed to map percpu area, err=%zd\n", ret);
+		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
+				      unit_pages);
+		if (rc < 0)
+			panic("failed to map percpu area, err=%d\n", rc);
 
 		/*
 		 * FIXME: Archs with virtual cache should flush local
@@ -1896,17 +1897,17 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
 		unit_pages, psize_str, vm.addr, ai->static_size,
 		ai->reserved_size, ai->dyn_size);
 
-	ret = pcpu_setup_first_chunk(ai, vm.addr);
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
 	goto out_free_ar;
 
 enomem:
 	while (--j >= 0)
 		free_fn(page_address(pages[j]), PAGE_SIZE);
-	ret = -ENOMEM;
+	rc = -ENOMEM;
 out_free_ar:
 	free_bootmem(__pa(pages), pages_size);
 	pcpu_free_alloc_info(ai);
-	return ret;
+	return rc;
 }
 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
 
@@ -1977,20 +1978,18 @@ static int __init pcpul_cpu_to_unit(int cpu, const struct pcpu_alloc_info *ai)
  * pcpu_lpage_remapped().
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
-				      pcpu_fc_alloc_fn_t alloc_fn,
-				      pcpu_fc_free_fn_t free_fn,
-				      pcpu_fc_map_fn_t map_fn)
+int __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
+				  pcpu_fc_alloc_fn_t alloc_fn,
+				  pcpu_fc_free_fn_t free_fn,
+				  pcpu_fc_map_fn_t map_fn)
 {
 	static struct vm_struct vm;
 	const size_t lpage_size = ai->atom_size;
 	size_t chunk_size, map_size;
 	unsigned int cpu;
-	ssize_t ret;
-	int i, j, unit, nr_units;
+	int i, j, unit, nr_units, rc;
 
 	nr_units = 0;
 	for (i = 0; i < ai->nr_groups; i++)
@@ -2070,7 +2069,7 @@ ssize_t __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
 		vm.addr, ai->static_size, ai->reserved_size, ai->dyn_size,
 		ai->unit_size);
 
-	ret = pcpu_setup_first_chunk(ai, vm.addr);
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
 
 	/*
 	 * Sort pcpul_map array for pcpu_lpage_remapped().  Unmapped
@@ -2094,7 +2093,7 @@ ssize_t __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
 	while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
 		pcpul_nr_lpages--;
 
-	return ret;
+	return rc;
 
 enomem:
 	for (i = 0; i < pcpul_nr_lpages; i++)
@@ -2166,21 +2165,21 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t unit_size;
 	unsigned long delta;
 	unsigned int cpu;
+	int rc;
 
 	/*
 	 * Always reserve area for module percpu variables.  That's
 	 * what the legacy allocator did.
 	 */
-	unit_size = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
-					   PERCPU_DYNAMIC_RESERVE);
-	if (unit_size < 0)
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE);
+	if (rc < 0)
 		panic("Failed to initialized percpu areas.");
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu)
-		__per_cpu_offset[cpu] = delta + cpu * unit_size;
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */