author	Tejun Heo <tj@kernel.org>	2009-08-14 02:00:51 -0400
committer	Tejun Heo <tj@kernel.org>	2009-08-14 02:00:51 -0400
commit	fb435d5233f8b6f9b93c11d6304d8e98fed03234 (patch)
tree	76a210c3895b9db5dc7e1f185ee0a60744fef99a /mm
parent	fd1e8a1fe2b54df6c185b4fa65f181f50b9c4d4e (diff)
percpu: add pcpu_unit_offsets[]
Currently units are mapped sequentially into address space.  This
patch adds pcpu_unit_offsets[], which allows units to be mapped to
arbitrary offsets from the chunk base address.  This is necessary to
allow sparse embedding, which might need to allocate address ranges
and memory areas which aren't aligned to unit size but to allocation
atom size (page or large page size).  This also simplifies things a
bit by removing the need to calculate offset from unit number.

With this change, there's no need for the arch code to know
pcpu_unit_size.  Update pcpu_setup_first_chunk() and first chunk
allocators to return a regular 0 or -errno return code instead of
unit size or -errno.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
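To make the new mapping concrete, here is a minimal userspace sketch (not kernel code; the chunk, table values, and scaffolding are made up for illustration) contrasting the old sequential mapping with the new offset table:

```c
#include <stdio.h>

#define NR_CPUS 4

static char chunk[0x100000];		/* stand-in for the chunk base */
static void *pcpu_base_addr = chunk;

/* old scheme: cpu -> unit, units packed back to back */
static const int pcpu_unit_map[NR_CPUS] = { 0, 1, 2, 3 };
static const unsigned long pcpu_unit_size = 0x20000;

/* new scheme: cpu -> offset from chunk base; gaps between groups
 * are fine, which is what sparse embedding needs */
static const unsigned long pcpu_unit_offsets[NR_CPUS] = {
	0x00000, 0x20000, 0x80000, 0xa0000
};

static void *cpu_addr_old(unsigned int cpu)
{
	return (char *)pcpu_base_addr + pcpu_unit_map[cpu] * pcpu_unit_size;
}

static void *cpu_addr_new(unsigned int cpu)
{
	return (char *)pcpu_base_addr + pcpu_unit_offsets[cpu];
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u: old %p new %p\n",
		       cpu, cpu_addr_old(cpu), cpu_addr_new(cpu));
	return 0;
}
```

Note how cpus 2 and 3 land past a hole in the chunk, something the old multiply-by-unit-size scheme could not express.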
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	95
1 file changed, 47 insertions(+), 48 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 99f7fa682722..653b02c40200 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -117,8 +117,8 @@ static unsigned int pcpu_last_unit_cpu __read_mostly;
 void *pcpu_base_addr __read_mostly;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
-/* cpu -> unit map */
-const int *pcpu_unit_map __read_mostly;
+static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
 
 /*
  * The first chunk which always exists.  Note that unlike other
@@ -196,8 +196,8 @@ static int pcpu_page_idx(unsigned int cpu, int page_idx)
 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
 {
-	return (unsigned long)chunk->vm->addr +
-	       (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+	return (unsigned long)chunk->vm->addr + pcpu_unit_offsets[cpu] +
+		(page_idx << PAGE_SHIFT);
 }
 
 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
@@ -341,7 +341,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
-	addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size;
+	addr += pcpu_unit_offsets[smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
@@ -1560,17 +1560,17 @@ static void pcpu_dump_alloc_info(const char *lvl,
  * and available for dynamic allocation like any other chunks.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access.
+ * 0 on success, -errno on failure.
  */
-size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
-				     void *base_addr)
+int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+				  void *base_addr)
 {
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
+	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;
@@ -1587,8 +1587,9 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
	pcpu_dump_alloc_info(KERN_DEBUG, ai);
 
-	/* determine number of units and verify and initialize pcpu_unit_map */
+	/* determine number of units and initialize unit_map and base */
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
+	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
 
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = NR_CPUS;
@@ -1606,6 +1607,8 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
		BUG_ON(unit_map[cpu] != NR_CPUS);
 
		unit_map[cpu] = unit + i;
+		unit_off[cpu] = gi->base_offset + i * ai->unit_size;
+
		if (pcpu_first_unit_cpu == NR_CPUS)
			pcpu_first_unit_cpu = cpu;
	}
@@ -1617,6 +1620,7 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
	BUG_ON(unit_map[cpu] == NR_CPUS);
 
	pcpu_unit_map = unit_map;
+	pcpu_unit_offsets = unit_off;
 
	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
@@ -1688,7 +1692,7 @@ size_t __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
	/* we're done */
	pcpu_base_addr = schunk->vm->addr;
-	return pcpu_unit_size;
+	return 0;
 }
 
 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
@@ -1748,16 +1752,15 @@ early_param("percpu_alloc", percpu_alloc_setup);
  * size, the leftover is returned to the bootmem allocator.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
+int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 {
	struct pcpu_alloc_info *ai;
	size_t size_sum, chunk_size;
	void *base;
	int unit;
-	ssize_t ret;
+	int rc;
 
	ai = pcpu_build_alloc_info(reserved_size, dyn_size, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
@@ -1773,7 +1776,7 @@ ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
	if (!base) {
		pr_warning("PERCPU: failed to allocate %zu bytes for "
			   "embedding\n", chunk_size);
-		ret = -ENOMEM;
+		rc = -ENOMEM;
		goto out_free_ai;
	}
 
@@ -1790,10 +1793,10 @@ ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
	       PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
	       ai->dyn_size, ai->unit_size);
 
-	ret = pcpu_setup_first_chunk(ai, base);
+	rc = pcpu_setup_first_chunk(ai, base);
 out_free_ai:
	pcpu_free_alloc_info(ai);
-	return ret;
+	return rc;
 }
 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
@@ -1813,13 +1816,12 @@ out_free_ai:
  * page-by-page into vmalloc area.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
-				     pcpu_fc_alloc_fn_t alloc_fn,
-				     pcpu_fc_free_fn_t free_fn,
-				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
+int __init pcpu_page_first_chunk(size_t reserved_size,
+				 pcpu_fc_alloc_fn_t alloc_fn,
+				 pcpu_fc_free_fn_t free_fn,
+				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
 {
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
@@ -1827,8 +1829,7 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
	int unit_pages;
	size_t pages_size;
	struct page **pages;
-	int unit, i, j;
-	ssize_t ret;
+	int unit, i, j, rc;
 
	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
 
@@ -1874,10 +1875,10 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
 
		/* pte already populated, the following shouldn't fail */
-		ret = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
-				       unit_pages);
-		if (ret < 0)
-			panic("failed to map percpu area, err=%zd\n", ret);
+		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
+				      unit_pages);
+		if (rc < 0)
+			panic("failed to map percpu area, err=%d\n", rc);
 
		/*
		 * FIXME: Archs with virtual cache should flush local
@@ -1896,17 +1897,17 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
	       unit_pages, psize_str, vm.addr, ai->static_size,
	       ai->reserved_size, ai->dyn_size);
 
-	ret = pcpu_setup_first_chunk(ai, vm.addr);
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;
 
 enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
-	ret = -ENOMEM;
+	rc = -ENOMEM;
 out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
-	return ret;
+	return rc;
 }
 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
 
@@ -1977,20 +1978,18 @@ static int __init pcpul_cpu_to_unit(int cpu, const struct pcpu_alloc_info *ai)
  * pcpu_lpage_remapped().
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
-				      pcpu_fc_alloc_fn_t alloc_fn,
-				      pcpu_fc_free_fn_t free_fn,
-				      pcpu_fc_map_fn_t map_fn)
+int __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
+				  pcpu_fc_alloc_fn_t alloc_fn,
+				  pcpu_fc_free_fn_t free_fn,
+				  pcpu_fc_map_fn_t map_fn)
 {
	static struct vm_struct vm;
	const size_t lpage_size = ai->atom_size;
	size_t chunk_size, map_size;
	unsigned int cpu;
-	ssize_t ret;
-	int i, j, unit, nr_units;
+	int i, j, unit, nr_units, rc;
 
	nr_units = 0;
	for (i = 0; i < ai->nr_groups; i++)
@@ -2070,7 +2069,7 @@ ssize_t __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
		vm.addr, ai->static_size, ai->reserved_size, ai->dyn_size,
		ai->unit_size);
 
-	ret = pcpu_setup_first_chunk(ai, vm.addr);
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
 
	/*
	 * Sort pcpul_map array for pcpu_lpage_remapped().  Unmapped
@@ -2094,7 +2093,7 @@ ssize_t __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
	while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
		pcpul_nr_lpages--;
 
-	return ret;
+	return rc;
 
 enomem:
	for (i = 0; i < pcpul_nr_lpages; i++)
@@ -2166,21 +2165,21 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;
+	int rc;
 
	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
-	unit_size = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
-					   PERCPU_DYNAMIC_RESERVE);
-	if (unit_size < 0)
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE);
+	if (rc < 0)
		panic("Failed to initialized percpu areas.");
 
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
-		__per_cpu_offset[cpu] = delta + cpu * unit_size;
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
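For reference, a standalone sketch of the unit_off[] arithmetic that pcpu_setup_first_chunk() performs in the hunk at lines 1606-1614 above (the two-group layout is hypothetical; in the kernel, base_offset comes from the group info in struct pcpu_alloc_info):

```c
#include <stdio.h>

/* hypothetical two-group layout: the second group starts at a
 * large-page-aligned hole, so per-cpu offsets are no longer simple
 * multiples of unit_size from the chunk base */
struct group {
	unsigned long base_offset;	/* group offset from chunk base */
	int nr_units;
	int cpus[2];			/* cpu owning each unit */
};

int main(void)
{
	const struct group groups[] = {
		{ .base_offset = 0x000000, .nr_units = 2, .cpus = { 0, 1 } },
		{ .base_offset = 0x400000, .nr_units = 2, .cpus = { 2, 3 } },
	};
	const unsigned long unit_size = 0x20000;
	unsigned long unit_off[4];

	/* mirrors: unit_off[cpu] = gi->base_offset + i * ai->unit_size */
	for (int g = 0; g < 2; g++)
		for (int i = 0; i < groups[g].nr_units; i++)
			unit_off[groups[g].cpus[i]] =
				groups[g].base_offset + i * unit_size;

	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d -> unit offset %#lx\n", cpu, unit_off[cpu]);
	return 0;
}
```

With __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu] in the final hunk, each CPU's static percpu accesses then resolve through its own offset, and arch code no longer needs a uniform pcpu_unit_size.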