author	Santosh Shilimkar <santosh.shilimkar@ti.com>	2014-01-21 18:50:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:47 -0500
commit	999c17e3de4855af4e829c0871ad32fc76a93991 (patch)
tree	5cf6df6899637182bca35591c6e5812b75c20002 /mm
parent	0d036e9e33df8befa9348683ba68258fee7f0a00 (diff)
mm/percpu.c: use memblock apis for early memory allocations
Switch the early memory allocator from the bootmem allocator to the memblock interfaces. There is no functional change in behavior from the bootmem users' point of view. Architectures already converted to NO_BOOTMEM now use the memblock interfaces directly, instead of the bootmem wrappers built on top of memblock; on architectures that still use bootmem, these new APIs simply fall back to the existing bootmem APIs.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
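The conversion is mechanical: each alloc_bootmem*() call becomes the matching memblock_virt_alloc*() wrapper, and each free_bootmem() becomes memblock_free_early(). A minimal sketch of the pattern follows; pcpu_mem_example() and pcpu_mem_example_free() are hypothetical helpers shown only to illustrate the call-site change, assuming the memblock_virt_alloc*() wrappers introduced earlier in this series:

	#include <linux/bootmem.h>	/* memblock_virt_alloc*() wrappers */
	#include <linux/memblock.h>	/* memblock_free_early() */

	/* Hypothetical helper: allocate an early (boot-time) buffer. */
	static void * __init pcpu_mem_example(size_t size)
	{
		/*
		 * Before: return alloc_bootmem(size);
		 * An align of 0 requests the default SMP_CACHE_BYTES
		 * alignment, and like alloc_bootmem() this variant
		 * panics if the allocation cannot be satisfied.
		 */
		return memblock_virt_alloc(size, 0);
	}

	/* Hypothetical helper: free an early buffer by physical address. */
	static void __init pcpu_mem_example_free(void *ptr, size_t size)
	{
		/* Before: free_bootmem(__pa(ptr), size); */
		memblock_free_early(__pa(ptr), size);
	}

The _nopanic variants return NULL on failure instead of panicking, and memblock_virt_alloc_from_nopanic() additionally takes a minimum physical address (here __pa(MAX_DMA_ADDRESS)), mirroring __alloc_bootmem_nopanic().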
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 0d10defe951e..65fd8a749712 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
 			    __alignof__(ai->groups[0].cpu_map[0]));
 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
 	if (!ptr)
 		return NULL;
 	ai = ptr;
@@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  */
 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
 {
-	free_bootmem(__pa(ai), ai->__ai_size);
+	memblock_free_early(__pa(ai), ai->__ai_size);
 }
 
 /**
@@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
 	/* process group information and build config tables accordingly */
-	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
-	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
-	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
-	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+	group_offsets = memblock_virt_alloc(ai->nr_groups *
+					     sizeof(group_offsets[0]), 0);
+	group_sizes = memblock_virt_alloc(ai->nr_groups *
+					   sizeof(group_sizes[0]), 0);
+	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
+	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		unit_map[cpu] = UINT_MAX;
@@ -1311,7 +1313,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 * empty chunks.
 	 */
 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
-	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+	pcpu_slot = memblock_virt_alloc(
+			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
 	for (i = 0; i < pcpu_nr_slots; i++)
 		INIT_LIST_HEAD(&pcpu_slot[i]);
 
@@ -1322,7 +1325,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 * covers static area + reserved area (mostly used for module
 	 * static percpu allocation).
 	 */
-	schunk = alloc_bootmem(pcpu_chunk_struct_size);
+	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&schunk->list);
 	schunk->base_addr = base_addr;
 	schunk->map = smap;
@@ -1346,7 +1349,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	/* init dynamic chunk if necessary */
 	if (dyn_size) {
-		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
+		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
 		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
@@ -1626,7 +1629,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-	areas = alloc_bootmem_nopanic(areas_size);
+	areas = memblock_virt_alloc_nopanic(areas_size, 0);
 	if (!areas) {
 		rc = -ENOMEM;
 		goto out_free;
@@ -1712,7 +1715,7 @@ out_free_areas:
 out_free:
 	pcpu_free_alloc_info(ai);
 	if (areas)
-		free_bootmem(__pa(areas), areas_size);
+		memblock_free_early(__pa(areas), areas_size);
 	return rc;
 }
 #endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -1760,7 +1763,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 	/* unaligned allocations can't be freed, round up to page size */
 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
 			       sizeof(pages[0]));
-	pages = alloc_bootmem(pages_size);
+	pages = memblock_virt_alloc(pages_size, 0);
 
 	/* allocate pages */
 	j = 0;
@@ -1823,7 +1826,7 @@ enomem:
 		free_fn(page_address(pages[j]), PAGE_SIZE);
 	rc = -ENOMEM;
 out_free_ar:
-	free_bootmem(__pa(pages), pages_size);
+	memblock_free_early(__pa(pages), pages_size);
 	pcpu_free_alloc_info(ai);
 	return rc;
 }
@@ -1848,12 +1851,13 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
 				       size_t align)
 {
-	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+	return memblock_virt_alloc_from_nopanic(
+			size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
 {
-	free_bootmem(__pa(ptr), size);
+	memblock_free_early(__pa(ptr), size);
 }
 
 void __init setup_per_cpu_areas(void)
@@ -1896,7 +1900,9 @@ void __init setup_per_cpu_areas(void)
 	void *fc;
 
 	ai = pcpu_alloc_alloc_info(1, 1);
-	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	fc = memblock_virt_alloc_from_nopanic(unit_size,
+					      PAGE_SIZE,
+					      __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
 	/* kmemleak tracks the percpu allocations separately */