path: root/mm/percpu.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 22:05:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 22:05:45 -0500
commit	df32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree	7a61cf658b2949bd426285eb9902be7758ced1ba /mm/percpu.c
parent	fbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent	78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton:

 - a couple of misc things
 - inotify/fsnotify work from Jan
 - ocfs2 updates (partial)
 - about half of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm/migrate: remove unused function, fail_migrate_page()
  mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages
  mm/migrate: correct failure handling if !hugepage_migration_support()
  mm/migrate: add comment about permanent failure path
  mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure
  mm: compaction: reset scanner positions immediately when they meet
  mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
  mm: compaction: detect when scanners meet in isolate_freepages
  mm: compaction: reset cached scanner pfn's before reading them
  mm: compaction: encapsulate defer reset logic
  mm: compaction: trace compaction begin and end
  memcg, oom: lock mem_cgroup_print_oom_info
  sched: add tracepoints related to NUMA task migration
  mm: numa: do not automatically migrate KSM pages
  mm: numa: trace tasks that fail migration due to rate limiting
  mm: numa: limit scope of lock for NUMA migrate rate limiting
  mm: numa: make NUMA-migrate related functions static
  lib/show_mem.c: show num_poisoned_pages when oom
  mm/hwpoison: add '#' to hwpoison_inject
  mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter
  ...
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	38
1 file changed, 22 insertions, 16 deletions
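
The diff below mechanically converts mm/percpu.c's boot-time allocations from the bootmem interface (alloc_bootmem(), alloc_bootmem_nopanic(), __alloc_bootmem(), free_bootmem()) to the memblock interface (memblock_virt_alloc(), memblock_virt_alloc_nopanic(), memblock_virt_alloc_from_nopanic(), memblock_free_early()). A minimal sketch of that before/after pattern follows; example_early_table() and example_early_table_free() are hypothetical helpers, not code from this commit, and assume the 3.14-era wrappers declared in <linux/bootmem.h>:

#include <linux/bootmem.h>	/* memblock_virt_alloc*() and memblock_free_early() wrappers */

/* Hypothetical helper illustrating the allocation side of the conversion. */
static unsigned long * __init example_early_table(int nr_entries)
{
	unsigned long *table;

	/* was: table = alloc_bootmem(nr_entries * sizeof(table[0])); */
	table = memblock_virt_alloc(nr_entries * sizeof(table[0]), 0);
	return table;
}

/* Hypothetical helper illustrating the matching early free. */
static void __init example_early_table_free(unsigned long *table, int nr_entries)
{
	/* was: free_bootmem(__pa(table), nr_entries * sizeof(table[0])); */
	memblock_free_early(__pa(table), nr_entries * sizeof(table[0]));
}
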
diff --git a/mm/percpu.c b/mm/percpu.c
index afbf352ae580..036cfe07050f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
 		    __alignof__(ai->groups[0].cpu_map[0]));
 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
 	if (!ptr)
 		return NULL;
 	ai = ptr;
@@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  */
 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
 {
-	free_bootmem(__pa(ai), ai->__ai_size);
+	memblock_free_early(__pa(ai), ai->__ai_size);
 }
 
 /**
@@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
 	/* process group information and build config tables accordingly */
-	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
-	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
-	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
-	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+	group_offsets = memblock_virt_alloc(ai->nr_groups *
+					     sizeof(group_offsets[0]), 0);
+	group_sizes = memblock_virt_alloc(ai->nr_groups *
+					    sizeof(group_sizes[0]), 0);
+	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
+	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		unit_map[cpu] = UINT_MAX;
@@ -1311,7 +1313,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 * empty chunks.
 	 */
 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
-	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+	pcpu_slot = memblock_virt_alloc(
+			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
 	for (i = 0; i < pcpu_nr_slots; i++)
 		INIT_LIST_HEAD(&pcpu_slot[i]);
 
@@ -1322,7 +1325,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 * covers static area + reserved area (mostly used for module
 	 * static percpu allocation).
 	 */
-	schunk = alloc_bootmem(pcpu_chunk_struct_size);
+	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&schunk->list);
 	schunk->base_addr = base_addr;
 	schunk->map = smap;
@@ -1346,7 +1349,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	/* init dynamic chunk if necessary */
 	if (dyn_size) {
-		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
+		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
 		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
@@ -1626,7 +1629,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-	areas = alloc_bootmem_nopanic(areas_size);
+	areas = memblock_virt_alloc_nopanic(areas_size, 0);
 	if (!areas) {
 		rc = -ENOMEM;
 		goto out_free;
@@ -1712,7 +1715,7 @@ out_free_areas:
 out_free:
 	pcpu_free_alloc_info(ai);
 	if (areas)
-		free_bootmem(__pa(areas), areas_size);
+		memblock_free_early(__pa(areas), areas_size);
 	return rc;
 }
 #endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -1760,7 +1763,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 	/* unaligned allocations can't be freed, round up to page size */
 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
 			       sizeof(pages[0]));
-	pages = alloc_bootmem(pages_size);
+	pages = memblock_virt_alloc(pages_size, 0);
 
 	/* allocate pages */
 	j = 0;
@@ -1823,7 +1826,7 @@ enomem:
 		free_fn(page_address(pages[j]), PAGE_SIZE);
 	rc = -ENOMEM;
 out_free_ar:
-	free_bootmem(__pa(pages), pages_size);
+	memblock_free_early(__pa(pages), pages_size);
 	pcpu_free_alloc_info(ai);
 	return rc;
 }
@@ -1848,12 +1851,13 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
 				       size_t align)
 {
-	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+	return memblock_virt_alloc_from_nopanic(
+			size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
 {
-	free_bootmem(__pa(ptr), size);
+	memblock_free_early(__pa(ptr), size);
 }
 
 void __init setup_per_cpu_areas(void)
@@ -1896,7 +1900,9 @@ void __init setup_per_cpu_areas(void)
 	void *fc;
 
 	ai = pcpu_alloc_alloc_info(1, 1);
-	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	fc = memblock_virt_alloc_from_nopanic(unit_size,
+					      PAGE_SIZE,
+					      __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
 	/* kmemleak tracks the percpu allocations separately */