Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c | 52
1 file changed, 22 insertions, 30 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 81ae63ca78d0..022d4cbb3618 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -262,7 +262,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 	phys_addr_t kernel_end, ret;
 
 	/* pump up @end */
-	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
+	    end == MEMBLOCK_ALLOC_KASAN)
 		end = memblock.current_limit;
 
 	/* avoid allocating the first page */
@@ -800,7 +801,14 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 	return memblock_remove_range(&memblock.memory, base, size);
 }
 
-
+/**
+ * memblock_free - free boot memory block
+ * @base: phys starting address of the boot memory block
+ * @size: size of the boot memory block in bytes
+ *
+ * Free boot memory block previously allocated by memblock_alloc_xx() API.
+ * The freeing memory will not be released to the buddy allocator.
+ */
 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
@@ -1412,13 +1420,15 @@ again:
 done:
 	ptr = phys_to_virt(alloc);
 
-	/*
-	 * The min_count is set to 0 so that bootmem allocated blocks
-	 * are never reported as leaks. This is because many of these blocks
-	 * are only referred via the physical address which is not
-	 * looked up by kmemleak.
-	 */
-	kmemleak_alloc(ptr, size, 0, 0);
+	/* Skip kmemleak for kasan_init() due to high volume. */
+	if (max_addr != MEMBLOCK_ALLOC_KASAN)
+		/*
+		 * The min_count is set to 0 so that bootmem allocated
+		 * blocks are never reported as leaks. This is because many
+		 * of these blocks are only referred via the physical
+		 * address which is not looked up by kmemleak.
+		 */
+		kmemleak_alloc(ptr, size, 0, 0);
 
 	return ptr;
 }
@@ -1537,24 +1547,6 @@ void * __init memblock_alloc_try_nid(
 }
 
 /**
- * __memblock_free_early - free boot memory block
- * @base: phys starting address of the boot memory block
- * @size: size of the boot memory block in bytes
- *
- * Free boot memory block previously allocated by memblock_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
- */
-void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
-{
-	phys_addr_t end = base + size - 1;
-
-	memblock_dbg("%s: [%pa-%pa] %pF\n",
-		     __func__, &base, &end, (void *)_RET_IP_);
-	kmemleak_free_part_phys(base, size);
-	memblock_remove_range(&memblock.reserved, base, size);
-}
-
-/**
  * __memblock_free_late - free bootmem block pages directly to buddy allocator
  * @base: phys starting address of the boot memory block
  * @size: size of the boot memory block in bytes
@@ -1576,7 +1568,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
 
 	for (; cursor < end; cursor++) {
 		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
-		totalram_pages++;
+		totalram_pages_inc();
 	}
 }
 
@@ -1950,7 +1942,7 @@ void reset_node_managed_pages(pg_data_t *pgdat)
 	struct zone *z;
 
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-		z->managed_pages = 0;
+		atomic_long_set(&z->managed_pages, 0);
 }
 
 void __init reset_all_zones_managed_pages(void)
@@ -1978,7 +1970,7 @@ unsigned long __init memblock_free_all(void)
 	reset_all_zones_managed_pages();
 
 	pages = free_low_memory_core_early();
-	totalram_pages += pages;
+	totalram_pages_add(pages);
 
 	return pages;
 }
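
The totalram_pages_inc()/totalram_pages_add() and atomic_long_set() calls above reflect that totalram_pages and zone->managed_pages are now kept in atomic_long_t counters behind small accessors rather than updated directly. A minimal sketch of that accessor pattern, assuming an atomic_long_t backing counter named _totalram_pages (the helper names follow the diff; the backing variable and this standalone layout are illustrative):

#include <linux/atomic.h>

/* Illustrative backing counter; the real one lives in the core mm code. */
static atomic_long_t _totalram_pages;

static inline unsigned long totalram_pages(void)
{
	/* Atomic read of the current page count. */
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	/* Account a single page, as in __memblock_free_late(). */
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	/* Account a whole batch at once, as in memblock_free_all(). */
	atomic_long_add(count, &_totalram_pages);
}

The same idea explains the reset_node_managed_pages() hunk: zone->managed_pages is such an atomic counter, so it is cleared with atomic_long_set(&z->managed_pages, 0) instead of a plain assignment.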