Diffstat (limited to 'mm')

 -rw-r--r--  mm/gup.c              7
 -rw-r--r--  mm/hmm.c             13
 -rw-r--r--  mm/memory.c          16
 -rw-r--r--  mm/memory_hotplug.c  39
 -rw-r--r--  mm/page_alloc.c       6
 -rw-r--r--  mm/sparse-vmemmap.c  67
 -rw-r--r--  mm/sparse.c          43

7 files changed, 96 insertions(+), 95 deletions(-)
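The common thread across these files is that the struct vmem_altmap describing device-backed struct pages is now passed down the memory-hotplug call chain explicitly, instead of each callee deriving it from the pfn with to_vmem_altmap(). A hedged sketch of the calling convention before and after, with signatures abridged from the hunks below:

        /* before: callees looked the altmap up from the start pfn */
        altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));

        /* after: the owner of the device memory passes it in; ordinary
         * hotplug and boot-time callers simply pass NULL */
        err = __add_pages(nid, phys_start_pfn, nr_pages, altmap, want_memblock);
        ret = __remove_pages(zone, start_pfn, nr_pages, altmap);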
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1410,7 +1410,6 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 
-		put_dev_pagemap(pgmap);
 		SetPageReferenced(page);
 		pages[*nr] = page;
 		(*nr)++;
@@ -1420,6 +1419,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 	ret = 1;
 
 pte_unmap:
+	if (pgmap)
+		put_dev_pagemap(pgmap);
 	pte_unmap(ptem);
 	return ret;
 }
@@ -1459,10 +1460,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 		SetPageReferenced(page);
 		pages[*nr] = page;
 		get_page(page);
-		put_dev_pagemap(pgmap);
 		(*nr)++;
 		pfn++;
 	} while (addr += PAGE_SIZE, addr != end);
+
+	if (pgmap)
+		put_dev_pagemap(pgmap);
 	return 1;
 }
 
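The two gup hunks above also change the dev_pagemap reference discipline: instead of dropping a reference per page inside the loop, a single cached reference is carried across iterations and released once on the exit path. A minimal sketch of the resulting pattern, assuming get_dev_pagemap(pfn, pgmap) reuses the cached map while the pfn still falls inside it and only re-acquires on a miss:

        struct dev_pagemap *pgmap = NULL;

        do {
                /* revalidate or re-acquire the cached pagemap (abridged) */
                pgmap = get_dev_pagemap(pfn, pgmap);
                /* ... record the page ... */
        } while (addr += PAGE_SIZE, addr != end);

        if (pgmap)
                put_dev_pagemap(pgmap);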
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -838,10 +838,10 @@ static void hmm_devmem_release(struct device *dev, void *data)
 
 	mem_hotplug_begin();
 	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
-		__remove_pages(zone, start_pfn, npages);
+		__remove_pages(zone, start_pfn, npages, NULL);
 	else
 		arch_remove_memory(start_pfn << PAGE_SHIFT,
-				   npages << PAGE_SHIFT);
+				   npages << PAGE_SHIFT, NULL);
 	mem_hotplug_done();
 
 	hmm_devmem_radix_release(resource);
@@ -882,7 +882,7 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
 	else
 		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
 
-	devmem->pagemap.res = devmem->resource;
+	devmem->pagemap.res = *devmem->resource;
 	devmem->pagemap.page_fault = hmm_devmem_fault;
 	devmem->pagemap.page_free = hmm_devmem_free;
 	devmem->pagemap.dev = devmem->device;
@@ -931,17 +931,18 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
 	 * want the linear mapping and thus use arch_add_memory().
 	 */
 	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
-		ret = arch_add_memory(nid, align_start, align_size, false);
+		ret = arch_add_memory(nid, align_start, align_size, NULL,
+				false);
 	else
 		ret = add_pages(nid, align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT, false);
+				align_size >> PAGE_SHIFT, NULL, false);
 	if (ret) {
 		mem_hotplug_done();
 		goto error_add_memory;
 	}
 	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT);
+				align_size >> PAGE_SHIFT, NULL);
 	mem_hotplug_done();
 
 	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
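Two things happen in the hmm.c hunks: pagemap.res now holds a copy of the resource rather than a pointer (hence the *devmem->resource dereference), and every hotplug call gains a NULL altmap argument, since HMM never carves its memmap out of the device memory itself. A minimal sketch of the updated teardown shape under that assumption, with a hypothetical helper name:

        static void release_device_range(struct zone *zone,
                        unsigned long start_pfn, unsigned long npages)
        {
                mem_hotplug_begin();
                /* no altmap reservation to account for, hence NULL */
                __remove_pages(zone, start_pfn, npages, NULL);
                mem_hotplug_done();
        }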
diff --git a/mm/memory.c b/mm/memory.c
index ca5674cbaff2..46b6c33b7f04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1897,12 +1897,26 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_pfn_prot);
 
+static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
+{
+	/* these checks mirror the abort conditions in vm_normal_page */
+	if (vma->vm_flags & VM_MIXEDMAP)
+		return true;
+	if (pfn_t_devmap(pfn))
+		return true;
+	if (pfn_t_special(pfn))
+		return true;
+	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
+		return true;
+	return false;
+}
+
 static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn, bool mkwrite)
 {
 	pgprot_t pgprot = vma->vm_page_prot;
 
-	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+	BUG_ON(!vm_mixed_ok(vma, pfn));
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
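vm_mixed_ok() relaxes the old VM_MIXEDMAP-only assertion: a pfn may also be inserted when the resulting pte is self-describing enough (devmap, special, or the zero pfn) for vm_normal_page() to skip it later. A hedged usage sketch; the flag choice is an assumption about a typical vm_insert_mixed() caller, not taken from this diff:

        /* a device driver tags its pfns as devmap, so pfn_t_devmap() is
         * true and the insert passes vm_mixed_ok() even without
         * VM_MIXEDMAP on the vma */
        pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

        err = vm_insert_mixed(vma, vaddr, pfn);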
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c52aa05b106c..12df8a5fadcc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -250,7 +250,7 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
 static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
-		bool want_memblock)
+		struct vmem_altmap *altmap, bool want_memblock)
 {
 	int ret;
 	int i;
@@ -258,7 +258,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
 	if (pfn_valid(phys_start_pfn))
 		return -EEXIST;
 
-	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
+	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
 	if (ret < 0)
 		return ret;
 
@@ -292,18 +292,17 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
  * add the new pages.
  */
 int __ref __add_pages(int nid, unsigned long phys_start_pfn,
-		unsigned long nr_pages, bool want_memblock)
+		unsigned long nr_pages, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	unsigned long i;
 	int err = 0;
 	int start_sec, end_sec;
-	struct vmem_altmap *altmap;
 
 	/* during initialize mem_map, align hot-added range to section */
 	start_sec = pfn_to_section_nr(phys_start_pfn);
 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 
-	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
 	if (altmap) {
 		/*
 		 * Validate altmap is within bounds of the total request
@@ -318,7 +317,8 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
 	}
 
 	for (i = start_sec; i <= end_sec; i++) {
-		err = __add_section(nid, section_nr_to_pfn(i), want_memblock);
+		err = __add_section(nid, section_nr_to_pfn(i), altmap,
+				want_memblock);
 
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
@@ -334,7 +334,6 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
 out:
 	return err;
 }
-EXPORT_SYMBOL_GPL(__add_pages);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
@@ -537,7 +536,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
 }
 
 static int __remove_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset)
+		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn;
 	int scn_nr;
@@ -554,7 +553,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
 	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
 	__remove_zone(zone, start_pfn);
 
-	sparse_remove_one_section(zone, ms, map_offset);
+	sparse_remove_one_section(zone, ms, map_offset, altmap);
 	return 0;
 }
 
@@ -570,7 +569,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
  * calling offline_pages().
  */
 int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
-		 unsigned long nr_pages)
+		 unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	unsigned long i;
 	unsigned long map_offset = 0;
@@ -578,10 +577,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 
 	/* In the ZONE_DEVICE case device driver owns the memory region */
 	if (is_dev_zone(zone)) {
-		struct page *page = pfn_to_page(phys_start_pfn);
-		struct vmem_altmap *altmap;
-
-		altmap = to_vmem_altmap((unsigned long) page);
 		if (altmap)
 			map_offset = vmem_altmap_offset(altmap);
 	} else {
@@ -612,7 +607,8 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < sections_to_remove; i++) {
 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
 
-		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
+		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
+				altmap);
 		map_offset = 0;
 		if (ret)
 			break;
@@ -802,8 +798,8 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
 }
 
-void __ref move_pfn_range_to_zone(struct zone *zone,
-		unsigned long start_pfn, unsigned long nr_pages)
+void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nid = pgdat->node_id;
@@ -828,7 +824,8 @@ void __ref move_pfn_range_to_zone(struct zone *zone,
 	 * expects the zone spans the pfn range. All the pages in the range
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
-	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
+	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+			MEMMAP_HOTPLUG, altmap);
 
 	set_zone_contiguous(zone);
 }
@@ -900,7 +897,7 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
 	struct zone *zone;
 
 	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
-	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
+	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
 	return zone;
 }
 
@@ -1149,7 +1146,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
 	}
 
 	/* call arch's memory hotadd */
-	ret = arch_add_memory(nid, start, size, true);
+	ret = arch_add_memory(nid, start, size, NULL, true);
 
 	if (ret < 0)
 		goto error;
@@ -1891,7 +1888,7 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 	memblock_free(start, size);
 	memblock_remove(start, size);
 
-	arch_remove_memory(start, size);
+	arch_remove_memory(start, size, NULL);
 
 	try_offline_node(nid);
 
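With the altmap threaded through __add_pages()/__remove_pages() (and EXPORT_SYMBOL_GPL(__add_pages) dropped, leaving add_pages() as the entry point for out-of-line users), a caller that reserves the head of the hot-added range for its own memmap would look roughly like this sketch; the field usage is an assumption based on vmem_altmap_offset() above:

        /* back the struct pages with the first part of the range itself */
        struct vmem_altmap altmap = {
                .base_pfn = phys_start_pfn,
                .reserve  = reserved_pfns,      /* pfns set aside up front */
        };

        err = __add_pages(nid, phys_start_pfn, nr_pages, &altmap, false);
        /* ... use the memory ... */

        /* on removal, the same altmap tells us how many pfns to skip */
        err = __remove_pages(zone, phys_start_pfn, nr_pages, &altmap);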
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 76c9688b6a0a..2bb7f163baca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5314,9 +5314,9 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn, enum memmap_context context)
+		unsigned long start_pfn, enum memmap_context context,
+		struct vmem_altmap *altmap)
 {
-	struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
 	unsigned long end_pfn = start_pfn + size;
 	pg_data_t *pgdat = NODE_DATA(nid);
 	unsigned long pfn;
@@ -5417,7 +5417,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
 #endif
 
 static int zone_batchsize(struct zone *zone)
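memmap_init_zone() now receives the altmap from its caller instead of recomputing it per zone. Boot-time initialization never has an altmap, which is why the memmap_init wrapper hardwires NULL; its expansion is simply:

        /* boot-time expansion of the wrapper above */
        memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);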
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 17acf01791fa..bd0276d5f66b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -74,7 +74,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 }
 
 /* need to make sure size is all the same during early stage */
-static void * __meminit alloc_block_buf(unsigned long size, int node)
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
 	void *ptr;
 
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 }
 
 /**
- * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
- * @altmap - reserved page pool for the allocation
- * @nr_pfns - size (in pages) of the allocation
+ * altmap_alloc_block_buf - allocate pages from the device page map
+ * @altmap: device page map
+ * @size: size (in bytes) of the allocation
  *
- * Allocations are aligned to the size of the request
+ * Allocations are aligned to the size of the request.
  */
-static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
-		unsigned long nr_pfns)
-{
-	unsigned long pfn = vmem_altmap_next_pfn(altmap);
-	unsigned long nr_align;
-
-	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
-	nr_align = ALIGN(pfn, nr_align) - pfn;
-
-	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
-		return ULONG_MAX;
-	altmap->alloc += nr_pfns;
-	altmap->align += nr_align;
-	return pfn + nr_align;
-}
-
-static void * __meminit altmap_alloc_block_buf(unsigned long size,
+void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
-	unsigned long pfn, nr_pfns;
-	void *ptr;
+	unsigned long pfn, nr_pfns, nr_align;
 
 	if (size & ~PAGE_MASK) {
 		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
@@ -141,25 +124,20 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size,
 		return NULL;
 	}
 
+	pfn = vmem_altmap_next_pfn(altmap);
 	nr_pfns = size >> PAGE_SHIFT;
-	pfn = vmem_altmap_alloc(altmap, nr_pfns);
-	if (pfn < ULONG_MAX)
-		ptr = __va(__pfn_to_phys(pfn));
-	else
-		ptr = NULL;
-	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
-		__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
+	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
+	nr_align = ALIGN(pfn, nr_align) - pfn;
+	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
+		return NULL;
 
-	return ptr;
-}
+	altmap->alloc += nr_pfns;
+	altmap->align += nr_align;
+	pfn += nr_align;
 
-/* need to make sure size is all the same during early stage */
-void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
-		struct vmem_altmap *altmap)
-{
-	if (altmap)
-		return altmap_alloc_block_buf(size, altmap);
-	return alloc_block_buf(size, node);
+	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
+		__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
+	return __va(__pfn_to_phys(pfn));
 }
 
 void __meminit vmemmap_verify(pte_t *pte, int node,
@@ -178,7 +156,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte)) {
 		pte_t entry;
-		void *p = alloc_block_buf(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -278,7 +256,8 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
 	return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	unsigned long start;
 	unsigned long end;
@@ -288,7 +267,7 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 	start = (unsigned long)map;
 	end = (unsigned long)(map + PAGES_PER_SECTION);
 
-	if (vmemmap_populate(start, end, nid))
+	if (vmemmap_populate(start, end, nid, altmap))
 		return NULL;
 
 	return map;
@@ -318,7 +297,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (!present_section_nr(pnum))
 			continue;
 
-		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
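Folding vmem_altmap_alloc() into altmap_alloc_block_buf() also simplifies the error convention: failure is now a NULL pointer rather than the ULONG_MAX pfn sentinel. The alignment math itself is unchanged; a worked example, rendered as a hedged standalone snippet (__builtin_ctzl stands in for find_first_bit on a single word, and the mask expression mirrors ALIGN):

        unsigned long nr_pfns = 512;    /* a 2MB request in 4K pages */
        unsigned long pfn = 100;        /* assumed next free pfn in the altmap */
        unsigned long nr_align;

        nr_align = 1UL << __builtin_ctzl(nr_pfns);                /* 512 */
        nr_align = ((pfn + nr_align - 1) & ~(nr_align - 1)) - pfn; /* 412 */
        /* the block lands at pfn + nr_align = 512; the altmap charges
         * 512 pfns to alloc and 412 to align */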
diff --git a/mm/sparse.c b/mm/sparse.c
index 2609aba121e8..2583174b1d62 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -417,7 +417,8 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	struct page *map;
 	unsigned long size;
@@ -472,7 +473,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 
 		if (!present_section_nr(pnum))
 			continue;
-		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
@@ -500,7 +501,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = sparse_mem_map_populate(pnum, nid);
+	map = sparse_mem_map_populate(pnum, nid, NULL);
 	if (map)
 		return map;
 
@@ -678,17 +679,19 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	/* This will make the necessary allocations eventually. */
-	return sparse_mem_map_populate(pnum, nid);
+	return sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, altmap);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static void free_map_bootmem(struct page *memmap)
@@ -696,7 +699,7 @@ static void free_map_bootmem(struct page *memmap)
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, NULL);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
@@ -721,12 +724,14 @@ got_map_ptr:
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
@@ -773,7 +778,8 @@ static void free_map_bootmem(struct page *memmap)
  * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
-int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
+int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+		unsigned long start_pfn, struct vmem_altmap *altmap)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section *ms;
@@ -789,12 +795,12 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 		return -ENOMEM;
 	}
 
@@ -816,7 +822,7 @@ out:
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 	}
 	return ret;
 }
@@ -843,7 +849,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
 
-static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+static void free_section_usemap(struct page *memmap, unsigned long *usemap,
+		struct vmem_altmap *altmap)
 {
 	struct page *usemap_page;
 
@@ -857,7 +864,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap);
+			__kfree_section_memmap(memmap, altmap);
 		return;
 	}
 
@@ -871,7 +878,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset)
+		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
 	unsigned long *usemap = NULL, flags;
@@ -889,7 +896,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);
-	free_section_usemap(memmap, usemap);
+	free_section_usemap(memmap, usemap, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
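Both the CONFIG_SPARSEMEM_VMEMMAP and the table-based variants of kmalloc_section_memmap()/__kfree_section_memmap() grow the altmap parameter, so sparse_add_one_section() keeps a single signature across configurations; the non-vmemmap variant simply ignores it, which is why passing a NULL altmap is always safe there:

        /* non-vmemmap configs: the altmap is accepted and ignored */
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, NULL);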
