author		Ingo Molnar <mingo@elte.hu>	2009-01-18 12:37:14 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-18 12:37:14 -0500
commit		b2b062b8163391c42b3219d466ca1ac9742b9c7b
tree		f3f920c09b8de694b1bc1d4b878cfd2b0b98c913
parent		a9de18eb761f7c1c860964b2e5addc1a35c7e861
parent		99937d6455cea95405ac681c86a857d0fcd530bd
Merge branch 'core/percpu' into stackprotector
Conflicts:
	arch/x86/include/asm/pda.h
	arch/x86/include/asm/system.h

Also, moved include/asm-x86/stackprotector.h to arch/x86/include/asm.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/fault.c		26
-rw-r--r--	arch/x86/mm/init_32.c		13
-rw-r--r--	arch/x86/mm/init_64.c		4
-rw-r--r--	arch/x86/mm/k8topology_64.c	20
-rw-r--r--	arch/x86/mm/numa_32.c		4
-rw-r--r--	arch/x86/mm/numa_64.c		4
-rw-r--r--	arch/x86/mm/pageattr.c		10
-rw-r--r--	arch/x86/mm/pat.c		127
-rw-r--r--	arch/x86/mm/srat_64.c		2
9 files changed, 127 insertions(+), 83 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4c056b5d6a95..37242c405f16 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -535,7 +535,7 @@ static int vmalloc_fault(unsigned long address)
 	   happen within a race in page table update. In the later
 	   case just flush. */
 
-	pgd = pgd_offset(current->mm ?: &init_mm, address);
+	pgd = pgd_offset(current->active_mm, address);
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
@@ -670,7 +670,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
-again:
 	/*
 	 * When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
@@ -866,25 +865,14 @@ no_context:
 	oops_end(flags, regs, sig);
 #endif
 
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
 out_of_memory:
+	/*
+	 * We ran out of memory, call the OOM killer, and return the userspace
+	 * (which will retry the fault, or kill us if we got oom-killed).
+	 */
 	up_read(&mm->mmap_sem);
-	if (is_global_init(tsk)) {
-		yield();
-		/*
-		 * Re-lookup the vma - in theory the vma tree might
-		 * have changed:
-		 */
-		goto again;
-	}
-
-	printk("VM: killing process %s\n", tsk->comm);
-	if (error_code & PF_USER)
-		do_group_exit(SIGKILL);
-	goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
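
For orientation, the out_of_memory path in do_page_fault() after this change reduces to a handful of lines (a condensed reading of the hunk above; the comment gloss is editorial, not from the patch):

out_of_memory:
	up_read(&mm->mmap_sem);
	/*
	 * Hand the problem to the generic OOM machinery: it invokes the
	 * OOM killer (or waits for an ongoing kill), and userspace then
	 * simply retries the faulting access.
	 */
	pagefault_out_of_memory();
	return;

With the is_global_init() retry loop gone, the again: label earlier in the function has no remaining users, which is why the second hunk deletes it.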
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8655b5bb0963..4a6989e47a53 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -49,7 +49,6 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
-#include <asm/smp.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -328,6 +327,8 @@ int devmem_is_allowed(unsigned long pagenr)
 {
 	if (pagenr <= 256)
 		return 1;
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+		return 0;
 	if (!page_is_ram(pagenr))
 		return 1;
 	return 0;
@@ -435,8 +436,12 @@ static void __init set_highmem_pages_init(void)
 #endif /* !CONFIG_NUMA */
 
 #else
-# define permanent_kmaps_init(pgd_base) do { } while (0)
-# define set_highmem_pages_init() do { } while (0)
+static inline void permanent_kmaps_init(pgd_t *pgd_base)
+{
+}
+static inline void set_highmem_pages_init(void)
+{
+}
 #endif /* CONFIG_HIGHMEM */
 
 void __init native_pagetable_setup_start(pgd_t *base)
@@ -1075,7 +1080,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	return __add_pages(zone, start_pfn, nr_pages);
+	return __add_pages(nid, zone, start_pfn, nr_pages);
 }
 #endif
 
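
Both devmem_is_allowed() hunks (this one and the identical change in init_64.c below) insert the iomem_is_exclusive() test between the legacy low-1MB allowance and the non-RAM allowance, so the resulting decision order is (comment glosses are editorial):

int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)	/* low 1MB: always allowed */
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;	/* region claimed exclusively by a driver */
	if (!page_is_ram(pagenr))
		return 1;	/* non-RAM (e.g. MMIO) is allowed */
	return 0;		/* ordinary RAM stays off-limits */
}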
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9f7a0d24d42a..23f68e77ad1f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -857,7 +857,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
 
-	ret = __add_pages(zone, start_pfn, nr_pages);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 	WARN_ON_ONCE(ret);
 
 	return ret;
@@ -888,6 +888,8 @@ int devmem_is_allowed(unsigned long pagenr)
 {
 	if (pagenr <= 256)
 		return 1;
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+		return 0;
 	if (!page_is_ram(pagenr))
 		return 1;
 	return 0;
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 41f1b5c00a1d..268f8255280f 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -81,7 +81,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	unsigned numnodes, cores, bits, apicid_base;
 	unsigned long prevbase;
 	struct bootnode nodes[8];
-	unsigned char nodeids[8];
 	int i, j, nb, found = 0;
 	u32 nodeid, reg;
 
@@ -110,7 +109,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
 		nodeid = limit & 7;
-		nodeids[i] = nodeid;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				printk("Skipping disabled node %d\n", i);
@@ -179,9 +177,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 
 		nodes[nodeid].start = base;
 		nodes[nodeid].end = limit;
-		e820_register_active_regions(nodeid,
-				nodes[nodeid].start >> PAGE_SHIFT,
-				nodes[nodeid].end >> PAGE_SHIFT);
 
 		prevbase = base;
 
@@ -211,12 +206,15 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	}
 
 	for (i = 0; i < 8; i++) {
-		if (nodes[i].start != nodes[i].end) {
-			nodeid = nodeids[i];
-			for (j = apicid_base; j < cores + apicid_base; j++)
-				apicid_to_node[(nodeid << bits) + j] = i;
-			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-		}
+		if (nodes[i].start == nodes[i].end)
+			continue;
+
+		e820_register_active_regions(i,
+			nodes[i].start >> PAGE_SHIFT,
+			nodes[i].end >> PAGE_SHIFT);
+		for (j = apicid_base; j < cores + apicid_base; j++)
+			apicid_to_node[(i << bits) + j] = i;
+		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
 
 	numa_init_array();
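
Net effect of the k8topology_64.c hunks: the nodeids[] scratch array goes away, e820_register_active_regions() moves from the PCI scan loop into the final per-node loop, and that loop trades its nested if for an early continue. The resulting loop, read off the new side above (comment gloss is editorial):

	for (i = 0; i < 8; i++) {
		if (nodes[i].start == nodes[i].end)
			continue;	/* slot empty or node disabled */

		e820_register_active_regions(i,
			nodes[i].start >> PAGE_SHIFT,
			nodes[i].end >> PAGE_SHIFT);
		for (j = apicid_base; j < cores + apicid_base; j++)
			apicid_to_node[(i << bits) + j] = i;
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}

Note that apicid_to_node is now indexed with the array slot i rather than the saved hardware nodeid; the old code kept the two distinct via nodeids[i].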
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 8518c678d83f..d1f7439d173c 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -239,7 +239,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 		start_pfn = node_remap_start_pfn[node];
 		size = node_remap_size[node];
 
-		printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+		printk(KERN_DEBUG "%s: node %d\n", __func__, node);
 
 		for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
 			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
@@ -251,7 +251,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 						PAGE_KERNEL_LARGE_EXEC));
 
 			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
-				__FUNCTION__, vaddr, start_pfn + pfn);
+				__func__, vaddr, start_pfn + pfn);
 		}
 	}
 }
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index cebcbf152d46..71a14f89f89e 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -278,7 +278,7 @@ void __init numa_init_array(void)
 	int rr, i;
 
 	rr = first_node(node_online_map);
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
 	memnodemap[0] = 0;
 	node_set_online(0);
 	node_set(0, node_possible_map);
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
 	e820_register_active_regions(0, start_pfn, last_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..4cf30dee8161 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -555,10 +555,12 @@ repeat:
 	if (!pte_val(old_pte)) {
 		if (!primary)
 			return 0;
-		WARN(1, KERN_WARNING "CPA: called for zero pte. "
-		       "vaddr = %lx cpa->vaddr = %lx\n", address,
-		       *cpa->vaddr);
-		return -EINVAL;
+
+		/*
+		 * Special error value returned, indicating that the mapping
+		 * did not exist at this address.
+		 */
+		return -EFAULT;
 	}
 
 	if (level == PG_LEVEL_4K) {
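
The -EFAULT here is a sentinel rather than a hard error: a zero pte now means "no mapping at this address" instead of triggering a WARN. The new kernel_map_sync_memtype() in the pat.c diff below relies on exactly this, treating -EFAULT from ioremap_change_attr() as success:

	ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
	/* -EFAULT: no identity mapping there, so nothing to sync */
	if (ret == -EFAULT)
		ret = 0;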
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd3723..3be399013de6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				req_type & _PAGE_CACHE_MASK);
 	}
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type, new_type);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return reserve_ram_pages_type(start, end, req_type,
+						      new_type);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return free_ram_pages_type(start, end);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
@@ -505,6 +522,35 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif /* CONFIG_STRICT_DEVMEM */
 
+/*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+static int kernel_map_sync_memtype(u64 base, unsigned long size,
+				unsigned long flags)
+{
+	unsigned long id_sz;
+	int ret;
+
+	if (!pat_enabled || base >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < base + size) ?
+				__pa(high_memory) - base :
+				size;
+
+	ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
+	/*
+	 * -EFAULT return means that the addr was not valid and did not have
+	 * any identity mapping. That case is a success for
+	 * kernel_map_sync_memtype.
+	 */
+	if (ret == -EFAULT)
+		ret = 0;
+
+	return ret;
+}
+
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
@@ -555,9 +601,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (retval < 0)
 		return 0;
 
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+	if (kernel_map_sync_memtype(offset, size, flags)) {
 		free_memtype(offset, offset + size);
 		printk(KERN_INFO
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -601,12 +645,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -625,26 +670,27 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-		__pa(high_memory) - paddr :
-		size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags)) {
 		free_memtype(paddr, paddr + size);
 		printk(KERN_ERR
 		"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
@@ -689,6 +735,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -702,7 +749,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		WARN_ON_ONCE(1);
 		return -EINVAL;
 	}
-	return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+	pgprot = __pgprot(prot);
+	return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 }
 
 /* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +758,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -741,7 +790,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +807,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
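
Passing pgprot_t * turns track_pfn_vma_new() into an in/out interface: with strict_prot == 0, reserve_pfn_range() may write back a different-but-allowed cache attribute instead of failing. A hypothetical caller sketch (the surrounding vma, pfn and size variables are placeholders, not code from this patch):

	pgprot_t prot = vma->vm_page_prot;

	if (track_pfn_vma_new(vma, &prot, pfn, size))
		return -EINVAL;
	/*
	 * prot may now carry the downgraded cache attribute that
	 * is_new_memtype_allowed() accepted in the non-strict path.
	 */
	vma->vm_page_prot = prot;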
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 51c0a2fc14fe..09737c8af074 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		if (!node_online(i))
 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		int node = early_cpu_to_node(i);
 
 		if (node == NUMA_NO_NODE)
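
Common to the numa_64.c and srat_64.c hunks: loops over possible CPUs are now bounded by nr_cpu_ids, the number of CPU ids actually possible on this boot, rather than the compile-time NR_CPUS ceiling, e.g. from initmem_init() above:

	/* nr_cpu_ids <= NR_CPUS; skip slots this machine can never use */
	for (i = 0; i < nr_cpu_ids; i++)
		numa_set_node(i, 0);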