author		Ingo Molnar <mingo@elte.hu>	2009-09-07 02:19:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-07 02:19:51 -0400
commit		a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree		0f1777542b385ebefd30b3586d830fd8ed6fda5b /arch/x86/mm
parent		75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parent		d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
arch/Kconfig
kernel/trace/trace.h
Merge reason: resolve the conflicts, plus adapt to the new
ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/fault.c	16
-rw-r--r--	arch/x86/mm/gup.c	67
-rw-r--r--	arch/x86/mm/highmem_32.c	1
-rw-r--r--	arch/x86/mm/init.c	18
-rw-r--r--	arch/x86/mm/init_64.c	13
-rw-r--r--	arch/x86/mm/pageattr.c	104
-rw-r--r--	arch/x86/mm/pat.c	3
-rw-r--r--	arch/x86/mm/pgtable.c	7
-rw-r--r--	arch/x86/mm/srat_64.c	6
-rw-r--r--	arch/x86/mm/tlb.c	21
10 files changed, 171 insertions, 85 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index baa0e86adfbc..bfae139182ff 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -426,10 +426,11 @@ static noinline int vmalloc_fault(unsigned long address)
 }
 
 static const char errata93_warning[] =
-KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-KERN_ERR "******* Please consider a BIOS update.\n"
-KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+KERN_ERR
+"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+"******* Working around it, but it may cause SEGVs or burn power.\n"
+"******* Please consider a BIOS update.\n"
+"******* Disabling USB legacy in the BIOS may also help.\n";
 
 /*
  * No vm86 mode in 64-bit mode:
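
Context for this hunk (not part of the patch): a KERN_* marker is an ordinary string prefix and is only interpreted as a log level at the very start of a printk format string; markers embedded mid-string are printed as literal bytes, which is what the old concatenation did. A minimal sketch of the corrected idiom:

        /* one level marker up front; embedded markers would print literally */
        printk(KERN_ERR
               "first line of the warning\n"
               "second line of the warning\n");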
@@ -696,7 +697,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 	if (!printk_ratelimit())
 		return;
 
-	printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 		tsk->comm, task_pid_nr(tsk), address,
 		(void *)regs->ip, (void *)regs->sp, error_code);
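
Aside (not from the patch): KERN_* level markers are plain string literals (KERN_INFO is "<6>", KERN_EMERG is "<0>"), which is why the call above can choose the level at run time and feed it through the leading %s:

        /* illustrative only; 'important' is a made-up condition */
        printk("%sdemo[%d]: something happened\n",
               important ? KERN_EMERG : KERN_INFO, task_pid_nr(tsk));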
@@ -952,8 +953,6 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	tsk = current;
 	mm = tsk->mm;
 
-	prefetchw(&mm->mmap_sem);
-
 	/* Get the faulting address: */
 	address = read_cr2();
 
@@ -963,6 +962,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 	if (kmemcheck_active(regs))
 		kmemcheck_hide(regs);
+	prefetchw(&mm->mmap_sem);
 
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
@@ -1114,7 +1114,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
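
The hunk above tracks an API change: handle_mm_fault() now takes a flags word instead of a write boolean, with FAULT_FLAG_WRITE the only bit used here. The caller-side idiom, restated from this diff:

        unsigned int flags = write ? FAULT_FLAG_WRITE : 0;

        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & VM_FAULT_ERROR))
                mm_fault_error(regs, error_code, address, fault);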
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6340cef6798a..71da1bca13cb 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -14,7 +14,7 @@
 static inline pte_t gup_get_pte(pte_t *ptep)
 {
 #ifndef CONFIG_X86_PAE
-	return *ptep;
+	return ACCESS_ONCE(*ptep);
 #else
 	/*
 	 * With get_user_pages_fast, we walk down the pagetables without taking
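
For reference, ACCESS_ONCE() in kernels of this vintage is defined in <linux/compiler.h> as a volatile cast; it forces the compiler to emit exactly one load of *ptep rather than re-reading or caching it across the lockless walk:

        /* <linux/compiler.h> */
        #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))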
@@ -219,6 +219,62 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 	return 1;
 }
 
+/*
+ * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	unsigned long flags;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+					(void __user *)start, len)))
+		return 0;
+
+	/*
+	 * XXX: batch / limit 'nr', to avoid large irq off latency
+	 * needs some instrumenting to determine the common sizes used by
+	 * important workloads (eg. DB2), and whether limiting the batch size
+	 * will decrease performance.
+	 *
+	 * It seems like we're in the clear for the moment. Direct-IO is
+	 * the main guy that batches up lots of get_user_pages, and even
+	 * they are limited to 64-at-a-time which is not so many.
+	 */
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables and pages from being freed on x86.
+	 *
+	 * So long as we atomically load page table pointers versus teardown
+	 * (which we do on x86, with the above PAE exception), we can follow the
+	 * address down to the the page and take a ref on it.
+	 */
+	local_irq_save(flags);
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			break;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			break;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_restore(flags);
+
+	return nr;
+}
+
 /**
  * get_user_pages_fast() - pin user pages in memory
  * @start: starting user address
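
A hedged usage sketch for the new function (hypothetical caller, not in this patch): because __get_user_pages_fast() neither sleeps nor takes mmap_sem, it is callable with IRQs disabled, and it simply reports how many pages it managed to pin, possibly zero:

        struct page *page;

        /* pin one page for reading, from an atomic context */
        if (__get_user_pages_fast(addr, 1, 0, &page) == 1) {
                /* ... examine the page ... */
                put_page(page);         /* drop the reference GUP took */
        }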
@@ -247,11 +303,16 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	start &= PAGE_MASK;
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
+
 	end = start + len;
-	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					(void __user *)start, len)))
+	if (end < start)
 		goto slow_irqon;
 
+#ifdef CONFIG_X86_64
+	if (end >> __VIRTUAL_MASK_SHIFT)
+		goto slow_irqon;
+#endif
+
 	/*
 	 * XXX: batch / limit 'nr', to avoid large irq off latency
 	 * needs some instrumenting to determine the common sizes used by
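
The two added checks reject ranges the walk must never see: end < start catches arithmetic wrap-around for a huge nr_pages, and on 64-bit any bit at or above __VIRTUAL_MASK_SHIFT means the range strays out of the canonical user half. Illustrative values (invented; assumes the usual 47-bit user space):

        start = 0xfffffffffffff000UL;           /* top page of the space  */
        end   = start + 2 * PAGE_SIZE;          /* wraps: end < start     */

        end   = 1UL << 47;                      /* end >> __VIRTUAL_MASK_SHIFT
                                                 * is nonzero: reject     */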
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 58f621e81919..2112ed55e7ea 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -103,6 +103,7 @@ EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_prot);
 
 void __init set_highmem_pages_init(void)
 {
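
kmap_atomic_prot() itself is not new; the export merely makes it callable from modules. A sketch under the kmap API of this era (the km_type slot argument and the prot value are assumptions, not taken from this diff):

        /* map a (possibly highmem) page with a non-default protection */
        void *vaddr = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL_NOCACHE);

        /* ... access the mapping ... */
        kunmap_atomic(vaddr, KM_USER0);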
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e4086f..0607119cef94 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -12,6 +12,7 @@
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/proto.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -177,20 +178,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 	return nr_range;
 }
 
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +197,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-	if (!after_bootmem)
-		init_gbpages();
-
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9c543290a813..ea56b8cbb6a6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -527,7 +527,7 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-unsigned long __init
+unsigned long __meminit
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
 			     unsigned long page_size_mask)
@@ -598,6 +598,15 @@ void __init paging_init(void)
 
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
+
+	/*
+	 * clear the default setting with node 0
+	 * note: don't use nodes_clear here, that is really clearing when
+	 * numa support is not compiled in, and later node_set_state
+	 * will not set it back.
+	 */
+	node_clear_state(0, N_NORMAL_MEMORY);
+
 	free_area_init_nodes(max_zone_pfns);
 }
 
@@ -787,7 +796,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
 		return ret;
 
 #else
-	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
+	reserve_bootmem(phys, len, flags);
 #endif
 
 	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4c..7e600c1962db 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/pfn.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -590,9 +591,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	unsigned int level;
 	pte_t *kpte, old_pte;
 
-	if (cpa->flags & CPA_PAGES_ARRAY)
-		address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-	else if (cpa->flags & CPA_ARRAY)
+	if (cpa->flags & CPA_PAGES_ARRAY) {
+		struct page *page = cpa->pages[cpa->curpage];
+		if (unlikely(PageHighMem(page)))
+			return 0;
+		address = (unsigned long)page_address(page);
+	} else if (cpa->flags & CPA_ARRAY)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
@@ -681,8 +685,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
 static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
-	int ret = 0;
-	unsigned long temp_cpa_vaddr, vaddr;
+	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+	unsigned long vaddr, remapped;
+	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
 		return 0;
@@ -695,9 +700,12 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	 * No need to redo, when the primary call touched the direct
 	 * mapping already:
 	 */
-	if (cpa->flags & CPA_PAGES_ARRAY)
-		vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-	else if (cpa->flags & CPA_ARRAY)
+	if (cpa->flags & CPA_PAGES_ARRAY) {
+		struct page *page = cpa->pages[cpa->curpage];
+		if (unlikely(PageHighMem(page)))
+			return 0;
+		vaddr = (unsigned long)page_address(page);
+	} else if (cpa->flags & CPA_ARRAY)
 		vaddr = cpa->vaddr[cpa->curpage];
 	else
 		vaddr = *cpa->vaddr;
@@ -706,42 +714,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
 		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
-		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
-		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.vaddr = &laddr;
 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-
 		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
 	}
 
 #ifdef CONFIG_X86_64
-	if (ret)
-		return ret;
-	/*
-	 * No need to redo, when the primary call touched the high
-	 * mapping already:
-	 */
-	if (within(vaddr, (unsigned long) _text, _brk_end))
-		return 0;
-
 	/*
-	 * If the physical address is inside the kernel map, we need
+	 * If the primary call didn't touch the high mapping already
+	 * and the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
-		return 0;
+	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+	    within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+					       __START_KERNEL_map - phys_base;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-	alias_cpa = *cpa;
-	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
-	alias_cpa.vaddr = &temp_cpa_vaddr;
-	alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		/*
+		 * The high mapping range is imprecise, so ignore the
+		 * return value.
+		 */
+		__change_page_attr_set_clr(&alias_cpa, 0);
+	}
+#endif
 
 	/*
-	 * The high mapping range is imprecise, so ignore the return value.
+	 * If the PMD page was partially used for per-cpu remapping,
+	 * the recycled area needs to be split and modified. Because
+	 * the area is always proper subset of a PMD page
+	 * cpa->numpages is guaranteed to be 1 for these areas, so
+	 * there's no need to loop over and check for further remaps.
 	 */
-	__change_page_attr_set_clr(&alias_cpa, 0);
-#endif
-	return ret;
+	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+	if (remapped) {
+		WARN_ON(cpa->numpages > 1);
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &remapped;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
@@ -982,12 +1003,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
+	unsigned long addr_copy = addr;
+
 	ret = change_page_attr_set(&addr, numpages,
 				   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
-
 	if (!ret) {
-		ret = change_page_attr_set(&addr, numpages,
-					   __pgprot(_PAGE_CACHE_WC), 0);
+		ret = change_page_attr_set_clr(&addr_copy, numpages,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, 0, NULL);
 	}
 	return ret;
 }
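
Why the second pass changed: setting the WC bits on top of the UC_MINUS bits left by the first pass can combine into a different cache type entirely, so the retry now clears the whole _PAGE_CACHE_MASK while setting _PAGE_CACHE_WC; addr_copy preserves the caller's original address for that second call. Reading the new call against the function's parameters (labels inferred from the set_clr name and the call site, not from a header shown here):

        change_page_attr_set_clr(&addr_copy, numpages,
                                 __pgprot(_PAGE_CACHE_WC),      /* bits to set   */
                                 __pgprot(_PAGE_CACHE_MASK),    /* bits to clear */
                                 0, 0, NULL);   /* no force-split, no array */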
@@ -1104,7 +1128,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 	int free_idx;
 
 	for (i = 0; i < addrinarray; i++) {
-		start = (unsigned long)page_address(pages[i]);
+		if (PageHighMem(pages[i]))
+			continue;
+		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
 		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
 			goto err_out;
@@ -1117,7 +1143,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 err_out:
 	free_idx = i;
 	for (i = 0; i < free_idx; i++) {
-		start = (unsigned long)page_address(pages[i]);
+		if (PageHighMem(pages[i]))
+			continue;
+		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
 		free_memtype(start, end);
 	}
@@ -1146,7 +1174,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 		return retval;
 
 	for (i = 0; i < addrinarray; i++) {
-		start = (unsigned long)page_address(pages[i]);
+		if (PageHighMem(pages[i]))
+			continue;
+		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
 		free_memtype(start, end);
 	}
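
Common thread of the three set_pages_array_* hunks above: a highmem page has no permanent kernel mapping, so page_address() may return NULL for it, while the memtype tracker works on physical ranges anyway. The code therefore skips highmem pages and derives the range from the pfn:

        /* physical range of one page, valid even without a kernel mapping */
        start = page_to_pfn(pages[i]) << PAGE_SHIFT;
        end   = start + PAGE_SIZE;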
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e6718bb28065..352aa9e927e2 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -623,7 +623,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		return ret;
 
 	if (flags != want_flags) {
-		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+		if (strict_prot ||
+		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for %Lx-%Lx, got %s\n",
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8e43bdd45456..ed34f5e35999 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
 	pgtable_page_dtor(pte);
 	paravirt_release_pte(page_to_pfn(pte));
@@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 }
 
 #if PAGETABLE_LEVELS > 2
-void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
 #if PAGETABLE_LEVELS > 3
-void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
 	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pud));
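
The extra leading underscore makes room for wrappers: this series adds a virtual-address argument to the generic __p{te,md,ud}_free_tlb() interfaces, and x86 does not need it, so the old names presumably become argument-dropping macros along these lines (assumed; the asm/pgalloc.h side is not shown in this diff):

        #define __pte_free_tlb(tlb, pte, address)       ___pte_free_tlb((tlb), (pte))
        #define __pmd_free_tlb(tlb, pmd, address)       ___pmd_free_tlb((tlb), (pmd))
        #define __pud_free_tlb(tlb, pud, address)       ___pud_free_tlb((tlb), (pud))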
@@ -329,7 +329,6 @@ void __init reserve_top_address(unsigned long reserve)
 	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
 	       (int)-reserve);
 	__FIXADDR_TOP = -reserve - PAGE_SIZE;
-	__VMALLOC_RESERVE += reserve;
 #endif
 }
 
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 2dfcbf9df2ae..dbb5381f7b3b 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -79,8 +79,10 @@ static __init void bad_srat(void)
 	acpi_numa = -1;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
-	for (i = 0; i < MAX_NUMNODES; i++)
-		nodes_add[i].start = nodes[i].end = 0;
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		nodes[i].start = nodes[i].end = 0;
+		nodes_add[i].start = nodes_add[i].end = 0;
+	}
 	remove_all_active_ranges();
 }
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e97017e95..c814e144a3f0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -183,18 +183,17 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 
 	f->flush_mm = mm;
 	f->flush_va = va;
-	cpumask_andnot(to_cpumask(f->flush_cpumask),
-		       cpumask, cpumask_of(smp_processor_id()));
-
-	/*
-	 * We have to send the IPI only to
-	 * CPUs affected.
-	 */
-	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
-		      INVALIDATE_TLB_VECTOR_START + sender);
+	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
+		/*
+		 * We have to send the IPI only to
+		 * CPUs affected.
+		 */
+		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
+			      INVALIDATE_TLB_VECTOR_START + sender);
 
-	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
-		cpu_relax();
+		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
+			cpu_relax();
+	}
 
 	f->flush_mm = NULL;
 	f->flush_va = 0;
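
cpumask_andnot() returns nonzero exactly when the destination mask comes out non-empty, so the rewrite skips both the IPI and the acknowledgment spin when the local CPU was the only one in the mask; previously both ran unconditionally. The return-value idiom in isolation:

        /* dst = src & ~self; the body runs only if dst is non-empty */
        if (cpumask_andnot(dst, src, cpumask_of(smp_processor_id()))) {
                /* at least one remote CPU needs the flush */
        }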