Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c       45
-rw-r--r--  arch/x86/mm/highmem_32.c   1
-rw-r--r--  arch/x86/mm/ioremap.c      6
-rw-r--r--  arch/x86/mm/pageattr.c     2
4 files changed, 26 insertions(+), 28 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 3f2b8962cbd..31e8730fa24 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -640,24 +640,23 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	}
 
 
-#ifdef CONFIG_X86_32
-	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
-	   fault has been handled. */
-	if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK))
-		local_irq_enable();
-
 	/*
-	 * If we're in an interrupt, have no user context or are running in an
-	 * atomic region then we must not take the fault.
+	 * It's safe to allow irq's after cr2 has been saved and the
+	 * vmalloc fault has been handled.
+	 *
+	 * User-mode registers count as a user access even for any
+	 * potential system fault or CPU buglet.
 	 */
-	if (in_atomic() || !mm)
-		goto bad_area_nosemaphore;
-#else /* CONFIG_X86_64 */
-	if (likely(regs->flags & X86_EFLAGS_IF))
+	if (user_mode_vm(regs)) {
+		local_irq_enable();
+		error_code |= PF_USER;
+	} else if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 
+#ifdef CONFIG_X86_64
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(address, regs, error_code);
+#endif
 
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
@@ -666,15 +665,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
-	/*
-	 * User-mode registers count as a user access even for any
-	 * potential system fault or CPU buglet.
-	 */
-	if (user_mode_vm(regs))
-		error_code |= PF_USER;
 again:
-#endif
-	/* When running in the kernel we expect faults to occur only to
+	/*
+	 * When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
 	 * erroneous fault occurring in a code path which already holds mmap_sem
@@ -737,9 +730,6 @@ good_area:
 			goto bad_area;
 	}
 
-#ifdef CONFIG_X86_32
-survive:
-#endif
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -874,12 +864,11 @@ out_of_memory:
 	up_read(&mm->mmap_sem);
 	if (is_global_init(tsk)) {
 		yield();
-#ifdef CONFIG_X86_32
-		down_read(&mm->mmap_sem);
-		goto survive;
-#else
+		/*
+		 * Re-lookup the vma - in theory the vma tree might
+		 * have changed:
+		 */
 		goto again;
-#endif
 	}
 
 	printk("VM: killing process %s\n", tsk->comm);
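
The fault.c portion folds the formerly #ifdef'd 32-bit and 64-bit paths into
one flow: a fault arriving with user-mode registers always re-enables
interrupts and is tagged PF_USER, while a kernel-mode fault re-enables them
only if X86_EFLAGS_IF was set; likewise the 32-bit "survive:" retry label is
unified into "again:", with the vma looked up afresh after yield() since the
vma tree might have changed. The pivot is user_mode_vm(), which on 32-bit
also counts VM86 segments as user mode. A rough sketch, simplified from this
era's <asm/ptrace.h> (check the tree for the authoritative definitions):

	static inline int user_mode(struct pt_regs *regs)
	{
	#ifdef CONFIG_X86_32
		return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
	#else
		return !!(regs->cs & 3);
	#endif
	}

	static inline int user_mode_vm(struct pt_regs *regs)
	{
	#ifdef CONFIG_X86_32
		/* VM86 mode counts as user mode even though CS may
		 * hold a kernel-looking selector. */
		return ((regs->cs & SEGMENT_RPL_MASK) |
			(regs->flags & X86_VM_MASK)) >= USER_RPL;
	#else
		return user_mode(regs);
	#endif
	}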
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 165c871ba9a..bcc079c282d 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
 	return (void*) vaddr;
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
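
The highmem_32.c hunk only exports kmap_atomic_pfn() so that a module (the
comment names i915 GEM) can atomically map a page it knows only by pfn. A
hypothetical module-side caller, assuming this era's two-argument atomic-kmap
API with an explicit km_type slot (illustrative, not from the patch):

	#include <linux/highmem.h>
	#include <linux/string.h>

	static void sketch_write_pfn(unsigned long pfn, const void *src,
				     size_t len)
	{
		void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);

		memcpy(vaddr, src, len);
		kunmap_atomic(vaddr, KM_USER0);
	}

The GPL-only export and the "temporarily ... until vmap" comment mark this as
a stopgap to be replaced by a vmap()-based mapping later.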
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e4c43ec71b2..ae71e11eb3e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -220,6 +220,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
 	/*
+	 * Check if the request spans more than any BAR in the iomem resource
+	 * tree.
+	 */
+	WARN_ON(iomem_map_sanity_check(phys_addr, size));
+
+	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
 	for (pfn = phys_addr >> PAGE_SHIFT;
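
ioremap.c gains an advisory sanity check: iomem_map_sanity_check() returns
non-zero when the requested range spans more than one node of the iomem
resource tree (for instance, running past the end of a PCI BAR), and
WARN_ON() logs a backtrace while the mapping still proceeds. A hypothetical
well-behaved caller that stays within a single BAR (names are ours, not from
the patch):

	#include <linux/pci.h>
	#include <linux/io.h>

	static void __iomem *sketch_map_bar0(struct pci_dev *pdev)
	{
		/* asking for more than the BAR's length would cross
		 * into the next resource and trip the new WARN_ON() */
		return ioremap(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	}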
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a9ec89c3fbc..407d8784f66 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -792,6 +792,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	/* Must avoid aliasing mappings in the highmem code */
 	kmap_flush_unused();
 
+	vm_unmap_aliases();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
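
pageattr.c now drops lazily-freed vmap aliases (vm_unmap_aliases()) alongside
the existing kmap flush before any attribute change, so no stale alias maps
the same physical page with conflicting cachability. Every set_memory_*()
user inherits this ordering; an illustrative caller (hypothetical, not from
the patch):

	#include <asm/cacheflush.h>

	static int sketch_make_uncached(unsigned long vaddr, int numpages)
	{
		/* reaches change_page_attr_set_clr(), which flushes
		 * kmap and vmap aliases before rewriting the PTEs */
		return set_memory_uc(vaddr, numpages);
	}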