diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-12 18:44:27 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-12 18:44:27 -0400 |
commit | ac4de9543aca59f2b763746647577302fbedd57e (patch) | |
tree | 40407750569ee030de56233c41c9a97f7e89cf67 /arch/x86 | |
parent | 26935fb06ee88f1188789807687c03041f3c70d9 (diff) | |
parent | de32a8177f64bc62e1b19c685dd391af664ab13f (diff) |
Merge branch 'akpm' (patches from Andrew Morton)
Merge more patches from Andrew Morton:
"The rest of MM. Plus one misc cleanup"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
mm/Kconfig: add MMU dependency for MIGRATION.
kernel: replace strict_strto*() with kstrto*()
mm, thp: count thp_fault_fallback anytime thp fault fails
thp: consolidate code between handle_mm_fault() and do_huge_pmd_anonymous_page()
thp: do_huge_pmd_anonymous_page() cleanup
thp: move maybe_pmd_mkwrite() out of mk_huge_pmd()
mm: cleanup add_to_page_cache_locked()
thp: account anon transparent huge pages into NR_ANON_PAGES
truncate: drop 'oldsize' truncate_pagecache() parameter
mm: make lru_add_drain_all() selective
memcg: document cgroup dirty/writeback memory statistics
memcg: add per cgroup writeback pages accounting
memcg: check for proper lock held in mem_cgroup_update_page_stat
memcg: remove MEMCG_NR_FILE_MAPPED
memcg: reduce function dereference
memcg: avoid overflow caused by PAGE_ALIGN
memcg: rename RESOURCE_MAX to RES_COUNTER_MAX
memcg: correct RESOURCE_MAX to ULLONG_MAX
mm: memcg: do not trap chargers with full callstack on OOM
mm: memcg: rework and document OOM waiting and wakeup
...
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/mm/fault.c | 43 |
1 files changed, 22 insertions, 21 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 654be4ae3047..3aaeffcfd67a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -842,23 +842,15 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, | |||
842 | force_sig_info_fault(SIGBUS, code, address, tsk, fault); | 842 | force_sig_info_fault(SIGBUS, code, address, tsk, fault); |
843 | } | 843 | } |
844 | 844 | ||
845 | static noinline int | 845 | static noinline void |
846 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, | 846 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
847 | unsigned long address, unsigned int fault) | 847 | unsigned long address, unsigned int fault) |
848 | { | 848 | { |
849 | /* | 849 | if (fatal_signal_pending(current) && !(error_code & PF_USER)) { |
850 | * Pagefault was interrupted by SIGKILL. We have no reason to | 850 | up_read(¤t->mm->mmap_sem); |
851 | * continue pagefault. | 851 | no_context(regs, error_code, address, 0, 0); |
852 | */ | 852 | return; |
853 | if (fatal_signal_pending(current)) { | ||
854 | if (!(fault & VM_FAULT_RETRY)) | ||
855 | up_read(&current->mm->mmap_sem); | ||
856 | if (!(error_code & PF_USER)) | ||
857 | no_context(regs, error_code, address, 0, 0); | ||
858 | return 1; | ||
859 | } | 853 | } |
860 | if (!(fault & VM_FAULT_ERROR)) | ||
861 | return 0; | ||
862 | 854 | ||
863 | if (fault & VM_FAULT_OOM) { | 855 | if (fault & VM_FAULT_OOM) { |
864 | /* Kernel mode? Handle exceptions or die: */ | 856 | /* Kernel mode? Handle exceptions or die: */ |
@@ -866,7 +858,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
866 | up_read(&current->mm->mmap_sem); | 858 | up_read(&current->mm->mmap_sem); |
867 | no_context(regs, error_code, address, | 859 | no_context(regs, error_code, address, |
868 | SIGSEGV, SEGV_MAPERR); | 860 | SIGSEGV, SEGV_MAPERR); |
869 | return 1; | 861 | return; |
870 | } | 862 | } |
871 | 863 | ||
872 | up_read(&current->mm->mmap_sem); | 864 | up_read(&current->mm->mmap_sem); |
@@ -884,7 +876,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
884 | else | 876 | else |
885 | BUG(); | 877 | BUG(); |
886 | } | 878 | } |
887 | return 1; | ||
888 | } | 879 | } |
889 | 880 | ||
890 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) | 881 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) |
@@ -1011,9 +1002,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1011 | unsigned long address; | 1002 | unsigned long address; |
1012 | struct mm_struct *mm; | 1003 | struct mm_struct *mm; |
1013 | int fault; | 1004 | int fault; |
1014 | int write = error_code & PF_WRITE; | 1005 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
1015 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
1016 | (write ? FAULT_FLAG_WRITE : 0); | ||
1017 | 1006 | ||
1018 | tsk = current; | 1007 | tsk = current; |
1019 | mm = tsk->mm; | 1008 | mm = tsk->mm; |
@@ -1083,6 +1072,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1083 | if (user_mode_vm(regs)) { | 1072 | if (user_mode_vm(regs)) { |
1084 | local_irq_enable(); | 1073 | local_irq_enable(); |
1085 | error_code |= PF_USER; | 1074 | error_code |= PF_USER; |
1075 | flags |= FAULT_FLAG_USER; | ||
1086 | } else { | 1076 | } else { |
1087 | if (regs->flags & X86_EFLAGS_IF) | 1077 | if (regs->flags & X86_EFLAGS_IF) |
1088 | local_irq_enable(); | 1078 | local_irq_enable(); |
@@ -1109,6 +1099,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1109 | return; | 1099 | return; |
1110 | } | 1100 | } |
1111 | 1101 | ||
1102 | if (error_code & PF_WRITE) | ||
1103 | flags |= FAULT_FLAG_WRITE; | ||
1104 | |||
1112 | /* | 1105 | /* |
1113 | * When running in the kernel we expect faults to occur only to | 1106 | * When running in the kernel we expect faults to occur only to |
1114 | * addresses in user space. All other faults represent errors in | 1107 | * addresses in user space. All other faults represent errors in |
@@ -1187,9 +1180,17 @@ good_area: | |||
1187 | */ | 1180 | */ |
1188 | fault = handle_mm_fault(mm, vma, address, flags); | 1181 | fault = handle_mm_fault(mm, vma, address, flags); |
1189 | 1182 | ||
1190 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { | 1183 | /* |
1191 | if (mm_fault_error(regs, error_code, address, fault)) | 1184 | * If we need to retry but a fatal signal is pending, handle the |
1192 | return; | 1185 | * signal first. We do not need to release the mmap_sem because it |
1186 | * would already be released in __lock_page_or_retry in mm/filemap.c. | ||
1187 | */ | ||
1188 | if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))) | ||
1189 | return; | ||
1190 | |||
1191 | if (unlikely(fault & VM_FAULT_ERROR)) { | ||
1192 | mm_fault_error(regs, error_code, address, fault); | ||
1193 | return; | ||
1193 | } | 1194 | } |
1194 | 1195 | ||
1195 | /* | 1196 | /* |