diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-22 19:07:45 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-22 19:07:45 -0400 |
commit | 5b160bd426946c85f32b15e5d34d62d2618a5a87 (patch) | |
tree | d12e53fc438587d726f5dbdb0e7f2f4742d51300 /mm | |
parent | 7100e505b76b4e2efd88b2459d1a932214e29f8a (diff) | |
parent | bb65a764de59b76323e0b72abbd9fc31401a53fa (diff) |
Merge branch 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/mce changes from Ingo Molnar:
"This tree improves the AMD thresholding bank code and includes a
memory fault signal handling fixlet."
* 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mce: Fix siginfo_t->si_addr value for non-recoverable memory faults
x86, MCE, AMD: Update copyrights and boilerplate
x86, MCE, AMD: Give proper names to the thresholding banks
x86, MCE, AMD: Make error_count read only
x86, MCE, AMD: Cleanup reading of error_count
x86, MCE, AMD: Print decimal thresholding values
x86, MCE, AMD: Move shared bank to node descriptor
x86, MCE, AMD: Remove local_allocate_... wrapper
x86, MCE, AMD: Remove shared banks sysfs linking
x86, amd_nb: Export model 0x10 and later PCI id
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory-failure.c | 14 |
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ab1e7145e290..de4ce7058450 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, | |||
345 | * Also when FAIL is set do a force kill because something went | 345 | * Also when FAIL is set do a force kill because something went |
346 | * wrong earlier. | 346 | * wrong earlier. |
347 | */ | 347 | */ |
348 | static void kill_procs(struct list_head *to_kill, int doit, int trapno, | 348 | static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, |
349 | int fail, struct page *page, unsigned long pfn, | 349 | int fail, struct page *page, unsigned long pfn, |
350 | int flags) | 350 | int flags) |
351 | { | 351 | { |
352 | struct to_kill *tk, *next; | 352 | struct to_kill *tk, *next; |
353 | 353 | ||
354 | list_for_each_entry_safe (tk, next, to_kill, nd) { | 354 | list_for_each_entry_safe (tk, next, to_kill, nd) { |
355 | if (doit) { | 355 | if (forcekill) { |
356 | /* | 356 | /* |
357 | * In case something went wrong with munmapping | 357 | * In case something went wrong with munmapping |
358 | * make sure the process doesn't catch the | 358 | * make sure the process doesn't catch the |
@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
858 | struct address_space *mapping; | 858 | struct address_space *mapping; |
859 | LIST_HEAD(tokill); | 859 | LIST_HEAD(tokill); |
860 | int ret; | 860 | int ret; |
861 | int kill = 1; | 861 | int kill = 1, forcekill; |
862 | struct page *hpage = compound_head(p); | 862 | struct page *hpage = compound_head(p); |
863 | struct page *ppage; | 863 | struct page *ppage; |
864 | 864 | ||
@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
888 | * be called inside page lock (it's recommended but not enforced). | 888 | * be called inside page lock (it's recommended but not enforced). |
889 | */ | 889 | */ |
890 | mapping = page_mapping(hpage); | 890 | mapping = page_mapping(hpage); |
891 | if (!PageDirty(hpage) && mapping && | 891 | if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && |
892 | mapping_cap_writeback_dirty(mapping)) { | 892 | mapping_cap_writeback_dirty(mapping)) { |
893 | if (page_mkclean(hpage)) { | 893 | if (page_mkclean(hpage)) { |
894 | SetPageDirty(hpage); | 894 | SetPageDirty(hpage); |
@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
965 | * Now that the dirty bit has been propagated to the | 965 | * Now that the dirty bit has been propagated to the |
966 | * struct page and all unmaps done we can decide if | 966 | * struct page and all unmaps done we can decide if |
967 | * killing is needed or not. Only kill when the page | 967 | * killing is needed or not. Only kill when the page |
968 | * was dirty, otherwise the tokill list is merely | 968 | * was dirty or the process is not restartable, |
969 | * otherwise the tokill list is merely | ||
969 | * freed. When there was a problem unmapping earlier | 970 | * freed. When there was a problem unmapping earlier |
970 | * use a more force-full uncatchable kill to prevent | 971 | * use a more force-full uncatchable kill to prevent |
971 | * any accesses to the poisoned memory. | 972 | * any accesses to the poisoned memory. |
972 | */ | 973 | */ |
973 | kill_procs(&tokill, !!PageDirty(ppage), trapno, | 974 | forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL); |
975 | kill_procs(&tokill, forcekill, trapno, | ||
974 | ret != SWAP_SUCCESS, p, pfn, flags); | 976 | ret != SWAP_SUCCESS, p, pfn, flags); |
975 | 977 | ||
976 | return ret; | 978 | return ret; |