author     Linus Torvalds <torvalds@linux-foundation.org>    2014-02-17 15:36:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-02-17 15:36:49 -0500
commit     f2a77abdb8226b8dbad039d415d5d70a934ac605 (patch)
tree       5bafcd57b253dec45b6e7d8995821653acecfba1 /mm
parent     e4178d809fdaee32a56833fff1f5056c99e90a1a (diff)
parent     66f9af83e56bfa12964d251df9d60fb571579913 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Ben Herrenschmidt:
"Here are some more powerpc fixes for 3.14
The main one is a nasty issue with the NUMA balancing support which
requires a small generic change and the addition of a new accessor to
set _PAGE_NUMA. Both have been reviewed and acked by Mel and Rik.
The changelog should have plenty of details but basically, without
this fix, we get random user segfaults and/or corruptions due to
missing TLB/hash flushes. Aneesh's series of 3 patches fixes it.
We have some vDSO vs. perf fixes from Anton, some small EEH fixes
from Gavin, a ppc32 regression vs the stack overflow detector, and a
fix for the way we handle PCIe host bridge speed settings on pseries
(which is needed for proper operation of AMD graphics cards on
Power8)"
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
powerpc/eeh: Disable EEH on reboot
powerpc/eeh: Cleanup on eeh_subsystem_enabled
powerpc/powernv: Rework EEH reset
powerpc: Use unstripped VDSO image for more accurate profiling data
powerpc: Link VDSOs at 0x0
mm: Use ptep/pmdp_set_numa() for updating _PAGE_NUMA bit
mm: Dirty accountable change only apply to non prot numa case
powerpc/mm: Add new "set" flag argument to pte/pmd update function
powerpc/pseries: Add Gen3 definitions for PCIE link speed
powerpc/pseries: Fix regression on PCI link speed
powerpc: Set the correct ksp_limit on ppc32 when switching to irq stack
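The "new accessor to set _PAGE_NUMA" mentioned in the pull message is the ptep_set_numa()/pmdp_set_numa() pair that the mm hunks below switch to. The following is a rough sketch only, assuming the generic fallback simply folds in the open-coded sequence the diff removes; the powerpc overrides are the interesting part, since they also perform the TLB/hash flush whose absence caused the corruption:

/*
 * Sketch, not the exact kernel source: a plausible generic fallback
 * for the new accessors, mirroring the pte_mknuma() + set_pte_at()
 * (and pmd_mknuma() + set_pmd_at()) sequence removed from
 * mm/mprotect.c and mm/huge_memory.c below.  An architecture such as
 * powerpc is expected to override these so that setting _PAGE_NUMA
 * also flushes the stale TLB/hash entry.
 */
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep)
{
        pte_t ptent = *ptep;

        ptent = pte_mknuma(ptent);
        set_pte_at(mm, addr, ptep, ptent);
}

static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
                                 pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmd = pmd_mknuma(pmd);
        set_pmd_at(mm, addr, pmdp, pmd);
}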
Diffstat (limited to 'mm')
-rw-r--r--   mm/huge_memory.c |  9
-rw-r--r--   mm/mprotect.c    | 25
2 files changed, 10 insertions, 24 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf974e1..da23eb96779f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                                 entry = pmd_mknonnuma(entry);
                         entry = pmd_modify(entry, newprot);
                         ret = HPAGE_PMD_NR;
+                        set_pmd_at(mm, addr, pmd, entry);
                         BUG_ON(pmd_write(entry));
                 } else {
                         struct page *page = pmd_page(*pmd);
@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                          */
                         if (!is_huge_zero_page(page) &&
                             !pmd_numa(*pmd)) {
-                                entry = *pmd;
-                                entry = pmd_mknuma(entry);
+                                pmdp_set_numa(mm, addr, pmd);
                                 ret = HPAGE_PMD_NR;
                         }
                 }
-
-                /* Set PMD if cleared earlier */
-                if (ret == HPAGE_PMD_NR)
-                        set_pmd_at(mm, addr, pmd, entry);
-
                 spin_unlock(ptl);
         }
 
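In the huge_memory.c hunk above, set_pmd_at() moves into the !prot_numa branch right after the entry is rebuilt, and the deferred "Set PMD if cleared earlier" step disappears because the NUMA branch now goes through pmdp_set_numa(). How powerpc implements that accessor is not part of this mm-limited diff; purely as an illustration (the helper name and its extra "set" argument are assumptions based on the shortlog entry "powerpc/mm: Add new 'set' flag argument to pte/pmd update function"), it could look roughly like:

/*
 * Illustrative sketch, not verified against the powerpc tree: with a
 * "set" argument added to the low-level pmd update helper, the
 * override can transfer _PAGE_NUMA and do the hash/TLB flush under
 * one atomic update, instead of the read-modify-write plus
 * set_pmd_at() sequence removed above.
 */
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
                                 pmd_t *pmdp)
{
        /* hypothetical signature: (mm, addr, pmdp, bits_to_clear, bits_to_set) */
        pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_NUMA);
}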
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c1785744..769a67a15803 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 if (pte_numa(ptent))
                                         ptent = pte_mknonnuma(ptent);
                                 ptent = pte_modify(ptent, newprot);
+                                /*
+                                 * Avoid taking write faults for pages we
+                                 * know to be dirty.
+                                 */
+                                if (dirty_accountable && pte_dirty(ptent))
+                                        ptent = pte_mkwrite(ptent);
+                                ptep_modify_prot_commit(mm, addr, pte, ptent);
                                 updated = true;
                         } else {
                                 struct page *page;
 
-                                ptent = *pte;
                                 page = vm_normal_page(vma, addr, oldpte);
                                 if (page && !PageKsm(page)) {
                                         if (!pte_numa(oldpte)) {
-                                                ptent = pte_mknuma(ptent);
-                                                set_pte_at(mm, addr, pte, ptent);
+                                                ptep_set_numa(mm, addr, pte);
                                                 updated = true;
                                         }
                                 }
                         }
-
-                        /*
-                         * Avoid taking write faults for pages we know to be
-                         * dirty.
-                         */
-                        if (dirty_accountable && pte_dirty(ptent)) {
-                                ptent = pte_mkwrite(ptent);
-                                updated = true;
-                        }
-
                         if (updated)
                                 pages++;
-
-                        /* Only !prot_numa always clears the pte */
-                        if (!prot_numa)
-                                ptep_modify_prot_commit(mm, addr, pte, ptent);
                 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
                         swp_entry_t entry = pte_to_swp_entry(oldpte);
 