aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorGerald Schaefer <gerald.schaefer@de.ibm.com>2016-03-17 10:00:04 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2016-03-17 11:42:14 -0400
commitfc897c95e91451271cd707ee0f71022b9b201ce9 (patch)
treeb0a44cffdfb8193ba708719e9d7a9bb033f64e61 /arch/s390/mm
parent91d37211769510ae0b4747045d8f81d3b9dd4278 (diff)
s390/mm: handle PTE-mapped tail pages in fast gup
With the THP refcounting rework it is possible to see THP compound tail pages mapped with PTEs during a THP split. This needs to be considered when using page_cache_get_speculative(), which will always fail on tail pages because ->_count is always zero. commit 7aef4172 "mm: handle PTE-mapped tail pages in generic fast gup implementation" fixed it for the generic fast gup code by using compound_head(page) instead of page, but not for s390. This patch is a 1:1 adaption of commit 7aef4172 for the s390 fast gup code. Without this fix, gup will fall back to the slow path or fail in the unlikely scenario that we hit a THP under splitting in-between the page table split and the compound page split. Cc: stable@vger.kernel.org # v4.5 Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/gup.c8
1 files changed, 5 insertions, 3 deletions
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 13dab0c1645c..3776aca22082 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,9 +20,9 @@
 static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
+	struct page *head, *page;
 	unsigned long mask;
 	pte_t *ptep, pte;
-	struct page *page;
 
 	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		return 0;
 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 	page = pte_page(pte);
-	if (!page_cache_get_speculative(page))
+	head = compound_head(page);
+	if (!page_cache_get_speculative(head))
 		return 0;
 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-		put_page(page);
+		put_page(head);
 		return 0;
 	}
+	VM_BUG_ON_PAGE(compound_head(page) != head, page);
 	pages[*nr] = page;
 	(*nr)++;
 