Diffstat (limited to 'mm')

 mm/memcontrol.c |  4 ++++
 mm/memory.c     | 16 ++++++++++++----
 mm/mempolicy.c  |  2 +-
 mm/mincore.c    |  2 +-
 mm/pagewalk.c   |  2 +-
 mm/swapfile.c   |  4 +---
 6 files changed, 20 insertions(+), 10 deletions(-)
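
Every hunk below converts a pagetable walker that runs with mmap_sem held only in read mode from pmd_none_or_clear_bad() to pmd_none_or_trans_huge_or_clear_bad(), or to its pmd_trans_unstable() wrapper. The helper itself lands in include/asm-generic/pgtable.h and is therefore outside this 'mm'-limited diffstat. As a reconstructed sketch of its shape (not part of the diff shown here; the exact body may differ):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	/* depend on the compiler for an atomic pmd read */
	pmd_t pmdval = *pmd;

	/*
	 * The barrier stabilizes pmdval in a register or on the stack
	 * so both checks below run against one snapshot of the pmd,
	 * which can otherwise change under us (MADV_DONTNEED vs a
	 * trans huge page fault, both under mmap_sem read mode).
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		/*
		 * A trans huge pmd looks "bad" to pmd_bad() but is a
		 * legitimate race, not corruption: report it like a
		 * none pmd instead of clearing it.
		 */
		if (!pmd_trans_huge(pmdval))
			pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}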
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 26c6f4ec20f4..37281816ff67 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5230,6 +5230,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	spinlock_t *ptl;
 
 	split_huge_page_pmd(walk->mm, pmd);
+	if (pmd_trans_unstable(pmd))
+		return 0;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5390,6 +5392,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	spinlock_t *ptl;
 
 	split_huge_page_pmd(walk->mm, pmd);
+	if (pmd_trans_unstable(pmd))
+		return 0;
 retry:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
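
Both memcg walkers check pmd_trans_unstable() right after split_huge_page_pmd(): under mmap_sem read mode the pmd can still become none or trans huge again before pte_offset_map_lock(), and mapping a pte table through such a pmd would dereference garbage. pmd_trans_unstable() is the companion wrapper (also in include/asm-generic/pgtable.h, outside this diffstat); roughly:

static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;	/* without THP an established pmd stays stable */
#endif
}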
diff --git a/mm/memory.c b/mm/memory.c
index 347e5fad1cfa..e01abb908b6b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1247,16 +1247,24 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 	do {
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
-			if (next-addr != HPAGE_PMD_SIZE) {
+			if (next - addr != HPAGE_PMD_SIZE) {
 				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
 				split_huge_page_pmd(vma->vm_mm, pmd);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
-				continue;
+				goto next;
 			/* fall through */
 		}
-		if (pmd_none_or_clear_bad(pmd))
-			continue;
+		/*
+		 * Here there can be other concurrent MADV_DONTNEED or
+		 * trans huge page faults running, and if the pmd is
+		 * none or trans huge it can change under us. This is
+		 * because MADV_DONTNEED holds the mmap_sem in read
+		 * mode.
+		 */
+		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+			goto next;
 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
+next:
 		cond_resched();
 	} while (pmd++, addr = next, addr != end);
 
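Two details in this hunk: the new comment documents why the stronger check is needed, and continue becomes goto next so that cond_resched() still runs for skipped pmds (a plain continue in this do-while would jump straight to the loop condition). For contrast, the check being replaced at these call sites looks roughly like the sketch below (long-standing generic code, reconstructed here for illustration). It reads *pmd twice and treats every pmd_bad() hit as corruption, so a trans huge pmd materialized by a racing page fault triggers a spurious pmd_ERROR and clears a valid pmd:

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		/* no trans huge awareness: a huge pmd ends up here */
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}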
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 47296fee23db..0a3757067631 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -512,7 +512,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	do {
 		next = pmd_addr_end(addr, end);
 		split_huge_page_pmd(vma->vm_mm, pmd);
-		if (pmd_none_or_clear_bad(pmd))
+		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
 		if (check_pte_range(vma, pmd, addr, next, nodes,
 				    flags, private))
diff --git a/mm/mincore.c b/mm/mincore.c
index 636a86876ff2..936b4cee8cb1 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -164,7 +164,7 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		}
 		/* fall through */
 	}
-	if (pmd_none_or_clear_bad(pmd))
+	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 		mincore_unmapped_range(vma, addr, next, vec);
 	else
 		mincore_pte_range(vma, pmd, addr, next, vec);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 2f5cf10ff660..aa9701e12714 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -59,7 +59,7 @@ again:
 			continue;
 
 		split_huge_page_pmd(walk->mm, pmd);
-		if (pmd_none_or_clear_bad(pmd))
+		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			goto again;
 		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 00a962caab1a..44595a373e42 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (unlikely(pmd_trans_huge(*pmd)))
-			continue;
-		if (pmd_none_or_clear_bad(pmd))
+		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
 		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
 		if (ret)
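
The swapfile hunk shows the net effect at a call site: the old two-step skip (unlikely trans huge, then none-or-bad) collapses into one race-safe check. A minimal sketch of the resulting walker pattern, using a hypothetical example_pmd_range() that mirrors unuse_pmd_range() above:

static int example_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			     unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* none, trans huge, or corrupt: skip the pte level */
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		/* pmd is a stable pte table here; safe to map ptes */
		/* ... pte_offset_map_lock() and per-pte work ... */
	} while (pmd++, addr = next, addr != end);
	return 0;
}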