diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 22:55:54 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 22:55:54 -0400 |
commit | 0e06f5c0deeef0332a5da2ecb8f1fcf3e024d958 (patch) | |
tree | e0f0af4aadf10c713c5cf1b65356844b3c9b3215 /arch/x86/mm | |
parent | f7816ad0f878dacd5f0120476f9b836ccf8699ea (diff) | |
parent | 8f19b0c058d93a678a99dd6fec03af2e769943f2 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2
- most(?) of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (125 commits)
thp: fix comments of __pmd_trans_huge_lock()
cgroup: remove unnecessary 0 check from css_from_id()
cgroup: fix idr leak for the first cgroup root
mm: memcontrol: fix documentation for compound parameter
mm: memcontrol: remove BUG_ON in uncharge_list
mm: fix build warnings in <linux/compaction.h>
mm, thp: convert from optimistic swapin collapsing to conservative
mm, thp: fix comment inconsistency for swapin readahead functions
thp: update Documentation/{vm/transhuge,filesystems/proc}.txt
shmem: split huge pages beyond i_size under memory pressure
thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE
khugepaged: add support of collapse for tmpfs/shmem pages
shmem: make shmem_inode_info::lock irq-safe
khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()
thp: extract khugepaged from mm/huge_memory.c
shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings
shmem: add huge pages support
shmem: get_unmapped_area align huge page
shmem: prepare huge= mount option and sysfs knob
mm, rmap: account shmem thp pages
...
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/fault.c   |  2 |
-rw-r--r-- | arch/x86/mm/pgtable.c | 10 |
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d22161ab941d..dc8023060456 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1353,7 +1353,7 @@ good_area:
 	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;

 	/*
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index aa0ff4b02a96..3feec5af4e67 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>

-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
@@ -18,7 +18,7 @@ gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)__get_free_page(PGALLOC_GFP);
+	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
 }

 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -207,9 +207,13 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
 {
 	int i;
 	bool failed = false;
+	gfp_t gfp = PGALLOC_GFP;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;

 	for(i = 0; i < PREALLOCATED_PMDS; i++) {
-		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
 		if (!pmd)
 			failed = true;
 		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {