author      Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 19:39:15 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 19:39:15 -0400
commit      eea3a00264cf243a28e4331566ce67b86059339d (patch)
tree        487f16389e0dfa32e9caa7604d1274a7dcda8f04 /mm/memory.c
parent      e7c82412433a8039616c7314533a0a1c025d99bf (diff)
parent      e693d73c20ffdb06840c9378f367bad849ac0d5d (diff)
Merge branch 'akpm' (patches from Andrew)
Merge second patchbomb from Andrew Morton:
- the rest of MM
- various misc bits
- add ability to run /sbin/reboot at reboot time
- printk/vsprintf changes
- fiddle with seq_printf() return value
* akpm: (114 commits)
parisc: remove use of seq_printf return value
lru_cache: remove use of seq_printf return value
tracing: remove use of seq_printf return value
cgroup: remove use of seq_printf return value
proc: remove use of seq_printf return value
s390: remove use of seq_printf return value
cris fasttimer: remove use of seq_printf return value
cris: remove use of seq_printf return value
openrisc: remove use of seq_printf return value
ARM: plat-pxa: remove use of seq_printf return value
nios2: cpuinfo: remove use of seq_printf return value
microblaze: mb: remove use of seq_printf return value
ipc: remove use of seq_printf return value
rtc: remove use of seq_printf return value
power: wakeup: remove use of seq_printf return value
x86: mtrr: if: remove use of seq_printf return value
linux/bitmap.h: improve BITMAP_{LAST,FIRST}_WORD_MASK
MAINTAINERS: CREDITS: remove Stefano Brivio from B43
.mailmap: add Ricardo Ribalda
CREDITS: add Ricardo Ribalda Delgado
...
Diffstat (limited to 'mm/memory.c')
-rw-r--r--    mm/memory.c    56
1 file changed, 45 insertions, 11 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ac20b2a6a0c3..22e037e3364e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -690,12 +690,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 	/*
 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
 	 */
-	if (vma->vm_ops)
-		printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
-		       vma->vm_ops->fault);
-	if (vma->vm_file)
-		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
-		       vma->vm_file->f_op->mmap);
+	pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
+		 vma->vm_file,
+		 vma->vm_ops ? vma->vm_ops->fault : NULL,
+		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
+		 mapping ? mapping->a_ops->readpage : NULL);
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
@@ -2181,6 +2180,42 @@ oom:
 	return VM_FAULT_OOM;
 }
 
+/*
+ * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
+ * mapping
+ */
+static int wp_pfn_shared(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
+			pmd_t *pmd)
+{
+	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
+		struct vm_fault vmf = {
+			.page = NULL,
+			.pgoff = linear_page_index(vma, address),
+			.virtual_address = (void __user *)(address & PAGE_MASK),
+			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
+		};
+		int ret;
+
+		pte_unmap_unlock(page_table, ptl);
+		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
+		if (ret & VM_FAULT_ERROR)
+			return ret;
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+		/*
+		 * We might have raced with another page fault while we
+		 * released the pte_offset_map_lock.
+		 */
+		if (!pte_same(*page_table, orig_pte)) {
+			pte_unmap_unlock(page_table, ptl);
+			return 0;
+		}
+	}
+	return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte,
+			     NULL, 0, 0);
+}
+
 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *page_table,
 			pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte,
@@ -2259,13 +2294,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * VM_PFNMAP VMA.
 	 *
 	 * We should not cow pages in a shared writeable mapping.
-	 * Just mark the pages writable as we can't do any dirty
-	 * accounting on raw pfn maps.
+	 * Just mark the pages writable and/or call ops->pfn_mkwrite.
 	 */
 	if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))
-		return wp_page_reuse(mm, vma, address, page_table, ptl,
-				     orig_pte, old_page, 0, 0);
+		return wp_pfn_shared(mm, vma, address, page_table, ptl,
+				     orig_pte, pmd);
 
 	pte_unmap_unlock(page_table, ptl);
 	return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2845,7 +2879,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	struct vm_fault vmf;
 	int off;
 
-	nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
 
 	start_addr = max(address & mask, vma->vm_start);
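
The wp_pfn_shared() path added above gives write-notification to VM_PFNMAP/VM_MIXEDMAP mappings that are VM_SHARED: on a write fault, do_wp_page() now calls the vma's ->pfn_mkwrite callback, if the driver provides one, before the pte is marked writable via wp_page_reuse(). Below is a minimal, illustrative sketch of a driver-side callback; it is not part of this merge, the example_* names are hypothetical, and only the callback signature and the vm_fault fields are taken from the hunks above.

/*
 * Illustrative sketch only -- not part of this merge.  A driver that maps
 * raw pfns (VM_PFNMAP or VM_MIXEDMAP, VM_SHARED) could hook the new
 * ->pfn_mkwrite callback roughly like this.  All example_* names are made up.
 */
#include <linux/mm.h>
#include <linux/printk.h>

static int example_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/*
	 * Called from wp_pfn_shared() with vmf->page == NULL; vmf->pgoff
	 * identifies the faulting page offset.  A real driver would make the
	 * backing storage writable/durable here, or return a VM_FAULT_* code
	 * to abort the fault.
	 */
	pr_debug("pfn_mkwrite at pgoff %lu\n", vmf->pgoff);
	return 0;	/* wp_pfn_shared() then reuses the existing pte */
}

static const struct vm_operations_struct example_vm_ops = {
	.pfn_mkwrite	= example_pfn_mkwrite,
};

Separately, the last hunk swaps ACCESS_ONCE() for READ_ONCE() in do_fault_around(); READ_ONCE() is the preferred accessor because, unlike the volatile-cast ACCESS_ONCE(), it also works on non-scalar types.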