author		H. Peter Anvin <hpa@linux.intel.com>	2013-01-29 17:59:09 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-01-29 18:10:15 -0500
commit		de65d816aa44f9ddd79861ae21d75010cc1fd003 (patch)
tree		04a637a43b2e52a733d0dcb7595a47057571e7da /arch/x86/mm
parent		9710f581bb4c35589ac046b0cfc0deb7f369fc85 (diff)
parent		5dcd14ecd41ea2b3ae3295a9b30d98769d52165f (diff)
Merge remote-tracking branch 'origin/x86/boot' into x86/mm2
Coming patches to x86/mm2 require the changes and advanced baseline in
x86/boot.

Resolved Conflicts:
	arch/x86/kernel/setup.c
	mm/nobootmem.c

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/fault.c		|  25
-rw-r--r--	arch/x86/mm/hugetlbpage.c	| 130
-rw-r--r--	arch/x86/mm/init_32.c		|   5
-rw-r--r--	arch/x86/mm/init_64.c		|   4
-rw-r--r--	arch/x86/mm/pgtable.c		|  10
-rw-r--r--	arch/x86/mm/tlb.c		|  10
6 files changed, 50 insertions(+), 134 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8e13ecb41bee..027088f2f7dd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -18,7 +18,7 @@
 #include <asm/pgalloc.h>		/* pgd_*(), ... */
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ... */
 #include <asm/fixmap.h>			/* VSYSCALL_START */
-#include <asm/rcu.h>			/* exception_enter(), ... */
+#include <asm/context_tracking.h>	/* exception_enter(), ... */
 
 /*
  * Page fault error code bits:
@@ -803,20 +803,6 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 	__bad_area(regs, error_code, address, SEGV_ACCERR);
 }
 
-/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
-static void
-out_of_memory(struct pt_regs *regs, unsigned long error_code,
-	      unsigned long address)
-{
-	/*
-	 * We ran out of memory, call the OOM killer, and return the userspace
-	 * (which will retry the fault, or kill us if we got oom-killed):
-	 */
-	up_read(&current->mm->mmap_sem);
-
-	pagefault_out_of_memory();
-}
-
 static void
 do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	  unsigned int fault)
@@ -879,7 +865,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 			return 1;
 		}
 
-		out_of_memory(regs, error_code, address);
+		up_read(&current->mm->mmap_sem);
+
+		/*
+		 * We ran out of memory, call the OOM killer, and return the
+		 * userspace (which will retry the fault, or kill us if we got
+		 * oom-killed):
+		 */
+		pagefault_out_of_memory();
 	} else {
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 			     VM_FAULT_HWPOISON_LARGE))
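[Editor's note] The fault.c change folds the old out_of_memory() helper into its only caller, mm_fault_error(), so the lock release and the OOM call now read in order at the call site. A condensed, annotated sketch of the resulting VM_FAULT_OOM branch (abridged: the preceding kernel-mode and fatal-signal checks are omitted):

	/* In mm_fault_error(), once the fault is known to be a
	 * user-mode OOM: drop mmap_sem before calling into the OOM
	 * killer, which can block for a long time and may pick this
	 * very task as its victim.
	 */
	up_read(&current->mm->mmap_sem);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace, which will retry the fault or die if it was
	 * oom-killed.
	 */
	pagefault_out_of_memory();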
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 937bff5cdaa7..ae1aa71d0115 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -274,42 +274,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
315static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, 288static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -317,83 +290,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long base = mm->mmap_base;
-	unsigned long addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	unsigned long start_addr;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	start_addr = mm->free_area_cache;
-
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma)
-			return addr;
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		if (addr + len <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			return (mm->free_area_cache = addr);
-		} else if (mm->free_area_cache == vma->vm_end) {
-			/* pull free_area_cache down to the first hole */
-			mm->free_area_cache = vma->vm_start;
-			mm->cached_hole_size = largest_hole;
-		}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != base) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
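[Editor's note] With this merge both hugetlb lookups delegate to the generic vm_unmapped_area() walker; the hand-rolled free_area_cache/cached_hole_size scans are gone. The only hugetlb-specific input is info.align_mask = PAGE_MASK & ~huge_page_mask(h), which selects exactly the address bits between the base-page and huge-page boundaries that a valid candidate must have clear. A minimal standalone sketch of that arithmetic, assuming 4 KiB base pages and 2 MiB huge pages (the common x86 configuration):

#include <stdio.h>

int main(void)
{
	/* PAGE_MASK for 4 KiB base pages and huge_page_mask() for
	 * 2 MiB huge pages, spelled out as plain constants.
	 */
	unsigned long page_mask      = ~((1UL << 12) - 1);
	unsigned long huge_page_mask = ~((1UL << 21) - 1);

	/* The expression used for info.align_mask in the new code. */
	unsigned long align_mask = page_mask & ~huge_page_mask;

	printf("align_mask = %#lx\n", align_mask);	/* 0x1ff000 */
	return 0;
}

With those sizes the mask comes out to 0x1ff000: vm_unmapped_area() rejects any candidate with one of those nine bits set, the same constraint the removed ALIGN(addr, huge_page_size(h)) loops enforced by hand.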
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f4fc4a28393a..b299724f6e34 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -739,10 +739,7 @@ static void __init test_wp_bit(void)
 
 	if (!boot_cpu_data.wp_works_ok) {
 		printk(KERN_CONT "No.\n");
-#ifdef CONFIG_X86_WP_WORKS_OK
-		panic(
-  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-#endif
+		panic("Linux doesn't support CPUs with broken WP.");
 	} else {
 		printk(KERN_CONT "Ok.\n");
 	}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 41785305f645..191ab12f5ff3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -577,7 +577,9 @@ void __init paging_init(void)
 	 *	 numa support is not compiled in, and later node_set_state
 	 *	 will not set it back.
 	 */
-	node_clear_state(0, N_NORMAL_MEMORY);
+	node_clear_state(0, N_MEMORY);
+	if (N_MEMORY != N_NORMAL_MEMORY)
+		node_clear_state(0, N_NORMAL_MEMORY);
 
 	zone_sizes_init();
 }
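[Editor's note] The init_64.c hunk comes from the N_MEMORY node-state work pulled in through x86/boot: paging_init() now clears N_MEMORY and, only when the two states are actually distinct, N_NORMAL_MEMORY as well. Since both sides of the comparison are enum constants, the guard is resolved at compile time. A small userspace analogue of that aliasing pattern; HAVE_DISTINCT_STATES below is a hypothetical stand-in for the kernel's highmem/movable-node configuration, not a real config symbol:

#include <stdio.h>

/* #define HAVE_DISTINCT_STATES */

enum node_states {
	N_NORMAL_MEMORY,
#ifdef HAVE_DISTINCT_STATES
	N_MEMORY,			/* a genuinely separate state */
#else
	N_MEMORY = N_NORMAL_MEMORY,	/* aliases the first one */
#endif
};

static void node_clear_state(int node, enum node_states state)
{
	printf("clear state %d on node %d\n", state, node);
}

int main(void)
{
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)	/* constant-folded */
		node_clear_state(0, N_NORMAL_MEMORY);
	return 0;
}

With the alias in effect the second call is dead code the compiler drops; with distinct states both node states get cleared, matching the kernel hunk above.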
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8573b83a63d0..e27fbf887f3b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -137,7 +137,7 @@ static void pgd_dtor(pgd_t *pgd)
 	 * against pageattr.c; it is the unique case in which a valid change
 	 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 	 * vmalloc faults work because attached pagetables are never freed.
-	 * -- wli
+	 * -- nyc
 	 */
 
 #ifdef CONFIG_X86_PAE
@@ -301,6 +301,13 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	free_page((unsigned long)pgd);
 }
 
+/*
+ * Used to set accessed or dirty bits in the page table entries
+ * on other architectures. On x86, the accessed and dirty bits
+ * are tracked by hardware. However, do_wp_page calls this function
+ * to also make the pte writeable at the same time the dirty bit is
+ * set. In that case we do actually need to write the PTE.
+ */
 int ptep_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pte_t *ptep,
 			  pte_t entry, int dirty)
@@ -310,7 +317,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	if (changed && dirty) {
 		*ptep = entry;
 		pte_update_defer(vma->vm_mm, address, ptep);
-		flush_tlb_page(vma, address);
 	}
 
 	return changed;
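[Editor's note] The two pgtable.c hunks belong together: the new comment documents why ptep_set_access_flags() exists on x86 at all, and the deleted flush_tlb_page() was its only TLB flush. Dropping it is safe because the function only ever makes a PTE more permissive, and a stale, more restrictive TLB entry resolves itself through a harmless spurious page fault that refetches the PTE. Reassembled for reference, a sketch of the function after the merge (the opening pte_same() line is context outside the hunks, quoted from the kernel source of that era):

int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	/* Only write the PTE when it actually changes and the caller
	 * is setting the dirty bit (the do_wp_page() case).
	 */
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		/* No flush_tlb_page(): a stale TLB entry only causes
		 * a spurious fault, and the refetch picks up the new,
		 * more permissive PTE.
		 */
	}

	return changed;
}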
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 0777f042e400..13a6b29e2e5d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -104,7 +104,7 @@ static void flush_tlb_func(void *info)
 		return;
 
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
+		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
 		else if (!f->flush_end)
 			__flush_tlb_single(f->flush_start);
@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	}
 
 	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
-					|| vmflag == VM_HUGETLB) {
+					|| vmflag & VM_HUGETLB) {
 		local_flush_tlb();
 		goto flush_all;
 	}
@@ -337,10 +337,8 @@ static const struct file_operations fops_tlbflush = {
 
 static int __cpuinit create_tlb_flushall_shift(void)
 {
-	if (cpu_has_invlpg) {
-		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
-				    arch_debugfs_dir, NULL, &fops_tlbflush);
-	}
+	debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
+			    arch_debugfs_dir, NULL, &fops_tlbflush);
 	return 0;
 }
 late_initcall(create_tlb_flushall_shift);
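[Editor's note] Two separate fixes meet in the tlb.c diff. The cpu_has_invlpg tests could go because, with 386 support removed via x86/boot, every CPU the kernel still runs on has INVLPG. The subtler fix is vmflag == VM_HUGETLB becoming vmflag & VM_HUGETLB: vm_flags is a bitmask, and a hugetlb VMA carries permission bits alongside VM_HUGETLB, so the equality test could never match. A minimal standalone sketch of the failure mode (flag values copied from include/linux/mm.h of that era):

#include <stdio.h>

#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_HUGETLB	0x00400000UL

int main(void)
{
	/* A realistic hugetlb VMA flag word: never just VM_HUGETLB. */
	unsigned long vmflag = VM_READ | VM_WRITE | VM_HUGETLB;

	printf("vmflag == VM_HUGETLB -> %d\n", vmflag == VM_HUGETLB);	/* 0 */
	printf("vmflag &  VM_HUGETLB -> %d\n", !!(vmflag & VM_HUGETLB));/* 1 */
	return 0;
}

The equality test silently turned the "flush everything for hugetlb ranges" shortcut into dead code; the bitwise test restores it.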