author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-14 19:49:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-14 19:49:17 -0400
commit     1dcf58d6e6e6eb7ec10e9abc56887b040205b06f (patch)
tree       c03e7a25ef13eea62f1547914a76e5c68f3f4c28 /arch/x86
parent     80dcc31fbe55932ac9204daee5f2ebc0c49b6da3 (diff)
parent     e4b0db72be2487bae0e3251c22f82c104f7c1cfd (diff)
Merge branch 'akpm' (patches from Andrew)
Merge first patchbomb from Andrew Morton:

 - arch/sh updates

 - ocfs2 updates

 - kernel/watchdog feature

 - about half of mm/

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (122 commits)
  Documentation: update arch list in the 'memtest' entry
  Kconfig: memtest: update number of test patterns up to 17
  arm: add support for memtest
  arm64: add support for memtest
  memtest: use phys_addr_t for physical addresses
  mm: move memtest under mm
  mm, hugetlb: abort __get_user_pages if current has been oom killed
  mm, mempool: do not allow atomic resizing
  memcg: print cgroup information when system panics due to panic_on_oom
  mm: numa: remove migrate_ratelimited
  mm: fold arch_randomize_brk into ARCH_HAS_ELF_RANDOMIZE
  mm: split ET_DYN ASLR from mmap ASLR
  s390: redefine randomize_et_dyn for ELF_ET_DYN_BASE
  mm: expose arch_mmap_rnd when available
  s390: standardize mmap_rnd() usage
  powerpc: standardize mmap_rnd() usage
  mips: extract logic for mmap_rnd()
  arm64: standardize mmap_rnd() usage
  x86: standardize mmap_rnd() usage
  arm: factor out mmap ASLR into mmap_rnd
  ...
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                               20
-rw-r--r--  arch/x86/include/asm/e820.h                     8
-rw-r--r--  arch/x86/include/asm/elf.h                      3
-rw-r--r--  arch/x86/include/asm/page_types.h               2
-rw-r--r--  arch/x86/include/asm/paravirt.h                 8
-rw-r--r--  arch/x86/include/asm/paravirt_types.h           8
-rw-r--r--  arch/x86/include/asm/pgalloc.h                  8
-rw-r--r--  arch/x86/include/asm/pgtable-2level_types.h     1
-rw-r--r--  arch/x86/include/asm/pgtable-3level_types.h     2
-rw-r--r--  arch/x86/include/asm/pgtable.h                  8
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h         1
-rw-r--r--  arch/x86/include/asm/pgtable_types.h            4
-rw-r--r--  arch/x86/kernel/kvm.c                           2
-rw-r--r--  arch/x86/kernel/paravirt.c                      6
-rw-r--r--  arch/x86/mm/Makefile                            2
-rw-r--r--  arch/x86/mm/ioremap.c                          23
-rw-r--r--  arch/x86/mm/memtest.c                         118
-rw-r--r--  arch/x86/mm/mmap.c                             38
-rw-r--r--  arch/x86/mm/pgtable.c                          79
-rw-r--r--  arch/x86/xen/mmu.c                             14
20 files changed, 153 insertions, 202 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index faff6934c05a..d43e7e1c784b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -87,7 +87,7 @@ config X86
         select HAVE_ARCH_KMEMCHECK
         select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
         select HAVE_USER_RETURN_NOTIFIER
-        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+        select ARCH_HAS_ELF_RANDOMIZE
         select HAVE_ARCH_JUMP_LABEL
         select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
         select SPARSE_IRQ
@@ -99,6 +99,7 @@ config X86
         select IRQ_FORCED_THREADING
         select HAVE_BPF_JIT if X86_64
         select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+        select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
         select ARCH_HAS_SG_CHAIN
         select CLKEVT_I8253
         select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -277,6 +278,12 @@ config ARCH_SUPPORTS_UPROBES
 config FIX_EARLYCON_MEM
         def_bool y
 
+config PGTABLE_LEVELS
+        int
+        default 4 if X86_64
+        default 3 if X86_PAE
+        default 2
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -714,17 +721,6 @@ endif #HYPERVISOR_GUEST
 config NO_BOOTMEM
         def_bool y
 
-config MEMTEST
-        bool "Memtest"
-        ---help---
-          This option adds a kernel parameter 'memtest', which allows memtest
-          to be set.
-                memtest=0, mean disabled; -- default
-                memtest=1, mean do 1 test pattern;
-                ...
-                memtest=4, mean do 4 test patterns.
-          If you are unsure how to answer this question, answer N.
-
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
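The MEMTEST option removed here is not dropped: per the merge message ("mm: move memtest under mm"), it moves out of arch/x86 so other architectures can use it, and the generic version grows up to 17 test patterns. The user-visible interface stays the deleted help text's boot parameter; a typical boot line (paths and values illustrative, not from this diff) would be:

    linux /boot/vmlinuz root=/dev/sda1 ro memtest=17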
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 779c2efe2e97..3ab0537872fb 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -40,14 +40,6 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
-#ifdef CONFIG_MEMTEST
-extern void early_memtest(unsigned long start, unsigned long end);
-#else
-static inline void early_memtest(unsigned long start, unsigned long end)
-{
-}
-#endif
-
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
 extern u64 early_reserve_e820(u64 sizet, u64 align);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 935588d95c82..f161c189c27b 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -339,9 +339,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                               int uses_interp);
 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 /*
  * True on X86_32 or when emulating IA32 on X86_64
  */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index f97fbe3abb67..c7c712f2648b 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -40,8 +40,10 @@
 
 #ifdef CONFIG_X86_64
 #include <asm/page_64_types.h>
+#define IOREMAP_MAX_ORDER       (PUD_SHIFT)
 #else
 #include <asm/page_32_types.h>
+#define IOREMAP_MAX_ORDER       (PMD_SHIFT)
 #endif  /* CONFIG_X86_64 */
 
 #ifndef __ASSEMBLY__
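IOREMAP_MAX_ORDER bounds the largest mapping granularity ioremap will attempt: a PUD entry on 64-bit, a PMD entry on 32-bit. A standalone arithmetic check of what those shifts amount to, assuming the usual x86 values (30 and 21, an assumption stated here rather than taken from this diff):

    /* Quick check of the mapping sizes behind IOREMAP_MAX_ORDER.
     * Shift values are the conventional x86 ones, hardcoded for the demo. */
    #include <stdio.h>

    #define X86_64_PUD_SHIFT  30   /* one PUD entry maps 1GB on x86-64 */
    #define X86_PAE_PMD_SHIFT 21   /* one PMD entry maps 2MB under PAE */

    int main(void)
    {
            printf("x86-64 max ioremap granularity: %lu MB\n",
                   (1UL << X86_64_PUD_SHIFT) >> 20);
            printf("32-bit PAE max ioremap granularity: %lu MB\n",
                   (1UL << X86_PAE_PMD_SHIFT) >> 20);
            return 0;
    }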
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5f6051d5d139..8957810ad7d1 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -545,7 +545,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
         PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
         pmdval_t ret;
@@ -585,7 +585,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
         PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                     val);
 }
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static inline pud_t __pud(pudval_t val)
 {
         pudval_t ret;
@@ -636,9 +636,9 @@ static inline void pud_clear(pud_t *pudp)
         set_pud(pudp, __pud(0));
 }
 
-#endif  /* PAGETABLE_LEVELS == 4 */
+#endif  /* CONFIG_PGTABLE_LEVELS == 4 */
 
-#endif  /* PAGETABLE_LEVELS >= 3 */
+#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */
 
 #ifdef CONFIG_X86_PAE
 /* Special-case pte-setting operations for PAE, which can't update a
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7549b8b369e4..f7b0b5c112f2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -294,7 +294,7 @@ struct pv_mmu_ops {
         struct paravirt_callee_save pgd_val;
         struct paravirt_callee_save make_pgd;
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
         void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
         void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
@@ -308,13 +308,13 @@ struct pv_mmu_ops {
         struct paravirt_callee_save pmd_val;
         struct paravirt_callee_save make_pmd;
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
         struct paravirt_callee_save pud_val;
         struct paravirt_callee_save make_pud;
 
         void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif  /* PAGETABLE_LEVELS == 4 */
-#endif  /* PAGETABLE_LEVELS >= 3 */
+#endif  /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */
 
         struct pv_lazy_ops lazy_mode;
 
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index c4412e972bbd..bf7f8b55b0f9 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -77,7 +77,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 #define pmd_pgtable(pmd)        pmd_page(pmd)
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
         struct page *page;
@@ -116,7 +116,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 }
 #endif  /* CONFIG_X86_PAE */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
         paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
@@ -142,7 +142,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
         ___pud_free_tlb(tlb, pud);
 }
 
-#endif  /* PAGETABLE_LEVELS > 3 */
-#endif  /* PAGETABLE_LEVELS > 2 */
+#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif  /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #endif /* _ASM_X86_PGALLOC_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index daacc23e3fb9..392576433e77 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -17,7 +17,6 @@ typedef union {
 #endif  /* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD       0
-#define PAGETABLE_LEVELS        2
 
 /*
  * traditional i386 two-level paging structure:
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 1bd5876c8649..bcc89625ebe5 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -24,8 +24,6 @@ typedef union {
 #define SHARED_KERNEL_PMD       1
 #endif
 
-#define PAGETABLE_LEVELS        3
-
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a0c35bf6cb92..fe57e7a98839 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -551,7 +551,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
         return npg >> (20 - PAGE_SHIFT);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
         return native_pud_val(pud) == 0;
@@ -594,9 +594,9 @@ static inline int pud_large(pud_t pud)
 {
         return 0;
 }
-#endif  /* PAGETABLE_LEVELS > 2 */
+#endif  /* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline int pgd_present(pgd_t pgd)
 {
         return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -633,7 +633,7 @@ static inline int pgd_none(pgd_t pgd)
 {
         return !native_pgd_val(pgd);
 }
-#endif  /* PAGETABLE_LEVELS > 3 */
+#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #endif  /* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 602b6028c5b6..e6844dfb4471 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -20,7 +20,6 @@ typedef struct { pteval_t pte; } pte_t;
 #endif  /* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD       0
-#define PAGETABLE_LEVELS        4
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8c7c10802e9c..78f0c8cbe316 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -234,7 +234,7 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
         return native_pgd_val(pgd) & PTE_FLAGS_MASK;
 }
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 typedef struct { pudval_t pud; } pud_t;
 
 static inline pud_t native_make_pud(pmdval_t val)
@@ -255,7 +255,7 @@ static inline pudval_t native_pud_val(pud_t pud)
 }
 #endif
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 typedef struct { pmdval_t pmd; } pmd_t;
 
 static inline pmd_t native_make_pmd(pmdval_t val)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e354cc6446ab..9435620062df 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -513,7 +513,7 @@ void __init kvm_guest_init(void)
          * can get false positives too easily, for example if the host is
          * overcommitted.
          */
-        watchdog_enable_hardlockup_detector(false);
+        hardlockup_detector_disable();
 }
 
 static noinline uint32_t __kvm_cpuid_base(void)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 548d25f00c90..c614dd492f5f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -443,7 +443,7 @@ struct pv_mmu_ops pv_mmu_ops = {
         .ptep_modify_prot_start = __ptep_modify_prot_start,
         .ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
         .set_pte_atomic = native_set_pte_atomic,
         .pte_clear = native_pte_clear,
@@ -454,13 +454,13 @@ struct pv_mmu_ops pv_mmu_ops = {
         .pmd_val = PTE_IDENT,
         .make_pmd = PTE_IDENT,
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
         .pud_val = PTE_IDENT,
         .make_pud = PTE_IDENT,
 
         .set_pgd = native_set_pgd,
 #endif
-#endif  /* PAGETABLE_LEVELS >= 3 */
+#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */
 
         .pte_val = PTE_IDENT,
         .pgd_val = PTE_IDENT,
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index c4cc74006c61..a482d105172b 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -32,6 +32,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)         += srat.o
 obj-$(CONFIG_NUMA_EMU)          += numa_emulation.o
 
-obj-$(CONFIG_MEMTEST)           += memtest.o
-
 obj-$(CONFIG_X86_INTEL_MPX)     += mpx.o
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index fdf617c00e2f..5ead4d6cf3a7 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -67,8 +67,13 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space. It transparently creates kernel huge I/O mapping when
+ * the physical address is aligned by a huge page size (1GB or 2MB) and
+ * the requested size is at least the huge page size.
+ *
+ * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
+ * Therefore, the mapping code falls back to use a smaller page toward 4KB
+ * when a mapping range is covered by non-WB type of MTRRs.
  *
  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
  * have to convert them into an offset in a page-aligned mapping, but the
@@ -326,6 +331,20 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+int arch_ioremap_pud_supported(void)
+{
+#ifdef CONFIG_X86_64
+        return cpu_has_gbpages;
+#else
+        return 0;
+#endif
+}
+
+int arch_ioremap_pmd_supported(void)
+{
+        return cpu_has_pse;
+}
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
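Nothing changes for callers: a driver still maps MMIO with the standard ioremap()/iounmap() pair, and the huge-entry optimization is applied (or not) internally based on alignment, size, CPU features and MTRR type. A minimal module sketch, with a made-up BAR address purely for illustration:

    /* Sketch of a driver mapping a 2MB-aligned, 2MB-sized region; such a
     * range is eligible for a single PMD entry after this change. The
     * physical address 0xd0000000 is invented for the example. */
    #include <linux/io.h>
    #include <linux/module.h>
    #include <linux/sizes.h>

    static void __iomem *regs;

    static int __init hugeio_demo_init(void)
    {
            regs = ioremap(0xd0000000, SZ_2M);  /* granularity chosen internally */
            if (!regs)
                    return -ENOMEM;
            return 0;
    }

    static void __exit hugeio_demo_exit(void)
    {
            iounmap(regs);
    }

    module_init(hugeio_demo_init);
    module_exit(hugeio_demo_exit);
    MODULE_LICENSE("GPL");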
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
deleted file mode 100644
index 1e9da795767a..000000000000
--- a/arch/x86/mm/memtest.c
+++ /dev/null
@@ -1,118 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/memblock.h>
-
-static u64 patterns[] __initdata = {
-        /* The first entry has to be 0 to leave memtest with zeroed memory */
-        0,
-        0xffffffffffffffffULL,
-        0x5555555555555555ULL,
-        0xaaaaaaaaaaaaaaaaULL,
-        0x1111111111111111ULL,
-        0x2222222222222222ULL,
-        0x4444444444444444ULL,
-        0x8888888888888888ULL,
-        0x3333333333333333ULL,
-        0x6666666666666666ULL,
-        0x9999999999999999ULL,
-        0xccccccccccccccccULL,
-        0x7777777777777777ULL,
-        0xbbbbbbbbbbbbbbbbULL,
-        0xddddddddddddddddULL,
-        0xeeeeeeeeeeeeeeeeULL,
-        0x7a6c7258554e494cULL, /* yeah ;-) */
-};
-
-static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
-{
-        printk(KERN_INFO "  %016llx bad mem addr %010llx - %010llx reserved\n",
-               (unsigned long long) pattern,
-               (unsigned long long) start_bad,
-               (unsigned long long) end_bad);
-        memblock_reserve(start_bad, end_bad - start_bad);
-}
-
-static void __init memtest(u64 pattern, u64 start_phys, u64 size)
-{
-        u64 *p, *start, *end;
-        u64 start_bad, last_bad;
-        u64 start_phys_aligned;
-        const size_t incr = sizeof(pattern);
-
-        start_phys_aligned = ALIGN(start_phys, incr);
-        start = __va(start_phys_aligned);
-        end = start + (size - (start_phys_aligned - start_phys)) / incr;
-        start_bad = 0;
-        last_bad = 0;
-
-        for (p = start; p < end; p++)
-                *p = pattern;
-
-        for (p = start; p < end; p++, start_phys_aligned += incr) {
-                if (*p == pattern)
-                        continue;
-                if (start_phys_aligned == last_bad + incr) {
-                        last_bad += incr;
-                        continue;
-                }
-                if (start_bad)
-                        reserve_bad_mem(pattern, start_bad, last_bad + incr);
-                start_bad = last_bad = start_phys_aligned;
-        }
-        if (start_bad)
-                reserve_bad_mem(pattern, start_bad, last_bad + incr);
-}
-
-static void __init do_one_pass(u64 pattern, u64 start, u64 end)
-{
-        u64 i;
-        phys_addr_t this_start, this_end;
-
-        for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
-                this_start = clamp_t(phys_addr_t, this_start, start, end);
-                this_end = clamp_t(phys_addr_t, this_end, start, end);
-                if (this_start < this_end) {
-                        printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
-                               (unsigned long long)this_start,
-                               (unsigned long long)this_end,
-                               (unsigned long long)cpu_to_be64(pattern));
-                        memtest(pattern, this_start, this_end - this_start);
-                }
-        }
-}
-
-/* default is disabled */
-static int memtest_pattern __initdata;
-
-static int __init parse_memtest(char *arg)
-{
-        if (arg)
-                memtest_pattern = simple_strtoul(arg, NULL, 0);
-        else
-                memtest_pattern = ARRAY_SIZE(patterns);
-
-        return 0;
-}
-
-early_param("memtest", parse_memtest);
-
-void __init early_memtest(unsigned long start, unsigned long end)
-{
-        unsigned int i;
-        unsigned int idx = 0;
-
-        if (!memtest_pattern)
-                return;
-
-        printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
-        for (i = memtest_pattern-1; i < UINT_MAX; --i) {
-                idx = i % ARRAY_SIZE(patterns);
-                do_one_pass(patterns[idx], start, end);
-        }
-}
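The deleted file moves to mm/ essentially intact; the core of each pass is a plain write-then-verify sweep per pattern, with consecutive failing words coalesced into one range that gets reserved via memblock. A minimal user-space sketch of that same sweep over a malloc'd buffer (a standalone analogue only; the kernel version walks free memblock ranges and reserves bad memory instead of printing):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* One pass: fill with a pattern, verify, coalesce runs of bad words. */
    static void one_pass(uint64_t pattern, uint64_t *buf, size_t nwords)
    {
            size_t i, bad_start = SIZE_MAX;

            for (i = 0; i < nwords; i++)
                    buf[i] = pattern;

            for (i = 0; i < nwords; i++) {
                    if (buf[i] == pattern) {
                            if (bad_start != SIZE_MAX) {
                                    printf("bad words [%zu, %zu)\n", bad_start, i);
                                    bad_start = SIZE_MAX;
                            }
                            continue;
                    }
                    if (bad_start == SIZE_MAX)
                            bad_start = i;     /* start a new bad run */
            }
            if (bad_start != SIZE_MAX)
                    printf("bad words [%zu, %zu)\n", bad_start, nwords);
    }

    int main(void)
    {
            size_t n = 1 << 20;                /* 8MB of 64-bit words */
            uint64_t *buf = malloc(n * sizeof(*buf));

            if (!buf)
                    return 1;
            one_pass(0x5555555555555555ULL, buf, n); /* one of the kernel's patterns */
            free(buf);
            return 0;
    }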
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index df4552bd239e..9d518d693b4b 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -65,24 +65,23 @@ static int mmap_is_legacy(void)
         return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
 {
-        unsigned long rnd = 0;
+        unsigned long rnd;
 
         /*
          * 8 bits of randomness in 32bit mmaps, 20 address space bits
          * 28 bits of randomness in 64bit mmaps, 40 address space bits
          */
-        if (current->flags & PF_RANDOMIZE) {
-                if (mmap_is_ia32())
-                        rnd = get_random_int() % (1<<8);
-                else
-                        rnd = get_random_int() % (1<<28);
-        }
+        if (mmap_is_ia32())
+                rnd = (unsigned long)get_random_int() % (1<<8);
+        else
+                rnd = (unsigned long)get_random_int() % (1<<28);
+
         return rnd << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
 {
         unsigned long gap = rlimit(RLIMIT_STACK);
 
@@ -91,19 +90,19 @@ static unsigned long mmap_base(void)
         else if (gap > MAX_GAP)
                 gap = MAX_GAP;
 
-        return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
 /*
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(unsigned long rnd)
 {
         if (mmap_is_ia32())
                 return TASK_UNMAPPED_BASE;
         else
-                return TASK_UNMAPPED_BASE + mmap_rnd();
+                return TASK_UNMAPPED_BASE + rnd;
 }
 
 /*
@@ -112,13 +111,18 @@ static unsigned long mmap_legacy_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-        mm->mmap_legacy_base = mmap_legacy_base();
-        mm->mmap_base = mmap_base();
+        unsigned long random_factor = 0UL;
+
+        if (current->flags & PF_RANDOMIZE)
+                random_factor = arch_mmap_rnd();
+
+        mm->mmap_legacy_base = mmap_legacy_base(random_factor);
 
         if (mmap_is_legacy()) {
                 mm->mmap_base = mm->mmap_legacy_base;
                 mm->get_unmapped_area = arch_get_unmapped_area;
         } else {
+                mm->mmap_base = mmap_base(random_factor);
                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
         }
 }
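The comment in arch_mmap_rnd() translates directly into the ASLR span: 8 bits of page-granular entropy plus PAGE_SHIFT (12) gives the quoted 20 address-space bits, i.e. a 1MB window for 32-bit mmaps, and 28 + 12 = 40 bits gives a 1TB window on 64-bit. A standalone arithmetic check (assumes an LP64 host so 1UL is 64-bit):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_shift = 12; /* x86 PAGE_SHIFT */
            unsigned long span32 = (1UL << 8)  << page_shift; /* ia32 mmaps  */
            unsigned long span64 = (1UL << 28) << page_shift; /* 64-bit mmaps */

            printf("ia32 mmap ASLR span:   %lu MB\n", span32 >> 20);
            printf("x86-64 mmap ASLR span: %lu GB\n", span64 >> 30);
            return 0;
    }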
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5a7e5252c878..0b97d2c75df3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -4,6 +4,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/mtrr.h>
 
 #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
 
@@ -58,7 +59,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
         tlb_remove_page(tlb, pte);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
         struct page *page = virt_to_page(pmd);
@@ -74,14 +75,14 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
         tlb_remove_page(tlb, page);
 }
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
         paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
         tlb_remove_page(tlb, virt_to_page(pud));
 }
-#endif  /* PAGETABLE_LEVELS > 3 */
-#endif  /* PAGETABLE_LEVELS > 2 */
+#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif  /* CONFIG_PGTABLE_LEVELS > 2 */
 
 static inline void pgd_list_add(pgd_t *pgd)
 {
@@ -117,9 +118,9 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
         /* If the pgd points to a shared pagetable level (either the
            ptes in non-PAE, or shared PMD in PAE), then just copy the
            references from swapper_pg_dir. */
-        if (PAGETABLE_LEVELS == 2 ||
-            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
-            PAGETABLE_LEVELS == 4) {
+        if (CONFIG_PGTABLE_LEVELS == 2 ||
+            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+            CONFIG_PGTABLE_LEVELS == 4) {
                 clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                 KERNEL_PGD_PTRS);
@@ -560,3 +561,67 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
 {
         __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+        u8 mtrr;
+
+        /*
+         * Do not use a huge page when the range is covered by non-WB type
+         * of MTRRs.
+         */
+        mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
+        if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+                return 0;
+
+        prot = pgprot_4k_2_large(prot);
+
+        set_pte((pte_t *)pud, pfn_pte(
+                (u64)addr >> PAGE_SHIFT,
+                __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+        return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+        u8 mtrr;
+
+        /*
+         * Do not use a huge page when the range is covered by non-WB type
+         * of MTRRs.
+         */
+        mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
+        if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+                return 0;
+
+        prot = pgprot_4k_2_large(prot);
+
+        set_pte((pte_t *)pmd, pfn_pte(
+                (u64)addr >> PAGE_SHIFT,
+                __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+        return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+        if (pud_large(*pud)) {
+                pud_clear(pud);
+                return 1;
+        }
+
+        return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+        if (pmd_large(*pmd)) {
+                pmd_clear(pmd);
+                return 1;
+        }
+
+        return 0;
+}
+#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */
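The contract these helpers establish: pud_set_huge()/pmd_set_huge() return 1 once a huge entry is installed, and 0 to tell the caller to fall back to smaller pages (a non-WB MTRR over the range; 0xFF from mtrr_type_lookup() means no MTRR covers it, which is fine). A standalone sketch of that caller pattern with stand-in stubs; none of these names are kernel API, and the "MTRR forbids odd regions" rule is invented for the demo:

    #include <stdio.h>

    #define HUGE_SIZE (2UL << 20)  /* 2MB, the PMD mapping size */

    /* Stub standing in for pmd_set_huge(): returns 1 on success, 0 when
     * the caller must fall back. Pretend odd-numbered regions are vetoed. */
    static int stub_set_huge(unsigned long phys)
    {
            return ((phys / HUGE_SIZE) % 2) == 0;
    }

    static void map_region(unsigned long phys, unsigned long size)
    {
            if (size == HUGE_SIZE && (phys & (HUGE_SIZE - 1)) == 0 &&
                stub_set_huge(phys))
                    printf("%#lx: mapped with one huge entry\n", phys);
            else
                    printf("%#lx: fall back to 4KB pages\n", phys);
    }

    int main(void)
    {
            map_region(0x00400000, HUGE_SIZE); /* even region: huge entry */
            map_region(0x00600000, HUGE_SIZE); /* odd region: fallback    */
            return 0;
    }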
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index adca9e2b6553..65083ad63b6f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -502,7 +502,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 __visible pudval_t xen_pud_val(pud_t pud)
 {
         return pte_mfn_to_pfn(pud.pud);
@@ -589,7 +589,7 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif  /* PAGETABLE_LEVELS == 4 */
+#endif  /* CONFIG_PGTABLE_LEVELS == 4 */
 
 /*
  * (Yet another) pagetable walker. This one is intended for pinning a
@@ -1628,7 +1628,7 @@ static void xen_release_pmd(unsigned long pfn)
         xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
         xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2046,7 +2046,7 @@ static void __init xen_post_allocator_init(void)
         pv_mmu_ops.set_pte = xen_set_pte;
         pv_mmu_ops.set_pmd = xen_set_pmd;
         pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
         pv_mmu_ops.set_pgd = xen_set_pgd;
 #endif
 
@@ -2056,7 +2056,7 @@ static void __init xen_post_allocator_init(void)
         pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
         pv_mmu_ops.release_pte = xen_release_pte;
         pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
         pv_mmu_ops.alloc_pud = xen_alloc_pud;
         pv_mmu_ops.release_pud = xen_release_pud;
 #endif
@@ -2122,14 +2122,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
         .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
         .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
         .pud_val = PV_CALLEE_SAVE(xen_pud_val),
         .make_pud = PV_CALLEE_SAVE(xen_make_pud),
         .set_pgd = xen_set_pgd_hyper,
 
         .alloc_pud = xen_alloc_pmd_init,
         .release_pud = xen_release_pmd_init,
-#endif  /* PAGETABLE_LEVELS == 4 */
+#endif  /* CONFIG_PGTABLE_LEVELS == 4 */
 
         .activate_mm = xen_activate_mm,
         .dup_mmap = xen_dup_mmap,