path: root/arch/x86/include/asm
author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-21 21:06:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-21 21:06:55 -0500
commit		2ef14f465b9e096531343f5b734cffc5f759f4a6 (patch)
tree		07b504d7105842a4b1a74cf1e153023a02fb9c1e /arch/x86/include/asm
parent		cb715a836642e0ec69350670d1c2f800f3e2d2e4 (diff)
parent		0da3e7f526fde7a6522a3038b7ce609fc50f6707 (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm changes from Peter Anvin:
 "This is a huge set of several partly interrelated (and concurrently
  developed) changes, which is why the branch history is messier than
  one would like.

  The *really* big items are two humongous patchsets mostly developed
  by Yinghai Lu at my request, which completely revamp the way we
  create initial page tables.  In particular, rather than estimating
  how much memory we will need for page tables and then building them
  into that memory -- a calculation that has proven to be incredibly
  fragile -- we now build them (on 64 bits) with the aid of a
  "pseudo-linear mode" -- a #PF handler which creates temporary page
  tables on demand.

  This has several advantages:

  1. It makes it much easier to support things that need access to
     data very early (a follow-on patchset uses this to load microcode
     way early in the kernel startup).

  2. It allows the kernel and all the kernel data objects to be loaded
     above the 4 GB limit.  This allows kdump to work on very large
     systems.

  3. It greatly reduces the difference between Xen and native (Xen's
     equivalent of the #PF handler are the temporary page tables
     created by the domain builder), eliminating a bunch of fragile
     hooks.

  The patch series also gets us a bit closer to W^X.

  Additional work in this pull is the 64-bit get_user() work which you
  were also involved with, and a bunch of cleanups/speedups to
  __phys_addr()/__pa()."

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (105 commits)
  x86, mm: Move reserving low memory later in initialization
  x86, doc: Clarify the use of asm("%edx") in uaccess.h
  x86, mm: Redesign get_user with a __builtin_choose_expr hack
  x86: Be consistent with data size in getuser.S
  x86, mm: Use a bitfield to mask nuisance get_user() warnings
  x86/kvm: Fix compile warning in kvm_register_steal_time()
  x86-32: Add support for 64bit get_user()
  x86-32, mm: Remove reference to alloc_remap()
  x86-32, mm: Remove reference to resume_map_numa_kva()
  x86-32, mm: Rip out x86_32 NUMA remapping code
  x86/numa: Use __pa_nodebug() instead
  x86: Don't panic if can not alloc buffer for swiotlb
  mm: Add alloc_bootmem_low_pages_nopanic()
  x86, 64bit, mm: hibernate use generic mapping_init
  x86, 64bit, mm: Mark data/bss/brk to nx
  x86: Merge early kernel reserve for 32bit and 64bit
  x86: Add Crash kernel low reservation
  x86, kdump: Remove crashkernel range find limit for 64bit
  memblock: Add memblock_mem_size()
  x86, boot: Not need to check setup_header version for setup_data
  ...
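A minimal userspace model of the "pseudo-linear mode" idea described above may help: instead of sizing the page tables up front, a fixed pool of page-table pages (EARLY_DYNAMIC_PAGE_TABLES of them, 64 in this series) is consumed lazily as faults on unmapped regions arrive. Everything below is illustrative only -- toy names, toy two-level table -- and not the kernel's own code; the real handler lives in arch/x86/kernel/head64.c, not in the headers changed here.

#include <stdio.h>
#include <stdint.h>

#define POOL_PAGES	64		/* mirrors EARLY_DYNAMIC_PAGE_TABLES */
#define ENTRIES		512
#define PMD_SIZE	(1ULL << 21)	/* 2 MiB mapped per PMD entry */
#define PUD_SIZE	(ENTRIES * PMD_SIZE)

static uint64_t pool[POOL_PAGES][ENTRIES];	/* fixed pool of table pages */
static int used;
static uint64_t *pud[ENTRIES];			/* toy top level: 1 GiB slots */

/* Toy "#PF handler": identity-map the 2 MiB block containing addr on demand. */
static int fault_in(uint64_t addr)
{
	unsigned int pud_idx = (addr / PUD_SIZE) % ENTRIES;
	unsigned int pmd_idx = (addr / PMD_SIZE) % ENTRIES;

	if (!pud[pud_idx]) {			/* need a new PMD page */
		if (used >= POOL_PAGES)
			return -1;		/* fixed pool exhausted */
		pud[pud_idx] = pool[used++];
	}
	pud[pud_idx][pmd_idx] = addr & ~(PMD_SIZE - 1);	/* record the "mapping" */
	return 0;
}

int main(void)
{
	fault_in(0x00100000);		/* 1 MiB   -> consumes the first PMD page */
	fault_in(0x40000000);		/* 1 GiB   -> consumes a second PMD page */
	fault_in(0x40200000);		/* same GiB -> reuses that page */
	printf("table pages used: %d of %d\n", used, POOL_PAGES);
	return 0;
}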
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/init.h			28
-rw-r--r--	arch/x86/include/asm/kexec.h			6
-rw-r--r--	arch/x86/include/asm/mmzone_32.h		6
-rw-r--r--	arch/x86/include/asm/numa.h			2
-rw-r--r--	arch/x86/include/asm/numa_64.h			6
-rw-r--r--	arch/x86/include/asm/page.h			7
-rw-r--r--	arch/x86/include/asm/page_32.h			1
-rw-r--r--	arch/x86/include/asm/page_64.h			36
-rw-r--r--	arch/x86/include/asm/page_64_types.h		22
-rw-r--r--	arch/x86/include/asm/page_types.h		2
-rw-r--r--	arch/x86/include/asm/pgtable.h			17
-rw-r--r--	arch/x86/include/asm/pgtable_64.h		5
-rw-r--r--	arch/x86/include/asm/pgtable_64_types.h		4
-rw-r--r--	arch/x86/include/asm/pgtable_types.h		4
-rw-r--r--	arch/x86/include/asm/processor.h		1
-rw-r--r--	arch/x86/include/asm/realmode.h			3
-rw-r--r--	arch/x86/include/asm/uaccess.h			55
-rw-r--r--	arch/x86/include/asm/x86_init.h			12
18 files changed, 111 insertions, 106 deletions
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index adcc0ae73d09..223042086f4e 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -1,20 +1,14 @@
-#ifndef _ASM_X86_INIT_32_H
-#define _ASM_X86_INIT_32_H
+#ifndef _ASM_X86_INIT_H
+#define _ASM_X86_INIT_H
 
-#ifdef CONFIG_X86_32
-extern void __init early_ioremap_page_table_range_init(void);
-#endif
+struct x86_mapping_info {
+	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+	void *context;			 /* context for alloc_pgt_page */
+	unsigned long pmd_flag;		 /* page flag for PMD entry */
+	bool kernel_mapping;		 /* kernel mapping or ident mapping */
+};
 
-extern void __init zone_sizes_init(void);
+int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
+				unsigned long addr, unsigned long end);
 
-extern unsigned long __init
-kernel_physical_mapping_init(unsigned long start,
-			     unsigned long end,
-			     unsigned long page_size_mask);
-
-
-extern unsigned long __initdata pgt_buf_start;
-extern unsigned long __meminitdata pgt_buf_end;
-extern unsigned long __meminitdata pgt_buf_top;
-
-#endif /* _ASM_X86_INIT_32_H */
+#endif /* _ASM_X86_INIT_H */
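The struct above is the hook this series threads through the new identity-mapping code. As a hedged sketch of how a caller is meant to use it (the actual user in this series is the kexec/kdump path, not this snippet), the caller supplies a page-table allocator plus an opaque context and lets kernel_ident_mapping_init() walk [addr, end). The pool type, function names and flag value below are assumptions for illustration:

/* Sketch only: my_pgt_pool, pool_alloc_pgt_page() and build_ident_map()
 * are hypothetical names; __PAGE_KERNEL_LARGE_EXEC is an assumed choice
 * of PMD flags for an executable identity mapping. */
struct my_pgt_pool {
	void *pages[16];
	int nr_free;
};

static void *pool_alloc_pgt_page(void *context)
{
	struct my_pgt_pool *pool = context;

	return pool->nr_free ? pool->pages[--pool->nr_free] : NULL;
}

static int build_ident_map(struct my_pgt_pool *pool, pgd_t *pgd,
			   unsigned long start, unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= pool_alloc_pgt_page,
		.context	= pool,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping	= false,	/* identity, not kernel, mapping */
	};

	return kernel_ident_mapping_init(&info, pgd, start, end);
}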
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 6080d2694bad..17483a492f18 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -48,11 +48,11 @@
 # define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
 #else
 /* Maximum physical address we can use pages from */
-# define KEXEC_SOURCE_MEMORY_LIMIT      (0xFFFFFFFFFFUL)
+# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
 /* Maximum address we can reach in physical address mode */
-# define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
+# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
 /* Maximum address we can use for the control pages */
-# define KEXEC_CONTROL_MEMORY_LIMIT     (0xFFFFFFFFFFUL)
+# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)
 
 /* Allocate one page for the pdp and the second for the code */
 # define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index eb05fb3b02fb..8a9b3e288cb4 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
 
 #include <asm/numaq.h>
 
-extern void resume_map_numa_kva(pgd_t *pgd);
-
-#else /* !CONFIG_NUMA */
-
-static inline void resume_map_numa_kva(pgd_t *pgd) {}
-
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 49119fcea2dc..52560a2038e1 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -54,8 +54,6 @@ static inline int numa_cpu_node(int cpu)
 
 #ifdef CONFIG_X86_32
 # include <asm/numa_32.h>
-#else
-# include <asm/numa_64.h>
 #endif
 
 #ifdef CONFIG_NUMA
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
deleted file mode 100644
index 0c05f7ae46e8..000000000000
--- a/arch/x86/include/asm/numa_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_X86_NUMA_64_H
-#define _ASM_X86_NUMA_64_H
-
-extern unsigned long numa_free_all_bootmem(void);
-
-#endif /* _ASM_X86_NUMA_64_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 8ca82839288a..c87892442e53 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -17,6 +17,10 @@
 
 struct page;
 
+#include <linux/range.h>
+extern struct range pfn_mapped[];
+extern int nr_pfn_mapped;
+
 static inline void clear_user_page(void *page, unsigned long vaddr,
 				   struct page *pg)
 {
@@ -44,7 +48,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * case properly. Once all supported versions of gcc understand it, we can
  * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
  */
-#define __pa_symbol(x)	__pa(__phys_reloc_hide((unsigned long)(x)))
+#define __pa_symbol(x) \
+	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index da4e762406f7..4d550d04b609 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -15,6 +15,7 @@ extern unsigned long __phys_addr(unsigned long);
 #else
 #define __phys_addr(x)		__phys_addr_nodebug(x)
 #endif
+#define __phys_addr_symbol(x)	__phys_addr(x)
 #define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)
 
 #ifdef CONFIG_FLATMEM
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 072694ed81a5..0f1ddee6a0ce 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -3,4 +3,40 @@
 
 #include <asm/page_64_types.h>
 
+#ifndef __ASSEMBLY__
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+static inline unsigned long __phys_addr_nodebug(unsigned long x)
+{
+	unsigned long y = x - __START_KERNEL_map;
+
+	/* use the carry flag to determine if x was < __START_KERNEL_map */
+	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
+
+	return x;
+}
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __phys_addr(unsigned long);
+extern unsigned long __phys_addr_symbol(unsigned long);
+#else
+#define __phys_addr(x)		__phys_addr_nodebug(x)
+#define __phys_addr_symbol(x) \
+	((unsigned long)(x) - __START_KERNEL_map + phys_base)
+#endif
+
+#define __phys_reloc_hide(x)	(x)
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn)		((pfn) < max_pfn)
+#endif
+
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+#endif	/* !__ASSEMBLY__ */
+
 #endif /* _ASM_X86_PAGE_64_H */
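The new __phys_addr_nodebug() above folds the two 64-bit virtual ranges (direct map at PAGE_OFFSET, kernel text at __START_KERNEL_map) into one expression: the subtraction of __START_KERNEL_map wraps for direct-map addresses, so comparing x against y selects the right offset without testing the address range explicitly. A standalone demonstration with the 3.9-era constants hard-coded; phys_base here is just an example relocation value, not a real one:

#include <stdio.h>

#define PAGE_OFFSET		0xffff880000000000UL
#define __START_KERNEL_map	0xffffffff80000000UL

static unsigned long phys_base = 0x1000000UL;	/* example relocation offset */

static unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* the "carry" (x < __START_KERNEL_map) selects the direct-map case */
	return y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
}

int main(void)
{
	/* kernel-text address: 0x2000 into the image -> 0x2000 + phys_base */
	printf("%#lx\n", __phys_addr_nodebug(__START_KERNEL_map + 0x2000));
	/* direct-map address: reduces to x - PAGE_OFFSET, i.e. 0x12345000 */
	printf("%#lx\n", __phys_addr_nodebug(PAGE_OFFSET + 0x12345000));
	return 0;
}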
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 320f7bb95f76..8b491e66eaa8 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -50,26 +50,4 @@
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
 #define KERNEL_IMAGE_START	_AC(0xffffffff80000000, UL)
 
-#ifndef __ASSEMBLY__
-void clear_page(void *page);
-void copy_page(void *to, void *from);
-
-/* duplicated to the one in bootmem.h */
-extern unsigned long max_pfn;
-extern unsigned long phys_base;
-
-extern unsigned long __phys_addr(unsigned long);
-#define __phys_reloc_hide(x)	(x)
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
-extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-#endif	/* !__ASSEMBLY__ */
-
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn)		((pfn) < max_pfn)
-#endif
-
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index e21fdd10479f..54c97879195e 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,6 +51,8 @@ static inline phys_addr_t get_max_mapped(void)
 	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
 }
 
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
+
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
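pfn_range_is_mapped() is only declared here; as a hedged sketch, a helper like this can be written directly in terms of the pfn_mapped[]/nr_pfn_mapped arrays exported from <asm/page.h> earlier in this diff. This is illustrative, not a verbatim copy of the function added elsewhere in the series:

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	/* mapped only if one recorded range covers the whole request */
	for (i = 0; i < nr_pfn_mapped; i++)
		if (start_pfn >= pfn_mapped[i].start &&
		    end_pfn <= pfn_mapped[i].end)
			return true;

	return false;
}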
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index fc304279b559..1e672234c4ff 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -395,6 +395,7 @@ pte_t *populate_extra_pte(unsigned long vaddr);
 
 #ifndef __ASSEMBLY__
 #include <linux/mm_types.h>
+#include <linux/log2.h>
 
 static inline int pte_none(pte_t pte)
 {
@@ -620,6 +621,8 @@ static inline int pgd_none(pgd_t pgd)
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
+void init_mem_mapping(void);
+void early_alloc_pgt_buf(void);
 
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
@@ -786,6 +789,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 	memcpy(dst, src, count * sizeof(pgd_t));
 }
 
+#define PTE_SHIFT ilog2(PTRS_PER_PTE)
+static inline int page_level_shift(enum pg_level level)
+{
+	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
+}
+static inline unsigned long page_level_size(enum pg_level level)
+{
+	return 1UL << page_level_shift(level);
+}
+static inline unsigned long page_level_mask(enum pg_level level)
+{
+	return ~(page_level_size(level) - 1);
+}
+
 /*
  * The x86 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
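The three helpers above turn a pg_level value into a shift, size and mask. With x86-64's 512 entries per table, PTE_SHIFT is ilog2(512) = 9, so each level adds nine address bits on top of the 12-bit page offset. A quick standalone check with those values hard-coded:

#include <stdio.h>

enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

#define PAGE_SHIFT	12
#define PTE_SHIFT	9		/* ilog2(512) */

static int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

int main(void)
{
	/* prints "12 21 30": 4 KiB, 2 MiB and 1 GiB mappings respectively */
	printf("%d %d %d\n", page_level_shift(PG_LEVEL_4K),
	       page_level_shift(PG_LEVEL_2M), page_level_shift(PG_LEVEL_1G));
	return 0;
}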
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 615b0c78449f..e22c1dbf7feb 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -180,6 +180,11 @@ extern void cleanup_highmap(void);
 
 #define __HAVE_ARCH_PTE_SAME
 
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 766ea16fbbbd..2d883440cb9a 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PGTABLE_64_DEFS_H
 #define _ASM_X86_PGTABLE_64_DEFS_H
 
+#include <asm/sparsemem.h>
+
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 
@@ -60,4 +62,6 @@ typedef struct { pteval_t pte; } pte_t;
 #define MODULES_END	_AC(0xffffffffff000000, UL)
 #define MODULES_LEN	(MODULES_END - MODULES_VADDR)
 
+#define EARLY_DYNAMIC_PAGE_TABLES	64
+
 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 3c32db8c539d..e6423002c10b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -321,7 +321,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
 
-extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_init(void);
 #else
@@ -331,7 +330,7 @@ extern void native_pagetable_init(void);
 struct seq_file;
 extern void arch_report_meminfo(struct seq_file *m);
 
-enum {
+enum pg_level {
 	PG_LEVEL_NONE,
 	PG_LEVEL_4K,
 	PG_LEVEL_2M,
@@ -352,6 +351,7 @@ static inline void update_page_count(int level, unsigned long pages) { }
  * as a pte too.
  */
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+extern phys_addr_t slow_virt_to_phys(void *__address);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d172588efae5..8277941cbe99 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -721,6 +721,7 @@ extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
 extern void early_trap_init(void);
+void early_trap_pf_init(void);
 
 /* Defined in head.S */
 extern struct desc_ptr		early_gdt_descr;
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index fe1ec5bcd846..9c6b890d5e7a 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -58,6 +58,7 @@ extern unsigned char boot_gdt[];
 extern unsigned char secondary_startup_64[];
 #endif
 
-extern void __init setup_real_mode(void);
+void reserve_real_mode(void);
+void setup_real_mode(void);
 
 #endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1709801d18ec..5ee26875baea 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -125,13 +125,12 @@ extern int __get_user_4(void);
 extern int __get_user_8(void);
 extern int __get_user_bad(void);
 
-#define __get_user_x(size, ret, x, ptr)			\
-	asm volatile("call __get_user_" #size		\
-		     : "=a" (ret), "=d" (x)		\
-		     : "0" (ptr))			\
-
-/* Careful: we have to cast the result to the type of the pointer
- * for sign reasons */
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 /**
  * get_user: - Get a simple variable from user space.
@@ -150,38 +149,26 @@ extern int __get_user_bad(void);
  * Returns zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
-#ifdef CONFIG_X86_32
-#define __get_user_8(__ret_gu, __val_gu, ptr)			\
-		__get_user_x(X, __ret_gu, __val_gu, ptr)
-#else
-#define __get_user_8(__ret_gu, __val_gu, ptr)			\
-		__get_user_x(8, __ret_gu, __val_gu, ptr)
-#endif
-
+/*
+ * Careful: we have to cast the result to the type of the pointer
+ * for sign reasons.
+ *
+ * The use of %edx as the register specifier is a bit of a
+ * simplification, as gcc only cares about it as the starting point
+ * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
+ * (%ecx being the next register in gcc's x86 register sequence), and
+ * %rdx on 64 bits.
+ */
 #define get_user(x, ptr)						\
 ({									\
 	int __ret_gu;							\
-	unsigned long __val_gu;						\
+	register __inttype(*(ptr)) __val_gu asm("%edx");		\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
-	switch (sizeof(*(ptr))) {					\
-	case 1:								\
-		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	case 2:								\
-		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	case 4:								\
-		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	case 8:								\
-		__get_user_8(__ret_gu, __val_gu, ptr);			\
-		break;							\
-	default:							\
-		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	}								\
-	(x) = (__typeof__(*(ptr)))__val_gu;				\
+	asm volatile("call __get_user_%P3"				\
+		     : "=a" (__ret_gu), "=r" (__val_gu)			\
+		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
+	(x) = (__typeof__(*(ptr))) __val_gu;				\
 	__ret_gu;							\
 })
 
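The __inttype() trick above is what lets a single asm statement handle 1-, 2-, 4- and 8-byte loads, including 64-bit get_user() on 32-bit kernels: the temporary is unsigned long when the target fits a register and unsigned long long otherwise. A standalone check of the type selection (GNU C only, since __builtin_choose_expr is a compiler extension):

#include <stdio.h>

#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	int i;
	long long ll;

	/* On a 32-bit build this prints "4 8"; on a 64-bit build, "8 8". */
	printf("%zu %zu\n", sizeof(__inttype(i)), sizeof(__inttype(ll)));
	return 0;
}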
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 7669941cc9d2..d8d99222b36a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -69,17 +69,6 @@ struct x86_init_oem {
 };
 
 /**
- * struct x86_init_mapping - platform specific initial kernel pagetable setup
- * @pagetable_reserve:	reserve a range of addresses for kernel pagetable usage
- *
- * For more details on the purpose of this hook, look in
- * init_memory_mapping and the commit that added it.
- */
-struct x86_init_mapping {
-	void (*pagetable_reserve)(u64 start, u64 end);
-};
-
-/**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_init:	platform specific paging initialization call to setup
  *			the kernel pagetables and prepare accessors functions.
@@ -136,7 +125,6 @@ struct x86_init_ops {
 	struct x86_init_mpparse		mpparse;
 	struct x86_init_irqs		irqs;
 	struct x86_init_oem		oem;
-	struct x86_init_mapping		mapping;
 	struct x86_init_paging		paging;
 	struct x86_init_timers		timers;
 	struct x86_init_iommu		iommu;