Diffstat (limited to 'include')
-rw-r--r-- | include/asm-x86/apic.h              |  4
-rw-r--r-- | include/asm-x86/asm.h               |  7
-rw-r--r-- | include/asm-x86/elf.h               |  5
-rw-r--r-- | include/asm-x86/futex.h             |  6
-rw-r--r-- | include/asm-x86/gart.h              |  6
-rw-r--r-- | include/asm-x86/mach-rdc321x/gpio.h |  3
-rw-r--r-- | include/asm-x86/mmu.h               |  5
-rw-r--r-- | include/asm-x86/msr.h               | 23
-rw-r--r-- | include/asm-x86/nmi.h               |  1
-rw-r--r-- | include/asm-x86/page_32.h           |  3
-rw-r--r-- | include/asm-x86/paravirt.h          | 42
-rw-r--r-- | include/asm-x86/pgtable-2level.h    |  2
-rw-r--r-- | include/asm-x86/pgtable-3level.h    |  7
-rw-r--r-- | include/asm-x86/pgtable.h           |  7
-rw-r--r-- | include/asm-x86/pgtable_32.h        |  5
-rw-r--r-- | include/asm-x86/pgtable_64.h        |  2
-rw-r--r-- | include/asm-x86/resume-trace.h      |  2
-rw-r--r-- | include/asm-x86/spinlock.h          |  6
18 files changed, 84 insertions, 52 deletions
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 4a59f0d6e38c..65590c9aecd4 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -81,9 +81,7 @@ extern int get_physical_broadcast(void);
 static inline void ack_APIC_irq(void)
 {
 /*
-* ack_APIC_irq() actually gets compiled as a single instruction:
-* - a single rmw on Pentium/82489DX
-* - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+* ack_APIC_irq() actually gets compiled as a single instruction
 * ... yummie.
 */
 
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 2439ae49e8ac..e1355f44d7c3 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -20,17 +20,22 @@
 
 #define _ASM_PTR __ASM_SEL(.long, .quad)
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
-#define _ASM_MOV_UL __ASM_SIZE(mov)
 
+#define _ASM_MOV __ASM_SIZE(mov)
 #define _ASM_INC __ASM_SIZE(inc)
 #define _ASM_DEC __ASM_SIZE(dec)
 #define _ASM_ADD __ASM_SIZE(add)
 #define _ASM_SUB __ASM_SIZE(sub)
 #define _ASM_XADD __ASM_SIZE(xadd)
+
 #define _ASM_AX __ASM_REG(ax)
 #define _ASM_BX __ASM_REG(bx)
 #define _ASM_CX __ASM_REG(cx)
 #define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index cd678b2d6a74..5c4745bec906 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -148,8 +148,9 @@ do { \
 
 static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
 {
-asm volatile("movl %0,%%fs" :: "r" (0));
-asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
+loadsegment(fs, 0);
+loadsegment(ds, __USER32_DS);
+loadsegment(es, __USER32_DS);
 load_gs_index(0);
 regs->ip = ip;
 regs->sp = sp;
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index 45dc24d84186..06b924ef6fa5 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -25,7 +25,7 @@
 asm volatile("1:\tmovl %2, %0\n" \
 "\tmovl\t%0, %3\n" \
 "\t" insn "\n" \
-"2:\tlock; cmpxchgl %3, %2\n" \
+"2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
 "\tjnz\t1b\n" \
 "3:\t.section .fixup,\"ax\"\n" \
 "4:\tmov\t%5, %1\n" \
@@ -64,7 +64,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
 break;
 case FUTEX_OP_ADD:
-__futex_atomic_op1("lock; xaddl %0, %2", ret, oldval,
+__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
 uaddr, oparg);
 break;
 case FUTEX_OP_OR:
@@ -122,7 +122,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 return -EFAULT;
 
-asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
 "2:\t.section .fixup, \"ax\"\n"
 "3:\tmov %2, %0\n"
 "\tjmp 2b\n"
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 07f445844146..baa54faba892 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -52,15 +52,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
 return 0;
 
 if (aper_base + aper_size > 0x100000000ULL) {
-printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
 return 0;
 }
 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
-printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
 return 0;
 }
 if (aper_size < min_size) {
-printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
 aper_size>>20, min_size>>20);
 return 0;
 }
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index 6184561980f2..94b6cdf532e2 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,6 +1,8 @@
 #ifndef ASM_X86__MACH_RDC321X__GPIO_H
 #define ASM_X86__MACH_RDC321X__GPIO_H
 
+#include <linux/kernel.h>
+
 extern int rdc_gpio_get_value(unsigned gpio);
 extern void rdc_gpio_set_value(unsigned gpio, int value);
 extern int rdc_gpio_direction_input(unsigned gpio);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
 
 static inline void gpio_free(unsigned gpio)
 {
+might_sleep();
 rdc_gpio_free(gpio);
 }
 
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index a30d7a9c8297..9d5aff14334a 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -7,14 +7,9 @@
 /*
 * The x86 doesn't have a mmu context, but
 * we put the segment information here.
-*
-* cpu_vm_mask is used to optimize ldt flushing.
 */
 typedef struct {
 void *ldt;
-#ifdef CONFIG_X86_64
-rwlock_t ldtlock;
-#endif
 int size;
 struct mutex lock;
 void *vdso;
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index eee83f783f6d..530af1f6389e 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+int *err)
+{
+DECLARE_ARGS(val, low, high);
+
+asm volatile("2: rdmsr ; xor %0,%0\n"
+"1:\n\t"
+".section .fixup,\"ax\"\n\t"
+"3: mov %3,%0 ; jmp 1b\n\t"
+".previous\n\t"
+_ASM_EXTABLE(2b, 3b)
+: "=r" (*err), EAX_EDX_RET(val, low, high)
+: "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 *p = native_read_msr_safe(msr, &err);
 return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+int err;
+
+*p = native_read_msr_amd_safe(msr, &err);
+return err;
+}
 
 #define rdtscl(low) \
 ((low) = (u32)native_read_tsc())
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index f8b76f383904..d5e715f024dc 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
 extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern void cpu_nmi_set_wd_enabled(void);
 
 extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index f32062a821c5..72f7305682c6 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -89,9 +89,6 @@ extern int nx_enabled;
 extern unsigned int __VMALLOC_RESERVE;
 extern int sysctl_legacy_va_layout;
 
-#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
-
 extern void find_low_pfn_range(void);
 extern unsigned long init_memory_mapping(unsigned long start,
 unsigned long end);
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 2e6821a0b6e7..891971f57d35 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -137,6 +137,7 @@ struct pv_cpu_ops {
 
 /* MSR, PMC and TSR operations.
 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+u64 (*read_msr_amd)(unsigned int msr, int *err);
 u64 (*read_msr)(unsigned int msr, int *err);
 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
 
@@ -257,13 +258,13 @@ struct pv_mmu_ops {
 * Hooks for allocating/releasing pagetable pages when they're
 * attached to a pagetable
 */
-void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
-void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
-void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
-void (*release_pte)(u32 pfn);
-void (*release_pmd)(u32 pfn);
-void (*release_pud)(u32 pfn);
+void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+void (*release_pte)(unsigned long pfn);
+void (*release_pmd)(unsigned long pfn);
+void (*release_pud)(unsigned long pfn);
 
 /* Pagetable manipulation functions */
 void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -726,6 +727,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -771,6 +776,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 *p = paravirt_read_msr(msr, &err);
 return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+int err;
+
+*p = paravirt_read_msr_amd(msr, &err);
+return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -993,35 +1005,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
 }
 
-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pte(unsigned pfn)
+static inline void paravirt_release_pte(unsigned long pfn)
 {
 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
-unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+unsigned long start, unsigned long count)
 {
 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pmd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned long pfn)
 {
 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
 }
 
-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
 }
-static inline void paravirt_release_pud(unsigned pfn)
+static inline void paravirt_release_pud(unsigned long pfn)
 {
 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 60440b191626..81762081dcd8 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_page(x) pfn_to_page(pte_pfn(x))
 #define pte_none(x) (!(x).pte_low)
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
 
 /*
 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index e713bd5f39a6..75f4276b5ddb 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
 static inline int pte_none(pte_t pte)
 {
 return !pte.pte_low && !pte.pte_high;
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 /*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 57d919a2d79d..888add7b0882 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -186,6 +186,13 @@ static inline int pte_special(pte_t pte)
 return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline unsigned long pte_pfn(pte_t pte)
+{
+return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
 static inline int pmd_large(pmd_t pte)
 {
 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 45c8235400fe..8de702dc7d62 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -57,8 +57,7 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 * area for the same reason. ;)
 */
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
-& ~(VMALLOC_OFFSET - 1))
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
@@ -74,6 +73,8 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
 /*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index e3dcf7a08a0b..fde9770e53d1 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
 #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
-#define pte_page(x) pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 /*
 * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 519a8ecbfc95..e39376d7de50 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -7,7 +7,7 @@
 do { \
 if (pm_trace_enabled) { \
 const void *tracedata; \
-asm volatile(_ASM_MOV_UL " $1f,%0\n" \
+asm volatile(_ASM_MOV " $1f,%0\n" \
 ".section .tracedata,\"a\"\n" \
 "1:\t.word %c1\n\t" \
 _ASM_PTR " %c2\n" \
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 5d08fa280fdf..93adae338ac6 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -97,7 +97,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 "jne 1f\n\t"
 "movw %w0,%w1\n\t"
 "incb %h1\n\t"
-"lock ; cmpxchgw %w1,%2\n\t"
+LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
 "1:"
 "sete %b1\n\t"
 "movzbl %b1,%0\n\t"
@@ -135,7 +135,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 int inc = 0x00010000;
 int tmp;
 
-asm volatile("lock ; xaddl %0, %1\n"
+asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
 "movzwl %w0, %2\n\t"
 "shrl $16, %0\n\t"
 "1:\t"
@@ -162,7 +162,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 "cmpl %0,%1\n\t"
 "jne 1f\n\t"
 "addl $0x00010000, %1\n\t"
-"lock ; cmpxchgl %1,%2\n\t"
+LOCK_PREFIX "cmpxchgl %1,%2\n\t"
 "1:"
 "sete %b1\n\t"
 "movzbl %b1,%0\n\t"