Diffstat (limited to 'include/asm-x86_64')
39 files changed, 225 insertions, 202 deletions
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index decaa2d540e8..1dd40067c67c 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -39,6 +39,7 @@
 #define	APIC_SPIV_FOCUS_DISABLED	(1<<9)
 #define	APIC_SPIV_APIC_ENABLED		(1<<8)
 #define	APIC_ISR	0x100
+#define	APIC_ISR_NR	0x8	/* Number of 32 bit ISR registers. */
 #define	APIC_TMR	0x180
 #define	APIC_IRR	0x200
 #define	APIC_ESR	0x280
@@ -136,8 +137,6 @@
  */
 #define u32 unsigned int
 
-#define lapic ((volatile struct local_apic *)APIC_BASE)
-
 struct local_apic {
 
 /*000*/	struct { u32 __reserved[4]; } __reserved_01;
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..cecbf7baa6aa 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
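The atomic.h hunk reworks atomic_add_unless() so that atomic_cmpxchg() is no longer attempted once the value already equals the forbidden value u. Written out as a function rather than a statement-expression macro, the new loop is roughly the following sketch (the function name is invented here for illustration):

    /* Sketch only: the real code is the macro shown above, operating on atomic_t. */
    static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
    {
            int c, old;

            c = atomic_read(v);
            for (;;) {
                    if (unlikely(c == u))
                            break;                  /* value is 'u': do not add */
                    old = atomic_cmpxchg(v, c, c + a);
                    if (likely(old == c))
                            break;                  /* cmpxchg installed c + a */
                    c = old;                        /* raced with another update: retry */
            }
            return c != u;                          /* non-zero if the add happened */
    }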
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index eb4df23e1e41..79212128d0f7 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word)
 
 #ifdef __KERNEL__
 
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-	if (b[0])
-		return __ffs(b[0]);
-	if (b[1])
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-}
+#include <asm-generic/bitops/sched.h>
 
 /**
  * ffs - find first bit set
@@ -412,43 +405,20 @@ static __inline__ int fls(int x)
 	return r+1;
 }
 
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
 
 #endif /* __KERNEL__ */
 
 #ifdef __KERNEL__
 
-#define ext2_set_bit(nr,addr) \
-	__test_and_set_bit((nr),(unsigned long*)addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
 #define ext2_set_bit_atomic(lock,nr,addr) \
 	test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
-	__test_and_clear_bit((nr),(unsigned long*)addr)
 #define ext2_clear_bit_atomic(lock,nr,addr) \
 	test_and_clear_bit((nr),(unsigned long*)addr)
-#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
-	find_first_zero_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
-	find_next_zero_bit((unsigned long*)addr, size, off)
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
-#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
-#define minix_find_first_zero_bit(addr,size) \
-	find_first_zero_bit((void*)addr,size)
+
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 263f0a211ed7..f8dff1c67538 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -22,4 +22,6 @@
 
 #endif
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 76bb6193ae91..662964b74e34 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -64,6 +64,7 @@
 #define X86_FEATURE_REP_GOOD	(3*32+ 4) /* rep microcode works well on this CPU */
 #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
 #define X86_FEATURE_SYNC_RDTSC	(3*32+6)  /* RDTSC syncs CPU core */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+7)  /* FIP/FOP/FDP leaks through FXSAVE */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
new file mode 100644
index 000000000000..93b2b15d4325
--- /dev/null
+++ b/include/asm-x86_64/dmi.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+extern void *dmi_ioremap(unsigned long addr, unsigned long size);
+extern void dmi_iounmap(void *addr, unsigned long size);
+
+#define DMI_MAX_DATA 2048
+
+extern int dmi_alloc_index;
+extern char dmi_alloc_data[DMI_MAX_DATA];
+
+/* This is so early that there is no good way to allocate dynamic memory.
+   Allocate data in an BSS array. */
+static inline void *dmi_alloc(unsigned len)
+{
+	int idx = dmi_alloc_index;
+	if ((dmi_alloc_index += len) > DMI_MAX_DATA)
+		return NULL;
+	return dmi_alloc_data + idx;
+}
+
+#define dmi_ioremap early_ioremap
+#define dmi_iounmap early_iounmap
+
+#endif
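dmi_alloc() in the new header is a simple bump allocator over a static buffer, for use before the normal allocators are available. A hypothetical caller (not part of this patch) would use it like this:

    /* Hypothetical caller: stash a DMI string in the static dmi_alloc_data[] pool. */
    static char *dmi_copy_string(const char *s)
    {
            size_t len = strlen(s) + 1;
            char *p = dmi_alloc(len);       /* advances dmi_alloc_index by len */

            if (!p)
                    return NULL;            /* DMI_MAX_DATA bytes exhausted */
            memcpy(p, s, len);
            return p;
    }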
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index 8dcc32665240..670a3388e70a 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -47,7 +47,8 @@ extern void contig_e820_setup(void);
 extern unsigned long e820_end_of_ram(void);
 extern void e820_reserve_resources(void);
 extern void e820_print_map(char *who);
-extern int e820_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
 
 extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
 extern void e820_setup_gap(void);
@@ -58,6 +59,8 @@ extern void __init parse_memopt(char *p, char **end);
 extern void __init parse_memmapopt(char *p, char **end);
 
 extern struct e820map e820;
+
+extern unsigned ebda_addr, ebda_size;
 #endif/*!__ASSEMBLY__*/
 
 #endif/*__E820_HEADER*/
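The old e820_mapped() is split into two queries with different semantics: e820_any_mapped() asks whether any part of the range carries the given type, e820_all_mapped() whether the whole range does. A hypothetical caller illustrating the difference:

    /* Hypothetical illustration only. */
    static void e820_query_example(void)
    {
            /* true if even one byte of the range is marked E820_RESERVED */
            if (e820_any_mapped(0xa0000, 0x100000, E820_RESERVED))
                    printk("part of the legacy hole is reserved\n");

            /* true only if the whole range is covered by E820_RAM entries */
            if (e820_all_mapped(0, 0xa0000, E820_RAM))
                    printk("low 640K is all RAM\n");
    }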
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index 43862cd6a569..b4f8f4a41a6e 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -8,6 +8,7 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
 #include <asm/processor.h>
+#include <asm/compat.h>
 
 /* x86-64 relocation types */
 #define R_X86_64_NONE		0	/* No reloc */
@@ -157,6 +158,9 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+
 #endif
 
 #endif
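STACK_RND_MASK is consumed by the generic ELF loader when it randomizes the initial stack placement; the mask is expressed in pages. A sketch of that use (shape only, not the exact fs/binfmt_elf.c code):

    /* Sketch, assuming get_random_int()/PAGE_ALIGN() as available to the loader. */
    static unsigned long randomize_stack_top_sketch(unsigned long stack_top)
    {
            unsigned long rnd = ((unsigned long)get_random_int() & STACK_RND_MASK) << PAGE_SHIFT;

            return PAGE_ALIGN(stack_top) - rnd;
    }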
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
index af7ded63b517..52825ce689f2 100644
--- a/include/asm-x86_64/floppy.h
+++ b/include/asm-x86_64/floppy.h
@@ -155,7 +155,7 @@ static int fd_request_irq(void)
 
 static unsigned long dma_mem_alloc(unsigned long size)
 {
-	return __get_dma_pages(GFP_KERNEL,get_order(size));
+	return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
 }
 
 
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 8602c09bf89e..9804bf07b092 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__(
+		"1:	" LOCK_PREFIX "cmpxchgl %3, %1		\n"
+
+		"2:	.section .fixup, \"ax\"			\n"
+		"3:	mov	%2, %0				\n"
+		"	jmp	2b				\n"
+		"	.previous				\n"
+
+		"	.section __ex_table, \"a\"		\n"
+		"	.align	8				\n"
+		"	.quad	1b,3b				\n"
+		"	.previous				\n"
+
+		: "=a" (oldval), "=m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "0" (oldval)
+		: "memory"
+	);
+
+	return oldval;
+}
+
 #endif
 #endif
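futex_atomic_cmpxchg_inatomic() returns the value it found at *uaddr (or -EFAULT if the user page cannot be accessed), so a caller checks for success by comparing the return value with the expected old value. A hypothetical caller:

    /* Hypothetical caller, not part of the patch. */
    static int futex_handoff(int __user *uaddr, int old_tid, int new_tid)
    {
            int curval = futex_atomic_cmpxchg_inatomic(uaddr, old_tid, new_tid);

            if (curval == -EFAULT)
                    return -EFAULT;                 /* user memory not writable */
            return curval == old_tid ? 0 : -EAGAIN; /* someone else changed the word */
    }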
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index c20c28f5c7a0..18ff7ee9e774 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -51,10 +51,14 @@
 
 #define HPET_TN_ROUTE_SHIFT	9
 
+#define HPET_TICK_RATE (HZ * 100000UL)
+
 extern int is_hpet_enabled(void);
 extern int hpet_rtc_timer_init(void);
 extern int oem_force_hpet_timer(void);
 
+extern int hpet_use_timer;
+
 #ifdef CONFIG_HPET_EMULATE_RTC
 extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
 extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 876eb9a2fe78..cba8a3b0cded 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -72,6 +72,23 @@ extern int set_fpregs(struct task_struct *tsk,
 #define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
 #define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
 
+#define X87_FSW_ES (1 << 7)	/* Exception Summary */
+
+/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+   is pending. Clear the x87 state here by setting it to fixed
+   values. The kernel data segment can be sometimes 0 and sometimes
+   new user value. Both should be ok.
+   Use the PDA as safe address because it should be already in L1. */
+static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+{
+	if (unlikely(fx->swd & X87_FSW_ES))
+		asm volatile("fnclex");
+	alternative_input(ASM_NOP8 ASM_NOP2,
+		     "    emms\n"		/* clear stack tags */
+		     "    fildl %%gs:0",	/* load to clear state */
+		     X86_FEATURE_FXSAVE_LEAK);
+}
+
 static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
@@ -119,6 +136,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
 #endif
 	if (unlikely(err))
 		__clear_user(fx, sizeof(struct i387_fxsave_struct));
+	/* No need to clear here because the caller clears USED_MATH */
 	return err;
 }
 
@@ -149,7 +167,7 @@ static inline void __fxsave_clear(struct task_struct *tsk)
 			     "i" (offsetof(__typeof__(*tsk),
 					   thread.i387.fxsave)));
 #endif
-	__asm__ __volatile__("fnclex");
+	clear_fpu_state(&tsk->thread.i387.fxsave);
 }
 
 static inline void kernel_fpu_begin(void)
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index 20468983d453..b4f4b172b15a 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -305,7 +305,7 @@
 #define __NR_ia32_mknodat		297
 #define __NR_ia32_fchownat		298
 #define __NR_ia32_futimesat		299
-#define __NR_ia32_newfstatat		300
+#define __NR_ia32_fstatat64		300
 #define __NR_ia32_unlinkat		301
 #define __NR_ia32_renameat		302
 #define __NR_ia32_linkat		303
@@ -317,6 +317,4 @@
 #define __NR_ia32_ppoll			309
 #define __NR_ia32_unshare		310
 
-#define IA32_NR_syscalls 315	/* must be > than biggest syscall! */
-
 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 9dac18db8291..a05da8a50bfd 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -135,6 +135,9 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 	return __ioremap(offset, size, 0);
 }
 
+extern void *early_ioremap(unsigned long addr, unsigned long size);
+extern void early_iounmap(void *addr, unsigned long size);
+
 /*
  * This one maps high address device memory and turns off caching for that area.
  * it's useful if some control registers are in such an area and write combining
@@ -143,11 +146,6 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 
-/* Use normal IO mappings for DMI */
-#define dmi_ioremap ioremap
-#define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
-
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
@@ -179,7 +177,7 @@ static inline __u16 __readw(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u16 *)addr;
 }
-static inline __u32 __readl(const volatile void __iomem *addr)
+static __always_inline __u32 __readl(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u32 *)addr;
 }
@@ -202,23 +200,6 @@ static inline __u64 __readq(const volatile void __iomem *addr)
 
 #define mmiowb()
 
-#ifdef CONFIG_UNORDERED_IO
-static inline void __writel(__u32 val, volatile void __iomem *addr)
-{
-	volatile __u32 __iomem *target = addr;
-	asm volatile("movnti %1,%0"
-		     : "=m" (*target)
-		     : "r" (val) : "memory");
-}
-
-static inline void __writeq(__u64 val, volatile void __iomem *addr)
-{
-	volatile __u64 __iomem *target = addr;
-	asm volatile("movnti %1,%0"
-		     : "=m" (*target)
-		     : "r" (val) : "memory");
-}
-#else
 static inline void __writel(__u32 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u32 *)addr = b;
@@ -227,7 +208,6 @@ static inline void __writeq(__u64 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u64 *)addr = b;
 }
-#endif
 static inline void __writeb(__u8 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u8 *)addr = b;
@@ -269,23 +249,11 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
  */
 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
 
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
 /*
  * Again, x86-64 does not require mem IO specific function.
  */
 
 #define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void *)(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
 
 /**
  *	check_signature		-	find BIOS signatures
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index ee1bc69aec9c..52484e82c641 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -205,6 +205,7 @@ extern int skip_ioapic_setup;
 extern int io_apic_get_version (int ioapic);
 extern int io_apic_get_redir_entries (int ioapic);
 extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
+extern int timer_uses_ioapic_pin_0;
 #endif
 
 extern int sis_apic_bug; /* dummy */
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b9ed4c0c8783..cf795631d9b4 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,21 +5,20 @@
 
 struct pt_regs;
 
 struct die_args {
 	struct pt_regs *regs;
 	const char *str;
 	long err;
 	int trapnr;
 	int signr;
 };
+
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head die_chain;
 
-/* Note - you should never unregister because that can race with NMIs.
-   If you really want to do it first unregister - then synchronize_sched - then free.
-  */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *die_chain;
 /* Grossly misnamed. */
 enum die_val {
 	DIE_OOPS = 1,
 	DIE_INT3,
 	DIE_DEBUG,
@@ -33,8 +32,8 @@ enum die_val {
 	DIE_CALL,
 	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 };
 
 static inline int notify_die(enum die_val val, const char *str,
 			     struct pt_regs *regs, long err, int trap, int sig)
 {
@@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str,
 		.trapnr = trap,
 		.signr = sig
 	};
-	return notifier_call_chain(&die_chain, val, &args);
+	return atomic_notifier_call_chain(&die_chain, val, &args);
 }
 
 extern int printk_address(unsigned long address);
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index 3e72c41727c5..cd17945bf218 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@
 
 typedef struct
 {
-	volatile unsigned int counter;
+	volatile long counter;
 } local_t;
 
 #define LOCAL_INIT(i)	{ (i) }
@@ -13,34 +13,34 @@ typedef struct
 #define local_read(v)	((v)->counter)
 #define local_set(v,i)	(((v)->counter) = (i))
 
-static __inline__ void local_inc(local_t *v)
+static inline void local_inc(local_t *v)
 {
 	__asm__ __volatile__(
-		"incl %0"
+		"incq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
 
-static __inline__ void local_dec(local_t *v)
+static inline void local_dec(local_t *v)
 {
 	__asm__ __volatile__(
-		"decl %0"
+		"decq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
 
-static __inline__ void local_add(unsigned int i, local_t *v)
+static inline void local_add(long i, local_t *v)
 {
 	__asm__ __volatile__(
-		"addl %1,%0"
+		"addq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
 
-static __inline__ void local_sub(unsigned int i, local_t *v)
+static inline void local_sub(long i, local_t *v)
 {
 	__asm__ __volatile__(
-		"subl %1,%0"
+		"subq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
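With the counter widened to long and the instructions switched to their 64-bit q forms, local_t now covers the full 64-bit range instead of wrapping at 32 bits. A usage sketch (hypothetical per-CPU statistics counter, not part of the patch):

    static DEFINE_PER_CPU(local_t, bytes_received) = LOCAL_INIT(0);

    static void account_rx(long len)
    {
            /* one addq on this CPU's counter; safe against interrupts on the same CPU */
            local_add(len, &__get_cpu_var(bytes_received));
    }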
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 5d298b799a9f..7229785094e3 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -70,6 +70,9 @@ struct mce_log {
 #define MCE_THRESHOLD_BASE	MCE_EXTENDED_BANK + 1 /* MCE_AMD */
 #define MCE_THRESHOLD_DRAM_ECC	MCE_THRESHOLD_BASE + 4
 
+#ifdef __KERNEL__
+#include <asm/atomic.h>
+
 void mce_log(struct mce *m);
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
@@ -87,4 +90,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
 }
 #endif
 
+extern atomic_t mce_entry;
+
+#endif
+
 #endif
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index d0e97b74f735..dd5cb0534d37 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -1,19 +1,8 @@
 #ifndef __X8664_MMAN_H__
 #define __X8664_MMAN_H__
 
-#define PROT_READ	0x1		/* page can be read */
-#define PROT_WRITE	0x2		/* page can be written */
-#define PROT_EXEC	0x4		/* page can be executed */
-#define PROT_NONE	0x0		/* page can not be accessed */
-#define PROT_SEM	0x8
-#define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
-#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
+#include <asm-generic/mman.h>
 
-#define MAP_SHARED	0x01		/* Share changes */
-#define MAP_PRIVATE	0x02		/* Changes are private */
-#define MAP_TYPE	0x0f		/* Mask for type of mapping */
-#define MAP_FIXED	0x10		/* Interpret addr exactly */
-#define MAP_ANONYMOUS	0x20		/* don't use a file */
 #define MAP_32BIT	0x40		/* only give out 32bit addresses */
 
 #define MAP_GROWSDOWN	0x0100		/* stack-like segment */
@@ -24,22 +13,7 @@
 #define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
 
-#define MS_ASYNC	1		/* sync memory asynchronously */
-#define MS_INVALIDATE	2		/* invalidate the caches */
-#define MS_SYNC		4		/* synchronous memory sync */
-
 #define MCL_CURRENT	1		/* lock all current mappings */
 #define MCL_FUTURE	2		/* lock all future mappings */
 
-#define MADV_NORMAL	0x0		/* default page-in behavior */
-#define MADV_RANDOM	0x1		/* page-in minimum required */
-#define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
-#define MADV_WILLNEED	0x3		/* pre-fault pages */
-#define MADV_DONTNEED	0x4		/* discard these pages */
-#define MADV_REMOVE	0x5		/* remove these pages & resources */
-
-/* compatibility flags */
-#define MAP_ANON	MAP_ANONYMOUS
-#define MAP_FILE	0
-
 #endif
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index 16e4be4de0c5..19f0c83d0792 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -34,12 +34,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		clear_bit(cpu, &prev->cpu_vm_mask);
+		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		write_pda(mmu_state, TLBSTATE_OK);
 		write_pda(active_mm, next);
 #endif
-		set_bit(cpu, &next->cpu_vm_mask);
+		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
 
 		if (unlikely(next->context.ldt != prev->context.ldt))
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		write_pda(mmu_state, TLBSTATE_OK);
 		if (read_pda(active_mm) != next)
 			out_of_line_bug();
-		if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
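The switch from raw bitops on &mm->cpu_vm_mask to the cpu_* accessors matters because cpu_vm_mask is a cpumask_t, which is only a single long for small NR_CPUS. The accessors map onto the same bit operations (sketch; 'mask' is hypothetical):

    static void cpumask_accessor_demo(void)
    {
            cpumask_t mask = CPU_MASK_NONE;

            cpu_set(1, mask);                       /* like set_bit(1, mask.bits) */
            if (!cpu_test_and_set(2, mask))         /* like test_and_set_bit(2, mask.bits) */
                    cpu_clear(1, mask);             /* like clear_bit(1, mask.bits) */
    }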
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 972c9359f7d7..6944e7122df5 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,11 +12,17 @@
 
 #include <asm/smp.h>
 
-#define NODEMAPSIZE 0xfff
+/* Should really switch to dynamic allocation at some point */
+#define NODEMAPSIZE 0x4fff
 
 /* Simple perfect hash to map physical addresses to node numbers */
-extern int memnode_shift;
-extern u8  memnodemap[NODEMAPSIZE];
+struct memnode {
+	int shift;
+	u8 map[NODEMAPSIZE];
+} ____cacheline_aligned;
+extern struct memnode memnode;
+#define memnode_shift memnode.shift
+#define memnodemap memnode.map
 
 extern struct pglist_data *node_data[];
 
@@ -39,12 +45,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
 #define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
 extern int pfn_valid(unsigned long pfn);
 #endif
 
-#define local_mapnr(kvaddr) \
-	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
 #endif
 #endif
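Folding the shift and the map into one cache-line-aligned struct memnode means a physical-to-node lookup touches a single object, and the #define wrappers keep existing users unchanged. The lookup served by this layout has the following shape (phys_to_nid() itself already lives in this header; this is only a sketch of it):

    static inline int phys_to_nid_sketch(unsigned long addr)
    {
            return memnodemap[addr >> memnode_shift];   /* i.e. memnode.map[addr >> memnode.shift] */
    }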
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index dffe276ca2df..1cc92fe02503 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -2,13 +2,12 @@
 #define _ASM_X8664_NUMA_H 1
 
 #include <linux/nodemask.h>
-#include <asm/numnodes.h>
 
-struct node {
+struct bootnode {
 	u64 start,end;
 };
 
-extern int compute_hash_shift(struct node *nodes, int numnodes);
+extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
 extern int pxm_to_node(int nid);
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
@@ -18,6 +17,8 @@ extern void numa_init_array(void);
 extern int numa_off;
 
 extern void numa_set_node(int cpu, int node);
+extern void srat_reserve_add_area(int nodeid);
+extern int hotadd_percent;
 
 extern unsigned char apicid_to_node[256];
 #ifdef CONFIG_NUMA
diff --git a/include/asm-x86_64/numnodes.h b/include/asm-x86_64/numnodes.h
deleted file mode 100644
index 32be16b8ae96..000000000000
--- a/include/asm-x86_64/numnodes.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X8664_NUMNODES_H
-#define _ASM_X8664_NUMNODES_H 1
-
-#include <linux/config.h>
-
-#ifdef CONFIG_NUMA
-#define NODES_SHIFT	6
-#else
-#define NODES_SHIFT	0
-#endif
-
-#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 615e3e494929..408185bac351 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define __boot_va(x)		__va(x)
 #define __boot_pa(x)		__pa(x)
 #ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < end_pfn)
 #endif
 
@@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #endif /* __KERNEL__ */
 
+#include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
 #endif /* _X86_64_PAGE_H */
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index fd03e15d7ea6..8a05af264d18 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -19,8 +19,6 @@ extern unsigned int pcibios_assign_all_busses(void);
 #endif
 #define pcibios_scan_all_fns(a, b)	0
 
-extern int no_iommu, force_iommu;
-
 extern unsigned long pci_mem_start;
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		(pci_mem_start)
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index c7ab38a601af..b47c3df9ed1d 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -22,8 +22,8 @@ struct x8664_pda {
 	int nodenumber;		    /* number of current node */
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* number of NMI on this CPUs */
-	struct mm_struct *active_mm;
 	int mmu_state;
+	struct mm_struct *active_mm;
 	unsigned apic_timer_irqs;
 } ____cacheline_aligned_in_smp;
 
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f75..7f33aaf9f7b1 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset(__i),	\
-			       (src), (size));			\
+	for_each_possible_cpu(__i)				\
+		memcpy((pcpudst)+__per_cpu_offset(__i),		\
+		       (src), (size));				\
 } while (0)
 
 extern void setup_per_cpu_areas(void);
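for_each_possible_cpu() simply folds the cpu_possible() test into the iterator, so the rewritten percpu_modcopy() copies to exactly the same set of CPUs as before. Sketch of the equivalence (do_something() is hypothetical):

    unsigned int i;

    for_each_possible_cpu(i)
            do_something(i);

    /* behaves like the open-coded form it replaces: */
    for (i = 0; i < NR_CPUS; i++)
            if (cpu_possible(i))
                    do_something(i);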
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 08cad2482bcb..43d4c333a8b1 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -45,12 +45,39 @@ static inline void pud_free (pud_t *pud)
 	free_page((unsigned long)pud);
 }
 
+static inline void pgd_list_add(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	page->index = (pgoff_t)pgd_list;
+	if (pgd_list)
+		pgd_list->private = (unsigned long)&page->index;
+	pgd_list = page;
+	page->private = (unsigned long)&pgd_list;
+	spin_unlock(&pgd_lock);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+	struct page *next, **pprev, *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	next = (struct page *)page->index;
+	pprev = (struct page **)page->private;
+	*pprev = next;
+	if (next)
+		next->private = (unsigned long)pprev;
+	spin_unlock(&pgd_lock);
+}
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!pgd)
 		return NULL;
+	pgd_list_add(pgd);
 	/*
 	 * Copy kernel pointers in from init.
 	 * Could keep a freelist or slab cache of those because the kernel
@@ -67,6 +94,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 static inline void pgd_free(pgd_t *pgd)
 {
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+	pgd_list_del(pgd);
 	free_page((unsigned long)pgd);
 }
 
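pgd_list_add()/pgd_list_del() thread every pgd page onto a singly linked list through page->index (next pointer) and page->private (back pointer), under pgd_lock. That list exists so code such as the vmalloc_sync_all() declared in the pgtable.h hunk below can visit every page-global directory; a sketch of such a walker (hypothetical helper, not part of the patch):

    static void for_each_pgd_sketch(void (*fn)(pgd_t *pgd))
    {
            struct page *page;

            spin_lock(&pgd_lock);
            for (page = pgd_list; page; page = (struct page *)page->index)
                    fn((pgd_t *)page_address(page));
            spin_unlock(&pgd_lock);
    }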
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 8fbf4dd72115..31e83c3bd022 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -131,7 +131,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS	0
 
 #ifndef __ASSEMBLY__
@@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte)		{ return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
 
 static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
 static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
@@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _
 static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
 
 struct vm_area_struct;
 
@@ -293,19 +293,19 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
 {
 	if (!pte_dirty(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
 }
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	clear_bit(_PAGE_BIT_RW, ptep);
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
 }
 
 /*
@@ -420,6 +420,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+void vmalloc_sync_all(void);
+
 #endif /* !__ASSEMBLY__ */
 
 extern int kern_addr_valid(unsigned long addr);
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
index c43cbba31913..c0475a9d8bb8 100644
--- a/include/asm-x86_64/poll.h
+++ b/include/asm-x86_64/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP	0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 8c8d88c036ed..37a3ec433ee5 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
 #include <asm/mmsegment.h>
 #include <asm/percpu.h>
 #include <linux/personality.h>
+#include <linux/cpumask.h>
 
 #define TF_MASK		0x00000100
 #define IF_MASK		0x00000200
@@ -65,6 +66,9 @@ struct cpuinfo_x86 {
 	__u32	x86_power;
 	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
 	unsigned long loops_per_jiffy;
+#ifdef CONFIG_SMP
+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+#endif
 	__u8	apicid;
 	__u8	booted_cores;	/* number of cores as seen by OS */
 } ____cacheline_aligned;
@@ -354,9 +358,6 @@ struct extended_sigtable {
 	struct extended_signature sigs[0];
 };
 
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
 
 #define ASM_NOP1 K8_NOP1
 #define ASM_NOP2 K8_NOP2
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index c99832e7bf3f..8abf2a43c944 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -39,7 +39,6 @@ extern void config_acpi_tables(void);
 extern void ia32_syscall(void);
 extern void iommu_hole_init(void);
 
-extern void time_init_gtod(void);
 extern int pmtimer_mark_offset(void);
 extern void pmtimer_resume(void);
 extern void pmtimer_wait(unsigned);
@@ -54,8 +53,6 @@ extern int sysctl_vsyscall;
 extern int nohpet;
 extern unsigned long vxtime_hz;
 
-extern void do_softirq_thunk(void);
-
 extern int numa_setup(char *opt);
 
 extern int setup_early_printk(char *);
@@ -130,9 +127,10 @@ extern int fix_aperture;
 #define iommu_aperture 0
 #define iommu_aperture_allowed 0
 #endif
-extern int force_iommu;
 
 extern int reboot_force;
+extern int notsc_setup(char *);
+extern int setup_additional_cpus(char *);
 
 extern void smp_local_timer_interrupt(struct pt_regs * regs);
 
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 9ccbb2cfd5c0..a4fdaeb5c397 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern u8 phys_proc_id[NR_CPUS];
 extern u8 cpu_core_id[NR_CPUS];
+extern u8 cpu_llc_id[NR_CPUS];
 
 #define SMP_TRAMPOLINE_BASE 0x6000
 
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index a3493ee282bb..ee6bf275349e 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -40,26 +40,15 @@ extern void *__memcpy(void *to, const void *from, size_t len);
 
 
 #define __HAVE_ARCH_MEMSET
-#define memset __builtin_memset
+void *memset(void *s, int c, size_t n);
 
 #define __HAVE_ARCH_MEMMOVE
 void * memmove(void * dest,const void *src,size_t count);
 
-/* Use C out of line version for memcmp */
-#define memcmp __builtin_memcmp
 int memcmp(const void * cs,const void * ct,size_t count);
-
-/* out of line string functions use always C versions */
-#define strlen __builtin_strlen
 size_t strlen(const char * s);
-
-#define strcpy __builtin_strcpy
-char * strcpy(char * dest,const char *src);
-
-#define strcat __builtin_strcat
-char * strcat(char * dest, const char * src);
-
-#define strcmp __builtin_strcmp
+char *strcpy(char * dest,const char *src);
+char *strcat(char * dest, const char * src);
 int strcmp(const char * cs,const char * ct);
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
index bb9f40597d09..bc7f81715e5e 100644
--- a/include/asm-x86_64/suspend.h
+++ b/include/asm-x86_64/suspend.h
@@ -39,9 +39,7 @@ extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, sa
 extern unsigned long saved_context_eflags;
 
 #define loaddebug(thread,register) \
-	__asm__("movq %0,%%db" #register \
-		: /* no output */ \
-		:"r" ((thread)->debugreg##register))
+	set_debugreg((thread)->debugreg##register, register)
 
 extern void fix_processor_context(void);
 
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index b7f66034ae7a..397598980228 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -70,12 +70,6 @@ extern void load_gs_index(unsigned);
 		".previous" \
 		: :"r" (value), "r" (0))
 
-#define set_debug(value,register) \
-	__asm__("movq %0,%%db" #register \
-		: /* no output */ \
-		:"r" ((unsigned long) value))
-
-
 #ifdef __KERNEL__
 struct alt_instr {
 	__u8 *instr;		/* original instruction */
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f18443fcdf04..b9e5320b7625 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -33,7 +33,7 @@ static __always_inline cycles_t get_cycles_sync(void)
 	unsigned eax;
 	/* Don't do an additional sync on CPUs where we know
 	   RDTSC is already synchronous. */
-	alternative_io(ASM_NOP2, "cpuid", X86_FEATURE_SYNC_RDTSC,
+	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
			  "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
 	rdtscll(ret);
 	return ret;
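The fix swaps the first two arguments into the order alternative_io() expects: the first argument is what runs by default, the second is patched in when the feature bit is present. With the corrected order the serializing cpuid is the default and is replaced by a NOP only on CPUs flagged X86_FEATURE_SYNC_RDTSC, matching the comment above the call. Annotated form of the fixed line (comments added here, assuming that argument order):

    alternative_io("cpuid",                 /* default: serialize before RDTSC      */
                   ASM_NOP2,                /* patched in when RDTSC already syncs  */
                   X86_FEATURE_SYNC_RDTSC,
                   "=a" (eax), "0" (1) : "ebx", "ecx", "edx", "memory");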
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c642f5d9882d..9db54e9d17bb 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -68,4 +68,6 @@ extern int __node_distance(int, int);
 
 #include <asm-generic/topology.h>
 
+extern cpumask_t cpu_coregroup_map(int cpu);
+
 #endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index da0341c57949..feb77cb8c044 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -605,8 +605,20 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall)	/* for now */
 __SYSCALL(__NR_ppoll,	sys_ni_syscall)		/* for now */
 #define __NR_unshare		272
 __SYSCALL(__NR_unshare,	sys_unshare)
-
-#define __NR_syscall_max __NR_unshare
+#define __NR_set_robust_list	273
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list	274
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
+#define __NR_splice		275
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_tee		276
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_sync_file_range	277
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
+#define __NR_vmsplice		278
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
+
+#define __NR_syscall_max __NR_vmsplice
 
 #ifndef __NO_STUBS
 