Diffstat (limited to 'include')
127 files changed, 1282 insertions, 913 deletions
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
index 525b4f6a7ace..acf55b483472 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -261,7 +261,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
 }
 #endif
 
-static inline int __mcpcia_is_mmio(unsigned long addr)
+extern inline int __mcpcia_is_mmio(unsigned long addr)
 {
 	return (addr & 0x80000000UL) == 0;
 }
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index 90e6b5d6c214..46bfff58f670 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -356,13 +356,13 @@ struct el_t2_frame_corrected {
 #define vip	volatile int *
 #define vuip	volatile unsigned int *
 
-static inline u8 t2_inb(unsigned long addr)
+extern inline u8 t2_inb(unsigned long addr)
 {
 	long result = *(vip) ((addr << 5) + T2_IO + 0x00);
 	return __kernel_extbl(result, addr & 3);
 }
 
-static inline void t2_outb(u8 b, unsigned long addr)
+extern inline void t2_outb(u8 b, unsigned long addr)
 {
 	unsigned long w;
 
@@ -371,13 +371,13 @@ static inline void t2_outb(u8 b, unsigned long addr)
 	mb();
 }
 
-static inline u16 t2_inw(unsigned long addr)
+extern inline u16 t2_inw(unsigned long addr)
 {
 	long result = *(vip) ((addr << 5) + T2_IO + 0x08);
 	return __kernel_extwl(result, addr & 3);
 }
 
-static inline void t2_outw(u16 b, unsigned long addr)
+extern inline void t2_outw(u16 b, unsigned long addr)
 {
 	unsigned long w;
 
@@ -386,12 +386,12 @@ static inline void t2_outw(u16 b, unsigned long addr)
 	mb();
 }
 
-static inline u32 t2_inl(unsigned long addr)
+extern inline u32 t2_inl(unsigned long addr)
 {
 	return *(vuip) ((addr << 5) + T2_IO + 0x18);
 }
 
-static inline void t2_outl(u32 b, unsigned long addr)
+extern inline void t2_outl(u32 b, unsigned long addr)
 {
 	*(vuip) ((addr << 5) + T2_IO + 0x18) = b;
 	mb();
@@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
 		set_hae(msb); \
 	}
 
-static DEFINE_SPINLOCK(t2_hae_lock);
+extern spinlock_t t2_hae_lock;
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
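Note on the t2_hae_lock hunk above: a static DEFINE_SPINLOCK() in a header hands every file that includes it a private copy of the lock, so the readX/writeX paths were never actually serialized against each other. With the extern declaration, exactly one definition is needed in a C file. A minimal sketch of that definition, assuming it lives in the T2 core support file (the location is an assumption, not shown in this hunk):

#include <linux/spinlock.h>

/* Sketch: the single shared lock instance matching the extern declaration above. */
DEFINE_SPINLOCK(t2_hae_lock);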
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 38f18cf18c9d..e971ab000f95 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -35,7 +35,7 @@
  * register not being up-to-date with respect to the hardware
  * value.
  */
-static inline void __set_hae(unsigned long new_hae)
+extern inline void __set_hae(unsigned long new_hae)
 {
 	unsigned long flags;
 	local_irq_save(flags);
@@ -49,7 +49,7 @@ static inline void __set_hae(unsigned long new_hae)
 	local_irq_restore(flags);
 }
 
-static inline void set_hae(unsigned long new_hae)
+extern inline void set_hae(unsigned long new_hae)
 {
 	if (new_hae != alpha_mv.hae_cache)
 		__set_hae(new_hae);
@@ -176,7 +176,7 @@ REMAP2(u64, writeq, volatile)
 #undef REMAP1
 #undef REMAP2
 
-static inline void __iomem *generic_ioportmap(unsigned long a)
+extern inline void __iomem *generic_ioportmap(unsigned long a)
 {
 	return alpha_mv.mv_ioportmap(a);
 }
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6a5be1f7debf..86c08a02d239 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -23,7 +23,7 @@
 #endif
 
 
-extern inline unsigned long
+static inline unsigned long
 __reload_thread(struct pcb_struct *pcb)
 {
 	register unsigned long a0 __asm__("$16");
@@ -114,7 +114,7 @@ extern unsigned long last_asn;
 #define __MMU_EXTERN_INLINE
 #endif
 
-static inline unsigned long
+extern inline unsigned long
 __get_new_mm_context(struct mm_struct *mm, long cpu)
 {
 	unsigned long asn = cpu_last_asn(cpu);
@@ -226,7 +226,7 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
 # endif
 #endif
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h
index 48348fe34c19..3495e8e00d70 100644
--- a/include/asm-alpha/percpu.h
+++ b/include/asm-alpha/percpu.h
@@ -1,6 +1,78 @@
 #ifndef __ALPHA_PERCPU_H
 #define __ALPHA_PERCPU_H
+#include <linux/compiler.h>
+#include <linux/threads.h>
 
-#include <asm-generic/percpu.h>
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
+#ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ */
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#ifdef CONFIG_DEBUG_PREEMPT
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+#ifndef MODULE
+#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
+#define PER_CPU_ATTRIBUTES
+#else
+/*
+ * To calculate addresses of locally defined variables, GCC uses 32-bit
+ * displacement from the GP. Which doesn't work for per cpu variables in
+ * modules, as an offset to the kernel per cpu area is way above 4G.
+ *
+ * This forces allocation of a GOT entry for per cpu variable using
+ * ldq instruction with a 'literal' relocation.
+ */
+#define SHIFT_PERCPU_PTR(var, offset) ({		\
+	extern int simple_identifier_##var(void);	\
+	unsigned long __ptr, tmp_gp;			\
+	asm (  "br %1, 1f			\n\
+	1:	ldgp %1, 0(%1)			\n\
+		ldq %0, per_cpu__" #var"(%1)\t!literal"	\
+		: "=&r"(__ptr), "=&r"(tmp_gp));		\
+	(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
+
+#define PER_CPU_ATTRIBUTES	__used
+
+#endif /* MODULE */
+
+/*
+ * A percpu variable may point to a discarded regions. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+	(*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(var, my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var)		per_cpu_var(var)
+#define __raw_get_cpu_var(var)		per_cpu_var(var)
+
+#define PER_CPU_ATTRIBUTES
+
+#endif /* SMP */
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
 
 #endif /* __ALPHA_PERCPU_H */
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index ed221d6408fc..afe20fa58c99 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -184,7 +184,7 @@ enum amask_enum {
 	__amask; })
 
 #define __CALL_PAL_R0(NAME, TYPE)				\
-static inline TYPE NAME(void)					\
+extern inline TYPE NAME(void)					\
 {								\
 	register TYPE __r0 __asm__("$0");			\
 	__asm__ __volatile__(					\
@@ -196,7 +196,7 @@ static inline TYPE NAME(void) \
 }
 
 #define __CALL_PAL_W1(NAME, TYPE0)				\
-static inline void NAME(TYPE0 arg0)				\
+extern inline void NAME(TYPE0 arg0)				\
 {								\
 	register TYPE0 __r16 __asm__("$16") = arg0;		\
 	__asm__ __volatile__(					\
@@ -207,7 +207,7 @@ static inline void NAME(TYPE0 arg0) \
 }
 
 #define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
-static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
+extern inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
 {								\
 	register TYPE0 __r16 __asm__("$16") = arg0;		\
 	register TYPE1 __r17 __asm__("$17") = arg1;		\
@@ -219,7 +219,7 @@ static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
 }
 
 #define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
-static inline RTYPE NAME(TYPE0 arg0)				\
+extern inline RTYPE NAME(TYPE0 arg0)				\
 {								\
 	register RTYPE __r0 __asm__("$0");			\
 	register TYPE0 __r16 __asm__("$16") = arg0;		\
@@ -232,7 +232,7 @@ static inline RTYPE NAME(TYPE0 arg0) \
 }
 
 #define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
-static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
+extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
 {								\
 	register RTYPE __r0 __asm__("$0");			\
 	register TYPE0 __r16 __asm__("$16") = arg0;		\
diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h
index e8df1e7aae6b..c00106bac521 100644
--- a/include/asm-alpha/vga.h
+++ b/include/asm-alpha/vga.h
@@ -13,7 +13,7 @@
 #define VT_BUF_HAVE_MEMSETW
 #define VT_BUF_HAVE_MEMCPYW
 
-extern inline void scr_writew(u16 val, volatile u16 *addr)
+static inline void scr_writew(u16 val, volatile u16 *addr)
 {
 	if (__is_ioaddr(addr))
 		__raw_writew(val, (volatile u16 __iomem *) addr);
@@ -21,7 +21,7 @@ extern inline void scr_writew(u16 val, volatile u16 *addr)
 		*addr = val;
 }
 
-extern inline u16 scr_readw(volatile const u16 *addr)
+static inline u16 scr_readw(volatile const u16 *addr)
 {
 	if (__is_ioaddr(addr))
 		return __raw_readw((volatile const u16 __iomem *) addr);
@@ -29,7 +29,7 @@ extern inline u16 scr_readw(volatile const u16 *addr)
 		return *addr;
 }
 
-extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
+static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
 {
 	if (__is_ioaddr(s))
 		memsetw_io((u16 __iomem *) s, c, count);
diff --git a/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h
index 96bd09e31e36..2526b6ed6faa 100644
--- a/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h
@@ -96,7 +96,7 @@ struct bfin_serial_port {
 	struct work_struct tx_dma_workqueue;
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct work_struct cts_workqueue;
+	struct timer_list cts_timer;
 	int cts_pin;
 	int rts_pin;
 #endif
diff --git a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
index e924569ad1d8..ebf592b59aab 100644
--- a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
@@ -88,7 +88,7 @@ struct bfin_serial_port {
 # endif
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct work_struct cts_workqueue;
+	struct timer_list cts_timer;
 	int cts_pin;
 	int rts_pin;
 #endif
diff --git a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
index 41d7b6490bb1..1bf56ffa22f9 100644
--- a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
@@ -96,7 +96,7 @@ struct bfin_serial_port {
 	struct work_struct tx_dma_workqueue;
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct work_struct cts_workqueue;
+	struct timer_list cts_timer;
 	int cts_pin;
 	int rts_pin;
 #endif
diff --git a/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h
index 59b4ad4e5b4a..5e29446a8e03 100644
--- a/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h
@@ -99,7 +99,7 @@ struct bfin_serial_port {
 	struct work_struct tx_dma_workqueue;
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct work_struct cts_workqueue;
+	struct timer_list cts_timer;
 	int cts_pin;
 	int rts_pin;
 #endif
@@ -187,7 +187,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart)
 
 #ifdef CONFIG_BFIN_UART1_CTSRTS
 	peripheral_request(P_UART1_RTS, DRIVER_NAME);
-	peripheral_request(P_UART1_CTS DRIVER_NAME);
+	peripheral_request(P_UART1_CTS, DRIVER_NAME);
 #endif
 #endif
 
@@ -202,7 +202,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart)
 
 #ifdef CONFIG_BFIN_UART3_CTSRTS
 	peripheral_request(P_UART3_RTS, DRIVER_NAME);
-	peripheral_request(P_UART3_CTS DRIVER_NAME);
+	peripheral_request(P_UART3_CTS, DRIVER_NAME);
 #endif
 #endif
 	SSYNC();
diff --git a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
index 30d90b580f18..8aa02780e642 100644
--- a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
@@ -88,7 +88,7 @@ struct bfin_serial_port {
 # endif
 #endif
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct work_struct cts_workqueue;
+	struct timer_list cts_timer;
 	int cts_pin;
 	int rts_pin;
 #endif
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 92a6d91d0c1a..7cd25b8e7c9a 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,6 +1,6 @@
 header-y += kvm.h
 
-ifeq ($(wildcard include/asm-$(SRCARCH)/a.out.h),include/asm-$(SRCARCH)/a.out.h)
+ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
 unifdef-y += a.out.h
 endif
 unifdef-y += auxvec.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 85fd0aa27a8c..4ec0a296bdec 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -2,7 +2,7 @@
 #define _ASM_GENERIC_ATOMIC_H
 /*
  * Copyright (C) 2005 Silicon Graphics, Inc.
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
  *
  * Allows to provide arch independent atomic definitions without the need to
  * edit all arch specific atomic.h files.
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 44ef329531c3..4fce3db2cecc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -197,6 +197,63 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 }
 #endif /* CONFIG_MMU */
 
+static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+					     unsigned long addr,
+					     pte_t *ptep)
+{
+	/*
+	 * Get the current pte state, but zero it out to make it
+	 * non-present, preventing the hardware from asynchronously
+	 * updating it.
+	 */
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+					     unsigned long addr,
+					     pte_t *ptep, pte_t pte)
+{
+	/*
+	 * The pte is non-present, so there's no hardware state to
+	 * preserve.
+	 */
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transation.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time.  The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+					   unsigned long addr,
+					   pte_t *ptep)
+{
+	return __ptep_modify_prot_start(mm, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+					   unsigned long addr,
+					   pte_t *ptep, pte_t pte)
+{
+	__ptep_modify_prot_commit(mm, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
 /*
  * A facility to provide lazy MMU batching.  This allows PTE updates and
  * page invalidations to be delayed until a call to leave lazy MMU mode
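The comment block introduced above describes a read-modify-write protocol for PTE protection changes. A condensed sketch of a caller, loosely modelled on mprotect-style code (the surrounding function and locals are illustrative, not part of this patch):

/* Sketch: update protections on one pte; the pte lock is assumed held. */
static void change_one_pte(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pgprot_t newprot)
{
	pte_t ptent;

	ptent = ptep_modify_prot_start(mm, addr, ptep);	/* pte is now non-present */
	ptent = pte_modify(ptent, newprot);		/* recompute protections */
	ptep_modify_prot_commit(mm, addr, ptep, ptent);	/* must land before the pte lock is dropped */
}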
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f054778e916c..f1992dc5c424 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -93,6 +93,8 @@
 		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
 	}								\
 									\
+	TRACEDATA							\
+									\
 	/* Kernel symbol table: Normal symbols */			\
 	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
@@ -318,6 +320,18 @@
 		__stop___bug_table = .;					\
 	}
 
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA							\
+	. = ALIGN(4);							\
+	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
+		__tracedata_start = .;					\
+		*(.tracedata)						\
+		__tracedata_end = .;					\
+	}
+#else
+#define TRACEDATA
+#endif
+
 #define NOTES								\
 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start_notes) = .;			\
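The TRACEDATA rule above only gathers input sections named .tracedata between the __tracedata_start/__tracedata_end markers; the data itself is emitted by the PM_TRACE machinery, not by this patch. A hedged sketch of how an object ends up in that section (the variable and its value are purely illustrative):

/* Sketch: anything annotated this way is collected by the linker rule above. */
static const unsigned long pm_trace_marker
	__attribute__((section(".tracedata"), used)) = 0x1234;

extern char __tracedata_start[], __tracedata_end[];	/* provided by the linker script */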
diff --git a/include/asm-ia64/sn/simulator.h b/include/asm-ia64/sn/simulator.h
index c3fd3eb25768..c2611f6cfe33 100644
--- a/include/asm-ia64/sn/simulator.h
+++ b/include/asm-ia64/sn/simulator.h
@@ -8,7 +8,7 @@
 #ifndef _ASM_IA64_SN_SIMULATOR_H
 #define _ASM_IA64_SN_SIMULATOR_H
 
-
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_SGI_UV)
 #define SNMAGIC 0xaeeeeeee8badbeefL
 #define IS_MEDUSA()			({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
 
@@ -16,5 +16,10 @@
 #define IS_RUNNING_ON_SIMULATOR()	(sn_prom_type)
 #define IS_RUNNING_ON_FAKE_PROM()	(sn_prom_type == 2)
 extern int sn_prom_type;	/* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
+#else
+#define IS_MEDUSA()			0
+#define SIMULATOR_SLEEP()
+#define IS_RUNNING_ON_SIMULATOR()	0
+#endif
 
 #endif /* _ASM_IA64_SN_SIMULATOR_H */
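With the #else branch added above, callers no longer need their own #ifdefs: on configurations without SN support the simulator checks compile to constant zero and the conditional code is discarded. An illustrative caller (the function is hypothetical):

/* Sketch: simulator-aware code that still builds on every ia64 configuration. */
static void tune_poll_interval(unsigned int *delay_us)
{
	if (IS_RUNNING_ON_SIMULATOR())
		*delay_us *= 100;	/* simulated hardware is slow, back off */
}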
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index a798d6299a79..1232be3885b0 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -283,10 +283,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqz	%0, 2f					\n"
 		"	subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"1:							\n"
 		"	.subsection 2					\n"
 		"2:	b	1b					\n"
 		"	.previous					\n"
+		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -664,10 +664,10 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqz	%0, 2f					\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"1:							\n"
 		"	.subsection 2					\n"
 		"2:	b	1b					\n"
 		"	.previous					\n"
+		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index 0c5a358863f3..2de73dbb2e9e 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -56,7 +56,7 @@ struct cpuinfo_mips {
 	struct cache_desc	tcache;	/* Tertiary/split secondary cache */
 	int			srsets;	/* Shadow register sets */
 	int			core;	/* physical core number */
-#if defined(CONFIG_MIPS_MT_SMTC)
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 	/*
 	 * In the MIPS MT "SMTC" model, each TC is considered
 	 * to be a "CPU" for the purposes of scheduling, but
@@ -64,7 +64,7 @@ struct cpuinfo_mips {
 	 * to all TCs within the same VPE.
 	 */
 	int			vpe_id;  /* Virtual Processor number */
-#endif /* CONFIG_MIPS_MT */
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
 	int			tc_id;   /* Thread Context number */
 #endif
diff --git a/include/asm-mips/gic.h b/include/asm-mips/gic.h
index 3a492f225f00..954807d9d66a 100644
--- a/include/asm-mips/gic.h
+++ b/include/asm-mips/gic.h
@@ -24,8 +24,8 @@
 
 #define MSK(n) ((1 << (n)) - 1)
 #define REG32(addr)		(*(volatile unsigned int *) (addr))
-#define REG(base, offs)		REG32((unsigned int)(base) + offs##_##OFS)
-#define REGP(base, phys)	REG32((unsigned int)(base) + (phys))
+#define REG(base, offs)		REG32((unsigned long)(base) + offs##_##OFS)
+#define REGP(base, phys)	REG32((unsigned long)(base) + (phys))
 
 /* Accessors */
 #define GIC_REG(segment, offset) \
diff --git a/include/asm-mips/lasat/serial.h b/include/asm-mips/lasat/serial.h
index bafe68b10614..1c37d70579b8 100644
--- a/include/asm-mips/lasat/serial.h
+++ b/include/asm-mips/lasat/serial.h
@@ -4,10 +4,10 @@
 #define LASAT_BASE_BAUD_100		(7372800 / 16)
 #define LASAT_UART_REGS_BASE_100	0x1c8b0000
 #define LASAT_UART_REGS_SHIFT_100	2
-#define LASATINT_UART_100		8
+#define LASATINT_UART_100		16
 
 /* * LASAT 200 boards serial configuration */
 #define LASAT_BASE_BAUD_200		(100000000 / 16 / 12)
 #define LASAT_UART_REGS_BASE_200	(Vrc5074_PHYS_BASE + 0x0300)
 #define LASAT_UART_REGS_SHIFT_200	3
-#define LASATINT_UART_200		13
+#define LASATINT_UART_200		21
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 1b5064dac007..0d302bad4492 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -615,6 +615,7 @@ enum soc_au1500_ints {
 	AU1000_RTC_MATCH1_INT,
 	AU1000_RTC_MATCH2_INT,
 	AU1500_PCI_ERR_INT,
+	AU1500_RESERVED_INT,
 	AU1000_USB_DEV_REQ_INT,
 	AU1000_USB_DEV_SUS_INT,
 	AU1000_USB_HOST_INT,
diff --git a/include/asm-mips/pgtable-bits.h b/include/asm-mips/pgtable-bits.h
index 60e2f9338fcd..51b34a48c84a 100644
--- a/include/asm-mips/pgtable-bits.h
+++ b/include/asm-mips/pgtable-bits.h
@@ -134,6 +134,4 @@
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
 
-#define CONF_CM_DEFAULT		(PAGE_CACHABLE_DEFAULT>>_CACHE_SHIFT)
-
 #endif /* _ASM_PGTABLE_BITS_H */
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index 7381916dfcbb..bca352e033c3 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -1,6 +1,5 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += a.out.h
 header-y += auxvec.h
 header-y += ioctls.h
 header-y += mman.h
diff --git a/include/asm-powerpc/mediabay.h b/include/asm-powerpc/mediabay.h
index df111c362a7f..b2efb3325808 100644
--- a/include/asm-powerpc/mediabay.h
+++ b/include/asm-powerpc/mediabay.h
@@ -17,8 +17,6 @@
 #define MB_POWER	6	/* media bay contains a Power device (???) */
 #define MB_NO		7	/* media bay contains nothing */
 
-int check_media_bay(struct device_node *which_bay, int what);
-
 /* Number of bays in the machine or 0 */
 extern int media_bay_count;
 
@@ -29,6 +27,16 @@ int check_media_bay_by_base(unsigned long base, int what);
 /* called by IDE PMAC host driver to register IDE controller for media bay */
 int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
 			    int irq, ide_hwif_t *hwif);
+
+int check_media_bay(struct device_node *which_bay, int what);
+
+#else
+
+static inline int check_media_bay(struct device_node *which_bay, int what)
+{
+	return -ENODEV;
+}
+
 #endif
 
 #endif /* __KERNEL__ */
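The stub added above lets drivers call check_media_bay() unconditionally; when media-bay support is not built in, the call collapses to -ENODEV at compile time. An illustrative caller (function name and flow are placeholders):

/* Sketch: probe path that tolerates kernels built without media bay support. */
static int probe_bay_cdrom(struct device_node *bay)
{
	int ret = check_media_bay(bay, MB_CD);

	if (ret)
		return ret;	/* no media bay support, or the bay holds something else */
	/* ... continue with CD-ROM setup ... */
	return 0;
}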
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 6abead6e681a..99348c1f4cab 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -131,7 +131,6 @@ struct spu {
 	u64 flags;
 	u64 class_0_pending;
 	u64 class_0_dar;
-	u64 class_0_dsisr;
 	u64 class_1_dar;
 	u64 class_1_dsisr;
 	size_t ls_size;
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 129ec148d451..a40fd491250c 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -254,7 +254,7 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
-	u64 class_0_dar, class_0_dsisr, class_0_pending;
+	u64 class_0_dar, class_0_pending;
 	u64 class_1_dar, class_1_dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 2b6559a6d113..5235f875b932 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -34,7 +34,7 @@
  * SMP since it is only used to order updates to system memory.
  */
 #define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
+#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define read_barrier_depends()  do { } while(0)
 
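Context for the rmb() change above: lwsync orders cacheable accesses only, so switching rmb() back to a full sync is presumably about ordering loads from device (non-cacheable) memory against later loads from ordinary memory; the commit message itself is not quoted here. The pattern rmb() has to uphold looks roughly like this (driver and register layout are illustrative):

/* Sketch: the status register read must be ordered before reads of the DMA buffer. */
static int fetch_result(void __iomem *status_reg, const u32 *dma_buf)
{
	if (readl(status_reg) & 1) {	/* device says the buffer is valid */
		rmb();			/* order the status load before the data loads */
		return dma_buf[0];
	}
	return -EAGAIN;
}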
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index be9639a9a186..313bcaf4b6c3 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -36,14 +36,10 @@ extern void generic_apic_probe(void);
 #ifdef CONFIG_X86_LOCAL_APIC
 
 extern int apic_verbosity;
-extern int timer_over_8254;
 extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
 
-extern int apic_runs_main_timer;
 extern int ioapic_force;
 extern int disable_apic;
-extern int disable_apic_timer;
 
 /*
  * Basic functions accessing APICs.
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 90dec0c23646..70939820c55f 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,33 +1,29 @@
 #ifndef _ASM_X86_ASM_H
 #define _ASM_X86_ASM_H
 
-#ifdef CONFIG_X86_32
-/* 32 bits */
-
-# define _ASM_PTR	" .long "
-# define _ASM_ALIGN	" .balign 4 "
-# define _ASM_MOV_UL	" movl "
-
-# define _ASM_INC	" incl "
-# define _ASM_DEC	" decl "
-# define _ASM_ADD	" addl "
-# define _ASM_SUB	" subl "
-# define _ASM_XADD	" xaddl "
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x)	x
+#else
+# define __ASM_FORM(x)	" " #x " "
+#endif
 
+#ifdef CONFIG_X86_32
+# define __ASM_SEL(a,b) __ASM_FORM(a)
 #else
-/* 64 bits */
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+#endif
 
-# define _ASM_PTR	" .quad "
-# define _ASM_ALIGN	" .balign 8 "
-# define _ASM_MOV_UL	" movq "
+#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
 
-# define _ASM_INC	" incq "
-# define _ASM_DEC	" decq "
-# define _ASM_ADD	" addq "
-# define _ASM_SUB	" subq "
-# define _ASM_XADD	" xaddq "
+#define _ASM_PTR	__ASM_SEL(.long, .quad)
+#define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)
+#define _ASM_MOV_UL	__ASM_SIZE(mov)
 
-#endif /* CONFIG_X86_32 */
+#define _ASM_INC	__ASM_SIZE(inc)
+#define _ASM_DEC	__ASM_SIZE(dec)
+#define _ASM_ADD	__ASM_SIZE(add)
+#define _ASM_SUB	__ASM_SIZE(sub)
+#define _ASM_XADD	__ASM_SIZE(xadd)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
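The rewritten helpers above collapse the 32-/64-bit tables into three building blocks: __ASM_FORM() quotes its argument only when compiling C (so the same macros also work in .S files), __ASM_SEL() picks the 32- or 64-bit spelling, and __ASM_SIZE() appends the matching instruction suffix. On a 64-bit C build, for example, _ASM_PTR expands to " .quad " and _ASM_MOV_UL to " movq ", exactly the strings the removed definitions spelled out by hand. A minimal sketch of a user, modelled on the current_text_addr()-style helpers elsewhere in these headers (the function name is hypothetical):

/* Sketch: the width-neutral mov lets one definition serve 32- and 64-bit builds. */
static inline void *current_text_address(void)
{
	void *pc;

	asm volatile(_ASM_MOV_UL " $1f, %0\n"
		     "1:"
		     : "=g" (pc));
	return pc;
}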
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 3e0cd7d38335..fe589c153db8 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -11,12 +11,6 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index ee4b3ead6a43..96b1829cea15 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,11 +23,21 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
 #else
-#define ADDR "+m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
 #endif
 
+#define ADDR				BITOP_ADDR(addr)
+
+/*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
+#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK(nr)			(1 << ((nr) & 7))
+
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -43,9 +53,17 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "orb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)CONST_MASK(nr))
+			: "memory");
+	} else {
+		asm volatile(LOCK_PREFIX "bts %1,%0"
+			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+	}
 }
 
 /**
@@ -57,7 +75,7 @@ static inline void set_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -72,9 +90,17 @@ static inline void __set_bit(int nr, volatile void *addr)
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
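The IS_IMMEDIATE()/CONST_MASK() paths above let gcc turn set_bit()/clear_bit() with a compile-time-constant bit number into a locked byte orb/andb at a fixed offset, instead of the more expensive bts/btr; the switch to unsigned long * also documents that these operate on long-aligned bitmaps. A hedged illustration of how the two paths get selected (the bitmap and bit numbers are made up):

/* Sketch: constant vs. variable bit numbers take different paths in set_bit(). */
static unsigned long device_flags[4];		/* illustrative bitmap */

static void mark_ready(int runtime_bit)
{
	set_bit(7, device_flags);		/* constant nr: emitted as lock orb $0x80, ... */
	set_bit(runtime_bit, device_flags);	/* run-time nr: falls back to lock bts */
}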
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 0d609c837a41..78b47e7404eb 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -142,11 +142,11 @@ extern const char * const x86_power_flags[32];
 #define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))
 #define setup_clear_cpu_cap(bit) do { \
 	clear_cpu_cap(&boot_cpu_data, bit);	\
-	set_bit(bit, cleared_cpu_caps);		\
+	set_bit(bit, (unsigned long *)cleared_cpu_caps); \
 } while (0)
 #define setup_force_cpu_cap(bit) do { \
 	set_cpu_cap(&boot_cpu_data, bit);	\
-	clear_bit(bit, cleared_cpu_caps);	\
+	clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
 } while (0)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index d2526d3f7346..7515c19d4988 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,39 @@
+#ifndef _X86_CURRENT_H
+#define _X86_CURRENT_H
+
 #ifdef CONFIG_X86_32
-# include "current_32.h"
-#else
-# include "current_64.h"
-#endif
+#include <linux/compiler.h>
+#include <asm/percpu.h>
+
+struct task_struct;
+
+DECLARE_PER_CPU(struct task_struct *, current_task);
+static __always_inline struct task_struct *get_current(void)
+{
+	return x86_read_percpu(current_task);
+}
+
+#else /* X86_32 */
+
+#ifndef __ASSEMBLY__
+#include <asm/pda.h>
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return read_pda(pcurrent);
+}
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* X86_32 */
+
+#define current get_current()
+
+#endif /* X86_CURRENT_H */
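Both branches of the merged header above end up exporting the same interface: current is a macro that fetches the running task from per-cpu storage (x86_read_percpu() of current_task on 32-bit, read_pda(pcurrent) on 64-bit). Illustrative use:

/* Sketch: 'current' is usable from any process context. */
static void log_caller(void)
{
	printk(KERN_DEBUG "called by %s (pid %d)\n",
	       current->comm, current->pid);
}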
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
deleted file mode 100644
index 5af9bdb97a16..000000000000
--- a/include/asm-x86/current_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _I386_CURRENT_H
-#define _I386_CURRENT_H
-
-#include <linux/compiler.h>
-#include <asm/percpu.h>
-
-struct task_struct;
-
-DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-	return x86_read_percpu(current_task);
-}
-
-#define current get_current()
-
-#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
deleted file mode 100644
index 2d368ede2fc1..000000000000
--- a/include/asm-x86/current_64.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _X86_64_CURRENT_H
-#define _X86_64_CURRENT_H
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
-
-#include <asm/pda.h>
-
-static inline struct task_struct *get_current(void)
-{
-	struct task_struct *t = read_pda(pcurrent);
-	return t;
-}
-
-#define current get_current()
-
-#else
-
-#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
-#endif
-
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
-
-#endif
-
-#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 7004251fc66b..5103d0b2c46c 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -24,6 +24,7 @@ struct e820map {
 
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
+#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
 
 #define BIOS_BEGIN		0x000a0000
 #define BIOS_END		0x00100000
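The is_ISA_range() helper added above is a plain range test against the legacy ISA window [0xa0000, 0x100000). A usage sketch (the policy and function are illustrative, not from this patch):

/* Sketch: treat requests inside the legacy ISA/VGA hole specially. */
static int mapping_needs_uncached(unsigned long start, unsigned long end)
{
	if (is_ISA_range(start, end - 1))
		return 1;	/* VGA/ISA hole: never cache */
	return 0;
}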
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 4b96148e90c1..f0df7ee96816 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -79,10 +79,6 @@ enum fixed_addresses {
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
-#ifdef CONFIG_ACPI
-	FIX_ACPI_BEGIN,
-	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
@@ -103,6 +99,10 @@ enum fixed_addresses {
 			(__end_of_permanent_fixed_addresses & 511),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
 	FIX_WP_TEST,
+#ifdef CONFIG_ACPI
+	FIX_ACPI_BEGIN,
+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
 	FIX_OHCI1394_BASE,
 #endif
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h index 90958ed993fa..eeca2f51fd8f 100644 --- a/include/asm-x86/gart.h +++ b/include/asm-x86/gart.h | |||
@@ -1,16 +1,20 @@ | |||
1 | #ifndef _ASM_X8664_IOMMU_H | 1 | #ifndef _ASM_X8664_IOMMU_H |
2 | #define _ASM_X8664_IOMMU_H 1 | 2 | #define _ASM_X8664_IOMMU_H 1 |
3 | 3 | ||
4 | #include <asm/e820.h> | ||
5 | |||
4 | extern void pci_iommu_shutdown(void); | 6 | extern void pci_iommu_shutdown(void); |
5 | extern void no_iommu_init(void); | 7 | extern void no_iommu_init(void); |
6 | extern int force_iommu, no_iommu; | 8 | extern int force_iommu, no_iommu; |
7 | extern int iommu_detected; | 9 | extern int iommu_detected; |
10 | extern int agp_amd64_init(void); | ||
8 | #ifdef CONFIG_GART_IOMMU | 11 | #ifdef CONFIG_GART_IOMMU |
9 | extern void gart_iommu_init(void); | 12 | extern void gart_iommu_init(void); |
10 | extern void gart_iommu_shutdown(void); | 13 | extern void gart_iommu_shutdown(void); |
11 | extern void __init gart_parse_options(char *); | 14 | extern void __init gart_parse_options(char *); |
12 | extern void early_gart_iommu_check(void); | 15 | extern void early_gart_iommu_check(void); |
13 | extern void gart_iommu_hole_init(void); | 16 | extern void gart_iommu_hole_init(void); |
17 | extern void set_up_gart_resume(u32, u32); | ||
14 | extern int fallback_aper_order; | 18 | extern int fallback_aper_order; |
15 | extern int fallback_aper_force; | 19 | extern int fallback_aper_force; |
16 | extern int gart_iommu_aperture; | 20 | extern int gart_iommu_aperture; |
@@ -31,4 +35,63 @@ static inline void gart_iommu_shutdown(void) | |||
31 | 35 | ||
32 | #endif | 36 | #endif |
33 | 37 | ||
38 | /* PTE bits. */ | ||
39 | #define GPTE_VALID 1 | ||
40 | #define GPTE_COHERENT 2 | ||
41 | |||
42 | /* Aperture control register bits. */ | ||
43 | #define GARTEN (1<<0) | ||
44 | #define DISGARTCPU (1<<4) | ||
45 | #define DISGARTIO (1<<5) | ||
46 | |||
47 | /* GART cache control register bits. */ | ||
48 | #define INVGART (1<<0) | ||
49 | #define GARTPTEERR (1<<1) | ||
50 | |||
51 | /* K8 On-cpu GART registers */ | ||
52 | #define AMD64_GARTAPERTURECTL 0x90 | ||
53 | #define AMD64_GARTAPERTUREBASE 0x94 | ||
54 | #define AMD64_GARTTABLEBASE 0x98 | ||
55 | #define AMD64_GARTCACHECTL 0x9c | ||
56 | #define AMD64_GARTEN (1<<0) | ||
57 | |||
58 | static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) | ||
59 | { | ||
60 | u32 tmp, ctl; | ||
61 | |||
62 | /* address of the mappings table */ | ||
63 | addr >>= 12; | ||
64 | tmp = (u32) addr<<4; | ||
65 | tmp &= ~0xf; | ||
66 | pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); | ||
67 | |||
68 | /* Enable GART translation for this hammer. */ | ||
69 | pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); | ||
70 | ctl |= GARTEN; | ||
71 | ctl &= ~(DISGARTCPU | DISGARTIO); | ||
72 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); | ||
73 | } | ||
74 | |||
75 | static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) | ||
76 | { | ||
77 | if (!aper_base) | ||
78 | return 0; | ||
79 | |||
80 | if (aper_base + aper_size > 0x100000000ULL) { | ||
81 | printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n"); | ||
82 | return 0; | ||
83 | } | ||
84 | if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { | ||
85 | printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n"); | ||
86 | return 0; | ||
87 | } | ||
88 | if (aper_size < min_size) { | ||
89 | printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n", | ||
90 | aper_size>>20, min_size>>20); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | return 1; | ||
95 | } | ||
96 | |||
34 | #endif | 97 | #endif |
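
The enable_gart_translation() helper added above packs the page-table address into the K8 aperture registers: shifting right by 12 and then left by 4 places physical address bits 39:12 into bits 31:4 of AMD64_GARTTABLEBASE, with the low nibble cleared. A small standalone sketch of that encoding (sample address chosen arbitrarily; this is not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the shift sequence in enable_gart_translation(). */
static uint32_t gart_table_base_reg(uint64_t table_phys)
{
        uint32_t reg = (uint32_t)((table_phys >> 12) << 4); /* bits 39:12 -> 31:4 */
        return reg & ~0xfU;                                 /* low nibble reserved */
}

int main(void)
{
        printf("%#x\n", gart_table_base_reg(0x12345000ULL)); /* prints 0x123450 */
        return 0;
}
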
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 6e6458853a36..bb06027fc83e 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h | |||
@@ -112,8 +112,8 @@ extern int geode_get_dev_base(unsigned int dev); | |||
112 | #define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ | 112 | #define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ |
113 | #define VSA_VR_SIGNATURE 0x0003 | 113 | #define VSA_VR_SIGNATURE 0x0003 |
114 | #define VSA_VR_MEM_SIZE 0x0200 | 114 | #define VSA_VR_MEM_SIZE 0x0200 |
115 | #define VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ | 115 | #define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ |
116 | 116 | #define GSW_VSA_SIG 0x534d /* General Software signature */ | |
117 | /* GPIO */ | 117 | /* GPIO */ |
118 | 118 | ||
119 | #define GPIO_OUTPUT_VAL 0x00 | 119 | #define GPIO_OUTPUT_VAL 0x00 |
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 45d4df3e51e6..2f98df91f1f2 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h | |||
@@ -55,4 +55,6 @@ static inline void outb_pic(unsigned char value, unsigned int port) | |||
55 | udelay(2); | 55 | udelay(2); |
56 | } | 56 | } |
57 | 57 | ||
58 | extern struct irq_chip i8259A_chip; | ||
59 | |||
58 | #endif /* __ASM_I8259_H__ */ | 60 | #endif /* __ASM_I8259_H__ */ |
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index d593e14f0341..dc0f55f2b034 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h | |||
@@ -11,6 +11,15 @@ | |||
11 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar | 11 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /* I/O Unit Redirection Table */ | ||
15 | #define IO_APIC_REDIR_VECTOR_MASK 0x000FF | ||
16 | #define IO_APIC_REDIR_DEST_LOGICAL 0x00800 | ||
17 | #define IO_APIC_REDIR_DEST_PHYSICAL 0x00000 | ||
18 | #define IO_APIC_REDIR_SEND_PENDING (1 << 12) | ||
19 | #define IO_APIC_REDIR_REMOTE_IRR (1 << 14) | ||
20 | #define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) | ||
21 | #define IO_APIC_REDIR_MASKED (1 << 16) | ||
22 | |||
14 | /* | 23 | /* |
15 | * The structure of the IO-APIC: | 24 | * The structure of the IO-APIC: |
16 | */ | 25 | */ |
@@ -137,6 +146,9 @@ extern int sis_apic_bug; | |||
137 | /* 1 if "noapic" boot option passed */ | 146 | /* 1 if "noapic" boot option passed */ |
138 | extern int skip_ioapic_setup; | 147 | extern int skip_ioapic_setup; |
139 | 148 | ||
149 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ | ||
150 | extern int timer_through_8259; | ||
151 | |||
140 | static inline void disable_ioapic_setup(void) | 152 | static inline void disable_ioapic_setup(void) |
141 | { | 153 | { |
142 | skip_ioapic_setup = 1; | 154 | skip_ioapic_setup = 1; |
@@ -162,6 +174,7 @@ extern void ioapic_init_mappings(void); | |||
162 | 174 | ||
163 | #else /* !CONFIG_X86_IO_APIC */ | 175 | #else /* !CONFIG_X86_IO_APIC */ |
164 | #define io_apic_assign_pci_irqs 0 | 176 | #define io_apic_assign_pci_irqs 0 |
177 | static const int timer_through_8259 = 0; | ||
165 | #endif | 178 | #endif |
166 | 179 | ||
167 | #endif | 180 | #endif |
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 1d8cd01fa514..844f2a89afbc 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/kvm_para.h> | 18 | #include <linux/kvm_para.h> |
19 | #include <linux/kvm_types.h> | 19 | #include <linux/kvm_types.h> |
20 | 20 | ||
21 | #include <asm/pvclock-abi.h> | ||
21 | #include <asm/desc.h> | 22 | #include <asm/desc.h> |
22 | 23 | ||
23 | #define KVM_MAX_VCPUS 16 | 24 | #define KVM_MAX_VCPUS 16 |
@@ -282,7 +283,8 @@ struct kvm_vcpu_arch { | |||
282 | struct x86_emulate_ctxt emulate_ctxt; | 283 | struct x86_emulate_ctxt emulate_ctxt; |
283 | 284 | ||
284 | gpa_t time; | 285 | gpa_t time; |
285 | struct kvm_vcpu_time_info hv_clock; | 286 | struct pvclock_vcpu_time_info hv_clock; |
287 | unsigned int hv_clock_tsc_khz; | ||
286 | unsigned int time_offset; | 288 | unsigned int time_offset; |
287 | struct page *time_page; | 289 | struct page *time_page; |
288 | }; | 290 | }; |
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h index 509845942070..76f392146daa 100644 --- a/include/asm-x86/kvm_para.h +++ b/include/asm-x86/kvm_para.h | |||
@@ -48,24 +48,6 @@ struct kvm_mmu_op_release_pt { | |||
48 | #ifdef __KERNEL__ | 48 | #ifdef __KERNEL__ |
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | 50 | ||
51 | /* xen binary-compatible interface. See xen headers for details */ | ||
52 | struct kvm_vcpu_time_info { | ||
53 | uint32_t version; | ||
54 | uint32_t pad0; | ||
55 | uint64_t tsc_timestamp; | ||
56 | uint64_t system_time; | ||
57 | uint32_t tsc_to_system_mul; | ||
58 | int8_t tsc_shift; | ||
59 | int8_t pad[3]; | ||
60 | } __attribute__((__packed__)); /* 32 bytes */ | ||
61 | |||
62 | struct kvm_wall_clock { | ||
63 | uint32_t wc_version; | ||
64 | uint32_t wc_sec; | ||
65 | uint32_t wc_nsec; | ||
66 | } __attribute__((__packed__)); | ||
67 | |||
68 | |||
69 | extern void kvmclock_init(void); | 51 | extern void kvmclock_init(void); |
70 | 52 | ||
71 | 53 | ||
@@ -89,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr) | |||
89 | long ret; | 71 | long ret; |
90 | asm volatile(KVM_HYPERCALL | 72 | asm volatile(KVM_HYPERCALL |
91 | : "=a"(ret) | 73 | : "=a"(ret) |
92 | : "a"(nr)); | 74 | : "a"(nr) |
75 | : "memory"); | ||
93 | return ret; | 76 | return ret; |
94 | } | 77 | } |
95 | 78 | ||
@@ -98,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) | |||
98 | long ret; | 81 | long ret; |
99 | asm volatile(KVM_HYPERCALL | 82 | asm volatile(KVM_HYPERCALL |
100 | : "=a"(ret) | 83 | : "=a"(ret) |
101 | : "a"(nr), "b"(p1)); | 84 | : "a"(nr), "b"(p1) |
85 | : "memory"); | ||
102 | return ret; | 86 | return ret; |
103 | } | 87 | } |
104 | 88 | ||
@@ -108,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, | |||
108 | long ret; | 92 | long ret; |
109 | asm volatile(KVM_HYPERCALL | 93 | asm volatile(KVM_HYPERCALL |
110 | : "=a"(ret) | 94 | : "=a"(ret) |
111 | : "a"(nr), "b"(p1), "c"(p2)); | 95 | : "a"(nr), "b"(p1), "c"(p2) |
96 | : "memory"); | ||
112 | return ret; | 97 | return ret; |
113 | } | 98 | } |
114 | 99 | ||
@@ -118,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, | |||
118 | long ret; | 103 | long ret; |
119 | asm volatile(KVM_HYPERCALL | 104 | asm volatile(KVM_HYPERCALL |
120 | : "=a"(ret) | 105 | : "=a"(ret) |
121 | : "a"(nr), "b"(p1), "c"(p2), "d"(p3)); | 106 | : "a"(nr), "b"(p1), "c"(p2), "d"(p3) |
107 | : "memory"); | ||
122 | return ret; | 108 | return ret; |
123 | } | 109 | } |
124 | 110 | ||
@@ -129,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | |||
129 | long ret; | 115 | long ret; |
130 | asm volatile(KVM_HYPERCALL | 116 | asm volatile(KVM_HYPERCALL |
131 | : "=a"(ret) | 117 | : "=a"(ret) |
132 | : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)); | 118 | : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4) |
119 | : "memory"); | ||
133 | return ret; | 120 | return ret; |
134 | } | 121 | } |
135 | 122 | ||
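
The only functional change to the hypercall wrappers is the added "memory" clobber: the constraint lists name only registers, yet the hypervisor behind KVM_HYPERCALL may read or write guest memory, so the compiler must not cache loads or sink stores across the call. A standalone sketch of the general pattern (nothing KVM-specific; the asm body is a stand-in for the real vmcall/vmmcall instruction):

#include <stdio.h>

static long demo_call(unsigned long arg)
{
        long ret;
        asm volatile("mov %1, %0"       /* stand-in for the hypercall insn */
                     : "=r" (ret)
                     : "r" (arg)
                     : "memory");       /* callee may touch arbitrary memory */
        return ret;
}

int main(void)
{
        static unsigned long buf[1];

        buf[0] = 123;                             /* data the callee would read */
        long r = demo_call((unsigned long)buf);   /* clobber keeps the store above */
        printf("ret=%ld buf=%lu\n", r, buf[0]);
        return 0;
}
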
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h index 56d0e1fa0258..b63c52182006 100644 --- a/include/asm-x86/mach-default/smpboot_hooks.h +++ b/include/asm-x86/mach-default/smpboot_hooks.h | |||
@@ -41,8 +41,10 @@ static inline void __init smpboot_setup_io_apic(void) | |||
41 | */ | 41 | */ |
42 | if (!skip_ioapic_setup && nr_ioapics) | 42 | if (!skip_ioapic_setup && nr_ioapics) |
43 | setup_IO_APIC(); | 43 | setup_IO_APIC(); |
44 | else | 44 | else { |
45 | nr_ioapics = 0; | 45 | nr_ioapics = 0; |
46 | localise_nmi_watchdog(); | ||
47 | } | ||
46 | } | 48 | } |
47 | 49 | ||
48 | static inline void smpboot_clear_io_apic(void) | 50 | static inline void smpboot_clear_io_apic(void) |
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h new file mode 100644 index 000000000000..95beda07c6fa --- /dev/null +++ b/include/asm-x86/mmconfig.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_MMCONFIG_H | ||
2 | #define _ASM_MMCONFIG_H | ||
3 | |||
4 | #ifdef CONFIG_PCI_MMCONFIG | ||
5 | extern void __cpuinit fam10h_check_enable_mmcfg(void); | ||
6 | extern void __init check_enable_amd_mmconf_dmi(void); | ||
7 | #else | ||
8 | static inline void fam10h_check_enable_mmcfg(void) { } | ||
9 | static inline void check_enable_amd_mmconf_dmi(void) { } | ||
10 | #endif | ||
11 | |||
12 | #endif | ||
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index 09413ad39d3c..44bce773012e 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
@@ -111,7 +111,9 @@ | |||
111 | #define MSR_K8_TOP_MEM2 0xc001001d | 111 | #define MSR_K8_TOP_MEM2 0xc001001d |
112 | #define MSR_K8_SYSCFG 0xc0010010 | 112 | #define MSR_K8_SYSCFG 0xc0010010 |
113 | #define MSR_K8_HWCR 0xc0010015 | 113 | #define MSR_K8_HWCR 0xc0010015 |
114 | #define MSR_K8_ENABLE_C1E 0xc0010055 | 114 | #define MSR_K8_INT_PENDING_MSG 0xc0010055 |
115 | /* C1E active bits in int pending message */ | ||
116 | #define K8_INTP_C1E_ACTIVE_MASK 0x18000000 | ||
115 | #define MSR_K8_TSEG_ADDR 0xc0010112 | 117 | #define MSR_K8_TSEG_ADDR 0xc0010112 |
116 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ | 118 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
117 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ | 119 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
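
MSR 0xc0010055 is renamed from the misleading MSR_K8_ENABLE_C1E to MSR_K8_INT_PENDING_MSG, with K8_INTP_C1E_ACTIVE_MASK covering the two status bits (27 and 28) that the C1E detection code is expected to test. A hedged sketch of that test, using a made-up register value in place of an actual rdmsr:

#include <stdio.h>

#define K8_INTP_C1E_ACTIVE_MASK 0x18000000

int main(void)
{
        unsigned int lo = 0x08000000;   /* pretend low word read via rdmsr */

        /* C1E is treated as active if either masked bit is set. */
        printf("C1E active: %d\n", (lo & K8_INTP_C1E_ACTIVE_MASK) != 0);
        return 0;
}
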
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 3707650a169b..2b5f2c91db25 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h | |||
@@ -18,7 +18,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) | |||
18 | unsigned long low, high; | 18 | unsigned long low, high; |
19 | asm volatile(".byte 0x0f,0x01,0xf9" | 19 | asm volatile(".byte 0x0f,0x01,0xf9" |
20 | : "=a" (low), "=d" (high), "=c" (*aux)); | 20 | : "=a" (low), "=d" (high), "=c" (*aux)); |
21 | return low | ((u64)high >> 32); | 21 | return low | ((u64)high << 32); |
22 | } | 22 | } |
23 | 23 | ||
24 | /* | 24 | /* |
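
The native_read_tscp() change is a genuine bug fix rather than cleanup: RDTSCP returns the counter split across EAX (low half) and EDX (high half), and the old ">> 32" threw the high half away instead of placing it in the upper 32 bits. A standalone check of the corrected combine step:

#include <stdio.h>
#include <stdint.h>

static uint64_t combine(uint32_t low, uint32_t high)
{
        /* The high half must be shifted *left* 32 bits before the OR. */
        return (uint64_t)low | ((uint64_t)high << 32);
}

int main(void)
{
        /* With the old ">> 32" the high word would simply have been lost. */
        printf("%#llx\n", (unsigned long long)combine(0x11223344u, 0x8u));
        return 0;   /* prints 0x811223344 */
}
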
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 1e363021e72f..05449ef830a7 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h | |||
@@ -15,27 +15,6 @@ | |||
15 | */ | 15 | */ |
16 | int do_nmi_callback(struct pt_regs *regs, int cpu); | 16 | int do_nmi_callback(struct pt_regs *regs, int cpu); |
17 | 17 | ||
18 | #ifdef CONFIG_PM | ||
19 | |||
20 | /** Replace the PM callback routine for NMI. */ | ||
21 | struct pm_dev *set_nmi_pm_callback(pm_callback callback); | ||
22 | |||
23 | /** Unset the PM callback routine back to the default. */ | ||
24 | void unset_nmi_pm_callback(struct pm_dev *dev); | ||
25 | |||
26 | #else | ||
27 | |||
28 | static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | static inline void unset_nmi_pm_callback(struct pm_dev *dev) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | #endif /* CONFIG_PM */ | ||
38 | |||
39 | #ifdef CONFIG_X86_64 | 18 | #ifdef CONFIG_X86_64 |
40 | extern void default_do_nmi(struct pt_regs *); | 19 | extern void default_do_nmi(struct pt_regs *); |
41 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | 20 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); |
@@ -46,7 +25,6 @@ extern void nmi_watchdog_default(void); | |||
46 | 25 | ||
47 | extern int check_nmi_watchdog(void); | 26 | extern int check_nmi_watchdog(void); |
48 | extern int nmi_watchdog_enabled; | 27 | extern int nmi_watchdog_enabled; |
49 | extern int unknown_nmi_panic; | ||
50 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | 28 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); |
51 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | 29 | extern int avail_to_resrv_perfctr_nmi(unsigned int); |
52 | extern int reserve_perfctr_nmi(unsigned int); | 30 | extern int reserve_perfctr_nmi(unsigned int); |
@@ -78,6 +56,11 @@ extern int unknown_nmi_panic; | |||
78 | void __trigger_all_cpu_backtrace(void); | 56 | void __trigger_all_cpu_backtrace(void); |
79 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | 57 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() |
80 | 58 | ||
59 | static inline void localise_nmi_watchdog(void) | ||
60 | { | ||
61 | if (nmi_watchdog == NMI_IO_APIC) | ||
62 | nmi_watchdog = NMI_LOCAL_APIC; | ||
63 | } | ||
81 | #endif | 64 | #endif |
82 | 65 | ||
83 | void lapic_watchdog_stop(void); | 66 | void lapic_watchdog_stop(void); |
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index dc936dddf161..b52ed85f32f5 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h | |||
@@ -51,8 +51,15 @@ | |||
51 | 51 | ||
52 | #ifndef __ASSEMBLY__ | 52 | #ifndef __ASSEMBLY__ |
53 | 53 | ||
54 | typedef struct { pgdval_t pgd; } pgd_t; | ||
55 | typedef struct { pgprotval_t pgprot; } pgprot_t; | ||
56 | |||
54 | extern int page_is_ram(unsigned long pagenr); | 57 | extern int page_is_ram(unsigned long pagenr); |
55 | extern int devmem_is_allowed(unsigned long pagenr); | 58 | extern int devmem_is_allowed(unsigned long pagenr); |
59 | extern void map_devmem(unsigned long pfn, unsigned long size, | ||
60 | pgprot_t vma_prot); | ||
61 | extern void unmap_devmem(unsigned long pfn, unsigned long size, | ||
62 | pgprot_t vma_prot); | ||
56 | 63 | ||
57 | extern unsigned long max_pfn_mapped; | 64 | extern unsigned long max_pfn_mapped; |
58 | 65 | ||
@@ -74,9 +81,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | |||
74 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | 81 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) |
75 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 82 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
76 | 83 | ||
77 | typedef struct { pgdval_t pgd; } pgd_t; | ||
78 | typedef struct { pgprotval_t pgprot; } pgprot_t; | ||
79 | |||
80 | static inline pgd_t native_make_pgd(pgdval_t val) | 84 | static inline pgd_t native_make_pgd(pgdval_t val) |
81 | { | 85 | { |
82 | return (pgd_t) { val }; | 86 | return (pgd_t) { val }; |
@@ -160,6 +164,7 @@ static inline pteval_t native_pte_val(pte_t pte) | |||
160 | #endif | 164 | #endif |
161 | 165 | ||
162 | #define pte_val(x) native_pte_val(x) | 166 | #define pte_val(x) native_pte_val(x) |
167 | #define pte_flags(x) native_pte_val(x) | ||
163 | #define __pte(x) native_make_pte(x) | 168 | #define __pte(x) native_make_pte(x) |
164 | 169 | ||
165 | #endif /* CONFIG_PARAVIRT */ | 170 | #endif /* CONFIG_PARAVIRT */ |
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 424e82f8ae27..73ed2e4ebf95 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h | |||
@@ -13,8 +13,17 @@ | |||
13 | */ | 13 | */ |
14 | #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) | 14 | #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) |
15 | 15 | ||
16 | #ifdef CONFIG_4KSTACKS | ||
17 | #define THREAD_ORDER 0 | ||
18 | #else | ||
19 | #define THREAD_ORDER 1 | ||
20 | #endif | ||
21 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | ||
22 | |||
23 | |||
16 | #ifdef CONFIG_X86_PAE | 24 | #ifdef CONFIG_X86_PAE |
17 | #define __PHYSICAL_MASK_SHIFT 36 | 25 | /* 44=32+12, the limit we can fit into an unsigned long pfn */ |
26 | #define __PHYSICAL_MASK_SHIFT 44 | ||
18 | #define __VIRTUAL_MASK_SHIFT 32 | 27 | #define __VIRTUAL_MASK_SHIFT 32 |
19 | #define PAGETABLE_LEVELS 3 | 28 | #define PAGETABLE_LEVELS 3 |
20 | 29 | ||
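
Two things land in page_32.h: THREAD_ORDER/THREAD_SIZE move here (4 KB stacks with CONFIG_4KSTACKS, 8 KB otherwise), and the PAE physical mask grows from 36 to 44 bits. The new comment's arithmetic: a page frame number held in a 32-bit unsigned long plus the 12-bit in-page offset addresses 32 + 12 = 44 bits, i.e. 16 TB. A trivial standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned pfn_bits = 32, page_shift = 12;

        printf("physical mask shift = %u\n", pfn_bits + page_shift);   /* 44 */
        printf("max addressable     = %llu TiB\n",
               1ULL << (pfn_bits + page_shift - 40));                  /* 16 */
        return 0;
}
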
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index 0f13b945e240..e9ada314dfc1 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -238,7 +238,13 @@ struct pv_mmu_ops { | |||
238 | void (*pte_update_defer)(struct mm_struct *mm, | 238 | void (*pte_update_defer)(struct mm_struct *mm, |
239 | unsigned long addr, pte_t *ptep); | 239 | unsigned long addr, pte_t *ptep); |
240 | 240 | ||
241 | pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, | ||
242 | pte_t *ptep); | ||
243 | void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, | ||
244 | pte_t *ptep, pte_t pte); | ||
245 | |||
241 | pteval_t (*pte_val)(pte_t); | 246 | pteval_t (*pte_val)(pte_t); |
247 | pteval_t (*pte_flags)(pte_t); | ||
242 | pte_t (*make_pte)(pteval_t pte); | 248 | pte_t (*make_pte)(pteval_t pte); |
243 | 249 | ||
244 | pgdval_t (*pgd_val)(pgd_t); | 250 | pgdval_t (*pgd_val)(pgd_t); |
@@ -996,6 +1002,20 @@ static inline pteval_t pte_val(pte_t pte) | |||
996 | return ret; | 1002 | return ret; |
997 | } | 1003 | } |
998 | 1004 | ||
1005 | static inline pteval_t pte_flags(pte_t pte) | ||
1006 | { | ||
1007 | pteval_t ret; | ||
1008 | |||
1009 | if (sizeof(pteval_t) > sizeof(long)) | ||
1010 | ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags, | ||
1011 | pte.pte, (u64)pte.pte >> 32); | ||
1012 | else | ||
1013 | ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, | ||
1014 | pte.pte); | ||
1015 | |||
1016 | return ret; | ||
1017 | } | ||
1018 | |||
999 | static inline pgd_t __pgd(pgdval_t val) | 1019 | static inline pgd_t __pgd(pgdval_t val) |
1000 | { | 1020 | { |
1001 | pgdval_t ret; | 1021 | pgdval_t ret; |
@@ -1024,6 +1044,29 @@ static inline pgdval_t pgd_val(pgd_t pgd) | |||
1024 | return ret; | 1044 | return ret; |
1025 | } | 1045 | } |
1026 | 1046 | ||
1047 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION | ||
1048 | static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, | ||
1049 | pte_t *ptep) | ||
1050 | { | ||
1051 | pteval_t ret; | ||
1052 | |||
1053 | ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start, | ||
1054 | mm, addr, ptep); | ||
1055 | |||
1056 | return (pte_t) { .pte = ret }; | ||
1057 | } | ||
1058 | |||
1059 | static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | ||
1060 | pte_t *ptep, pte_t pte) | ||
1061 | { | ||
1062 | if (sizeof(pteval_t) > sizeof(long)) | ||
1063 | /* 5 arg words */ | ||
1064 | pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte); | ||
1065 | else | ||
1066 | PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit, | ||
1067 | mm, addr, ptep, pte.pte); | ||
1068 | } | ||
1069 | |||
1027 | static inline void set_pte(pte_t *ptep, pte_t pte) | 1070 | static inline void set_pte(pte_t *ptep, pte_t pte) |
1028 | { | 1071 | { |
1029 | if (sizeof(pteval_t) > sizeof(long)) | 1072 | if (sizeof(pteval_t) > sizeof(long)) |
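
ptep_modify_prot_start()/ptep_modify_prot_commit() introduce a read-modify-write transaction on a live pte, so a hypervisor back end can batch or trap the update as a single operation instead of seeing two unrelated accesses. A kernel-context sketch of the intended caller pattern (not standalone; it assumes the usual helpers such as pte_wrprotect() from pgtable.h in this same series):

static void demo_wrprotect_one(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep)
{
        pte_t pte;

        pte = ptep_modify_prot_start(mm, addr, ptep);   /* snapshot the live pte */
        pte = pte_wrprotect(pte);                       /* modify the snapshot   */
        ptep_modify_prot_commit(mm, addr, ptep, pte);   /* publish in one step   */
}
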
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h index 88f60cc6a227..7edc47307217 100644 --- a/include/asm-x86/pat.h +++ b/include/asm-x86/pat.h | |||
@@ -1,14 +1,13 @@ | |||
1 | |||
2 | #ifndef _ASM_PAT_H | 1 | #ifndef _ASM_PAT_H |
3 | #define _ASM_PAT_H 1 | 2 | #define _ASM_PAT_H |
4 | 3 | ||
5 | #include <linux/types.h> | 4 | #include <linux/types.h> |
6 | 5 | ||
7 | #ifdef CONFIG_X86_PAT | 6 | #ifdef CONFIG_X86_PAT |
8 | extern int pat_wc_enabled; | 7 | extern int pat_enabled; |
9 | extern void validate_pat_support(struct cpuinfo_x86 *c); | 8 | extern void validate_pat_support(struct cpuinfo_x86 *c); |
10 | #else | 9 | #else |
11 | static const int pat_wc_enabled = 0; | 10 | static const int pat_enabled; |
12 | static inline void validate_pat_support(struct cpuinfo_x86 *c) { } | 11 | static inline void validate_pat_support(struct cpuinfo_x86 *c) { } |
13 | #endif | 12 | #endif |
14 | 13 | ||
@@ -21,4 +20,3 @@ extern int free_memtype(u64 start, u64 end); | |||
21 | extern void pat_disable(char *reason); | 20 | extern void pat_disable(char *reason); |
22 | 21 | ||
23 | #endif | 22 | #endif |
24 | |||
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index 30bbde0cb34b..2db14cf17db8 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h | |||
@@ -18,6 +18,8 @@ struct pci_sysdata { | |||
18 | #endif | 18 | #endif |
19 | }; | 19 | }; |
20 | 20 | ||
21 | extern int pci_routeirq; | ||
22 | |||
21 | /* scan a bus after allocating a pci_sysdata for it */ | 23 | /* scan a bus after allocating a pci_sysdata for it */ |
22 | extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, | 24 | extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, |
23 | int node); | 25 | int node); |
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h index 8c4c3a0368e2..a50d46851285 100644 --- a/include/asm-x86/pci_32.h +++ b/include/asm-x86/pci_32.h | |||
@@ -18,12 +18,14 @@ struct pci_dev; | |||
18 | #define PCI_DMA_BUS_IS_PHYS (1) | 18 | #define PCI_DMA_BUS_IS_PHYS (1) |
19 | 19 | ||
20 | /* pci_unmap_{page,single} is a nop so... */ | 20 | /* pci_unmap_{page,single} is a nop so... */ |
21 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) | 21 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0]; |
22 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) | 22 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0]; |
23 | #define pci_unmap_addr(PTR, ADDR_NAME) (0) | 23 | #define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME) |
24 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) | 24 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ |
25 | #define pci_unmap_len(PTR, LEN_NAME) (0) | 25 | do { break; } while (pci_unmap_addr(PTR, ADDR_NAME)) |
26 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) | 26 | #define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME) |
27 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
28 | do { break; } while (pci_unmap_len(PTR, LEN_NAME)) | ||
27 | 29 | ||
28 | 30 | ||
29 | #endif /* __KERNEL__ */ | 31 | #endif /* __KERNEL__ */ |
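
The old no-op pci_unmap_* macros silently accepted any arguments; the new versions declare zero-length array members and turn the accessors into sizeof-based expressions, so the referenced fields must actually exist (catching typos at compile time) while still producing no code and no storage on 32-bit x86, where unmapping really is a no-op. A standalone look at the zero-length-array half of the trick (the struct name is invented for the example):

#include <stdio.h>

struct mapping {
        void *cpu_addr;
        unsigned long dma_addr[0];   /* what DECLARE_PCI_UNMAP_ADDR() expands to */
};

int main(void)
{
        printf("sizeof(struct mapping) = %zu\n", sizeof(struct mapping));
        printf("sizeof of the field    = %zu\n",
               sizeof(((struct mapping *)0)->dma_addr));   /* 0: no storage used */
        return 0;
}
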
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 97c271b2910b..bcb5446a08d1 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -20,30 +20,25 @@ | |||
20 | #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ | 20 | #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ |
21 | #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ | 21 | #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ |
22 | 22 | ||
23 | /* | 23 | #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) |
24 | * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a | 24 | #define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) |
25 | * sign-extended value on 32-bit with all 1's in the upper word, | 25 | #define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) |
26 | * which preserves the upper pte values on 64-bit ptes: | 26 | #define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT) |
27 | */ | 27 | #define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD) |
28 | #define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT) | 28 | #define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED) |
29 | #define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW) | 29 | #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) |
30 | #define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER) | 30 | #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) |
31 | #define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT) | 31 | #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) |
32 | #define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD) | 32 | #define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) |
33 | #define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED) | 33 | #define _PAGE_UNUSED2 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2) |
34 | #define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY) | 34 | #define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) |
35 | #define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */ | 35 | #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) |
36 | #define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */ | 36 | #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) |
37 | #define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) | ||
38 | #define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) | ||
39 | #define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) | ||
40 | #define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT) | ||
41 | #define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE) | ||
42 | 37 | ||
43 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 38 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
44 | #define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX) | 39 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
45 | #else | 40 | #else |
46 | #define _PAGE_NX 0 | 41 | #define _PAGE_NX (_AT(pteval_t, 0)) |
47 | #endif | 42 | #endif |
48 | 43 | ||
49 | /* If _PAGE_PRESENT is clear, we use these: */ | 44 | /* If _PAGE_PRESENT is clear, we use these: */ |
@@ -164,37 +159,37 @@ extern struct list_head pgd_list; | |||
164 | */ | 159 | */ |
165 | static inline int pte_dirty(pte_t pte) | 160 | static inline int pte_dirty(pte_t pte) |
166 | { | 161 | { |
167 | return pte_val(pte) & _PAGE_DIRTY; | 162 | return pte_flags(pte) & _PAGE_DIRTY; |
168 | } | 163 | } |
169 | 164 | ||
170 | static inline int pte_young(pte_t pte) | 165 | static inline int pte_young(pte_t pte) |
171 | { | 166 | { |
172 | return pte_val(pte) & _PAGE_ACCESSED; | 167 | return pte_flags(pte) & _PAGE_ACCESSED; |
173 | } | 168 | } |
174 | 169 | ||
175 | static inline int pte_write(pte_t pte) | 170 | static inline int pte_write(pte_t pte) |
176 | { | 171 | { |
177 | return pte_val(pte) & _PAGE_RW; | 172 | return pte_flags(pte) & _PAGE_RW; |
178 | } | 173 | } |
179 | 174 | ||
180 | static inline int pte_file(pte_t pte) | 175 | static inline int pte_file(pte_t pte) |
181 | { | 176 | { |
182 | return pte_val(pte) & _PAGE_FILE; | 177 | return pte_flags(pte) & _PAGE_FILE; |
183 | } | 178 | } |
184 | 179 | ||
185 | static inline int pte_huge(pte_t pte) | 180 | static inline int pte_huge(pte_t pte) |
186 | { | 181 | { |
187 | return pte_val(pte) & _PAGE_PSE; | 182 | return pte_flags(pte) & _PAGE_PSE; |
188 | } | 183 | } |
189 | 184 | ||
190 | static inline int pte_global(pte_t pte) | 185 | static inline int pte_global(pte_t pte) |
191 | { | 186 | { |
192 | return pte_val(pte) & _PAGE_GLOBAL; | 187 | return pte_flags(pte) & _PAGE_GLOBAL; |
193 | } | 188 | } |
194 | 189 | ||
195 | static inline int pte_exec(pte_t pte) | 190 | static inline int pte_exec(pte_t pte) |
196 | { | 191 | { |
197 | return !(pte_val(pte) & _PAGE_NX); | 192 | return !(pte_flags(pte) & _PAGE_NX); |
198 | } | 193 | } |
199 | 194 | ||
200 | static inline int pte_special(pte_t pte) | 195 | static inline int pte_special(pte_t pte) |
@@ -210,22 +205,22 @@ static inline int pmd_large(pmd_t pte) | |||
210 | 205 | ||
211 | static inline pte_t pte_mkclean(pte_t pte) | 206 | static inline pte_t pte_mkclean(pte_t pte) |
212 | { | 207 | { |
213 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); | 208 | return __pte(pte_val(pte) & ~_PAGE_DIRTY); |
214 | } | 209 | } |
215 | 210 | ||
216 | static inline pte_t pte_mkold(pte_t pte) | 211 | static inline pte_t pte_mkold(pte_t pte) |
217 | { | 212 | { |
218 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); | 213 | return __pte(pte_val(pte) & ~_PAGE_ACCESSED); |
219 | } | 214 | } |
220 | 215 | ||
221 | static inline pte_t pte_wrprotect(pte_t pte) | 216 | static inline pte_t pte_wrprotect(pte_t pte) |
222 | { | 217 | { |
223 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); | 218 | return __pte(pte_val(pte) & ~_PAGE_RW); |
224 | } | 219 | } |
225 | 220 | ||
226 | static inline pte_t pte_mkexec(pte_t pte) | 221 | static inline pte_t pte_mkexec(pte_t pte) |
227 | { | 222 | { |
228 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); | 223 | return __pte(pte_val(pte) & ~_PAGE_NX); |
229 | } | 224 | } |
230 | 225 | ||
231 | static inline pte_t pte_mkdirty(pte_t pte) | 226 | static inline pte_t pte_mkdirty(pte_t pte) |
@@ -250,7 +245,7 @@ static inline pte_t pte_mkhuge(pte_t pte) | |||
250 | 245 | ||
251 | static inline pte_t pte_clrhuge(pte_t pte) | 246 | static inline pte_t pte_clrhuge(pte_t pte) |
252 | { | 247 | { |
253 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); | 248 | return __pte(pte_val(pte) & ~_PAGE_PSE); |
254 | } | 249 | } |
255 | 250 | ||
256 | static inline pte_t pte_mkglobal(pte_t pte) | 251 | static inline pte_t pte_mkglobal(pte_t pte) |
@@ -260,7 +255,7 @@ static inline pte_t pte_mkglobal(pte_t pte) | |||
260 | 255 | ||
261 | static inline pte_t pte_clrglobal(pte_t pte) | 256 | static inline pte_t pte_clrglobal(pte_t pte) |
262 | { | 257 | { |
263 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); | 258 | return __pte(pte_val(pte) & ~_PAGE_GLOBAL); |
264 | } | 259 | } |
265 | 260 | ||
266 | static inline pte_t pte_mkspecial(pte_t pte) | 261 | static inline pte_t pte_mkspecial(pte_t pte) |
@@ -305,7 +300,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
305 | return __pgprot(preservebits | addbits); | 300 | return __pgprot(preservebits | addbits); |
306 | } | 301 | } |
307 | 302 | ||
308 | #define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK) | 303 | #define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK) |
309 | 304 | ||
310 | #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) | 305 | #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) |
311 | 306 | ||
@@ -369,8 +364,15 @@ enum { | |||
369 | PG_LEVEL_4K, | 364 | PG_LEVEL_4K, |
370 | PG_LEVEL_2M, | 365 | PG_LEVEL_2M, |
371 | PG_LEVEL_1G, | 366 | PG_LEVEL_1G, |
367 | PG_LEVEL_NUM | ||
372 | }; | 368 | }; |
373 | 369 | ||
370 | #ifdef CONFIG_PROC_FS | ||
371 | extern void update_page_count(int level, unsigned long pages); | ||
372 | #else | ||
373 | static inline void update_page_count(int level, unsigned long pages) { } | ||
374 | #endif | ||
375 | |||
374 | /* | 376 | /* |
375 | * Helper function that returns the kernel pagetable entry controlling | 377 | * Helper function that returns the kernel pagetable entry controlling |
376 | * the virtual address 'address'. NULL means no pagetable entry present. | 378 | * the virtual address 'address'. NULL means no pagetable entry present. |
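
Two related cleanups in pgtable.h. First, the _PAGE_* bits are now typed with _AT(pteval_t, 1) instead of relying on the sign-extension trick the removed comment described, which also lets the pte_mk*/pte_clr* helpers drop their explicit (pteval_t) casts. Second, flag tests switch from pte_val() to the new pte_flags() accessor: under CONFIG_PARAVIRT, pte_val() may go through a hook that translates the frame-number part of the pte (costly on Xen), while the flag bits need no translation. A standalone illustration of why the typed one-bit matters for the NX bit (bit 63) on 32-bit PAE (names local to the example):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;      /* the PAE / 64-bit case */
#define _AT(T, x) ((T)(x))      /* same shape as the kernel's _AT() */

#define DEMO_PAGE_BIT_NX 63
#define DEMO_PAGE_NX (_AT(pteval_t, 1) << DEMO_PAGE_BIT_NX)
/* A plain "1 << 63" would overflow a 32-bit int long before reaching bit 63. */

int main(void)
{
        printf("_PAGE_NX = %#llx\n", (unsigned long long)DEMO_PAGE_NX);
        return 0;
}
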
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h index 199cab107d85..092b39b3a7e6 100644 --- a/include/asm-x86/processor-flags.h +++ b/include/asm-x86/processor-flags.h | |||
@@ -88,4 +88,10 @@ | |||
88 | #define CX86_ARR_BASE 0xc4 | 88 | #define CX86_ARR_BASE 0xc4 |
89 | #define CX86_RCR_BASE 0xdc | 89 | #define CX86_RCR_BASE 0xdc |
90 | 90 | ||
91 | #ifdef CONFIG_VM86 | ||
92 | #define X86_VM_MASK X86_EFLAGS_VM | ||
93 | #else | ||
94 | #define X86_VM_MASK 0 /* No VM86 support */ | ||
95 | #endif | ||
96 | |||
91 | #endif /* __ASM_I386_PROCESSOR_FLAGS_H */ | 97 | #endif /* __ASM_I386_PROCESSOR_FLAGS_H */ |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 559105220a47..4ab2ede6f4b9 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -263,15 +263,11 @@ struct tss_struct { | |||
263 | struct thread_struct *io_bitmap_owner; | 263 | struct thread_struct *io_bitmap_owner; |
264 | 264 | ||
265 | /* | 265 | /* |
266 | * Pad the TSS to be cacheline-aligned (size is 0x100): | ||
267 | */ | ||
268 | unsigned long __cacheline_filler[35]; | ||
269 | /* | ||
270 | * .. and then another 0x100 bytes for the emergency kernel stack: | 266 | * .. and then another 0x100 bytes for the emergency kernel stack: |
271 | */ | 267 | */ |
272 | unsigned long stack[64]; | 268 | unsigned long stack[64]; |
273 | 269 | ||
274 | } __attribute__((packed)); | 270 | } ____cacheline_aligned; |
275 | 271 | ||
276 | DECLARE_PER_CPU(struct tss_struct, init_tss); | 272 | DECLARE_PER_CPU(struct tss_struct, init_tss); |
277 | 273 | ||
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 9f922b0b95d6..8a71db803da6 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h | |||
@@ -3,7 +3,12 @@ | |||
3 | 3 | ||
4 | #include <linux/compiler.h> /* For __user */ | 4 | #include <linux/compiler.h> /* For __user */ |
5 | #include <asm/ptrace-abi.h> | 5 | #include <asm/ptrace-abi.h> |
6 | #include <asm/processor-flags.h> | ||
6 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | #include <asm/ds.h> /* the DS BTS struct is used for ptrace too */ | ||
10 | #include <asm/segment.h> | ||
11 | #endif | ||
7 | 12 | ||
8 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
9 | 14 | ||
@@ -55,9 +60,6 @@ struct pt_regs { | |||
55 | unsigned long ss; | 60 | unsigned long ss; |
56 | }; | 61 | }; |
57 | 62 | ||
58 | #include <asm/vm86.h> | ||
59 | #include <asm/segment.h> | ||
60 | |||
61 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
62 | 64 | ||
63 | #else /* __i386__ */ | 65 | #else /* __i386__ */ |
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h new file mode 100644 index 000000000000..6857f840b243 --- /dev/null +++ b/include/asm-x86/pvclock-abi.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _ASM_X86_PVCLOCK_ABI_H_ | ||
2 | #define _ASM_X86_PVCLOCK_ABI_H_ | ||
3 | #ifndef __ASSEMBLY__ | ||
4 | |||
5 | /* | ||
6 | * These structs MUST NOT be changed. | ||
7 | * They are the ABI between hypervisor and guest OS. | ||
8 | * Both Xen and KVM are using this. | ||
9 | * | ||
10 | * pvclock_vcpu_time_info holds the system time and the tsc timestamp | ||
11 | * of the last update. So the guest can use the tsc delta to get a | ||
12 | * more precise system time. There is one per virtual cpu. | ||
13 | * | ||
14 | * pvclock_wall_clock references the point in time when the system | ||
15 | * time was zero (usually boot time), thus the guest calculates the | ||
16 | * current wall clock by adding the system time. | ||
17 | * | ||
18 | * Protocol for the "version" fields is: hypervisor raises it (making | ||
19 | * it uneven) before it starts updating the fields and raises it again | ||
20 | * (making it even) when it is done. Thus the guest can make sure the | ||
21 | * time values it got are consistent by checking the version before | ||
22 | * and after reading them. | ||
23 | */ | ||
24 | |||
25 | struct pvclock_vcpu_time_info { | ||
26 | u32 version; | ||
27 | u32 pad0; | ||
28 | u64 tsc_timestamp; | ||
29 | u64 system_time; | ||
30 | u32 tsc_to_system_mul; | ||
31 | s8 tsc_shift; | ||
32 | u8 pad[3]; | ||
33 | } __attribute__((__packed__)); /* 32 bytes */ | ||
34 | |||
35 | struct pvclock_wall_clock { | ||
36 | u32 version; | ||
37 | u32 sec; | ||
38 | u32 nsec; | ||
39 | } __attribute__((__packed__)); | ||
40 | |||
41 | #endif /* __ASSEMBLY__ */ | ||
42 | #endif /* _ASM_X86_PVCLOCK_ABI_H_ */ | ||
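
The comment block above defines the seqlock-style version protocol, but the reader side lives elsewhere (behind pvclock_clocksource_read() in the companion header). A standalone sketch of what a consistent read has to look like, using local stand-in types; the real structure layout and barrier details are in the kernel sources, so treat this purely as an illustration of the even/odd retry rule:

#include <stdint.h>

struct demo_time_info {         /* stand-in for pvclock_vcpu_time_info */
        uint32_t version;
        uint64_t tsc_timestamp;
        uint64_t system_time;
};

static uint64_t read_system_time(const volatile struct demo_time_info *src)
{
        uint32_t v;
        uint64_t t;

        do {
                v = src->version;
                /* a real implementation issues a read barrier here */
                t = src->system_time;
                /* ... and here, before re-checking the version */
        } while ((v & 1) || v != src->version);   /* odd or changed: retry */

        return t;
}
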
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h new file mode 100644 index 000000000000..85b1bba8e0a3 --- /dev/null +++ b/include/asm-x86/pvclock.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ASM_X86_PVCLOCK_H_ | ||
2 | #define _ASM_X86_PVCLOCK_H_ | ||
3 | |||
4 | #include <linux/clocksource.h> | ||
5 | #include <asm/pvclock-abi.h> | ||
6 | |||
7 | /* some helper functions for xen and kvm pv clock sources */ | ||
8 | cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); | ||
9 | void pvclock_read_wallclock(struct pvclock_wall_clock *wall, | ||
10 | struct pvclock_vcpu_time_info *vcpu, | ||
11 | struct timespec *ts); | ||
12 | |||
13 | #endif /* _ASM_X86_PVCLOCK_H_ */ | ||
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index e63741f19392..206f355786dc 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h | |||
@@ -14,8 +14,8 @@ struct machine_ops { | |||
14 | 14 | ||
15 | extern struct machine_ops machine_ops; | 15 | extern struct machine_ops machine_ops; |
16 | 16 | ||
17 | void machine_real_restart(unsigned char *code, int length); | ||
18 | void native_machine_crash_shutdown(struct pt_regs *regs); | 17 | void native_machine_crash_shutdown(struct pt_regs *regs); |
19 | void native_machine_shutdown(void); | 18 | void native_machine_shutdown(void); |
19 | void machine_real_restart(const unsigned char *code, int length); | ||
20 | 20 | ||
21 | #endif /* _ASM_REBOOT_H */ | 21 | #endif /* _ASM_REBOOT_H */ |
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h index 7400d3ad75c6..8c387198ca88 100644 --- a/include/asm-x86/required-features.h +++ b/include/asm-x86/required-features.h | |||
@@ -19,9 +19,13 @@ | |||
19 | 19 | ||
20 | #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) | 20 | #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
21 | # define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) | 21 | # define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) |
22 | # define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) | ||
23 | #else | 22 | #else |
24 | # define NEED_PAE 0 | 23 | # define NEED_PAE 0 |
24 | #endif | ||
25 | |||
26 | #ifdef CONFIG_X86_CMPXCHG64 | ||
27 | # define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) | ||
28 | #else | ||
25 | # define NEED_CX8 0 | 29 | # define NEED_CX8 0 |
26 | #endif | 30 | #endif |
27 | 31 | ||
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 2557514d7ef6..8d9f0b41ee86 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #define TRACE_RESUME(user) \ | 6 | #define TRACE_RESUME(user) \ |
7 | do { \ | 7 | do { \ |
8 | if (pm_trace_enabled) { \ | 8 | if (pm_trace_enabled) { \ |
9 | void *tracedata; \ | 9 | const void *tracedata; \ |
10 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ | 10 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ |
11 | ".section .tracedata,\"a\"\n" \ | 11 | ".section .tracedata,\"a\"\n" \ |
12 | "1:\t.word %c1\n\t" \ | 12 | "1:\t.word %c1\n\t" \ |
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h index 18da19e89bff..36e71c5f306f 100644 --- a/include/asm-x86/seccomp_32.h +++ b/include/asm-x86/seccomp_32.h | |||
@@ -1,4 +1,5 @@ | |||
1 | #ifndef _ASM_SECCOMP_H | 1 | #ifndef _ASM_SECCOMP_H |
2 | #define _ASM_SECCOMP_H | ||
2 | 3 | ||
3 | #include <linux/thread_info.h> | 4 | #include <linux/thread_info.h> |
4 | 5 | ||
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h index 553af65a2287..76cfe69aa63c 100644 --- a/include/asm-x86/seccomp_64.h +++ b/include/asm-x86/seccomp_64.h | |||
@@ -1,4 +1,5 @@ | |||
1 | #ifndef _ASM_SECCOMP_H | 1 | #ifndef _ASM_SECCOMP_H |
2 | #define _ASM_SECCOMP_H | ||
2 | 3 | ||
3 | #include <linux/thread_info.h> | 4 | #include <linux/thread_info.h> |
4 | 5 | ||
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index b49369ad9a61..193578cd1fd9 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h | |||
@@ -29,81 +29,116 @@ extern char *strchr(const char *s, int c); | |||
29 | #define __HAVE_ARCH_STRLEN | 29 | #define __HAVE_ARCH_STRLEN |
30 | extern size_t strlen(const char *s); | 30 | extern size_t strlen(const char *s); |
31 | 31 | ||
32 | static __always_inline void * __memcpy(void * to, const void * from, size_t n) | 32 | static __always_inline void *__memcpy(void *to, const void *from, size_t n) |
33 | { | 33 | { |
34 | int d0, d1, d2; | 34 | int d0, d1, d2; |
35 | __asm__ __volatile__( | 35 | asm volatile("rep ; movsl\n\t" |
36 | "rep ; movsl\n\t" | 36 | "movl %4,%%ecx\n\t" |
37 | "movl %4,%%ecx\n\t" | 37 | "andl $3,%%ecx\n\t" |
38 | "andl $3,%%ecx\n\t" | 38 | "jz 1f\n\t" |
39 | "jz 1f\n\t" | 39 | "rep ; movsb\n\t" |
40 | "rep ; movsb\n\t" | 40 | "1:" |
41 | "1:" | 41 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) |
42 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) | 42 | : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) |
43 | : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) | 43 | : "memory"); |
44 | : "memory"); | 44 | return to; |
45 | return (to); | ||
46 | } | 45 | } |
47 | 46 | ||
48 | /* | 47 | /* |
49 | * This looks ugly, but the compiler can optimize it totally, | 48 | * This looks ugly, but the compiler can optimize it totally, |
50 | * as the count is constant. | 49 | * as the count is constant. |
51 | */ | 50 | */ |
52 | static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) | 51 | static __always_inline void *__constant_memcpy(void *to, const void *from, |
52 | size_t n) | ||
53 | { | 53 | { |
54 | long esi, edi; | 54 | long esi, edi; |
55 | if (!n) return to; | 55 | if (!n) |
56 | #if 1 /* want to do small copies with non-string ops? */ | 56 | return to; |
57 | |||
57 | switch (n) { | 58 | switch (n) { |
58 | case 1: *(char*)to = *(char*)from; return to; | 59 | case 1: |
59 | case 2: *(short*)to = *(short*)from; return to; | 60 | *(char *)to = *(char *)from; |
60 | case 4: *(int*)to = *(int*)from; return to; | 61 | return to; |
61 | #if 1 /* including those doable with two moves? */ | 62 | case 2: |
62 | case 3: *(short*)to = *(short*)from; | 63 | *(short *)to = *(short *)from; |
63 | *((char*)to+2) = *((char*)from+2); return to; | 64 | return to; |
64 | case 5: *(int*)to = *(int*)from; | 65 | case 4: |
65 | *((char*)to+4) = *((char*)from+4); return to; | 66 | *(int *)to = *(int *)from; |
66 | case 6: *(int*)to = *(int*)from; | 67 | return to; |
67 | *((short*)to+2) = *((short*)from+2); return to; | 68 | |
68 | case 8: *(int*)to = *(int*)from; | 69 | case 3: |
69 | *((int*)to+1) = *((int*)from+1); return to; | 70 | *(short *)to = *(short *)from; |
70 | #endif | 71 | *((char *)to + 2) = *((char *)from + 2); |
72 | return to; | ||
73 | case 5: | ||
74 | *(int *)to = *(int *)from; | ||
75 | *((char *)to + 4) = *((char *)from + 4); | ||
76 | return to; | ||
77 | case 6: | ||
78 | *(int *)to = *(int *)from; | ||
79 | *((short *)to + 2) = *((short *)from + 2); | ||
80 | return to; | ||
81 | case 8: | ||
82 | *(int *)to = *(int *)from; | ||
83 | *((int *)to + 1) = *((int *)from + 1); | ||
84 | return to; | ||
71 | } | 85 | } |
72 | #endif | 86 | |
73 | esi = (long) from; | 87 | esi = (long)from; |
74 | edi = (long) to; | 88 | edi = (long)to; |
75 | if (n >= 5*4) { | 89 | if (n >= 5 * 4) { |
76 | /* large block: use rep prefix */ | 90 | /* large block: use rep prefix */ |
77 | int ecx; | 91 | int ecx; |
78 | __asm__ __volatile__( | 92 | asm volatile("rep ; movsl" |
79 | "rep ; movsl" | 93 | : "=&c" (ecx), "=&D" (edi), "=&S" (esi) |
80 | : "=&c" (ecx), "=&D" (edi), "=&S" (esi) | 94 | : "0" (n / 4), "1" (edi), "2" (esi) |
81 | : "0" (n/4), "1" (edi),"2" (esi) | 95 | : "memory" |
82 | : "memory" | ||
83 | ); | 96 | ); |
84 | } else { | 97 | } else { |
85 | /* small block: don't clobber ecx + smaller code */ | 98 | /* small block: don't clobber ecx + smaller code */ |
86 | if (n >= 4*4) __asm__ __volatile__("movsl" | 99 | if (n >= 4 * 4) |
87 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 100 | asm volatile("movsl" |
88 | if (n >= 3*4) __asm__ __volatile__("movsl" | 101 | : "=&D"(edi), "=&S"(esi) |
89 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 102 | : "0"(edi), "1"(esi) |
90 | if (n >= 2*4) __asm__ __volatile__("movsl" | 103 | : "memory"); |
91 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 104 | if (n >= 3 * 4) |
92 | if (n >= 1*4) __asm__ __volatile__("movsl" | 105 | asm volatile("movsl" |
93 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 106 | : "=&D"(edi), "=&S"(esi) |
107 | : "0"(edi), "1"(esi) | ||
108 | : "memory"); | ||
109 | if (n >= 2 * 4) | ||
110 | asm volatile("movsl" | ||
111 | : "=&D"(edi), "=&S"(esi) | ||
112 | : "0"(edi), "1"(esi) | ||
113 | : "memory"); | ||
114 | if (n >= 1 * 4) | ||
115 | asm volatile("movsl" | ||
116 | : "=&D"(edi), "=&S"(esi) | ||
117 | : "0"(edi), "1"(esi) | ||
118 | : "memory"); | ||
94 | } | 119 | } |
95 | switch (n % 4) { | 120 | switch (n % 4) { |
96 | /* tail */ | 121 | /* tail */ |
97 | case 0: return to; | 122 | case 0: |
98 | case 1: __asm__ __volatile__("movsb" | 123 | return to; |
99 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 124 | case 1: |
100 | return to; | 125 | asm volatile("movsb" |
101 | case 2: __asm__ __volatile__("movsw" | 126 | : "=&D"(edi), "=&S"(esi) |
102 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 127 | : "0"(edi), "1"(esi) |
103 | return to; | 128 | : "memory"); |
104 | default: __asm__ __volatile__("movsw\n\tmovsb" | 129 | return to; |
105 | :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); | 130 | case 2: |
106 | return to; | 131 | asm volatile("movsw" |
132 | : "=&D"(edi), "=&S"(esi) | ||
133 | : "0"(edi), "1"(esi) | ||
134 | : "memory"); | ||
135 | return to; | ||
136 | default: | ||
137 | asm volatile("movsw\n\tmovsb" | ||
138 | : "=&D"(edi), "=&S"(esi) | ||
139 | : "0"(edi), "1"(esi) | ||
140 | : "memory"); | ||
141 | return to; | ||
107 | } | 142 | } |
108 | } | 143 | } |
109 | 144 | ||
@@ -117,87 +152,86 @@ static __always_inline void * __constant_memcpy(void * to, const void * from, si | |||
117 | * This CPU favours 3DNow strongly (eg AMD Athlon) | 152 | * This CPU favours 3DNow strongly (eg AMD Athlon) |
118 | */ | 153 | */ |
119 | 154 | ||
120 | static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) | 155 | static inline void *__constant_memcpy3d(void *to, const void *from, size_t len) |
121 | { | 156 | { |
122 | if (len < 512) | 157 | if (len < 512) |
123 | return __constant_memcpy(to, from, len); | 158 | return __constant_memcpy(to, from, len); |
124 | return _mmx_memcpy(to, from, len); | 159 | return _mmx_memcpy(to, from, len); |
125 | } | 160 | } |
126 | 161 | ||
127 | static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) | 162 | static inline void *__memcpy3d(void *to, const void *from, size_t len) |
128 | { | 163 | { |
129 | if (len < 512) | 164 | if (len < 512) |
130 | return __memcpy(to, from, len); | 165 | return __memcpy(to, from, len); |
131 | return _mmx_memcpy(to, from, len); | 166 | return _mmx_memcpy(to, from, len); |
132 | } | 167 | } |
133 | 168 | ||
134 | #define memcpy(t, f, n) \ | 169 | #define memcpy(t, f, n) \ |
135 | (__builtin_constant_p(n) ? \ | 170 | (__builtin_constant_p((n)) \ |
136 | __constant_memcpy3d((t),(f),(n)) : \ | 171 | ? __constant_memcpy3d((t), (f), (n)) \ |
137 | __memcpy3d((t),(f),(n))) | 172 | : __memcpy3d((t), (f), (n))) |
138 | 173 | ||
139 | #else | 174 | #else |
140 | 175 | ||
141 | /* | 176 | /* |
142 | * No 3D Now! | 177 | * No 3D Now! |
143 | */ | 178 | */ |
144 | 179 | ||
145 | #define memcpy(t, f, n) \ | 180 | #define memcpy(t, f, n) \ |
146 | (__builtin_constant_p(n) ? \ | 181 | (__builtin_constant_p((n)) \ |
147 | __constant_memcpy((t),(f),(n)) : \ | 182 | ? __constant_memcpy((t), (f), (n)) \ |
148 | __memcpy((t),(f),(n))) | 183 | : __memcpy((t), (f), (n))) |
149 | 184 | ||
150 | #endif | 185 | #endif |
151 | 186 | ||
152 | #define __HAVE_ARCH_MEMMOVE | 187 | #define __HAVE_ARCH_MEMMOVE |
153 | void *memmove(void * dest,const void * src, size_t n); | 188 | void *memmove(void *dest, const void *src, size_t n); |
154 | 189 | ||
155 | #define memcmp __builtin_memcmp | 190 | #define memcmp __builtin_memcmp |
156 | 191 | ||
157 | #define __HAVE_ARCH_MEMCHR | 192 | #define __HAVE_ARCH_MEMCHR |
158 | extern void *memchr(const void * cs,int c,size_t count); | 193 | extern void *memchr(const void *cs, int c, size_t count); |
159 | 194 | ||
160 | static inline void * __memset_generic(void * s, char c,size_t count) | 195 | static inline void *__memset_generic(void *s, char c, size_t count) |
161 | { | 196 | { |
162 | int d0, d1; | 197 | int d0, d1; |
163 | __asm__ __volatile__( | 198 | asm volatile("rep\n\t" |
164 | "rep\n\t" | 199 | "stosb" |
165 | "stosb" | 200 | : "=&c" (d0), "=&D" (d1) |
166 | : "=&c" (d0), "=&D" (d1) | 201 | : "a" (c), "1" (s), "0" (count) |
167 | :"a" (c),"1" (s),"0" (count) | 202 | : "memory"); |
168 | :"memory"); | 203 | return s; |
169 | return s; | ||
170 | } | 204 | } |
171 | 205 | ||
172 | /* we might want to write optimized versions of these later */ | 206 | /* we might want to write optimized versions of these later */ |
173 | #define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) | 207 | #define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) |
174 | 208 | ||
175 | /* | 209 | /* |
176 | * memset(x,0,y) is a reasonably common thing to do, so we want to fill | 210 | * memset(x, 0, y) is a reasonably common thing to do, so we want to fill |
177 | * things 32 bits at a time even when we don't know the size of the | 211 | * things 32 bits at a time even when we don't know the size of the |
178 | * area at compile-time.. | 212 | * area at compile-time.. |
179 | */ | 213 | */ |
180 | static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) | 214 | static __always_inline |
215 | void *__constant_c_memset(void *s, unsigned long c, size_t count) | ||
181 | { | 216 | { |
182 | int d0, d1; | 217 | int d0, d1; |
183 | __asm__ __volatile__( | 218 | asm volatile("rep ; stosl\n\t" |
184 | "rep ; stosl\n\t" | 219 | "testb $2,%b3\n\t" |
185 | "testb $2,%b3\n\t" | 220 | "je 1f\n\t" |
186 | "je 1f\n\t" | 221 | "stosw\n" |
187 | "stosw\n" | 222 | "1:\ttestb $1,%b3\n\t" |
188 | "1:\ttestb $1,%b3\n\t" | 223 | "je 2f\n\t" |
189 | "je 2f\n\t" | 224 | "stosb\n" |
190 | "stosb\n" | 225 | "2:" |
191 | "2:" | 226 | : "=&c" (d0), "=&D" (d1) |
192 | :"=&c" (d0), "=&D" (d1) | 227 | : "a" (c), "q" (count), "0" (count/4), "1" ((long)s) |
193 | :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) | 228 | : "memory"); |
194 | :"memory"); | 229 | return s; |
195 | return (s); | ||
196 | } | 230 | } |
197 | 231 | ||
198 | /* Added by Gertjan van Wingerde to make minix and sysv module work */ | 232 | /* Added by Gertjan van Wingerde to make minix and sysv module work */ |
199 | #define __HAVE_ARCH_STRNLEN | 233 | #define __HAVE_ARCH_STRNLEN |
200 | extern size_t strnlen(const char * s, size_t count); | 234 | extern size_t strnlen(const char *s, size_t count); |
201 | /* end of additional stuff */ | 235 | /* end of additional stuff */ |
202 | 236 | ||
203 | #define __HAVE_ARCH_STRSTR | 237 | #define __HAVE_ARCH_STRSTR |
@@ -207,66 +241,85 @@ extern char *strstr(const char *cs, const char *ct); | |||
207 | * This looks horribly ugly, but the compiler can optimize it totally, | 241 | * This looks horribly ugly, but the compiler can optimize it totally, |
208 | * as we by now know that both pattern and count is constant.. | 242 | * as we by now know that both pattern and count is constant.. |
209 | */ | 243 | */ |
210 | static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) | 244 | static __always_inline |
245 | void *__constant_c_and_count_memset(void *s, unsigned long pattern, | ||
246 | size_t count) | ||
211 | { | 247 | { |
212 | switch (count) { | 248 | switch (count) { |
249 | case 0: | ||
250 | return s; | ||
251 | case 1: | ||
252 | *(unsigned char *)s = pattern & 0xff; | ||
253 | return s; | ||
254 | case 2: | ||
255 | *(unsigned short *)s = pattern & 0xffff; | ||
256 | return s; | ||
257 | case 3: | ||
258 | *(unsigned short *)s = pattern & 0xffff; | ||
259 | *((unsigned char *)s + 2) = pattern & 0xff; | ||
260 | return s; | ||
261 | case 4: | ||
262 | *(unsigned long *)s = pattern; | ||
263 | return s; | ||
264 | } | ||
265 | |||
266 | #define COMMON(x) \ | ||
267 | asm volatile("rep ; stosl" \ | ||
268 | x \ | ||
269 | : "=&c" (d0), "=&D" (d1) \ | ||
270 | : "a" (eax), "0" (count/4), "1" ((long)s) \ | ||
271 | : "memory") | ||
272 | |||
273 | { | ||
274 | int d0, d1; | ||
275 | #if __GNUC__ == 4 && __GNUC_MINOR__ == 0 | ||
276 | /* Workaround for broken gcc 4.0 */ | ||
277 | register unsigned long eax asm("%eax") = pattern; | ||
278 | #else | ||
279 | unsigned long eax = pattern; | ||
280 | #endif | ||
281 | |||
282 | switch (count % 4) { | ||
213 | case 0: | 283 | case 0: |
284 | COMMON(""); | ||
214 | return s; | 285 | return s; |
215 | case 1: | 286 | case 1: |
216 | *(unsigned char *)s = pattern & 0xff; | 287 | COMMON("\n\tstosb"); |
217 | return s; | 288 | return s; |
218 | case 2: | 289 | case 2: |
219 | *(unsigned short *)s = pattern & 0xffff; | 290 | COMMON("\n\tstosw"); |
220 | return s; | 291 | return s; |
221 | case 3: | 292 | default: |
222 | *(unsigned short *)s = pattern & 0xffff; | 293 | COMMON("\n\tstosw\n\tstosb"); |
223 | *(2+(unsigned char *)s) = pattern & 0xff; | ||
224 | return s; | ||
225 | case 4: | ||
226 | *(unsigned long *)s = pattern; | ||
227 | return s; | 294 | return s; |
295 | } | ||
228 | } | 296 | } |
229 | #define COMMON(x) \ | 297 | |
230 | __asm__ __volatile__( \ | ||
231 | "rep ; stosl" \ | ||
232 | x \ | ||
233 | : "=&c" (d0), "=&D" (d1) \ | ||
234 | : "a" (pattern),"0" (count/4),"1" ((long) s) \ | ||
235 | : "memory") | ||
236 | { | ||
237 | int d0, d1; | ||
238 | switch (count % 4) { | ||
239 | case 0: COMMON(""); return s; | ||
240 | case 1: COMMON("\n\tstosb"); return s; | ||
241 | case 2: COMMON("\n\tstosw"); return s; | ||
242 | default: COMMON("\n\tstosw\n\tstosb"); return s; | ||
243 | } | ||
244 | } | ||
245 | |||
246 | #undef COMMON | 298 | #undef COMMON |
247 | } | 299 | } |
248 | 300 | ||
249 | #define __constant_c_x_memset(s, c, count) \ | 301 | #define __constant_c_x_memset(s, c, count) \ |
250 | (__builtin_constant_p(count) ? \ | 302 | (__builtin_constant_p(count) \ |
251 | __constant_c_and_count_memset((s),(c),(count)) : \ | 303 | ? __constant_c_and_count_memset((s), (c), (count)) \ |
252 | __constant_c_memset((s),(c),(count))) | 304 | : __constant_c_memset((s), (c), (count))) |
253 | 305 | ||
254 | #define __memset(s, c, count) \ | 306 | #define __memset(s, c, count) \ |
255 | (__builtin_constant_p(count) ? \ | 307 | (__builtin_constant_p(count) \ |
256 | __constant_count_memset((s),(c),(count)) : \ | 308 | ? __constant_count_memset((s), (c), (count)) \ |
257 | __memset_generic((s),(c),(count))) | 309 | : __memset_generic((s), (c), (count))) |
258 | 310 | ||
259 | #define __HAVE_ARCH_MEMSET | 311 | #define __HAVE_ARCH_MEMSET |
260 | #define memset(s, c, count) \ | 312 | #define memset(s, c, count) \ |
261 | (__builtin_constant_p(c) ? \ | 313 | (__builtin_constant_p(c) \ |
262 | __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ | 314 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
263 | __memset((s),(c),(count))) | 315 | (count)) \ |
316 | : __memset((s), (c), (count))) | ||
264 | 317 | ||
265 | /* | 318 | /* |
266 | * find the first occurrence of byte 'c', or 1 past the area if none | 319 | * find the first occurrence of byte 'c', or 1 past the area if none |
267 | */ | 320 | */ |
268 | #define __HAVE_ARCH_MEMSCAN | 321 | #define __HAVE_ARCH_MEMSCAN |
269 | extern void *memscan(void * addr, int c, size_t size); | 322 | extern void *memscan(void *addr, int c, size_t size); |
270 | 323 | ||
271 | #endif /* __KERNEL__ */ | 324 | #endif /* __KERNEL__ */ |
272 | 325 | ||
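The __constant_c_and_count_memset() path above only pays off because both the fill pattern and the length are compile-time constants: the outer switch handles sizes 0-4 with direct stores, and the count % 4 switch picks the stosb/stosw tail for the rep ; stosl case. A minimal user-space sketch of the same constant-folding idea, using illustrative names rather than the kernel's macros:

    /* Sketch only: dispatch on a compile-time-known length the way the
     * kernel's memset() macro does with __builtin_constant_p(). */
    #include <stddef.h>
    #include <string.h>

    static inline void *small_const_memset(void *s, int c, size_t n)
    {
            unsigned char *p = s;

            switch (n) {            /* fully unrolled for tiny constant sizes */
            case 4: p[3] = (unsigned char)c;        /* fall through */
            case 3: p[2] = (unsigned char)c;        /* fall through */
            case 2: p[1] = (unsigned char)c;        /* fall through */
            case 1: p[0] = (unsigned char)c;        /* fall through */
            case 0: break;
            }
            return s;
    }

    #define my_memset(s, c, n)                                      \
            (__builtin_constant_p(n) && (n) <= 4                    \
                    ? small_const_memset((s), (c), (n))             \
                    : memset((s), (c), (n)))

With a literal n the compiler drops the unused branches entirely, which is exactly what the kernel version relies on.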
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index 24e1c080aa8a..8675c6782a7d 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h | |||
@@ -3,6 +3,9 @@ | |||
3 | * Based on code | 3 | * Based on code |
4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> | 4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> |
5 | */ | 5 | */ |
6 | #ifndef __ASM_X86_32_SUSPEND_H | ||
7 | #define __ASM_X86_32_SUSPEND_H | ||
8 | |||
6 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
7 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
8 | 11 | ||
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point) | |||
44 | /* routines for saving/restoring kernel state */ | 47 | /* routines for saving/restoring kernel state */ |
45 | extern int acpi_save_state_mem(void); | 48 | extern int acpi_save_state_mem(void); |
46 | #endif | 49 | #endif |
50 | |||
51 | #endif /* __ASM_X86_32_SUSPEND_H */ | ||
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index a2f04cd79b29..7e4c133795a1 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h | |||
@@ -289,7 +289,7 @@ static inline void native_wbinvd(void) | |||
289 | 289 | ||
290 | #endif/* CONFIG_PARAVIRT */ | 290 | #endif/* CONFIG_PARAVIRT */ |
291 | 291 | ||
292 | #define stts() write_cr0(8 | read_cr0()) | 292 | #define stts() write_cr0(read_cr0() | X86_CR0_TS) |
293 | 293 | ||
294 | #endif /* __KERNEL__ */ | 294 | #endif /* __KERNEL__ */ |
295 | 295 | ||
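The stts() rewrite in system.h is intended as a pure cleanup: X86_CR0_TS names bit 3 of CR0, i.e. the value 8 that the old code hard-coded. A small compile-time check of that assumption (a sketch; in-tree code would more likely use BUILD_BUG_ON):

    #include <asm/processor-flags.h>

    /* Fails to compile if the symbolic constant ever stops matching the
     * old literal, i.e. if the stts() rewrite were not a no-op. */
    static char stts_rewrite_is_noop[(X86_CR0_TS == 0x8) ? 1 : -1];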
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index 77244f17993f..895339d2bc0b 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h | |||
@@ -1,9 +1,253 @@ | |||
1 | /* thread_info.h: low-level thread information | ||
2 | * | ||
3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | ||
5 | */ | ||
6 | |||
1 | #ifndef _ASM_X86_THREAD_INFO_H | 7 | #ifndef _ASM_X86_THREAD_INFO_H |
8 | #define _ASM_X86_THREAD_INFO_H | ||
9 | |||
10 | #include <linux/compiler.h> | ||
11 | #include <asm/page.h> | ||
12 | #include <asm/types.h> | ||
13 | |||
14 | /* | ||
15 | * low level task data that entry.S needs immediate access to | ||
16 | * - this struct should fit entirely inside of one cache line | ||
17 | * - this struct shares the supervisor stack pages | ||
18 | */ | ||
19 | #ifndef __ASSEMBLY__ | ||
20 | struct task_struct; | ||
21 | struct exec_domain; | ||
22 | #include <asm/processor.h> | ||
23 | |||
24 | struct thread_info { | ||
25 | struct task_struct *task; /* main task structure */ | ||
26 | struct exec_domain *exec_domain; /* execution domain */ | ||
27 | unsigned long flags; /* low level flags */ | ||
28 | __u32 status; /* thread synchronous flags */ | ||
29 | __u32 cpu; /* current CPU */ | ||
30 | int preempt_count; /* 0 => preemptable, | ||
31 | <0 => BUG */ | ||
32 | mm_segment_t addr_limit; | ||
33 | struct restart_block restart_block; | ||
34 | void __user *sysenter_return; | ||
35 | #ifdef CONFIG_X86_32 | ||
36 | unsigned long previous_esp; /* ESP of the previous stack in | ||
37 | case of nested (IRQ) stacks | ||
38 | */ | ||
39 | __u8 supervisor_stack[0]; | ||
40 | #endif | ||
41 | }; | ||
42 | |||
43 | #define INIT_THREAD_INFO(tsk) \ | ||
44 | { \ | ||
45 | .task = &tsk, \ | ||
46 | .exec_domain = &default_exec_domain, \ | ||
47 | .flags = 0, \ | ||
48 | .cpu = 0, \ | ||
49 | .preempt_count = 1, \ | ||
50 | .addr_limit = KERNEL_DS, \ | ||
51 | .restart_block = { \ | ||
52 | .fn = do_no_restart_syscall, \ | ||
53 | }, \ | ||
54 | } | ||
55 | |||
56 | #define init_thread_info (init_thread_union.thread_info) | ||
57 | #define init_stack (init_thread_union.stack) | ||
58 | |||
59 | #else /* !__ASSEMBLY__ */ | ||
60 | |||
61 | #include <asm/asm-offsets.h> | ||
62 | |||
63 | #endif | ||
64 | |||
65 | /* | ||
66 | * thread information flags | ||
67 | * - these are process state flags that various assembly files | ||
68 | * may need to access | ||
69 | * - pending work-to-be-done flags are in LSW | ||
70 | * - other flags in MSW | ||
71 | * Warning: layout of LSW is hardcoded in entry.S | ||
72 | */ | ||
73 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
74 | #define TIF_SIGPENDING 2 /* signal pending */ | ||
75 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | ||
76 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | ||
77 | #define TIF_IRET 5 /* force IRET */ | ||
2 | #ifdef CONFIG_X86_32 | 78 | #ifdef CONFIG_X86_32 |
3 | # include "thread_info_32.h" | 79 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
80 | #endif | ||
81 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | ||
82 | #define TIF_SECCOMP 8 /* secure computing */ | ||
83 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | ||
84 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | ||
85 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | ||
86 | #define TIF_IA32 17 /* 32bit process */ | ||
87 | #define TIF_FORK 18 /* ret_from_fork */ | ||
88 | #define TIF_ABI_PENDING 19 | ||
89 | #define TIF_MEMDIE 20 | ||
90 | #define TIF_DEBUG 21 /* uses debug registers */ | ||
91 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | ||
92 | #define TIF_FREEZE 23 /* is freezing for suspend */ | ||
93 | #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ | ||
94 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | ||
95 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | ||
96 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ | ||
97 | |||
98 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | ||
99 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | ||
100 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
101 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | ||
102 | #define _TIF_IRET (1 << TIF_IRET) | ||
103 | #ifdef CONFIG_X86_32 | ||
104 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | ||
4 | #else | 105 | #else |
5 | # include "thread_info_64.h" | 106 | #define _TIF_SYSCALL_EMU 0 |
6 | #endif | 107 | #endif |
108 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | ||
109 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | ||
110 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | ||
111 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) | ||
112 | #define _TIF_NOTSC (1 << TIF_NOTSC) | ||
113 | #define _TIF_IA32 (1 << TIF_IA32) | ||
114 | #define _TIF_FORK (1 << TIF_FORK) | ||
115 | #define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) | ||
116 | #define _TIF_DEBUG (1 << TIF_DEBUG) | ||
117 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | ||
118 | #define _TIF_FREEZE (1 << TIF_FREEZE) | ||
119 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) | ||
120 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | ||
121 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | ||
122 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) | ||
123 | |||
124 | /* work to do on interrupt/exception return */ | ||
125 | #define _TIF_WORK_MASK \ | ||
126 | (0x0000FFFF & \ | ||
127 | ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP| \ | ||
128 | _TIF_SECCOMP|_TIF_SYSCALL_EMU)) | ||
129 | |||
130 | /* work to do on any return to user space */ | ||
131 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | ||
132 | |||
133 | /* Only used for 64 bit */ | ||
134 | #define _TIF_DO_NOTIFY_MASK \ | ||
135 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | ||
136 | |||
137 | /* flags to check in __switch_to() */ | ||
138 | #define _TIF_WORK_CTXSW \ | ||
139 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \ | ||
140 | _TIF_NOTSC) | ||
141 | |||
142 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | ||
143 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | ||
144 | |||
145 | #define PREEMPT_ACTIVE 0x10000000 | ||
146 | |||
147 | /* thread information allocation */ | ||
148 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
149 | #define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) | ||
150 | #else | ||
151 | #define THREAD_FLAGS GFP_KERNEL | ||
152 | #endif | ||
153 | |||
154 | #define alloc_thread_info(tsk) \ | ||
155 | ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) | ||
156 | |||
157 | #ifdef CONFIG_X86_32 | ||
158 | |||
159 | #define STACK_WARN (THREAD_SIZE/8) | ||
160 | /* | ||
161 | * macros/functions for gaining access to the thread information structure | ||
162 | * | ||
163 | * preempt_count needs to be 1 initially, until the scheduler is functional. | ||
164 | */ | ||
165 | #ifndef __ASSEMBLY__ | ||
166 | |||
167 | |||
168 | /* how to get the current stack pointer from C */ | ||
169 | register unsigned long current_stack_pointer asm("esp") __used; | ||
170 | |||
171 | /* how to get the thread information struct from C */ | ||
172 | static inline struct thread_info *current_thread_info(void) | ||
173 | { | ||
174 | return (struct thread_info *) | ||
175 | (current_stack_pointer & ~(THREAD_SIZE - 1)); | ||
176 | } | ||
177 | |||
178 | #else /* !__ASSEMBLY__ */ | ||
179 | |||
180 | /* how to get the thread information struct from ASM */ | ||
181 | #define GET_THREAD_INFO(reg) \ | ||
182 | movl $-THREAD_SIZE, reg; \ | ||
183 | andl %esp, reg | ||
184 | |||
185 | /* use this one if reg already contains %esp */ | ||
186 | #define GET_THREAD_INFO_WITH_ESP(reg) \ | ||
187 | andl $-THREAD_SIZE, reg | ||
188 | |||
189 | #endif | ||
190 | |||
191 | #else /* X86_32 */ | ||
192 | |||
193 | #include <asm/pda.h> | ||
194 | |||
195 | /* | ||
196 | * macros/functions for gaining access to the thread information structure | ||
197 | * preempt_count needs to be 1 initially, until the scheduler is functional. | ||
198 | */ | ||
199 | #ifndef __ASSEMBLY__ | ||
200 | static inline struct thread_info *current_thread_info(void) | ||
201 | { | ||
202 | struct thread_info *ti; | ||
203 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); | ||
204 | return ti; | ||
205 | } | ||
206 | |||
207 | /* do not use in interrupt context */ | ||
208 | static inline struct thread_info *stack_thread_info(void) | ||
209 | { | ||
210 | struct thread_info *ti; | ||
211 | asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); | ||
212 | return ti; | ||
213 | } | ||
214 | |||
215 | #else /* !__ASSEMBLY__ */ | ||
216 | |||
217 | /* how to get the thread information struct from ASM */ | ||
218 | #define GET_THREAD_INFO(reg) \ | ||
219 | movq %gs:pda_kernelstack,reg ; \ | ||
220 | subq $(THREAD_SIZE-PDA_STACKOFFSET),reg | ||
221 | |||
222 | #endif | ||
223 | |||
224 | #endif /* !X86_32 */ | ||
225 | |||
226 | /* | ||
227 | * Thread-synchronous status. | ||
228 | * | ||
229 | * This is different from the flags in that nobody else | ||
230 | * ever touches our thread-synchronous status, so we don't | ||
231 | * have to worry about atomic accesses. | ||
232 | */ | ||
233 | #define TS_USEDFPU 0x0001 /* FPU was used by this task | ||
234 | this quantum (SMP) */ | ||
235 | #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ | ||
236 | #define TS_POLLING 0x0004 /* true if in idle loop | ||
237 | and not sleeping */ | ||
238 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | ||
239 | |||
240 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
241 | |||
242 | #ifndef __ASSEMBLY__ | ||
243 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
244 | static inline void set_restore_sigmask(void) | ||
245 | { | ||
246 | struct thread_info *ti = current_thread_info(); | ||
247 | ti->status |= TS_RESTORE_SIGMASK; | ||
248 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); | ||
249 | } | ||
250 | #endif /* !__ASSEMBLY__ */ | ||
7 | 251 | ||
8 | #ifndef __ASSEMBLY__ | 252 | #ifndef __ASSEMBLY__ |
9 | extern void arch_task_cache_init(void); | 253 | extern void arch_task_cache_init(void); |
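The 32-bit current_thread_info() above and the GET_THREAD_INFO assembler macro encode the same invariant: thread_info sits at the bottom of a THREAD_SIZE-aligned kernel stack, so clearing the low bits of the stack pointer yields its address. A stand-alone illustration of that mask arithmetic (the THREAD_SIZE value and the sample stack pointer are made up for the example):

    #include <stdio.h>

    #define THREAD_SIZE 8192UL      /* illustrative; the real value depends on config */

    int main(void)
    {
            unsigned long sp   = 0xc03fbe4cUL;              /* pretend %esp */
            unsigned long base = sp & ~(THREAD_SIZE - 1);   /* what GET_THREAD_INFO computes */

            printf("stack pointer 0x%lx -> thread_info at 0x%lx\n", sp, base);
            return 0;
    }

The 64-bit side cannot use this trick for current_thread_info() because the stack pointer may be on an interrupt stack, which is why it reads the PDA instead and reserves the mask-based stack_thread_info() for process context.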
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h deleted file mode 100644 index b6338829d1a8..000000000000 --- a/include/asm-x86/thread_info_32.h +++ /dev/null | |||
@@ -1,205 +0,0 @@ | |||
1 | /* thread_info.h: i386 low-level thread information | ||
2 | * | ||
3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | ||
5 | */ | ||
6 | |||
7 | #ifndef _ASM_THREAD_INFO_H | ||
8 | #define _ASM_THREAD_INFO_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/compiler.h> | ||
13 | #include <asm/page.h> | ||
14 | |||
15 | #ifndef __ASSEMBLY__ | ||
16 | #include <asm/processor.h> | ||
17 | #endif | ||
18 | |||
19 | /* | ||
20 | * low level task data that entry.S needs immediate access to | ||
21 | * - this struct should fit entirely inside of one cache line | ||
22 | * - this struct shares the supervisor stack pages | ||
23 | * - if the contents of this structure are changed, | ||
24 | * the assembly constants must also be changed | ||
25 | */ | ||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | struct thread_info { | ||
29 | struct task_struct *task; /* main task structure */ | ||
30 | struct exec_domain *exec_domain; /* execution domain */ | ||
31 | unsigned long flags; /* low level flags */ | ||
32 | unsigned long status; /* thread-synchronous flags */ | ||
33 | __u32 cpu; /* current CPU */ | ||
34 | int preempt_count; /* 0 => preemptable, | ||
35 | <0 => BUG */ | ||
36 | mm_segment_t addr_limit; /* thread address space: | ||
37 | 0-0xBFFFFFFF user-thread | ||
38 | 0-0xFFFFFFFF kernel-thread | ||
39 | */ | ||
40 | void *sysenter_return; | ||
41 | struct restart_block restart_block; | ||
42 | unsigned long previous_esp; /* ESP of the previous stack in | ||
43 | case of nested (IRQ) stacks | ||
44 | */ | ||
45 | __u8 supervisor_stack[0]; | ||
46 | }; | ||
47 | |||
48 | #else /* !__ASSEMBLY__ */ | ||
49 | |||
50 | #include <asm/asm-offsets.h> | ||
51 | |||
52 | #endif | ||
53 | |||
54 | #define PREEMPT_ACTIVE 0x10000000 | ||
55 | #ifdef CONFIG_4KSTACKS | ||
56 | #define THREAD_SIZE (4096) | ||
57 | #else | ||
58 | #define THREAD_SIZE (8192) | ||
59 | #endif | ||
60 | |||
61 | #define STACK_WARN (THREAD_SIZE/8) | ||
62 | /* | ||
63 | * macros/functions for gaining access to the thread information structure | ||
64 | * | ||
65 | * preempt_count needs to be 1 initially, until the scheduler is functional. | ||
66 | */ | ||
67 | #ifndef __ASSEMBLY__ | ||
68 | |||
69 | #define INIT_THREAD_INFO(tsk) \ | ||
70 | { \ | ||
71 | .task = &tsk, \ | ||
72 | .exec_domain = &default_exec_domain, \ | ||
73 | .flags = 0, \ | ||
74 | .cpu = 0, \ | ||
75 | .preempt_count = 1, \ | ||
76 | .addr_limit = KERNEL_DS, \ | ||
77 | .restart_block = { \ | ||
78 | .fn = do_no_restart_syscall, \ | ||
79 | }, \ | ||
80 | } | ||
81 | |||
82 | #define init_thread_info (init_thread_union.thread_info) | ||
83 | #define init_stack (init_thread_union.stack) | ||
84 | |||
85 | |||
86 | /* how to get the current stack pointer from C */ | ||
87 | register unsigned long current_stack_pointer asm("esp") __used; | ||
88 | |||
89 | /* how to get the thread information struct from C */ | ||
90 | static inline struct thread_info *current_thread_info(void) | ||
91 | { | ||
92 | return (struct thread_info *) | ||
93 | (current_stack_pointer & ~(THREAD_SIZE - 1)); | ||
94 | } | ||
95 | |||
96 | /* thread information allocation */ | ||
97 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
98 | #define alloc_thread_info(tsk) ((struct thread_info *) \ | ||
99 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE))) | ||
100 | #else | ||
101 | #define alloc_thread_info(tsk) ((struct thread_info *) \ | ||
102 | __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) | ||
103 | #endif | ||
104 | |||
105 | #else /* !__ASSEMBLY__ */ | ||
106 | |||
107 | /* how to get the thread information struct from ASM */ | ||
108 | #define GET_THREAD_INFO(reg) \ | ||
109 | movl $-THREAD_SIZE, reg; \ | ||
110 | andl %esp, reg | ||
111 | |||
112 | /* use this one if reg already contains %esp */ | ||
113 | #define GET_THREAD_INFO_WITH_ESP(reg) \ | ||
114 | andl $-THREAD_SIZE, reg | ||
115 | |||
116 | #endif | ||
117 | |||
118 | /* | ||
119 | * thread information flags | ||
120 | * - these are process state flags that various | ||
121 | * assembly files may need to access | ||
122 | * - pending work-to-be-done flags are in LSW | ||
123 | * - other flags in MSW | ||
124 | */ | ||
125 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
126 | #define TIF_SIGPENDING 1 /* signal pending */ | ||
127 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | ||
128 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to | ||
129 | user mode */ | ||
130 | #define TIF_IRET 4 /* return with iret */ | ||
131 | #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ | ||
132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ | ||
133 | #define TIF_SECCOMP 7 /* secure computing */ | ||
134 | #define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */ | ||
135 | #define TIF_MEMDIE 16 | ||
136 | #define TIF_DEBUG 17 /* uses debug registers */ | ||
137 | #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ | ||
138 | #define TIF_FREEZE 19 /* is freezing for suspend */ | ||
139 | #define TIF_NOTSC 20 /* TSC is not accessible in userland */ | ||
140 | #define TIF_FORCED_TF 21 /* true if TF in eflags artificially */ | ||
141 | #define TIF_DEBUGCTLMSR 22 /* uses thread_struct.debugctlmsr */ | ||
142 | #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ | ||
143 | #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ | ||
144 | |||
145 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | ||
146 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | ||
147 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | ||
148 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
149 | #define _TIF_IRET (1 << TIF_IRET) | ||
150 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | ||
151 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | ||
152 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | ||
153 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) | ||
154 | #define _TIF_DEBUG (1 << TIF_DEBUG) | ||
155 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | ||
156 | #define _TIF_FREEZE (1 << TIF_FREEZE) | ||
157 | #define _TIF_NOTSC (1 << TIF_NOTSC) | ||
158 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) | ||
159 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | ||
160 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | ||
161 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) | ||
162 | |||
163 | /* work to do on interrupt/exception return */ | ||
164 | #define _TIF_WORK_MASK \ | ||
165 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | ||
166 | _TIF_SECCOMP | _TIF_SYSCALL_EMU)) | ||
167 | /* work to do on any return to u-space */ | ||
168 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | ||
169 | |||
170 | /* flags to check in __switch_to() */ | ||
171 | #define _TIF_WORK_CTXSW \ | ||
172 | (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ | ||
173 | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) | ||
174 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | ||
175 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) | ||
176 | |||
177 | |||
178 | /* | ||
179 | * Thread-synchronous status. | ||
180 | * | ||
181 | * This is different from the flags in that nobody else | ||
182 | * ever touches our thread-synchronous status, so we don't | ||
183 | * have to worry about atomic accesses. | ||
184 | */ | ||
185 | #define TS_USEDFPU 0x0001 /* FPU was used by this task | ||
186 | this quantum (SMP) */ | ||
187 | #define TS_POLLING 0x0002 /* True if in idle loop | ||
188 | and not sleeping */ | ||
189 | #define TS_RESTORE_SIGMASK 0x0004 /* restore signal mask in do_signal() */ | ||
190 | |||
191 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
192 | |||
193 | #ifndef __ASSEMBLY__ | ||
194 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
195 | static inline void set_restore_sigmask(void) | ||
196 | { | ||
197 | struct thread_info *ti = current_thread_info(); | ||
198 | ti->status |= TS_RESTORE_SIGMASK; | ||
199 | set_bit(TIF_SIGPENDING, &ti->flags); | ||
200 | } | ||
201 | #endif /* !__ASSEMBLY__ */ | ||
202 | |||
203 | #endif /* __KERNEL__ */ | ||
204 | |||
205 | #endif /* _ASM_THREAD_INFO_H */ | ||
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h deleted file mode 100644 index cb69f70abba1..000000000000 --- a/include/asm-x86/thread_info_64.h +++ /dev/null | |||
@@ -1,195 +0,0 @@ | |||
1 | /* thread_info.h: x86_64 low-level thread information | ||
2 | * | ||
3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | ||
5 | */ | ||
6 | |||
7 | #ifndef _ASM_THREAD_INFO_H | ||
8 | #define _ASM_THREAD_INFO_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <asm/page.h> | ||
13 | #include <asm/types.h> | ||
14 | #include <asm/pda.h> | ||
15 | |||
16 | /* | ||
17 | * low level task data that entry.S needs immediate access to | ||
18 | * - this struct should fit entirely inside of one cache line | ||
19 | * - this struct shares the supervisor stack pages | ||
20 | */ | ||
21 | #ifndef __ASSEMBLY__ | ||
22 | struct task_struct; | ||
23 | struct exec_domain; | ||
24 | #include <asm/processor.h> | ||
25 | |||
26 | struct thread_info { | ||
27 | struct task_struct *task; /* main task structure */ | ||
28 | struct exec_domain *exec_domain; /* execution domain */ | ||
29 | __u32 flags; /* low level flags */ | ||
30 | __u32 status; /* thread synchronous flags */ | ||
31 | __u32 cpu; /* current CPU */ | ||
32 | int preempt_count; /* 0 => preemptable, | ||
33 | <0 => BUG */ | ||
34 | mm_segment_t addr_limit; | ||
35 | struct restart_block restart_block; | ||
36 | #ifdef CONFIG_IA32_EMULATION | ||
37 | void __user *sysenter_return; | ||
38 | #endif | ||
39 | }; | ||
40 | #endif | ||
41 | |||
42 | /* | ||
43 | * macros/functions for gaining access to the thread information structure | ||
44 | * preempt_count needs to be 1 initially, until the scheduler is functional. | ||
45 | */ | ||
46 | #ifndef __ASSEMBLY__ | ||
47 | #define INIT_THREAD_INFO(tsk) \ | ||
48 | { \ | ||
49 | .task = &tsk, \ | ||
50 | .exec_domain = &default_exec_domain, \ | ||
51 | .flags = 0, \ | ||
52 | .cpu = 0, \ | ||
53 | .preempt_count = 1, \ | ||
54 | .addr_limit = KERNEL_DS, \ | ||
55 | .restart_block = { \ | ||
56 | .fn = do_no_restart_syscall, \ | ||
57 | }, \ | ||
58 | } | ||
59 | |||
60 | #define init_thread_info (init_thread_union.thread_info) | ||
61 | #define init_stack (init_thread_union.stack) | ||
62 | |||
63 | static inline struct thread_info *current_thread_info(void) | ||
64 | { | ||
65 | struct thread_info *ti; | ||
66 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); | ||
67 | return ti; | ||
68 | } | ||
69 | |||
70 | /* do not use in interrupt context */ | ||
71 | static inline struct thread_info *stack_thread_info(void) | ||
72 | { | ||
73 | struct thread_info *ti; | ||
74 | asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); | ||
75 | return ti; | ||
76 | } | ||
77 | |||
78 | /* thread information allocation */ | ||
79 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
80 | #define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) | ||
81 | #else | ||
82 | #define THREAD_FLAGS GFP_KERNEL | ||
83 | #endif | ||
84 | |||
85 | #define alloc_thread_info(tsk) \ | ||
86 | ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) | ||
87 | |||
88 | #else /* !__ASSEMBLY__ */ | ||
89 | |||
90 | /* how to get the thread information struct from ASM */ | ||
91 | #define GET_THREAD_INFO(reg) \ | ||
92 | movq %gs:pda_kernelstack,reg ; \ | ||
93 | subq $(THREAD_SIZE-PDA_STACKOFFSET),reg | ||
94 | |||
95 | #endif | ||
96 | |||
97 | /* | ||
98 | * thread information flags | ||
99 | * - these are process state flags that various assembly files | ||
100 | * may need to access | ||
101 | * - pending work-to-be-done flags are in LSW | ||
102 | * - other flags in MSW | ||
103 | * Warning: layout of LSW is hardcoded in entry.S | ||
104 | */ | ||
105 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
106 | #define TIF_SIGPENDING 2 /* signal pending */ | ||
107 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | ||
108 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | ||
109 | #define TIF_IRET 5 /* force IRET */ | ||
110 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | ||
111 | #define TIF_SECCOMP 8 /* secure computing */ | ||
112 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | ||
113 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | ||
114 | /* 16 free */ | ||
115 | #define TIF_IA32 17 /* 32bit process */ | ||
116 | #define TIF_FORK 18 /* ret_from_fork */ | ||
117 | #define TIF_ABI_PENDING 19 | ||
118 | #define TIF_MEMDIE 20 | ||
119 | #define TIF_DEBUG 21 /* uses debug registers */ | ||
120 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | ||
121 | #define TIF_FREEZE 23 /* is freezing for suspend */ | ||
122 | #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ | ||
123 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | ||
124 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | ||
125 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ | ||
126 | #define TIF_NOTSC 28 /* TSC is not accessible in userland */ | ||
127 | |||
128 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | ||
129 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | ||
130 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
131 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | ||
132 | #define _TIF_IRET (1 << TIF_IRET) | ||
133 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | ||
134 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | ||
135 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | ||
136 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) | ||
137 | #define _TIF_IA32 (1 << TIF_IA32) | ||
138 | #define _TIF_FORK (1 << TIF_FORK) | ||
139 | #define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) | ||
140 | #define _TIF_DEBUG (1 << TIF_DEBUG) | ||
141 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | ||
142 | #define _TIF_FREEZE (1 << TIF_FREEZE) | ||
143 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) | ||
144 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | ||
145 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | ||
146 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) | ||
147 | #define _TIF_NOTSC (1 << TIF_NOTSC) | ||
148 | |||
149 | /* work to do on interrupt/exception return */ | ||
150 | #define _TIF_WORK_MASK \ | ||
151 | (0x0000FFFF & \ | ||
152 | ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP)) | ||
153 | /* work to do on any return to user space */ | ||
154 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | ||
155 | |||
156 | #define _TIF_DO_NOTIFY_MASK \ | ||
157 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | ||
158 | |||
159 | /* flags to check in __switch_to() */ | ||
160 | #define _TIF_WORK_CTXSW \ | ||
161 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC) | ||
162 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | ||
163 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | ||
164 | |||
165 | #define PREEMPT_ACTIVE 0x10000000 | ||
166 | |||
167 | /* | ||
168 | * Thread-synchronous status. | ||
169 | * | ||
170 | * This is different from the flags in that nobody else | ||
171 | * ever touches our thread-synchronous status, so we don't | ||
172 | * have to worry about atomic accesses. | ||
173 | */ | ||
174 | #define TS_USEDFPU 0x0001 /* FPU was used by this task | ||
175 | this quantum (SMP) */ | ||
176 | #define TS_COMPAT 0x0002 /* 32bit syscall active */ | ||
177 | #define TS_POLLING 0x0004 /* true if in idle loop | ||
178 | and not sleeping */ | ||
179 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | ||
180 | |||
181 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
182 | |||
183 | #ifndef __ASSEMBLY__ | ||
184 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
185 | static inline void set_restore_sigmask(void) | ||
186 | { | ||
187 | struct thread_info *ti = current_thread_info(); | ||
188 | ti->status |= TS_RESTORE_SIGMASK; | ||
189 | set_bit(TIF_SIGPENDING, &ti->flags); | ||
190 | } | ||
191 | #endif /* !__ASSEMBLY__ */ | ||
192 | |||
193 | #endif /* __KERNEL__ */ | ||
194 | |||
195 | #endif /* _ASM_THREAD_INFO_H */ | ||
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index fe26e36d0f51..9c1a4a3470d9 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h | |||
@@ -290,7 +290,7 @@ __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) | |||
290 | #define __NR_rt_sigqueueinfo 129 | 290 | #define __NR_rt_sigqueueinfo 129 |
291 | __SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) | 291 | __SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) |
292 | #define __NR_rt_sigsuspend 130 | 292 | #define __NR_rt_sigsuspend 130 |
293 | __SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend) | 293 | __SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) |
294 | #define __NR_sigaltstack 131 | 294 | #define __NR_sigaltstack 131 |
295 | __SYSCALL(__NR_sigaltstack, stub_sigaltstack) | 295 | __SYSCALL(__NR_sigaltstack, stub_sigaltstack) |
296 | #define __NR_utime 132 | 296 | #define __NR_utime 132 |
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index 074b357146df..5ce351325e01 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h | |||
@@ -14,12 +14,6 @@ | |||
14 | 14 | ||
15 | #include <asm/processor-flags.h> | 15 | #include <asm/processor-flags.h> |
16 | 16 | ||
17 | #ifdef CONFIG_VM86 | ||
18 | #define X86_VM_MASK X86_EFLAGS_VM | ||
19 | #else | ||
20 | #define X86_VM_MASK 0 /* No VM86 support */ | ||
21 | #endif | ||
22 | |||
23 | #define BIOSSEG 0x0f000 | 17 | #define BIOSSEG 0x0f000 |
24 | 18 | ||
25 | #define CPU_086 0 | 19 | #define CPU_086 0 |
@@ -121,7 +115,6 @@ struct vm86plus_info_struct { | |||
121 | unsigned long is_vm86pus:1; /* for vm86 internal use */ | 115 | unsigned long is_vm86pus:1; /* for vm86 internal use */ |
122 | unsigned char vm86dbg_intxxtab[32]; /* for debugger */ | 116 | unsigned char vm86dbg_intxxtab[32]; /* for debugger */ |
123 | }; | 117 | }; |
124 | |||
125 | struct vm86plus_struct { | 118 | struct vm86plus_struct { |
126 | struct vm86_regs regs; | 119 | struct vm86_regs regs; |
127 | unsigned long flags; | 120 | unsigned long flags; |
@@ -133,6 +126,9 @@ struct vm86plus_struct { | |||
133 | }; | 126 | }; |
134 | 127 | ||
135 | #ifdef __KERNEL__ | 128 | #ifdef __KERNEL__ |
129 | |||
130 | #include <asm/ptrace.h> | ||
131 | |||
136 | /* | 132 | /* |
137 | * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 | 133 | * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 |
138 | * mode - the main change is that the old segment descriptors aren't | 134 | * mode - the main change is that the old segment descriptors aren't |
@@ -141,7 +137,6 @@ struct vm86plus_struct { | |||
141 | * at the end of the structure. Look at ptrace.h to see the "normal" | 137 | * at the end of the structure. Look at ptrace.h to see the "normal" |
142 | * setup. For user space layout see 'struct vm86_regs' above. | 138 | * setup. For user space layout see 'struct vm86_regs' above. |
143 | */ | 139 | */ |
144 | #include <asm/ptrace.h> | ||
145 | 140 | ||
146 | struct kernel_vm86_regs { | 141 | struct kernel_vm86_regs { |
147 | /* | 142 | /* |
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h index c2ccd997ed35..2a4f9b41d684 100644 --- a/include/asm-x86/xen/hypercall.h +++ b/include/asm-x86/xen/hypercall.h | |||
@@ -176,9 +176,9 @@ HYPERVISOR_fpu_taskswitch(int set) | |||
176 | } | 176 | } |
177 | 177 | ||
178 | static inline int | 178 | static inline int |
179 | HYPERVISOR_sched_op(int cmd, unsigned long arg) | 179 | HYPERVISOR_sched_op(int cmd, void *arg) |
180 | { | 180 | { |
181 | return _hypercall2(int, sched_op, cmd, arg); | 181 | return _hypercall2(int, sched_op_new, cmd, arg); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline long | 184 | static inline long |
@@ -315,6 +315,13 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg) | |||
315 | } | 315 | } |
316 | 316 | ||
317 | static inline void | 317 | static inline void |
318 | MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) | ||
319 | { | ||
320 | mcl->op = __HYPERVISOR_fpu_taskswitch; | ||
321 | mcl->args[0] = set; | ||
322 | } | ||
323 | |||
324 | static inline void | ||
318 | MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, | 325 | MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, |
319 | pte_t new_val, unsigned long flags) | 326 | pte_t new_val, unsigned long flags) |
320 | { | 327 | { |
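The new MULTI_fpu_taskswitch() helper only fills in a multicall_entry; like the other MULTI_* helpers it is meant to be queued and later flushed as part of a batch. A hedged sketch of a caller, assuming the plain HYPERVISOR_multicall() wrapper is used for the flush (real callers go through the paravirt multicall queue instead):

    #include <linux/kernel.h>
    #include <asm/xen/hypercall.h>

    /* Sketch only: queue a single fpu_taskswitch and flush it explicitly. */
    static void xen_set_ts_example(void)
    {
            struct multicall_entry mc;

            MULTI_fpu_taskswitch(&mc, 1);           /* ask the hypervisor to set CR0.TS */

            if (HYPERVISOR_multicall(&mc, 1) != 0)  /* assumed flush primitive */
                    BUG();
    }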
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h index baf3a4dce28c..377c04591c15 100644 --- a/include/asm-x86/xen/page.h +++ b/include/asm-x86/xen/page.h | |||
@@ -26,15 +26,20 @@ typedef struct xpaddr { | |||
26 | #define FOREIGN_FRAME_BIT (1UL<<31) | 26 | #define FOREIGN_FRAME_BIT (1UL<<31) |
27 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) | 27 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) |
28 | 28 | ||
29 | extern unsigned long *phys_to_machine_mapping; | 29 | /* Maximum amount of memory we can handle in a domain in pages */ |
30 | #define MAX_DOMAIN_PAGES \ | ||
31 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) | ||
32 | |||
33 | |||
34 | extern unsigned long get_phys_to_machine(unsigned long pfn); | ||
35 | extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn); | ||
30 | 36 | ||
31 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | 37 | static inline unsigned long pfn_to_mfn(unsigned long pfn) |
32 | { | 38 | { |
33 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 39 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
34 | return pfn; | 40 | return pfn; |
35 | 41 | ||
36 | return phys_to_machine_mapping[(unsigned int)(pfn)] & | 42 | return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT; |
37 | ~FOREIGN_FRAME_BIT; | ||
38 | } | 43 | } |
39 | 44 | ||
40 | static inline int phys_to_machine_mapping_valid(unsigned long pfn) | 45 | static inline int phys_to_machine_mapping_valid(unsigned long pfn) |
@@ -42,7 +47,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) | |||
42 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 47 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
43 | return 1; | 48 | return 1; |
44 | 49 | ||
45 | return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); | 50 | return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; |
46 | } | 51 | } |
47 | 52 | ||
48 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | 53 | static inline unsigned long mfn_to_pfn(unsigned long mfn) |
@@ -106,20 +111,12 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) | |||
106 | unsigned long pfn = mfn_to_pfn(mfn); | 111 | unsigned long pfn = mfn_to_pfn(mfn); |
107 | if ((pfn < max_mapnr) | 112 | if ((pfn < max_mapnr) |
108 | && !xen_feature(XENFEAT_auto_translated_physmap) | 113 | && !xen_feature(XENFEAT_auto_translated_physmap) |
109 | && (phys_to_machine_mapping[pfn] != mfn)) | 114 | && (get_phys_to_machine(pfn) != mfn)) |
110 | return max_mapnr; /* force !pfn_valid() */ | 115 | return max_mapnr; /* force !pfn_valid() */ |
116 | /* XXX fixme; not true with sparsemem */ | ||
111 | return pfn; | 117 | return pfn; |
112 | } | 118 | } |
113 | 119 | ||
114 | static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
115 | { | ||
116 | if (xen_feature(XENFEAT_auto_translated_physmap)) { | ||
117 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
118 | return; | ||
119 | } | ||
120 | phys_to_machine_mapping[pfn] = mfn; | ||
121 | } | ||
122 | |||
123 | /* VIRT <-> MACHINE conversion */ | 120 | /* VIRT <-> MACHINE conversion */ |
124 | #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) | 121 | #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) |
125 | #define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v)))) | 122 | #define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v)))) |
@@ -150,13 +147,9 @@ static inline pte_t __pte_ma(pteval_t x) | |||
150 | return (pte_t) { .pte = x }; | 147 | return (pte_t) { .pte = x }; |
151 | } | 148 | } |
152 | 149 | ||
153 | #ifdef CONFIG_X86_PAE | ||
154 | #define pmd_val_ma(v) ((v).pmd) | 150 | #define pmd_val_ma(v) ((v).pmd) |
155 | #define pud_val_ma(v) ((v).pgd.pgd) | 151 | #define pud_val_ma(v) ((v).pgd.pgd) |
156 | #define __pmd_ma(x) ((pmd_t) { (x) } ) | 152 | #define __pmd_ma(x) ((pmd_t) { (x) } ) |
157 | #else /* !X86_PAE */ | ||
158 | #define pmd_val_ma(v) ((v).pud.pgd.pgd) | ||
159 | #endif /* CONFIG_X86_PAE */ | ||
160 | 153 | ||
161 | #define pgd_val_ma(x) ((x).pgd) | 154 | #define pgd_val_ma(x) ((x).pgd) |
162 | 155 | ||
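Hiding the p2m table behind get_phys_to_machine()/set_phys_to_machine() means the callers in page.h no longer care how the table is stored or how large it is (MAX_DOMAIN_PAGES now bounds it). Purely as an illustration of the interface, not the real implementation in arch/x86/xen/mmu.c, a flat-array backing could look like this:

    #include <asm/xen/page.h>

    #define EXAMPLE_MAX_PFN (64UL * 1024)   /* illustrative bound, not MAX_DOMAIN_PAGES */

    static unsigned long example_p2m[EXAMPLE_MAX_PFN];

    unsigned long get_phys_to_machine(unsigned long pfn)
    {
            if (pfn >= EXAMPLE_MAX_PFN)
                    return INVALID_P2M_ENTRY;       /* no mapping known */
            return example_p2m[pfn];
    }

    void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
    {
            if (pfn < EXAMPLE_MAX_PFN)
                    example_p2m[pfn] = mfn;
    }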
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index 067b5c1835a3..921b45840449 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h | |||
@@ -1,3 +1,6 @@ | |||
1 | #ifndef ASM_X86__XOR_32_H | ||
2 | #define ASM_X86__XOR_32_H | ||
3 | |||
1 | /* | 4 | /* |
2 | * Optimized RAID-5 checksumming functions for MMX and SSE. | 5 | * Optimized RAID-5 checksumming functions for MMX and SSE. |
3 | * | 6 | * |
@@ -881,3 +884,5 @@ do { \ | |||
881 | deals with a load to a line that is being prefetched. */ | 884 | deals with a load to a line that is being prefetched. */ |
882 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | 885 | #define XOR_SELECT_TEMPLATE(FASTEST) \ |
883 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) | 886 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) |
887 | |||
888 | #endif /* ASM_X86__XOR_32_H */ | ||
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index 24957e39ac8a..2d3a18de295b 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h | |||
@@ -1,3 +1,6 @@ | |||
1 | #ifndef ASM_X86__XOR_64_H | ||
2 | #define ASM_X86__XOR_64_H | ||
3 | |||
1 | /* | 4 | /* |
2 | * Optimized RAID-5 checksumming functions for MMX and SSE. | 5 | * Optimized RAID-5 checksumming functions for MMX and SSE. |
3 | * | 6 | * |
@@ -354,3 +357,5 @@ do { \ | |||
354 | We may also be able to load into the L1 only depending on how the cpu | 357 | We may also be able to load into the L1 only depending on how the cpu |
355 | deals with a load to a line that is being prefetched. */ | 358 | deals with a load to a line that is being prefetched. */ |
356 | #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) | 359 | #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) |
360 | |||
361 | #endif /* ASM_X86__XOR_64_H */ | ||
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 93b98856007a..71d70d1fbce2 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -166,6 +166,9 @@ unifdef-y += acct.h | |||
166 | unifdef-y += adb.h | 166 | unifdef-y += adb.h |
167 | unifdef-y += adfs_fs.h | 167 | unifdef-y += adfs_fs.h |
168 | unifdef-y += agpgart.h | 168 | unifdef-y += agpgart.h |
169 | ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),) | ||
170 | unifdef-y += a.out.h | ||
171 | endif | ||
169 | unifdef-y += apm_bios.h | 172 | unifdef-y += apm_bios.h |
170 | unifdef-y += atalk.h | 173 | unifdef-y += atalk.h |
171 | unifdef-y += atmdev.h | 174 | unifdef-y += atmdev.h |
diff --git a/include/linux/a.out.h b/include/linux/a.out.h index 208f4e8ed304..e86dfca44589 100644 --- a/include/linux/a.out.h +++ b/include/linux/a.out.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef __A_OUT_GNU_H__ | 1 | #ifndef __A_OUT_GNU_H__ |
2 | #define __A_OUT_GNU_H__ | 2 | #define __A_OUT_GNU_H__ |
3 | 3 | ||
4 | #ifdef CONFIG_ARCH_SUPPORTS_AOUT | ||
5 | |||
6 | #define __GNU_EXEC_MACROS__ | 4 | #define __GNU_EXEC_MACROS__ |
7 | 5 | ||
8 | #ifndef __STRUCT_EXEC_OVERRIDE__ | 6 | #ifndef __STRUCT_EXEC_OVERRIDE__ |
@@ -277,10 +275,4 @@ struct relocation_info | |||
277 | #endif /* no N_RELOCATION_INFO_DECLARED. */ | 275 | #endif /* no N_RELOCATION_INFO_DECLARED. */ |
278 | 276 | ||
279 | #endif /*__ASSEMBLY__ */ | 277 | #endif /*__ASSEMBLY__ */ |
280 | #else /* CONFIG_ARCH_SUPPORTS_AOUT */ | ||
281 | #ifndef __ASSEMBLY__ | ||
282 | struct exec { | ||
283 | }; | ||
284 | #endif | ||
285 | #endif /* CONFIG_ARCH_SUPPORTS_AOUT */ | ||
286 | #endif /* __A_OUT_GNU_H__ */ | 278 | #endif /* __A_OUT_GNU_H__ */ |
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 661d90d6cf7c..972b12bcfb36 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h | |||
@@ -30,14 +30,6 @@ | |||
30 | #ifndef _AGP_BACKEND_H | 30 | #ifndef _AGP_BACKEND_H |
31 | #define _AGP_BACKEND_H 1 | 31 | #define _AGP_BACKEND_H 1 |
32 | 32 | ||
33 | #ifndef TRUE | ||
34 | #define TRUE 1 | ||
35 | #endif | ||
36 | |||
37 | #ifndef FALSE | ||
38 | #define FALSE 0 | ||
39 | #endif | ||
40 | |||
41 | enum chipset_type { | 33 | enum chipset_type { |
42 | NOT_SUPPORTED, | 34 | NOT_SUPPORTED, |
43 | SUPPORTED, | 35 | SUPPORTED, |
@@ -57,7 +49,7 @@ struct agp_kern_info { | |||
57 | size_t aper_size; | 49 | size_t aper_size; |
58 | int max_memory; /* In pages */ | 50 | int max_memory; /* In pages */ |
59 | int current_memory; | 51 | int current_memory; |
60 | int cant_use_aperture; | 52 | bool cant_use_aperture; |
61 | unsigned long page_mask; | 53 | unsigned long page_mask; |
62 | struct vm_operations_struct *vm_ops; | 54 | struct vm_operations_struct *vm_ops; |
63 | }; | 55 | }; |
@@ -83,9 +75,9 @@ struct agp_memory { | |||
83 | off_t pg_start; | 75 | off_t pg_start; |
84 | u32 type; | 76 | u32 type; |
85 | u32 physical; | 77 | u32 physical; |
86 | u8 is_bound; | 78 | bool is_bound; |
87 | u8 is_flushed; | 79 | bool is_flushed; |
88 | u8 vmalloc_flag; | 80 | bool vmalloc_flag; |
89 | }; | 81 | }; |
90 | 82 | ||
91 | #define AGP_NORMAL_MEMORY 0 | 83 | #define AGP_NORMAL_MEMORY 0 |
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h index 62aef589eb94..c8fdb6e658e1 100644 --- a/include/linux/agpgart.h +++ b/include/linux/agpgart.h | |||
@@ -206,8 +206,8 @@ struct agp_front_data { | |||
206 | struct agp_controller *current_controller; | 206 | struct agp_controller *current_controller; |
207 | struct agp_controller *controllers; | 207 | struct agp_controller *controllers; |
208 | struct agp_file_private *file_priv_list; | 208 | struct agp_file_private *file_priv_list; |
209 | u8 used_by_controller; | 209 | bool used_by_controller; |
210 | u8 backend_acquired; | 210 | bool backend_acquired; |
211 | }; | 211 | }; |
212 | 212 | ||
213 | #endif /* __KERNEL__ */ | 213 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/audit.h b/include/linux/audit.h index 63c3bb98558f..8b82974bdc12 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -571,7 +571,7 @@ extern void audit_log_lost(const char *message); | |||
571 | extern int audit_update_lsm_rules(void); | 571 | extern int audit_update_lsm_rules(void); |
572 | 572 | ||
573 | /* Private API (for audit.c only) */ | 573 | /* Private API (for audit.c only) */ |
574 | extern int audit_filter_user(struct netlink_skb_parms *cb, int type); | 574 | extern int audit_filter_user(struct netlink_skb_parms *cb); |
575 | extern int audit_filter_type(int type); | 575 | extern int audit_filter_type(int type); |
576 | extern int audit_receive_filter(int type, int pid, int uid, int seq, | 576 | extern int audit_receive_filter(int type, int pid, int uid, int seq, |
577 | void *data, size_t datasz, uid_t loginuid, | 577 | void *data, size_t datasz, uid_t loginuid, |
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 6a5dbdc8a7dc..686895bacd9d 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -94,7 +94,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, | |||
94 | unsigned long freepfn, | 94 | unsigned long freepfn, |
95 | unsigned long startpfn, | 95 | unsigned long startpfn, |
96 | unsigned long endpfn); | 96 | unsigned long endpfn); |
97 | extern void reserve_bootmem_node(pg_data_t *pgdat, | 97 | extern int reserve_bootmem_node(pg_data_t *pgdat, |
98 | unsigned long physaddr, | 98 | unsigned long physaddr, |
99 | unsigned long size, | 99 | unsigned long size, |
100 | int flags); | 100 | int flags); |
diff --git a/include/linux/capability.h b/include/linux/capability.h index fa830f8de032..02673846d205 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -501,6 +501,8 @@ extern const kernel_cap_t __cap_empty_set; | |||
501 | extern const kernel_cap_t __cap_full_set; | 501 | extern const kernel_cap_t __cap_full_set; |
502 | extern const kernel_cap_t __cap_init_eff_set; | 502 | extern const kernel_cap_t __cap_init_eff_set; |
503 | 503 | ||
504 | kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); | ||
505 | |||
504 | int capable(int cap); | 506 | int capable(int cap); |
505 | int __capable(struct task_struct *t, int cap); | 507 | int __capable(struct task_struct *t, int cap); |
506 | 508 | ||
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h index 1605dd8aa646..6f9f19d66591 100644 --- a/include/linux/cfag12864b.h +++ b/include/linux/cfag12864b.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Description: cfag12864b LCD driver header | 4 | * Description: cfag12864b LCD driver header |
5 | * License: GPLv2 | 5 | * License: GPLv2 |
6 | * | 6 | * |
7 | * Author: Copyright (C) Miguel Ojeda Sandonis <maxextreme@gmail.com> | 7 | * Author: Copyright (C) Miguel Ojeda Sandonis |
8 | * Date: 2006-10-12 | 8 | * Date: 2006-10-12 |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
diff --git a/include/linux/console.h b/include/linux/console.h index a4f27fbdf549..248e6e3b9b73 100644 --- a/include/linux/console.h +++ b/include/linux/console.h | |||
@@ -108,6 +108,8 @@ struct console { | |||
108 | struct console *next; | 108 | struct console *next; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | extern int console_set_on_cmdline; | ||
112 | |||
111 | extern int add_preferred_console(char *name, int idx, char *options); | 113 | extern int add_preferred_console(char *name, int idx, char *options); |
112 | extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); | 114 | extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); |
113 | extern void register_console(struct console *); | 115 | extern void register_console(struct console *); |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 5df3db58fcc6..c24875bd9c5b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -353,6 +353,10 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, | |||
353 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) | 353 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
354 | #endif /* NR_CPUS */ | 354 | #endif /* NR_CPUS */ |
355 | 355 | ||
356 | #define next_cpu_nr(n, src) next_cpu(n, src) | ||
357 | #define cpus_weight_nr(cpumask) cpus_weight(cpumask) | ||
358 | #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) | ||
359 | |||
356 | /* | 360 | /* |
357 | * The following particular system cpumasks and operations manage | 361 | * The following particular system cpumasks and operations manage |
358 | * possible, present and online cpus. Each of them is a fixed size | 362 | * possible, present and online cpus. Each of them is a fixed size |
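The *_nr aliases added above are one-to-one wrappers for now; they let callers be written against the later "iterate only up to nr_cpu_ids" interface before it exists. Usage is identical to the plain iterators, as in this trivial sketch:

    #include <linux/cpumask.h>

    /* Count online CPUs with the new iterator alias. */
    static int count_online_cpus(void)
    {
            int cpu, n = 0;

            for_each_cpu_mask_nr(cpu, cpu_online_map)
                    n++;
            return n;
    }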
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 2a6639407c80..d982eb89c77d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -300,7 +300,7 @@ extern int d_validate(struct dentry *, struct dentry *); | |||
300 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | 300 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); |
301 | 301 | ||
302 | extern char *__d_path(const struct path *path, struct path *root, char *, int); | 302 | extern char *__d_path(const struct path *path, struct path *root, char *, int); |
303 | extern char *d_path(struct path *, char *, int); | 303 | extern char *d_path(const struct path *, char *, int); |
304 | extern char *dentry_path(struct dentry *, char *, int); | 304 | extern char *dentry_path(struct dentry *, char *, int); |
305 | 305 | ||
306 | /* Allocation counts.. */ | 306 | /* Allocation counts.. */ |
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index f4a5871767f5..4aaa4afb1cb9 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_DEBUG_LOCKING_H | 1 | #ifndef __LINUX_DEBUG_LOCKING_H |
2 | #define __LINUX_DEBUG_LOCKING_H | 2 | #define __LINUX_DEBUG_LOCKING_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | ||
5 | |||
4 | struct task_struct; | 6 | struct task_struct; |
5 | 7 | ||
6 | extern int debug_locks; | 8 | extern int debug_locks; |
@@ -11,14 +13,6 @@ extern int debug_locks_silent; | |||
11 | */ | 13 | */ |
12 | extern int debug_locks_off(void); | 14 | extern int debug_locks_off(void); |
13 | 15 | ||
14 | /* | ||
15 | * In the debug case we carry the caller's instruction pointer into | ||
16 | * other functions, but we dont want the function argument overhead | ||
17 | * in the nondebug case - hence these macros: | ||
18 | */ | ||
19 | #define _RET_IP_ (unsigned long)__builtin_return_address(0) | ||
20 | #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) | ||
21 | |||
22 | #define DEBUG_LOCKS_WARN_ON(c) \ | 16 | #define DEBUG_LOCKS_WARN_ON(c) \ |
23 | ({ \ | 17 | ({ \ |
24 | int __ret = 0; \ | 18 | int __ret = 0; \ |
diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 4d10c7328d2d..6c7eff2ebada 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h | |||
@@ -13,7 +13,7 @@ struct firmware { | |||
13 | 13 | ||
14 | struct device; | 14 | struct device; |
15 | 15 | ||
16 | #if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE) | 16 | #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) |
17 | int request_firmware(const struct firmware **fw, const char *name, | 17 | int request_firmware(const struct firmware **fw, const char *name, |
18 | struct device *device); | 18 | struct device *device); |
19 | int request_firmware_nowait( | 19 | int request_firmware_nowait( |
diff --git a/include/linux/fs.h b/include/linux/fs.h index d490779f18d9..d8e2762ed14d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -83,6 +83,7 @@ extern int dir_notify_enable; | |||
83 | #define READ_SYNC (READ | (1 << BIO_RW_SYNC)) | 83 | #define READ_SYNC (READ | (1 << BIO_RW_SYNC)) |
84 | #define READ_META (READ | (1 << BIO_RW_META)) | 84 | #define READ_META (READ | (1 << BIO_RW_META)) |
85 | #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) | 85 | #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) |
86 | #define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) | ||
86 | #define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) | 87 | #define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) |
87 | 88 | ||
88 | #define SEL_IN 1 | 89 | #define SEL_IN 1 |
@@ -894,8 +895,6 @@ static inline int file_check_writeable(struct file *filp) | |||
894 | typedef struct files_struct *fl_owner_t; | 895 | typedef struct files_struct *fl_owner_t; |
895 | 896 | ||
896 | struct file_lock_operations { | 897 | struct file_lock_operations { |
897 | void (*fl_insert)(struct file_lock *); /* lock insertion callback */ | ||
898 | void (*fl_remove)(struct file_lock *); /* lock removal callback */ | ||
899 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); | 898 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); |
900 | void (*fl_release_private)(struct file_lock *); | 899 | void (*fl_release_private)(struct file_lock *); |
901 | }; | 900 | }; |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index fb9af6a0fe9c..8dc730132192 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -171,7 +171,7 @@ struct i2c_client { | |||
171 | struct i2c_adapter *adapter; /* the adapter we sit on */ | 171 | struct i2c_adapter *adapter; /* the adapter we sit on */ |
172 | struct i2c_driver *driver; /* and our access routines */ | 172 | struct i2c_driver *driver; /* and our access routines */ |
173 | struct device dev; /* the device structure */ | 173 | struct device dev; /* the device structure */ |
174 | int irq; /* irq issued by device (or -1) */ | 174 | int irq; /* irq issued by device */ |
175 | struct list_head list; /* DEPRECATED */ | 175 | struct list_head list; /* DEPRECATED */ |
176 | struct completion released; | 176 | struct completion released; |
177 | }; | 177 | }; |
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index f1fbe9c930d7..d4efe4014705 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h | |||
@@ -41,7 +41,7 @@ struct ip_tunnel_prl { | |||
41 | __u16 __reserved; | 41 | __u16 __reserved; |
42 | __u32 datalen; | 42 | __u32 datalen; |
43 | __u32 __reserved2; | 43 | __u32 __reserved2; |
44 | void __user *data; | 44 | /* data follows */ |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* PRL flags */ | 47 | /* PRL flags */ |
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h index 80335b7d77c5..c4335faebb63 100644 --- a/include/linux/inet_lro.h +++ b/include/linux/inet_lro.h | |||
@@ -84,7 +84,11 @@ struct net_lro_mgr { | |||
84 | from received packets and eth protocol | 84 | from received packets and eth protocol |
85 | is still ETH_P_8021Q */ | 85 | is still ETH_P_8021Q */ |
86 | 86 | ||
87 | u32 ip_summed; /* Set in non generated SKBs in page mode */ | 87 | /* |
88 | * Set for generated SKBs that are not added to | ||
89 | * the frag list in fragmented mode | ||
90 | */ | ||
91 | u32 ip_summed; | ||
88 | u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY | 92 | u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY |
89 | * or CHECKSUM_NONE */ | 93 | * or CHECKSUM_NONE */ |
90 | 94 | ||
diff --git a/include/linux/input.h b/include/linux/input.h index e075c4b762fb..d150c57e5f0a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -534,8 +534,8 @@ struct input_absinfo { | |||
534 | 534 | ||
535 | #define KEY_FRAMEBACK 0x1b4 /* Consumer - transport controls */ | 535 | #define KEY_FRAMEBACK 0x1b4 /* Consumer - transport controls */ |
536 | #define KEY_FRAMEFORWARD 0x1b5 | 536 | #define KEY_FRAMEFORWARD 0x1b5 |
537 | |||
538 | #define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ | 537 | #define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ |
538 | #define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ | ||
539 | 539 | ||
540 | #define KEY_DEL_EOL 0x1c0 | 540 | #define KEY_DEL_EOL 0x1c0 |
541 | #define KEY_DEL_EOS 0x1c1 | 541 | #define KEY_DEL_EOS 0x1c1 |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 792bf0aa779b..2e70006c7fa8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -46,6 +46,9 @@ extern const char linux_proc_banner[]; | |||
46 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | 46 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
47 | #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) | 47 | #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) |
48 | 48 | ||
49 | #define _RET_IP_ (unsigned long)__builtin_return_address(0) | ||
50 | #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) | ||
51 | |||
49 | #ifdef CONFIG_LBD | 52 | #ifdef CONFIG_LBD |
50 | # include <asm/div64.h> | 53 | # include <asm/div64.h> |
51 | # define sector_div(a, b) do_div(a, b) | 54 | # define sector_div(a, b) do_div(a, b) |
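_RET_IP_ and _THIS_IP_ give callers a cheap way to record code addresses for debugging or tracing. A minimal sketch; the helper name is made up for illustration:

#include <linux/kernel.h>

/* Hypothetical debug helper: log where we are and who called us.
 * _THIS_IP_ takes the address of a local label; _RET_IP_ is
 * __builtin_return_address(0) cast to unsigned long. */
static noinline void debug_trace_entry(void)
{
        printk(KERN_DEBUG "at 0x%lx, called from 0x%lx\n",
               _THIS_IP_, _RET_IP_);
}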
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h index a2c54acceb4e..cb311798e0bc 100644 --- a/include/linux/ks0108.h +++ b/include/linux/ks0108.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Description: ks0108 LCD Controller driver header | 4 | * Description: ks0108 LCD Controller driver header |
5 | * License: GPLv2 | 5 | * License: GPLv2 |
6 | * | 6 | * |
7 | * Author: Copyright (C) Miguel Ojeda Sandonis <maxextreme@gmail.com> | 7 | * Author: Copyright (C) Miguel Ojeda Sandonis |
8 | * Date: 2006-10-31 | 8 | * Date: 2006-10-31 |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 092b1b25291d..de9d1df4bba2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #define KVM_REQ_REPORT_TPR_ACCESS 2 | 33 | #define KVM_REQ_REPORT_TPR_ACCESS 2 |
34 | #define KVM_REQ_MMU_RELOAD 3 | 34 | #define KVM_REQ_MMU_RELOAD 3 |
35 | #define KVM_REQ_TRIPLE_FAULT 4 | 35 | #define KVM_REQ_TRIPLE_FAULT 4 |
36 | #define KVM_REQ_PENDING_TIMER 5 | ||
36 | 37 | ||
37 | struct kvm_vcpu; | 38 | struct kvm_vcpu; |
38 | extern struct kmem_cache *kvm_vcpu_cache; | 39 | extern struct kmem_cache *kvm_vcpu_cache; |
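KVM_REQ_PENDING_TIMER is another bit in the vcpu->requests bitmap and is used the same way as the existing request bits. A sketch of the producer/consumer pattern, with hypothetical helper names (the real users live in the x86 timer code):

/* Producer side, e.g. from a timer callback: flag the vcpu. */
static void timer_expired(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
}

/* Consumer side, in the vcpu run loop:
 *
 *      if (test_and_clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests))
 *              inject_pending_timer_irq(vcpu);  -- hypothetical helper
 */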
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f27fd2009334..25f87102ab66 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -88,6 +88,8 @@ struct wireless_dev; | |||
88 | #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ | 88 | #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ |
89 | #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ | 89 | #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ |
90 | 90 | ||
91 | #ifdef __KERNEL__ | ||
92 | |||
91 | /* | 93 | /* |
92 | * Compute the worst case header length according to the protocols | 94 | * Compute the worst case header length according to the protocols |
93 | * used. | 95 | * used. |
@@ -114,6 +116,8 @@ struct wireless_dev; | |||
114 | #define MAX_HEADER (LL_MAX_HEADER + 48) | 116 | #define MAX_HEADER (LL_MAX_HEADER + 48) |
115 | #endif | 117 | #endif |
116 | 118 | ||
119 | #endif /* __KERNEL__ */ | ||
120 | |||
117 | struct net_device_subqueue | 121 | struct net_device_subqueue |
118 | { | 122 | { |
119 | /* Give a control state for each queue. This struct may contain | 123 | /* Give a control state for each queue. This struct may contain |
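LL_MAX_HEADER/MAX_HEADER are only meaningful to kernel code, hence the new __KERNEL__ guard. For context, a sketch of how MAX_HEADER is typically used when allocating an outgoing skb (the helper name is hypothetical):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical helper: reserve worst-case header room up front so later
 * header pushes (IP, tunnel, link layer) never force a reallocation. */
static struct sk_buff *alloc_tx_skb(unsigned int payload_len)
{
        struct sk_buff *skb = alloc_skb(payload_len + MAX_HEADER, GFP_ATOMIC);

        if (skb)
                skb_reserve(skb, MAX_HEADER);
        return skb;
}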
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f31debfac926..0d2a4e7012aa 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -157,6 +157,7 @@ PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) | |||
157 | __PAGEFLAG(Slab, slab) | 157 | __PAGEFLAG(Slab, slab) |
158 | PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ | 158 | PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ |
159 | PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ | 159 | PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ |
160 | PAGEFLAG(SavePinned, dirty); /* Xen */ | ||
160 | PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) | 161 | PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) |
161 | PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) | 162 | PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) |
162 | __SETPAGEFLAG(Private, private) | 163 | __SETPAGEFLAG(Private, private) |
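PAGEFLAG(SavePinned, dirty) reuses the PG_dirty bit for Xen's suspend/resume bookkeeping and, per the macro's convention, generates PageSavePinned(), SetPageSavePinned() and ClearPageSavePinned(). A sketch of a caller; the helper is hypothetical, the real users are in the Xen pagetable code:

#include <linux/page-flags.h>

/* Remember across suspend whether this pagetable page was pinned, so it
 * can be re-pinned on resume. */
static void note_pin_state(struct page *page, int pinned)
{
        if (pinned)
                SetPageSavePinned(page);
        else
                ClearPageSavePinned(page);
}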
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index eafc9d6d2b35..65953822c9cb 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1520,6 +1520,7 @@ | |||
1520 | #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 | 1520 | #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 |
1521 | #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 | 1521 | #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 |
1522 | #define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 | 1522 | #define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 |
1523 | #define PCI_DEVICE_ID_MARVELL_CAFE_SD 0x4101 | ||
1523 | 1524 | ||
1524 | #define PCI_VENDOR_ID_V3 0x11b0 | 1525 | #define PCI_VENDOR_ID_V3 0x11b0 |
1525 | #define PCI_DEVICE_ID_V3_V960 0x0001 | 1526 | #define PCI_DEVICE_ID_V3_V960 0x0001 |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h new file mode 100644 index 000000000000..bde4586f4382 --- /dev/null +++ b/include/linux/rculist.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _LINUX_RCULIST_H | ||
2 | #define _LINUX_RCULIST_H | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | |||
6 | #endif /* _LINUX_RCULIST_H */ | ||
diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h index f3f4f28c6960..c9ba2fdf807d 100644 --- a/include/linux/resume-trace.h +++ b/include/linux/resume-trace.h | |||
@@ -8,7 +8,7 @@ extern int pm_trace_enabled; | |||
8 | 8 | ||
9 | struct device; | 9 | struct device; |
10 | extern void set_trace_device(struct device *); | 10 | extern void set_trace_device(struct device *); |
11 | extern void generate_resume_trace(void *tracedata, unsigned int user); | 11 | extern void generate_resume_trace(const void *tracedata, unsigned int user); |
12 | 12 | ||
13 | #define TRACE_DEVICE(dev) do { \ | 13 | #define TRACE_DEVICE(dev) do { \ |
14 | if (pm_trace_enabled) \ | 14 | if (pm_trace_enabled) \ |
diff --git a/include/linux/securebits.h b/include/linux/securebits.h index c1f19dbceb05..92f09bdf1175 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h | |||
@@ -7,14 +7,15 @@ | |||
7 | inheritance of root-permissions and suid-root executable under | 7 | inheritance of root-permissions and suid-root executable under |
8 | compatibility mode. We raise the effective and inheritable bitmasks | 8 | compatibility mode. We raise the effective and inheritable bitmasks |
9 | *of the executable file* if the effective uid of the new process is | 9 | *of the executable file* if the effective uid of the new process is |
10 | 0. If the real uid is 0, we raise the inheritable bitmask of the | 10 | 0. If the real uid is 0, we raise the effective (legacy) bit of the |
11 | executable file. */ | 11 | executable file. */ |
12 | #define SECURE_NOROOT 0 | 12 | #define SECURE_NOROOT 0 |
13 | #define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ | 13 | #define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ |
14 | 14 | ||
15 | /* When set, setuid to/from uid 0 does not trigger capability-"fixes" | 15 | /* When set, setuid to/from uid 0 does not trigger capability-"fixup". |
16 | to be compatible with old programs relying on set*uid to loose | 16 | When unset, to provide compatiblility with old programs relying on |
17 | privileges. When unset, setuid doesn't change privileges. */ | 17 | set*uid to gain/lose privilege, transitions to/from uid 0 cause |
18 | capabilities to be gained/lost. */ | ||
18 | #define SECURE_NO_SETUID_FIXUP 2 | 19 | #define SECURE_NO_SETUID_FIXUP 2 |
19 | #define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ | 20 | #define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ |
20 | 21 | ||
@@ -26,10 +27,10 @@ | |||
26 | #define SECURE_KEEP_CAPS 4 | 27 | #define SECURE_KEEP_CAPS 4 |
27 | #define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ | 28 | #define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ |
28 | 29 | ||
29 | /* Each securesetting is implemented using two bits. One bit specify | 30 | /* Each securesetting is implemented using two bits. One bit specifies |
30 | whether the setting is on or off. The other bit specify whether the | 31 | whether the setting is on or off. The other bit specify whether the |
31 | setting is fixed or not. A setting which is fixed cannot be changed | 32 | setting is locked or not. A setting which is locked cannot be |
32 | from user-level. */ | 33 | changed from user-level. */ |
33 | #define issecure_mask(X) (1 << (X)) | 34 | #define issecure_mask(X) (1 << (X)) |
34 | #define issecure(X) (issecure_mask(X) & current->securebits) | 35 | #define issecure(X) (issecure_mask(X) & current->securebits) |
35 | 36 | ||
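The lock bits and the issecure() test combine as before; only the comment wording changed. For illustration, a sketch of a check a capability routine might make (hypothetical helper):

#include <linux/securebits.h>
#include <linux/sched.h>

/* With SECURE_NOROOT set for the current task, exec of a setuid-root
 * binary no longer grants the legacy root capabilities. */
static int legacy_root_caps_enabled(void)
{
        return !issecure(SECURE_NOROOT);
}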
diff --git a/include/linux/slab.h b/include/linux/slab.h index c2ad35016599..9aa90a6f20e0 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). | 2 | * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). |
3 | * | 3 | * |
4 | * (C) SGI 2006, Christoph Lameter <clameter@sgi.com> | 4 | * (C) SGI 2006, Christoph Lameter |
5 | * Cleaned up and restructured to ease the addition of alternative | 5 | * Cleaned up and restructured to ease the addition of alternative |
6 | * implementations of SLAB allocators. | 6 | * implementations of SLAB allocators. |
7 | */ | 7 | */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 71e43a12ebbb..d117ea2825a9 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -4,7 +4,7 @@ | |||
4 | /* | 4 | /* |
5 | * SLUB : A Slab allocator without object queues. | 5 | * SLUB : A Slab allocator without object queues. |
6 | * | 6 | * |
7 | * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com> | 7 | * (C) 2007 SGI, Christoph Lameter |
8 | */ | 8 | */ |
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
@@ -137,10 +137,12 @@ static __always_inline int kmalloc_index(size_t size) | |||
137 | if (size <= KMALLOC_MIN_SIZE) | 137 | if (size <= KMALLOC_MIN_SIZE) |
138 | return KMALLOC_SHIFT_LOW; | 138 | return KMALLOC_SHIFT_LOW; |
139 | 139 | ||
140 | #if KMALLOC_MIN_SIZE <= 64 | ||
140 | if (size > 64 && size <= 96) | 141 | if (size > 64 && size <= 96) |
141 | return 1; | 142 | return 1; |
142 | if (size > 128 && size <= 192) | 143 | if (size > 128 && size <= 192) |
143 | return 2; | 144 | return 2; |
145 | #endif | ||
144 | if (size <= 8) return 3; | 146 | if (size <= 8) return 3; |
145 | if (size <= 16) return 4; | 147 | if (size <= 16) return 4; |
146 | if (size <= 32) return 5; | 148 | if (size <= 32) return 5; |
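The new #if matters on configurations where KMALLOC_MIN_SIZE is larger than 64, because the 96- and 192-byte caches are never created there. A worked example, assuming a configuration with KMALLOC_MIN_SIZE == 128 (so KMALLOC_SHIFT_LOW == 7):

/* kmalloc_index(150), old code:
 *      150 <= 128?             no
 *      150 > 128 && <= 192?    yes -> index 2, but the 192-byte cache
 *                              does not exist for this configuration
 *
 * kmalloc_index(150), with the #if guard:
 *      the 96/192 special cases are compiled out, so the walk continues
 *      to "size <= 256" and returns index 8 (the 256-byte cache).
 */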
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 06d3e6eb9ca8..917707e6151d 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -66,8 +66,7 @@ struct thermal_cooling_device { | |||
66 | ((long)t-2732+5)/10 : ((long)t-2732-5)/10) | 66 | ((long)t-2732+5)/10 : ((long)t-2732-5)/10) |
67 | #define CELSIUS_TO_KELVIN(t) ((t)*10+2732) | 67 | #define CELSIUS_TO_KELVIN(t) ((t)*10+2732) |
68 | 68 | ||
69 | #if defined(CONFIG_HWMON) || \ | 69 | #if defined(CONFIG_THERMAL_HWMON) |
70 | (defined(CONFIG_HWMON_MODULE) && defined(CONFIG_THERMAL_MODULE)) | ||
71 | /* thermal zone devices with the same type share one hwmon device */ | 70 | /* thermal zone devices with the same type share one hwmon device */ |
72 | struct thermal_hwmon_device { | 71 | struct thermal_hwmon_device { |
73 | char type[THERMAL_NAME_LENGTH]; | 72 | char type[THERMAL_NAME_LENGTH]; |
@@ -94,8 +93,7 @@ struct thermal_zone_device { | |||
94 | struct idr idr; | 93 | struct idr idr; |
95 | struct mutex lock; /* protect cooling devices list */ | 94 | struct mutex lock; /* protect cooling devices list */ |
96 | struct list_head node; | 95 | struct list_head node; |
97 | #if defined(CONFIG_HWMON) || \ | 96 | #if defined(CONFIG_THERMAL_HWMON) |
98 | (defined(CONFIG_HWMON_MODULE) && defined(CONFIG_THERMAL_MODULE)) | ||
99 | struct list_head hwmon_node; | 97 | struct list_head hwmon_node; |
100 | struct thermal_hwmon_device *hwmon; | 98 | struct thermal_hwmon_device *hwmon; |
101 | struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ | 99 | struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ |
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 59f1c0bd8f9c..d2a003586761 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -27,8 +27,7 @@ | |||
27 | * This routine is called by the kernel to write a series of | 27 | * This routine is called by the kernel to write a series of |
28 | * characters to the tty device. The characters may come from | 28 | * characters to the tty device. The characters may come from |
29 | * user space or kernel space. This routine will return the | 29 | * user space or kernel space. This routine will return the |
30 | * number of characters actually accepted for writing. This | 30 | * number of characters actually accepted for writing. |
31 | * routine is mandatory. | ||
32 | * | 31 | * |
33 | * Optional: Required for writable devices. | 32 | * Optional: Required for writable devices. |
34 | * | 33 | * |
@@ -134,7 +133,7 @@ | |||
134 | * This routine notifies the tty driver that it should hangup the | 133 | * This routine notifies the tty driver that it should hangup the |
135 | * tty device. | 134 | * tty device. |
136 | * | 135 | * |
137 | * Required: | 136 | * Optional: |
138 | * | 137 | * |
139 | * void (*break_ctl)(struct tty_stuct *tty, int state); | 138 | * void (*break_ctl)(struct tty_stuct *tty, int state); |
140 | * | 139 | * |
diff --git a/include/media/cx25840.h b/include/media/cx25840.h index cd599ad29fb2..db431d513f2f 100644 --- a/include/media/cx25840.h +++ b/include/media/cx25840.h | |||
@@ -32,12 +32,16 @@ enum cx25840_video_input { | |||
32 | CX25840_COMPOSITE7, | 32 | CX25840_COMPOSITE7, |
33 | CX25840_COMPOSITE8, | 33 | CX25840_COMPOSITE8, |
34 | 34 | ||
35 | /* S-Video inputs consist of one luma input (In1-In4) ORed with one | 35 | /* S-Video inputs consist of one luma input (In1-In8) ORed with one |
36 | chroma input (In5-In8) */ | 36 | chroma input (In5-In8) */ |
37 | CX25840_SVIDEO_LUMA1 = 0x10, | 37 | CX25840_SVIDEO_LUMA1 = 0x10, |
38 | CX25840_SVIDEO_LUMA2 = 0x20, | 38 | CX25840_SVIDEO_LUMA2 = 0x20, |
39 | CX25840_SVIDEO_LUMA3 = 0x30, | 39 | CX25840_SVIDEO_LUMA3 = 0x30, |
40 | CX25840_SVIDEO_LUMA4 = 0x40, | 40 | CX25840_SVIDEO_LUMA4 = 0x40, |
41 | CX25840_SVIDEO_LUMA5 = 0x50, | ||
42 | CX25840_SVIDEO_LUMA6 = 0x60, | ||
43 | CX25840_SVIDEO_LUMA7 = 0x70, | ||
44 | CX25840_SVIDEO_LUMA8 = 0x80, | ||
41 | CX25840_SVIDEO_CHROMA4 = 0x400, | 45 | CX25840_SVIDEO_CHROMA4 = 0x400, |
42 | CX25840_SVIDEO_CHROMA5 = 0x500, | 46 | CX25840_SVIDEO_CHROMA5 = 0x500, |
43 | CX25840_SVIDEO_CHROMA6 = 0x600, | 47 | CX25840_SVIDEO_CHROMA6 = 0x600, |
diff --git a/include/media/ir-common.h b/include/media/ir-common.h index bfee8be5d63f..b8e8aa91905a 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h | |||
@@ -146,6 +146,7 @@ extern IR_KEYTAB_TYPE ir_codes_behold_columbus[IR_KEYTAB_SIZE]; | |||
146 | extern IR_KEYTAB_TYPE ir_codes_pinnacle_pctv_hd[IR_KEYTAB_SIZE]; | 146 | extern IR_KEYTAB_TYPE ir_codes_pinnacle_pctv_hd[IR_KEYTAB_SIZE]; |
147 | extern IR_KEYTAB_TYPE ir_codes_genius_tvgo_a11mce[IR_KEYTAB_SIZE]; | 147 | extern IR_KEYTAB_TYPE ir_codes_genius_tvgo_a11mce[IR_KEYTAB_SIZE]; |
148 | extern IR_KEYTAB_TYPE ir_codes_powercolor_real_angel[IR_KEYTAB_SIZE]; | 148 | extern IR_KEYTAB_TYPE ir_codes_powercolor_real_angel[IR_KEYTAB_SIZE]; |
149 | extern IR_KEYTAB_TYPE ir_codes_avermedia_a16d[IR_KEYTAB_SIZE]; | ||
149 | 150 | ||
150 | #endif | 151 | #endif |
151 | 152 | ||
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 33f01ae08f76..859f7a6f6f67 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h | |||
@@ -40,9 +40,9 @@ | |||
40 | #define VFL_TYPE_VTX 3 | 40 | #define VFL_TYPE_VTX 3 |
41 | 41 | ||
42 | /* Video standard functions */ | 42 | /* Video standard functions */ |
43 | extern char *v4l2_norm_to_name(v4l2_std_id id); | 43 | extern const char *v4l2_norm_to_name(v4l2_std_id id); |
44 | extern int v4l2_video_std_construct(struct v4l2_standard *vs, | 44 | extern int v4l2_video_std_construct(struct v4l2_standard *vs, |
45 | int id, char *name); | 45 | int id, const char *name); |
46 | /* Prints the ioctl in a human-readable format */ | 46 | /* Prints the ioctl in a human-readable format */ |
47 | extern void v4l_printk_ioctl(unsigned int cmd); | 47 | extern void v4l_printk_ioctl(unsigned int cmd); |
48 | 48 | ||
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e0a612bc9c4e..f422f7218e1c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -367,6 +367,12 @@ static inline int ipv6_addr_any(const struct in6_addr *a) | |||
367 | a->s6_addr32[2] | a->s6_addr32[3] ) == 0); | 367 | a->s6_addr32[2] | a->s6_addr32[3] ) == 0); |
368 | } | 368 | } |
369 | 369 | ||
370 | static inline int ipv6_addr_loopback(const struct in6_addr *a) | ||
371 | { | ||
372 | return ((a->s6_addr32[0] | a->s6_addr32[1] | | ||
373 | a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0); | ||
374 | } | ||
375 | |||
370 | static inline int ipv6_addr_v4mapped(const struct in6_addr *a) | 376 | static inline int ipv6_addr_v4mapped(const struct in6_addr *a) |
371 | { | 377 | { |
372 | return ((a->s6_addr32[0] | a->s6_addr32[1] | | 378 | return ((a->s6_addr32[0] | a->s6_addr32[1] | |
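ipv6_addr_loopback() mirrors ipv6_addr_any(): the address is ::1 exactly when the first three words are zero and the last word is htonl(1). A minimal sketch of a caller (hypothetical helper name):

#include <linux/ipv6.h>
#include <net/ipv6.h>

/* Reject packets that claim a loopback or unspecified source address. */
static int saddr_is_sane(const struct ipv6hdr *hdr)
{
        return !ipv6_addr_loopback(&hdr->saddr) &&
               !ipv6_addr_any(&hdr->saddr);
}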
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index dae3f9ec1154..bcd1623245cb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -595,6 +595,15 @@ enum ieee80211_key_alg { | |||
595 | ALG_CCMP, | 595 | ALG_CCMP, |
596 | }; | 596 | }; |
597 | 597 | ||
598 | /** | ||
599 | * enum ieee80211_key_len - key length | ||
600 | * @WEP40: WEP 5 byte long key | ||
601 | * @WEP104: WEP 13 byte long key | ||
602 | */ | ||
603 | enum ieee80211_key_len { | ||
604 | LEN_WEP40 = 5, | ||
605 | LEN_WEP104 = 13, | ||
606 | }; | ||
598 | 607 | ||
599 | /** | 608 | /** |
600 | * enum ieee80211_key_flags - key flags | 609 | * enum ieee80211_key_flags - key flags |
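LEN_WEP40/LEN_WEP104 let drivers compare against named constants instead of bare 5/13. A sketch, assuming the usual struct ieee80211_key_conf fields (alg, keylen):

/* Pick the WEP variant to program into hardware from the key length. */
static int key_is_wep104(const struct ieee80211_key_conf *conf)
{
        return conf->alg == ALG_WEP && conf->keylen == LEN_WEP104;
}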
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index aa540e6be502..d9dd0f707296 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -95,6 +95,11 @@ extern struct list_head net_namespace_list; | |||
95 | #ifdef CONFIG_NET_NS | 95 | #ifdef CONFIG_NET_NS |
96 | extern void __put_net(struct net *net); | 96 | extern void __put_net(struct net *net); |
97 | 97 | ||
98 | static inline int net_alive(struct net *net) | ||
99 | { | ||
100 | return net && atomic_read(&net->count); | ||
101 | } | ||
102 | |||
98 | static inline struct net *get_net(struct net *net) | 103 | static inline struct net *get_net(struct net *net) |
99 | { | 104 | { |
100 | atomic_inc(&net->count); | 105 | atomic_inc(&net->count); |
@@ -125,6 +130,12 @@ int net_eq(const struct net *net1, const struct net *net2) | |||
125 | return net1 == net2; | 130 | return net1 == net2; |
126 | } | 131 | } |
127 | #else | 132 | #else |
133 | |||
134 | static inline int net_alive(struct net *net) | ||
135 | { | ||
136 | return 1; | ||
137 | } | ||
138 | |||
128 | static inline struct net *get_net(struct net *net) | 139 | static inline struct net *get_net(struct net *net) |
129 | { | 140 | { |
130 | return net; | 141 | return net; |
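net_alive() answers "is this namespace still in use, or already in teardown?"; the !CONFIG_NET_NS stub always answers yes. A sketch of the intended use from an RCU callback, built around a hypothetical per-net object:

#include <net/net_namespace.h>
#include <linux/slab.h>

struct sample_obj {                     /* hypothetical per-net object */
        struct net      *net;
        struct rcu_head rcu;
};

static void sample_obj_free(struct rcu_head *head)
{
        struct sample_obj *obj = container_of(head, struct sample_obj, rcu);

        /* By the time the grace period ends the owning namespace may be
         * half torn down; only touch per-net state if it is still alive. */
        if (net_alive(obj->net)) {
                /* per-net accounting/cleanup would go here */
        }
        kfree(obj);
}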
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index f736e842977f..f80c0ed6d870 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h | |||
@@ -15,6 +15,7 @@ enum nf_ct_ext_id | |||
15 | 15 | ||
16 | /* Extensions: optional stuff which isn't permanently in struct. */ | 16 | /* Extensions: optional stuff which isn't permanently in struct. */ |
17 | struct nf_ct_ext { | 17 | struct nf_ct_ext { |
18 | struct rcu_head rcu; | ||
18 | u8 offset[NF_CT_EXT_NUM]; | 19 | u8 offset[NF_CT_EXT_NUM]; |
19 | u8 len; | 20 | u8 len; |
20 | char data[0]; | 21 | char data[0]; |
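Embedding a struct rcu_head lets an extension block be freed after a grace period when it is replaced by a larger copy. A sketch of the freeing side; the callback name is made up, the real one lives in nf_conntrack_extend.c:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void nf_ct_ext_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct nf_ct_ext, rcu));
}

/* when a grown copy replaces the old block:
 *      rcu_assign_pointer(ct->ext, new);
 *      call_rcu(&old->rcu, nf_ct_ext_free_rcu);
 */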
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index ab502ec1c61c..a87fc0312edc 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -178,7 +178,7 @@ extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops); | |||
178 | extern struct Qdisc *qdisc_create_dflt(struct net_device *dev, | 178 | extern struct Qdisc *qdisc_create_dflt(struct net_device *dev, |
179 | struct Qdisc_ops *ops, u32 parentid); | 179 | struct Qdisc_ops *ops, u32 parentid); |
180 | extern void tcf_destroy(struct tcf_proto *tp); | 180 | extern void tcf_destroy(struct tcf_proto *tp); |
181 | extern void tcf_destroy_chain(struct tcf_proto *fl); | 181 | extern void tcf_destroy_chain(struct tcf_proto **fl); |
182 | 182 | ||
183 | static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, | 183 | static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, |
184 | struct sk_buff_head *list) | 184 | struct sk_buff_head *list) |
diff --git a/include/xen/events.h b/include/xen/events.h index acd8e062c85f..67c4436554a9 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -32,6 +32,7 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id); | |||
32 | 32 | ||
33 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); | 33 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); |
34 | int resend_irq_on_evtchn(unsigned int irq); | 34 | int resend_irq_on_evtchn(unsigned int irq); |
35 | void rebind_evtchn_irq(int evtchn, int irq); | ||
35 | 36 | ||
36 | static inline void notify_remote_via_evtchn(int port) | 37 | static inline void notify_remote_via_evtchn(int port) |
37 | { | 38 | { |
@@ -40,4 +41,7 @@ static inline void notify_remote_via_evtchn(int port) | |||
40 | } | 41 | } |
41 | 42 | ||
42 | extern void notify_remote_via_irq(int irq); | 43 | extern void notify_remote_via_irq(int irq); |
44 | |||
45 | extern void xen_irq_resume(void); | ||
46 | |||
43 | #endif /* _XEN_EVENTS_H */ | 47 | #endif /* _XEN_EVENTS_H */ |
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 466204846121..a40f1cd91be1 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h | |||
@@ -51,6 +51,9 @@ struct gnttab_free_callback { | |||
51 | u16 count; | 51 | u16 count; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | int gnttab_suspend(void); | ||
55 | int gnttab_resume(void); | ||
56 | |||
54 | int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, | 57 | int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, |
55 | int readonly); | 58 | int readonly); |
56 | 59 | ||
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h index 21c0ecfd786d..98b79bc404dd 100644 --- a/include/xen/hvc-console.h +++ b/include/xen/hvc-console.h | |||
@@ -3,4 +3,13 @@ | |||
3 | 3 | ||
4 | extern struct console xenboot_console; | 4 | extern struct console xenboot_console; |
5 | 5 | ||
6 | #ifdef CONFIG_HVC_XEN | ||
7 | void xen_console_resume(void); | ||
8 | #else | ||
9 | static inline void xen_console_resume(void) { } | ||
10 | #endif | ||
11 | |||
12 | void xen_raw_console_write(const char *str); | ||
13 | void xen_raw_printk(const char *fmt, ...); | ||
14 | |||
6 | #endif /* XEN_HVC_CONSOLE_H */ | 15 | #endif /* XEN_HVC_CONSOLE_H */ |
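xen_raw_console_write()/xen_raw_printk() bypass the normal console machinery, which makes them usable very early in boot and across suspend/resume. A sketch with a hypothetical helper:

#include <linux/init.h>
#include <xen/hvc-console.h>

static void __init announce_stage(const char *what)
{
        /* goes straight to the hypervisor console, before printk works */
        xen_raw_printk("xen: %s\n", what);
}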
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h index a64d3df5bd95..7a8262c375cc 100644 --- a/include/xen/interface/elfnote.h +++ b/include/xen/interface/elfnote.h | |||
@@ -120,6 +120,26 @@ | |||
120 | */ | 120 | */ |
121 | #define XEN_ELFNOTE_BSD_SYMTAB 11 | 121 | #define XEN_ELFNOTE_BSD_SYMTAB 11 |
122 | 122 | ||
123 | /* | ||
124 | * The lowest address the hypervisor hole can begin at (numeric). | ||
125 | * | ||
126 | * This must not be set higher than HYPERVISOR_VIRT_START. Its presence | ||
127 | * also indicates to the hypervisor that the kernel can deal with the | ||
128 | * hole starting at a higher address. | ||
129 | */ | ||
130 | #define XEN_ELFNOTE_HV_START_LOW 12 | ||
131 | |||
132 | /* | ||
133 | * List of maddr_t-sized mask/value pairs describing how to recognize | ||
134 | * (non-present) L1 page table entries carrying valid MFNs (numeric). | ||
135 | */ | ||
136 | #define XEN_ELFNOTE_L1_MFN_VALID 13 | ||
137 | |||
138 | /* | ||
139 | * Whether or not the guest supports cooperative suspend cancellation. | ||
140 | */ | ||
141 | #define XEN_ELFNOTE_SUSPEND_CANCEL 14 | ||
142 | |||
123 | #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ | 143 | #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ |
124 | 144 | ||
125 | /* | 145 | /* |
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h index d73228d16488..f51b6413b054 100644 --- a/include/xen/interface/features.h +++ b/include/xen/interface/features.h | |||
@@ -38,6 +38,9 @@ | |||
38 | */ | 38 | */ |
39 | #define XENFEAT_pae_pgdir_above_4gb 4 | 39 | #define XENFEAT_pae_pgdir_above_4gb 4 |
40 | 40 | ||
41 | /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ | ||
42 | #define XENFEAT_mmu_pt_update_preserve_ad 5 | ||
43 | |||
41 | #define XENFEAT_NR_SUBMAPS 1 | 44 | #define XENFEAT_NR_SUBMAPS 1 |
42 | 45 | ||
43 | #endif /* __XEN_PUBLIC_FEATURES_H__ */ | 46 | #endif /* __XEN_PUBLIC_FEATURES_H__ */ |
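Guests are expected to probe the new feature bit before relying on the matching hypercall sub-command (see MMU_PT_UPDATE_PRESERVE_AD further down). xen_feature() is the existing accessor in include/xen/features.h; a minimal sketch:

#include <xen/features.h>

static int have_pt_update_preserve_ad(void)
{
        return xen_feature(XENFEAT_mmu_pt_update_preserve_ad);
}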
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h index 5a934dd7796d..974a51ed9165 100644 --- a/include/xen/interface/io/fbif.h +++ b/include/xen/interface/io/fbif.h | |||
@@ -49,11 +49,27 @@ struct xenfb_update { | |||
49 | int32_t height; /* rect height */ | 49 | int32_t height; /* rect height */ |
50 | }; | 50 | }; |
51 | 51 | ||
52 | /* | ||
53 | * Framebuffer resize notification event | ||
54 | * Capable backend sets feature-resize in xenstore. | ||
55 | */ | ||
56 | #define XENFB_TYPE_RESIZE 3 | ||
57 | |||
58 | struct xenfb_resize { | ||
59 | uint8_t type; /* XENFB_TYPE_RESIZE */ | ||
60 | int32_t width; /* width in pixels */ | ||
61 | int32_t height; /* height in pixels */ | ||
62 | int32_t stride; /* stride in bytes */ | ||
63 | int32_t depth; /* depth in bits */ | ||
64 | int32_t offset; /* start offset within framebuffer */ | ||
65 | }; | ||
66 | |||
52 | #define XENFB_OUT_EVENT_SIZE 40 | 67 | #define XENFB_OUT_EVENT_SIZE 40 |
53 | 68 | ||
54 | union xenfb_out_event { | 69 | union xenfb_out_event { |
55 | uint8_t type; | 70 | uint8_t type; |
56 | struct xenfb_update update; | 71 | struct xenfb_update update; |
72 | struct xenfb_resize resize; | ||
57 | char pad[XENFB_OUT_EVENT_SIZE]; | 73 | char pad[XENFB_OUT_EVENT_SIZE]; |
58 | }; | 74 | }; |
59 | 75 | ||
@@ -105,15 +121,18 @@ struct xenfb_page { | |||
105 | * Each directory page holds PAGE_SIZE / sizeof(*pd) | 121 | * Each directory page holds PAGE_SIZE / sizeof(*pd) |
106 | * framebuffer pages, and can thus map up to PAGE_SIZE * | 122 | * framebuffer pages, and can thus map up to PAGE_SIZE * |
107 | * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and | 123 | * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and |
108 | * sizeof(unsigned long) == 4, that's 4 Megs. Two directory | 124 | * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 |
109 | * pages should be enough for a while. | 125 | * Megs 64 bit. 256 directories give enough room for a 512 |
126 | * Meg framebuffer with a max resolution of 12,800x10,240. | ||
127 | * Should be enough for a while with room leftover for | ||
128 | * expansion. | ||
110 | */ | 129 | */ |
111 | unsigned long pd[2]; | 130 | unsigned long pd[256]; |
112 | }; | 131 | }; |
113 | 132 | ||
114 | /* | 133 | /* |
115 | * Wart: xenkbd needs to know resolution. Put it here until a better | 134 | * Wart: xenkbd needs to know default resolution. Put it here until a |
116 | * solution is found, but don't leak it to the backend. | 135 | * better solution is found, but don't leak it to the backend. |
117 | */ | 136 | */ |
118 | #ifdef __KERNEL__ | 137 | #ifdef __KERNEL__ |
119 | #define XENFB_WIDTH 800 | 138 | #define XENFB_WIDTH 800 |
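With XENFB_TYPE_RESIZE in place, a frontend can announce a mode change through the existing out-event ring. A sketch of filling the event; pushing it onto the shared ring is left to a hypothetical xenfb_send_event():

#include <linux/string.h>
#include <xen/interface/io/fbif.h>

static void xenfb_fill_resize(union xenfb_out_event *event,
                              int width, int height, int depth,
                              int stride, int offset)
{
        memset(event, 0, sizeof(*event));
        event->resize.type = XENFB_TYPE_RESIZE;
        event->resize.width = width;
        event->resize.height = height;
        event->resize.stride = stride;
        event->resize.depth = depth;
        event->resize.offset = offset;
        /* xenfb_send_event(event);  -- hypothetical ring helper */
}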
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h index fb97f4284ffd..8066c7849fbe 100644 --- a/include/xen/interface/io/kbdif.h +++ b/include/xen/interface/io/kbdif.h | |||
@@ -49,6 +49,7 @@ struct xenkbd_motion { | |||
49 | uint8_t type; /* XENKBD_TYPE_MOTION */ | 49 | uint8_t type; /* XENKBD_TYPE_MOTION */ |
50 | int32_t rel_x; /* relative X motion */ | 50 | int32_t rel_x; /* relative X motion */ |
51 | int32_t rel_y; /* relative Y motion */ | 51 | int32_t rel_y; /* relative Y motion */ |
52 | int32_t rel_z; /* relative Z motion (wheel) */ | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | struct xenkbd_key { | 55 | struct xenkbd_key { |
@@ -61,6 +62,7 @@ struct xenkbd_position { | |||
61 | uint8_t type; /* XENKBD_TYPE_POS */ | 62 | uint8_t type; /* XENKBD_TYPE_POS */ |
62 | int32_t abs_x; /* absolute X position (in FB pixels) */ | 63 | int32_t abs_x; /* absolute X position (in FB pixels) */ |
63 | int32_t abs_y; /* absolute Y position (in FB pixels) */ | 64 | int32_t abs_y; /* absolute Y position (in FB pixels) */ |
65 | int32_t rel_z; /* relative Z motion (wheel) */ | ||
64 | }; | 66 | }; |
65 | 67 | ||
66 | #define XENKBD_IN_EVENT_SIZE 40 | 68 | #define XENKBD_IN_EVENT_SIZE 40 |
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h index da768469aa92..af36ead16817 100644 --- a/include/xen/interface/memory.h +++ b/include/xen/interface/memory.h | |||
@@ -29,7 +29,7 @@ struct xen_memory_reservation { | |||
29 | * OUT: GMFN bases of extents that were allocated | 29 | * OUT: GMFN bases of extents that were allocated |
30 | * (NB. This command also updates the mach_to_phys translation table) | 30 | * (NB. This command also updates the mach_to_phys translation table) |
31 | */ | 31 | */ |
32 | ulong extent_start; | 32 | GUEST_HANDLE(ulong) extent_start; |
33 | 33 | ||
34 | /* Number of extents, and size/alignment of each (2^extent_order pages). */ | 34 | /* Number of extents, and size/alignment of each (2^extent_order pages). */ |
35 | unsigned long nr_extents; | 35 | unsigned long nr_extents; |
@@ -50,6 +50,7 @@ struct xen_memory_reservation { | |||
50 | domid_t domid; | 50 | domid_t domid; |
51 | 51 | ||
52 | }; | 52 | }; |
53 | DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * Returns the maximum machine frame number of mapped RAM in this system. | 56 | * Returns the maximum machine frame number of mapped RAM in this system. |
@@ -85,7 +86,7 @@ struct xen_machphys_mfn_list { | |||
85 | * any large discontiguities in the machine address space, 2MB gaps in | 86 | * any large discontiguities in the machine address space, 2MB gaps in |
86 | * the machphys table will be represented by an MFN base of zero. | 87 | * the machphys table will be represented by an MFN base of zero. |
87 | */ | 88 | */ |
88 | ulong extent_start; | 89 | GUEST_HANDLE(ulong) extent_start; |
89 | 90 | ||
90 | /* | 91 | /* |
91 | * Number of extents written to the above array. This will be smaller | 92 | * Number of extents written to the above array. This will be smaller |
@@ -93,6 +94,7 @@ struct xen_machphys_mfn_list { | |||
93 | */ | 94 | */ |
94 | unsigned int nr_extents; | 95 | unsigned int nr_extents; |
95 | }; | 96 | }; |
97 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); | ||
96 | 98 | ||
97 | /* | 99 | /* |
98 | * Sets the GPFN at which a particular page appears in the specified guest's | 100 | * Sets the GPFN at which a particular page appears in the specified guest's |
@@ -115,6 +117,7 @@ struct xen_add_to_physmap { | |||
115 | /* GPFN where the source mapping page should appear. */ | 117 | /* GPFN where the source mapping page should appear. */ |
116 | unsigned long gpfn; | 118 | unsigned long gpfn; |
117 | }; | 119 | }; |
120 | DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); | ||
118 | 121 | ||
119 | /* | 122 | /* |
120 | * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error | 123 | * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error |
@@ -129,13 +132,14 @@ struct xen_translate_gpfn_list { | |||
129 | unsigned long nr_gpfns; | 132 | unsigned long nr_gpfns; |
130 | 133 | ||
131 | /* List of GPFNs to translate. */ | 134 | /* List of GPFNs to translate. */ |
132 | ulong gpfn_list; | 135 | GUEST_HANDLE(ulong) gpfn_list; |
133 | 136 | ||
134 | /* | 137 | /* |
135 | * Output list to contain MFN translations. May be the same as the input | 138 | * Output list to contain MFN translations. May be the same as the input |
136 | * list (in which case each input GPFN is overwritten with the output MFN). | 139 | * list (in which case each input GPFN is overwritten with the output MFN). |
137 | */ | 140 | */ |
138 | ulong mfn_list; | 141 | GUEST_HANDLE(ulong) mfn_list; |
139 | }; | 142 | }; |
143 | DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); | ||
140 | 144 | ||
141 | #endif /* __XEN_PUBLIC_MEMORY_H__ */ | 145 | #endif /* __XEN_PUBLIC_MEMORY_H__ */ |
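With extent_start now a GUEST_HANDLE(ulong), callers go through set_xen_guest_handle() instead of assigning a raw pointer. A sketch of building a reservation for HYPERVISOR_memory_op(); the helper name is illustrative:

#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>

static int give_back_one_frame(unsigned long *mfn)
{
        struct xen_memory_reservation reservation = {
                .nr_extents   = 1,
                .extent_order = 0,
                .domid        = DOMID_SELF,
        };

        set_xen_guest_handle(reservation.extent_start, mfn);
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}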
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 9b018da48cf3..2befa3e2f1bc 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define __XEN_PUBLIC_XEN_H__ | 10 | #define __XEN_PUBLIC_XEN_H__ |
11 | 11 | ||
12 | #include <asm/xen/interface.h> | 12 | #include <asm/xen/interface.h> |
13 | #include <asm/pvclock-abi.h> | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS). | 16 | * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS). |
@@ -113,9 +114,14 @@ | |||
113 | * ptr[:2] -- Machine address within the frame whose mapping to modify. | 114 | * ptr[:2] -- Machine address within the frame whose mapping to modify. |
114 | * The frame must belong to the FD, if one is specified. | 115 | * The frame must belong to the FD, if one is specified. |
115 | * val -- Value to write into the mapping entry. | 116 | * val -- Value to write into the mapping entry. |
117 | * | ||
118 | * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: | ||
119 | * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed | ||
120 | * with those in @val. | ||
116 | */ | 121 | */ |
117 | #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ | 122 | #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ |
118 | #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ | 123 | #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ |
124 | #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ | ||
119 | 125 | ||
120 | /* | 126 | /* |
121 | * MMU EXTENDED OPERATIONS | 127 | * MMU EXTENDED OPERATIONS |
@@ -336,7 +342,7 @@ struct vcpu_info { | |||
336 | uint8_t evtchn_upcall_mask; | 342 | uint8_t evtchn_upcall_mask; |
337 | unsigned long evtchn_pending_sel; | 343 | unsigned long evtchn_pending_sel; |
338 | struct arch_vcpu_info arch; | 344 | struct arch_vcpu_info arch; |
339 | struct vcpu_time_info time; | 345 | struct pvclock_vcpu_time_info time; |
340 | }; /* 64 bytes (x86) */ | 346 | }; /* 64 bytes (x86) */ |
341 | 347 | ||
342 | /* | 348 | /* |
@@ -384,9 +390,7 @@ struct shared_info { | |||
384 | * Wallclock time: updated only by control software. Guests should base | 390 | * Wallclock time: updated only by control software. Guests should base |
385 | * their gettimeofday() syscall on this wallclock-base value. | 391 | * their gettimeofday() syscall on this wallclock-base value. |
386 | */ | 392 | */ |
387 | uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ | 393 | struct pvclock_wall_clock wc; |
388 | uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ | ||
389 | uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ | ||
390 | 394 | ||
391 | struct arch_shared_info arch; | 395 | struct arch_shared_info arch; |
392 | 396 | ||
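MMU_PT_UPDATE_PRESERVE_AD is encoded, like the other sub-commands, in the low bits of mmu_update.ptr. A sketch of a single preserving PTE update; the helper is hypothetical, and real callers should first check XENFEAT_mmu_pt_update_preserve_ad:

#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>

static void update_pte_preserve_ad(uint64_t pte_machine_addr, uint64_t val)
{
        struct mmu_update u;

        /* the low bits of ptr select the sub-command */
        u.ptr = pte_machine_addr | MMU_PT_UPDATE_PRESERVE_AD;
        u.val = val;
        HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}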
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 10ddfe0142d0..a706d6a78960 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h | |||
@@ -5,4 +5,10 @@ | |||
5 | 5 | ||
6 | DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); | 6 | DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); |
7 | 7 | ||
8 | void xen_pre_suspend(void); | ||
9 | void xen_post_suspend(int suspend_cancelled); | ||
10 | |||
11 | void xen_mm_pin_all(void); | ||
12 | void xen_mm_unpin_all(void); | ||
13 | |||
8 | #endif /* INCLUDE_XEN_OPS_H */ | 14 | #endif /* INCLUDE_XEN_OPS_H */ |