Diffstat (limited to 'include/asm-x86_64')
38 files changed, 242 insertions, 253 deletions
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index dc8c981af27..aa1c7b2e438 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -101,7 +101,7 @@ __acpi_release_global_lock (unsigned int *lock)
 :"=r"(n_hi), "=r"(n_lo) \
 :"0"(n_hi), "1"(n_lo))

-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 extern int acpi_lapic;
 extern int acpi_ioapic;
 extern int acpi_noirq;
@@ -121,17 +121,6 @@ static inline void disable_acpi(void)
 #define FIX_ACPI_PAGES 4

 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
-
-#else /* !CONFIG_ACPI_BOOT */
-#define acpi_lapic 0
-#define acpi_ioapic 0
-#endif /* !CONFIG_ACPI_BOOT */
-
-extern int acpi_numa;
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-#ifdef CONFIG_ACPI_PCI
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
 static inline void acpi_disable_pci(void)
 {
@@ -139,11 +128,19 @@ static inline void acpi_disable_pci(void)
 acpi_noirq_set();
 }
 extern int acpi_irq_balance_set(char *str);
-#else
+
+#else /* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
 static inline void acpi_noirq_set(void) { }
 static inline void acpi_disable_pci(void) { }
-static inline int acpi_irq_balance_set(char *str) { return 0; }
-#endif
+
+#endif /* !CONFIG_ACPI */
+
+extern int acpi_numa;
+extern int acpi_scan_nodes(unsigned long start, unsigned long end);
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)

 #ifdef CONFIG_ACPI_SLEEP

diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 16ec82e16b2..6c5d5ca8383 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -109,9 +109,10 @@ extern unsigned int nmi_watchdog;
 #define NMI_LOCAL_APIC 2
 #define NMI_INVALID 3

+extern int disable_timer_pin_1;
+
 #endif /* CONFIG_X86_LOCAL_APIC */

-#define esr_disable 0
 extern unsigned boot_cpu_id;

 #endif /* __ASM_APIC_H */
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index 9388062c4f6..fb1c99ac669 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -113,6 +113,7 @@
 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))

 #define MAX_IO_APICS 128
+#define MAX_LOCAL_APIC 256

 /*
  * All x86-64 systems are xAPIC compatible.
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h
index eed78566728..80ac1fe966a 100644
--- a/include/asm-x86_64/bug.h
+++ b/include/asm-x86_64/bug.h
@@ -9,10 +9,8 @@
  */
 struct bug_frame {
 unsigned char ud2[2];
-unsigned char mov;
-/* should use 32bit offset instead, but the assembler doesn't
-   like it */
-char *filename;
+unsigned char push;
+signed int filename;
 unsigned char ret;
 unsigned short line;
 } __attribute__((packed));
@@ -25,8 +23,8 @@ struct bug_frame {
 The magic numbers generate mov $64bitimm,%eax ; ret $offset. */
 #define BUG() \
 asm volatile( \
-"ud2 ; .byte 0xa3 ; .quad %c1 ; .byte 0xc2 ; .short %c0" :: \
-"i"(__LINE__), "i" (__stringify(__FILE__)))
+"ud2 ; pushq $%c1 ; ret $%c0" :: \
+"i"(__LINE__), "i" (__FILE__))
 void out_of_line_bug(void);
 #else
 static inline void out_of_line_bug(void) { }
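The new BUG() encoding packs the source location straight into the instruction stream: ud2 traps, and the bytes that follow are a pushq of the file-name address and a ret whose 16-bit immediate is the line number. A minimal sketch of how a trap handler could decode such a frame follows; it is illustrative only (report_bug_frame is a hypothetical helper, not part of this commit), and it assumes the frame at the faulting address is mapped and laid out as struct bug_frame.

/* illustrative sketch, not part of the commit */
static void report_bug_frame(unsigned long rip)
{
	struct bug_frame f;

	memcpy(&f, (void *)rip, sizeof(f));	/* copy the ud2/push/ret frame at the fault address */
	printk(KERN_ALERT "kernel BUG at %s:%u\n",
	       (char *)(long)f.filename,	/* sign-extend the 32-bit offset back to a pointer */
	       f.line);
}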
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h
index 0bc12655fa5..fc2c5a6c262 100644
--- a/include/asm-x86_64/calling.h
+++ b/include/asm-x86_64/calling.h
@@ -65,27 +65,36 @@
 .if \skipr11
 .else
 movq (%rsp),%r11
+CFI_RESTORE r11
 .endif
 .if \skipr8910
 .else
 movq 1*8(%rsp),%r10
+CFI_RESTORE r10
 movq 2*8(%rsp),%r9
+CFI_RESTORE r9
 movq 3*8(%rsp),%r8
+CFI_RESTORE r8
 .endif
 .if \skiprax
 .else
 movq 4*8(%rsp),%rax
+CFI_RESTORE rax
 .endif
 .if \skiprcx
 .else
 movq 5*8(%rsp),%rcx
+CFI_RESTORE rcx
 .endif
 .if \skiprdx
 .else
 movq 6*8(%rsp),%rdx
+CFI_RESTORE rdx
 .endif
 movq 7*8(%rsp),%rsi
+CFI_RESTORE rsi
 movq 8*8(%rsp),%rdi
+CFI_RESTORE rdi
 .if ARG_SKIP+\addskip > 0
 addq $ARG_SKIP+\addskip,%rsp
 CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
@@ -124,11 +133,17 @@

 .macro RESTORE_REST
 movq (%rsp),%r15
+CFI_RESTORE r15
 movq 1*8(%rsp),%r14
+CFI_RESTORE r14
 movq 2*8(%rsp),%r13
+CFI_RESTORE r13
 movq 3*8(%rsp),%r12
+CFI_RESTORE r12
 movq 4*8(%rsp),%rbp
+CFI_RESTORE rbp
 movq 5*8(%rsp),%rbx
+CFI_RESTORE rbx
 addq $REST_SKIP,%rsp
 CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
 .endm
@@ -146,11 +161,3 @@
 .macro icebp
 .byte 0xf1
 .endm
-
-#ifdef CONFIG_FRAME_POINTER
-#define ENTER enter
-#define LEAVE leave
-#else
-#define ENTER
-#define LEAVE
-#endif
diff --git a/include/asm-x86_64/current.h b/include/asm-x86_64/current.h
index 7db560ee6f7..bc8adecee66 100644
--- a/include/asm-x86_64/current.h
+++ b/include/asm-x86_64/current.h
@@ -17,7 +17,7 @@ static inline struct task_struct *get_current(void)
 #else

 #ifndef ASM_OFFSET_H
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
 #endif

 #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index c89b58bebee..68ac3c62fe3 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -8,6 +8,8 @@
 #ifndef __ASSEMBLY__

 #include <linux/string.h>
+#include <linux/smp.h>
+
 #include <asm/segment.h>
 #include <asm/mmu.h>

@@ -191,7 +193,7 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 /*
  * load one particular LDT into the current CPU
  */
-extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
 {
 int count = pc->size;

diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index a416dc31634..54a380efed4 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -17,7 +17,7 @@ extern dma_addr_t bad_dma_address;
 (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))

 void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-unsigned gfp);
+gfp_t gfp);
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 dma_addr_t dma_handle);

@@ -85,6 +85,11 @@ static inline void dma_sync_single_for_device(struct device *hwdev,
 flush_write_buffers();
 }

+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
+dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
+dma_sync_single_for_device(dev, dma_handle, size, dir)
+
 static inline void dma_sync_sg_for_cpu(struct device *hwdev,
 struct scatterlist *sg,
 int nelems, int direction)
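The new dma_sync_single_range_for_{cpu,device} helpers let a driver sync only a sub-range of a streaming mapping; on this port they simply forward to the whole-buffer sync and ignore the offset. A hedged usage sketch (the driver callback, names and sizes are made up, not taken from the commit):

/* illustrative sketch, not part of the commit */
static void rx_complete(struct device *dev, dma_addr_t handle,
			unsigned long offset, size_t len)
{
	/* let the CPU see the bytes the device just DMAed in */
	dma_sync_single_range_for_cpu(dev, handle, offset, len, DMA_FROM_DEVICE);

	/* ... inspect the received data here ... */

	/* hand the buffer back to the device for the next transfer */
	dma_sync_single_range_for_device(dev, handle, offset, len, DMA_FROM_DEVICE);
}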
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index afd4212e860..582757fc036 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -24,6 +24,10 @@
 #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
 #define CFI_OFFSET .cfi_offset
 #define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state

 #else

@@ -36,6 +40,10 @@
 #define CFI_ADJUST_CFA_OFFSET #
 #define CFI_OFFSET #
 #define CFI_REL_OFFSET #
+#define CFI_REGISTER #
+#define CFI_RESTORE #
+#define CFI_REMEMBER_STATE #
+#define CFI_RESTORE_STATE #

 #endif

diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index cf8b16cbe8d..a582cfcf223 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
  * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-extern inline unsigned long fix_to_virt(const unsigned int idx)
+static inline unsigned long fix_to_virt(const unsigned int idx)
 {
 /*
  * this branch gets completely eliminated after inlining,
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 27c381fa1c9..8661b476fb4 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -9,11 +9,12 @@

 #define __ARCH_IRQ_STAT 1

-/* Generate a lvalue for a pda member. Should fix softirq.c instead to use
-special access macros. This would generate better code. */
-#define __IRQ_STAT(cpu,member) (read_pda(me)->member)
+#define local_softirq_pending() read_pda(__softirq_pending)

-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#define __ARCH_SET_SOFTIRQ_PENDING 1
+
+#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
+#define or_softirq_pending(x) or_pda(__softirq_pending, (x))

 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
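With the softirq pending mask now kept in the PDA, generic code can set bits with a single %gs-relative instruction through the new accessors. A hedged sketch of what a caller looks like (raise_softirq_bit is a hypothetical wrapper, not from the commit):

/* illustrative sketch, not part of the commit */
static inline void raise_softirq_bit(unsigned int nr)
{
	/* expands to or_pda(__softirq_pending, ...), a single "or" on %gs-relative data */
	or_softirq_pending(1UL << nr);
}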
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 2b5cb2865d2..dc97668ea0f 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -26,6 +26,7 @@
 struct hw_interrupt_type;
 #endif

+#define NMI_VECTOR 0x02
 /*
  * IDT vectors usable for external interrupt sources start
  * at 0x20:
@@ -50,14 +51,15 @@ struct hw_interrupt_type;
  */
 #define SPURIOUS_APIC_VECTOR 0xff
 #define ERROR_APIC_VECTOR 0xfe
-#define INVALIDATE_TLB_VECTOR 0xfd
-#define RESCHEDULE_VECTOR 0xfc
-#define TASK_MIGRATION_VECTOR 0xfb
-#define CALL_FUNCTION_VECTOR 0xfa
-#define KDB_VECTOR 0xf9
-
-#define THERMAL_APIC_VECTOR 0xf0
+#define RESCHEDULE_VECTOR 0xfd
+#define CALL_FUNCTION_VECTOR 0xfc
+#define KDB_VECTOR 0xfb /* reserved for KDB */
+#define THERMAL_APIC_VECTOR 0xfa
+/* 0xf9 free */
+#define INVALIDATE_TLB_VECTOR_END 0xf8
+#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */

+#define NUM_INVALIDATE_TLB_VECTORS 8

 /*
  * Local APIC timer IRQ vector is on a different priority level,
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 37fc3f149a5..52ff269fe05 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -48,7 +48,7 @@
  * Talk about misusing macros..
  */
 #define __OUT1(s,x) \
-extern inline void out##s(unsigned x value, unsigned short port) {
+static inline void out##s(unsigned x value, unsigned short port) {

 #define __OUT2(s,s1,s2) \
 __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
@@ -58,7 +58,7 @@ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
 __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \

 #define __IN1(s) \
-extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;

 #define __IN2(s,s1,s2) \
 __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
@@ -68,12 +68,12 @@ __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
 __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \

 #define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
 { __asm__ __volatile__ ("rep ; ins" #s \
 : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

 #define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
 { __asm__ __volatile__ ("rep ; outs" #s \
 : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

@@ -110,12 +110,12 @@ __OUTS(l)
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 return __pa(address);
 }

-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 return __va(address);
 }
@@ -130,7 +130,7 @@ extern inline void * phys_to_virt(unsigned long address)

 extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);

-extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 {
 return __ioremap(offset, size, 0);
 }
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index a8babd2bbe8..ee1bc69aec9 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -201,7 +201,7 @@ extern int skip_ioapic_setup;
  */
 #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)

-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 extern int io_apic_get_version (int ioapic);
 extern int io_apic_get_redir_entries (int ioapic);
 extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 5e166b9d3bd..022e9d340ad 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -31,9 +31,20 @@

 static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
 {
-unsigned int icr = APIC_DM_FIXED | shortcut | vector | dest;
-if (vector == KDB_VECTOR)
-icr = (icr & (~APIC_VECTOR_MASK)) | APIC_DM_NMI;
+unsigned int icr = shortcut | dest;
+
+switch (vector) {
+default:
+icr |= APIC_DM_FIXED | vector;
+break;
+case NMI_VECTOR:
+/*
+ * Setup KDB IPI to be delivered as an NMI
+ */
+case KDB_VECTOR:
+icr |= APIC_DM_NMI;
+break;
+}
 return icr;
 }

@@ -66,7 +77,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign
 /*
  * Send the IPI. The write to APIC_ICR fires this off.
  */
-apic_write_around(APIC_ICR, cfg);
+apic_write(APIC_ICR, cfg);
 }


@@ -92,7 +103,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
  * prepare target chip field
  */
 cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
-apic_write_around(APIC_ICR2, cfg);
+apic_write(APIC_ICR2, cfg);

 /*
  * program the ICR
@@ -102,7 +113,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 /*
  * Send the IPI. The write to APIC_ICR fires this off.
  */
-apic_write_around(APIC_ICR, cfg);
+apic_write(APIC_ICR, cfg);
 }
 local_irq_restore(flags);
 }
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index 4482657777b..fb724ba37ae 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -48,10 +48,6 @@ static __inline__ int irq_canonicalize(int irq)
 #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
 #endif

-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(cpumask_t map);
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b90341994d8..f604e84c530 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -46,7 +46,7 @@ extern void die(const char *,struct pt_regs *,long);
 extern void __die(const char *,struct pt_regs *,long);
 extern void show_registers(struct pt_regs *regs);
 extern void dump_pagetable(unsigned long);
-extern void oops_begin(void);
-extern void oops_end(void);
+extern unsigned long oops_begin(void);
+extern void oops_end(unsigned long);

 #endif
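oops_begin() now returns an unsigned long (the saved IRQ state) that the caller must hand back to oops_end(), instead of the old void/void pair. A hedged sketch of the new calling convention (sample_die_path is a hypothetical caller, not from the commit):

/* illustrative sketch, not part of the commit */
static void sample_die_path(struct pt_regs *regs)
{
	unsigned long flags = oops_begin();

	show_registers(regs);
	/* ... report the failure ... */
	oops_end(flags);
}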
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index c954f15c1a7..3e72c41727c 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v)
 :"m" (v->counter));
 }

-static __inline__ void local_add(unsigned long i, local_t *v)
+static __inline__ void local_add(unsigned int i, local_t *v)
 {
 __asm__ __volatile__(
 "addl %1,%0"
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned long i, local_t *v)
 :"ir" (i), "m" (v->counter));
 }

-static __inline__ void local_sub(unsigned long i, local_t *v)
+static __inline__ void local_sub(unsigned int i, local_t *v)
 {
 __asm__ __volatile__(
 "subl %1,%0"
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 768413751b3..b40c661f111 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,7 +12,7 @@

 #include <asm/smp.h>

-#define NODEMAPSIZE 0xff
+#define NODEMAPSIZE 0xfff

 /* Simple perfect hash to map physical addresses to node numbers */
 extern int memnode_shift;
@@ -54,7 +54,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)

 #define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \
 ({ u8 nid__ = pfn_to_nid(pfn); \
-nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); }))
+nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); }))
 #endif

 #define local_mapnr(kvaddr) \
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 331f6a3c72a..f267e10c023 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -179,7 +179,7 @@ extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 extern int pic_mode;

-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 extern void mp_register_lapic (u8 id, u8 enabled);
 extern void mp_register_lapic_address (u64 address);

diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index ba15279a79d..5a7fe3c6c3d 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -29,22 +29,37 @@
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)

 /* wrmsr with exception handling */
 #define wrmsr_safe(msr,a,b) ({ int ret__; \
 asm volatile("2: wrmsr ; xorl %0,%0\n" \
 "1:\n\t" \
 ".section .fixup,\"ax\"\n\t" \
 "3: movl %4,%0 ; jmp 1b\n\t" \
 ".previous\n\t" \
 ".section __ex_table,\"a\"\n" \
 " .align 8\n\t" \
 " .quad 2b,3b\n\t" \
 ".previous" \
 : "=a" (ret__) \
-: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
 ret__; })

 #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))

+#define rdmsr_safe(msr,a,b) \
+({ int ret__; \
+asm volatile ("1: rdmsr\n" \
+"2:\n" \
+".section .fixup,\"ax\"\n" \
+"3: movl %4,%0\n" \
+" jmp 2b\n" \
+".previous\n" \
+".section __ex_table,\"a\"\n" \
+" .align 8\n" \
+" .quad 1b,3b\n" \
+".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\
+:"c"(msr), "i"(-EIO), "0"(0)); \
+ret__; })
+
 #define rdtsc(low,high) \
 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

@@ -64,7 +79,7 @@
 : "=a" (low), "=d" (high) \
 : "c" (counter))

-extern inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
 unsigned int *ecx, unsigned int *edx)
 {
 __asm__("cpuid"
@@ -90,7 +105,7 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
 /*
  * CPUID functions returning a single datum
  */
-extern inline unsigned int cpuid_eax(unsigned int op)
+static inline unsigned int cpuid_eax(unsigned int op)
 {
 unsigned int eax;

@@ -100,7 +115,7 @@ extern inline unsigned int cpuid_eax(unsigned int op)
 : "bx", "cx", "dx");
 return eax;
 }
-extern inline unsigned int cpuid_ebx(unsigned int op)
+static inline unsigned int cpuid_ebx(unsigned int op)
 {
 unsigned int eax, ebx;

@@ -110,7 +125,7 @@ extern inline unsigned int cpuid_ebx(unsigned int op)
 : "cx", "dx" );
 return ebx;
 }
-extern inline unsigned int cpuid_ecx(unsigned int op)
+static inline unsigned int cpuid_ecx(unsigned int op)
 {
 unsigned int eax, ecx;

@@ -120,7 +135,7 @@ extern inline unsigned int cpuid_ecx(unsigned int op)
 : "bx", "dx" );
 return ecx;
 }
-extern inline unsigned int cpuid_edx(unsigned int op)
+static inline unsigned int cpuid_edx(unsigned int op)
 {
 unsigned int eax, edx;

@@ -219,6 +234,7 @@ extern inline unsigned int cpuid_edx(unsigned int op)
 #define MSR_K8_TOP_MEM1 0xC001001A
 #define MSR_K8_TOP_MEM2 0xC001001D
 #define MSR_K8_SYSCFG 0xC0010010
+#define MSR_K8_HWCR 0xC0010015

 /* K6 MSRs */
 #define MSR_K6_EFER 0xC0000080
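rdmsr_safe() mirrors wrmsr_safe(): it evaluates to 0 on success and to a negative error (-EIO here) if the MSR access faults, so callers can probe MSRs that may not exist on a given CPU. A hedged sketch (read_k8_hwcr is a hypothetical helper; MSR_K8_HWCR is the constant added above):

/* illustrative sketch, not part of the commit */
static int read_k8_hwcr(u64 *val)
{
	u32 lo, hi;

	if (rdmsr_safe(MSR_K8_HWCR, lo, hi))	/* non-zero means the rdmsr faulted */
		return -EIO;
	*val = ((u64)hi << 32) | lo;
	return 0;
}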
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index 5c363a1482e..bcf55c3f7f7 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -9,6 +9,7 @@ struct node {
 };

 extern int compute_hash_shift(struct node *nodes, int numnodes);
+extern int pxm_to_node(int nid);

 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))

@@ -16,6 +17,8 @@ extern void numa_add_cpu(int cpu);
 extern void numa_init_array(void);
 extern int numa_off;

+extern unsigned char apicid_to_node[256];
+
 #define NUMA_NO_NODE 0xff

 #endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 135ffaa0393..e5ab4d231f2 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -32,6 +32,8 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__

+extern unsigned long end_pfn;
+
 void clear_page(void *);
 void copy_page(void *, void *);

@@ -111,7 +113,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #ifdef CONFIG_FLATMEM
 #define pfn_to_page(pfn) (mem_map + (pfn))
 #define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define pfn_valid(pfn) ((pfn) < end_pfn)
 #endif

 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 36b766cfc4d..bbf89aa8a1a 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -10,10 +10,8 @@
 struct x8664_pda {
 struct task_struct *pcurrent; /* Current process */
 unsigned long data_offset; /* Per cpu data offset from linker address */
-struct x8664_pda *me; /* Pointer to itself */
 unsigned long kernelstack; /* top of kernel stack for current */
 unsigned long oldrsp; /* user rsp for system call */
-unsigned long irqrsp; /* Old rsp for interrupts. */
 int irqcount; /* Irq nesting counter. Starts with -1 */
 int cpunumber; /* Logical CPU number */
 char *irqstackptr; /* top of irqstack */
@@ -22,7 +20,7 @@ struct x8664_pda {
 struct mm_struct *active_mm;
 int mmu_state;
 unsigned apic_timer_irqs;
-} ____cacheline_aligned;
+} ____cacheline_aligned_in_smp;


 #define IRQSTACK_ORDER 2
@@ -42,13 +40,14 @@ extern void __bad_pda_field(void);
 #define pda_offset(field) offsetof(struct x8664_pda, field)

 #define pda_to_op(op,field,val) do { \
+typedef typeof_field(struct x8664_pda, field) T__; \
 switch (sizeof_field(struct x8664_pda, field)) { \
 case 2: \
-asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 case 4: \
-asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 case 8: \
-asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 default: __bad_pda_field(); \
 } \
 } while (0)
@@ -58,7 +57,7 @@ asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); bre
  * Unfortunately removing them causes all hell to break lose currently.
  */
 #define pda_from_op(op,field) ({ \
-typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+typeof_field(struct x8664_pda, field) ret__; \
 switch (sizeof_field(struct x8664_pda, field)) { \
 case 2: \
 asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
@@ -75,6 +74,7 @@ asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); b
 #define write_pda(field,val) pda_to_op("mov",field,val)
 #define add_pda(field,val) pda_to_op("add",field,val)
 #define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)

 #endif

diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index deadd146978..08cad2482bc 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -18,12 +18,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
 }

-extern __inline__ pmd_t *get_pmd(void)
+static inline pmd_t *get_pmd(void)
 {
 return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }

-extern __inline__ void pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
 free_page((unsigned long)pmd);
@@ -86,13 +86,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 /* Should really implement gc for free page table pages. This could be
 done with a reference count in struct page. */

-extern __inline__ void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(pte_t *pte)
 {
 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
 free_page((unsigned long)pte);
 }

-extern inline void pte_free(struct page *pte)
+static inline void pte_free(struct page *pte)
 {
 __free_page(pte);
 }
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 5e0f2fdab0d..dd8711ecaf2 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -85,7 +85,7 @@ static inline void set_pud(pud_t *dst, pud_t val)
 pud_val(*dst) = pud_val(val);
 }

-extern inline void pud_clear (pud_t *pud)
+static inline void pud_clear (pud_t *pud)
 {
 set_pud(pud, __pud(0));
 }
@@ -95,7 +95,7 @@ static inline void set_pgd(pgd_t *dst, pgd_t val)
 pgd_val(*dst) = pgd_val(val);
 }

-extern inline void pgd_clear (pgd_t * pgd)
+static inline void pgd_clear (pgd_t * pgd)
 {
 set_pgd(pgd, __pgd(0));
 }
@@ -375,7 +375,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 }

 /* Change flags of a PTE */
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 pte_val(pte) &= _PAGE_CHG_MASK;
 pte_val(pte) |= pgprot_val(newprot);
@@ -384,7 +384,7 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 }

 #define pte_index(address) \
-((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
 pte_index(address))

@@ -421,9 +421,6 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

 extern int kern_addr_valid(unsigned long addr);

-#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
-remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 remap_pfn_range(vma, vaddr, pfn, size, prot)

diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index a8321999448..03837d34fba 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -254,7 +254,13 @@ struct thread_struct {
 u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
 } __attribute__((aligned(16)));

-#define INIT_THREAD {}
+#define INIT_THREAD { \
+.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define INIT_TSS { \
+.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}

 #define INIT_MMAP \
 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
@@ -375,13 +381,13 @@ struct extended_sigtable {
 #define ASM_NOP_MAX 8

 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-extern inline void rep_nop(void)
+static inline void rep_nop(void)
 {
 __asm__ __volatile__("rep;nop": : :"memory");
 }

 /* Stop speculative execution */
-extern inline void sync_core(void)
+static inline void sync_core(void)
 {
 int tmp;
 asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 6c813eb521f..dbb37b0adb4 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -8,7 +8,6 @@
 struct cpuinfo_x86;
 struct pt_regs;

-extern void get_cpu_vendor(struct cpuinfo_x86*);
 extern void start_kernel(void);
 extern void pda_init(int);

@@ -75,9 +74,6 @@ extern void acpi_reserve_bootmem(void);

 extern void swap_low_mappings(void);

-extern void oops_begin(void);
-extern void die(const char *,struct pt_regs *,long);
-extern void __die(const char * str, struct pt_regs * regs, long err);
 extern void __show_regs(struct pt_regs * regs);
 extern void show_regs(struct pt_regs * regs);

@@ -94,8 +90,6 @@ extern int unhandled_signal(struct task_struct *tsk, int sig);
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void swiotlb_init(void);

-extern unsigned long max_mapnr;
-extern unsigned long end_pfn;
 extern unsigned long table_start, table_end;

 extern int exception_trace;
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
index fe9b96d9481..f8d55798535 100644
--- a/include/asm-x86_64/signal.h
+++ b/include/asm-x86_64/signal.h
@@ -143,23 +143,23 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 #if 0

-extern __inline__ void sigaddset(sigset_t *set, int _sig)
+static inline void sigaddset(sigset_t *set, int _sig)
 {
 __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }

-extern __inline__ void sigdelset(sigset_t *set, int _sig)
+static inline void sigdelset(sigset_t *set, int _sig)
 {
 __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }

-extern __inline__ int __const_sigismember(sigset_t *set, int _sig)
+static inline int __const_sigismember(sigset_t *set, int _sig)
 {
 unsigned long sig = _sig - 1;
 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
 }

-extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
+static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
 int ret;
 __asm__("btq %2,%1\n\tsbbq %0,%0"
@@ -172,7 +172,7 @@ extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
 __const_sigismember((set),(sig)) : \
 __gen_sigismember((set),(sig)))

-extern __inline__ int sigfindinword(unsigned long word)
+static inline int sigfindinword(unsigned long word)
 {
 __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
 return word;
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index de8b57b2b62..c57ce407134 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -72,7 +72,7 @@ static inline int num_booting_cpus(void)

 #define raw_smp_processor_id() read_pda(cpunumber)

-extern __inline int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
 /* we don't want to mark this access volatile - bad code generation */
 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
@@ -81,6 +81,7 @@ extern __inline int hard_smp_processor_id(void)
 extern int safe_smp_processor_id(void);
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
+extern void prefill_possible_map(void);

 #endif /* !ASSEMBLY */

diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5aeb57a3baa..69636831ad2 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -6,47 +6,21 @@
 #include <asm/page.h>
 #include <linux/config.h>

-extern int printk(const char * fmt, ...)
-__attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */

-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) \
+(*(volatile signed char *)(&(x)->slock) <= 0)

-#define spin_lock_string \
+#define __raw_spin_lock_string \
 "\n1:\t" \
 "lock ; decb %0\n\t" \
 "js 2f\n" \
@@ -58,74 +32,40 @@ typedef struct {
 "jmp 1b\n" \
 LOCK_SECTION_END

-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
 "movb $1,%0" \
-:"=m" (lock->lock) : : "memory"
-
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-BUG_ON(lock->magic != SPINLOCK_MAGIC);
-assert_spin_locked(lock);
-#endif
-__asm__ __volatile__(
-spin_unlock_string
-);
-}
-
-#else
-
-#define spin_unlock_string \
-"xchgb %b0, %1" \
-:"=q" (oldval), "=m" (lock->lock) \
-:"0" (oldval) : "memory"
+:"=m" (lock->slock) : : "memory"

-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-BUG_ON(lock->magic != SPINLOCK_MAGIC);
-assert_spin_locked(lock);
-#endif
 __asm__ __volatile__(
-spin_unlock_string
-);
+__raw_spin_lock_string
+:"=m" (lock->slock) : : "memory");
 }

-#endif
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 char oldval;
+
 __asm__ __volatile__(
 "xchgb %b0,%1"
-:"=q" (oldval), "=m" (lock->lock)
+:"=q" (oldval), "=m" (lock->slock)
 :"0" (0) : "memory");
+
 return oldval > 0;
 }

-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-if (lock->magic != SPINLOCK_MAGIC) {
-printk("eip: %p\n", __builtin_return_address(0));
-BUG();
-}
-#endif
 __asm__ __volatile__(
-spin_lock_string
-:"=m" (lock->lock) : : "memory");
+__raw_spin_unlock_string
+);
 }

+#define __raw_spin_unlock_wait(lock) \
+do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

 /*
  * Read-write spinlocks, allowing multiple readers
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(x) ((int)(x)->lock > 0)
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  *
@@ -170,29 +84,24 @@ typedef struct {
  *
  * Changed to use the same technique as rw semaphores. See
  * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */

-static inline void _raw_read_lock(rwlock_t *rw)
+#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 __build_read_lock(rw, "__read_lock_failed");
 }

-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 __build_write_lock(rw, "__write_lock_failed");
 }

-#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 atomic_t *count = (atomic_t *)lock;
 atomic_dec(count);
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
 return 0;
 }

-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 atomic_t *count = (atomic_t *)lock;
 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
 return 0;
 }

+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
126 | } | ||
127 | |||
128 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
129 | { | ||
130 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" | ||
131 | : "=m" (rw->lock) : : "memory"); | ||
132 | } | ||
133 | |||
214 | #endif /* __ASM_SPINLOCK_H */ | 134 | #endif /* __ASM_SPINLOCK_H */ |
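The spinlock.h hunk above replaces the old _raw_spin_* helpers (and their CONFIG_DEBUG_SPINLOCK magic checks) with the new __raw_spin_* interface operating on raw_spinlock_t and its slock field. As a rough userspace model of what the xchgb-based trylock and the "movb $1" unlock do, here is a sketch using GCC's __atomic builtins; it is only an illustration of the locking protocol, not the kernel code, and all names are made up:

/* Userspace sketch (not kernel code) of the xchgb trylock / movb unlock idea:
 * slock starts at 1 (unlocked); atomically swapping in 0 returns the old
 * value, so a result > 0 means we took the lock. */
#include <stdio.h>

typedef struct { volatile unsigned char slock; } sketch_spinlock_t;

static int sketch_trylock(sketch_spinlock_t *lock)
{
        unsigned char old = __atomic_exchange_n(&lock->slock, 0, __ATOMIC_ACQUIRE);
        return old > 0;                 /* 1 -> we got it, 0 -> already held */
}

static void sketch_unlock(sketch_spinlock_t *lock)
{
        __atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);   /* like "movb $1,%0" */
}

int main(void)
{
        sketch_spinlock_t l = { 1 };
        printf("first trylock:  %d\n", sketch_trylock(&l));    /* 1 */
        printf("second trylock: %d\n", sketch_trylock(&l));    /* 0: still held */
        sketch_unlock(&l);
        printf("after unlock:   %d\n", sketch_trylock(&l));    /* 1 */
        return 0;
}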
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h new file mode 100644 index 00000000000..59efe849f35 --- /dev/null +++ b/include/asm-x86_64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
19 | |||
20 | #endif | ||
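The new spinlock_types.h keeps the rwlock as the biased 32-bit counter described in the spinlock.h comments ("a 32-bit counter with the high bit (sign) being the contended bit"). A userspace sketch of that counter scheme follows; it mirrors the __raw_read_trylock/__raw_write_trylock logic above but is not the kernel implementation, and it assumes the conventional x86 RW_LOCK_BIAS value of 0x01000000:

/* Sketch: readers subtract 1, a writer subtracts the whole bias, so the count
 * only stays non-negative (reader) or reaches exactly 0 (writer) when the
 * lock was actually free; otherwise the caller backs its change out. */
#include <stdio.h>

#define SKETCH_RW_LOCK_BIAS 0x01000000          /* assumed value, for illustration */

typedef struct { volatile int lock; } sketch_rwlock_t;

static int sketch_read_trylock(sketch_rwlock_t *rw)
{
        int v = __atomic_sub_fetch(&rw->lock, 1, __ATOMIC_ACQUIRE);
        if (v >= 0)
                return 1;                                   /* no writer present */
        __atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELEASE); /* back out */
        return 0;
}

static int sketch_write_trylock(sketch_rwlock_t *rw)
{
        int v = __atomic_sub_fetch(&rw->lock, SKETCH_RW_LOCK_BIAS, __ATOMIC_ACQUIRE);
        if (v == 0)
                return 1;                                   /* lock was completely free */
        __atomic_add_fetch(&rw->lock, SKETCH_RW_LOCK_BIAS, __ATOMIC_RELEASE);
        return 0;
}

int main(void)
{
        sketch_rwlock_t rw = { SKETCH_RW_LOCK_BIAS };
        printf("read trylock:            %d\n", sketch_read_trylock(&rw));  /* 1 */
        printf("write trylock while read: %d\n", sketch_write_trylock(&rw)); /* 0 */
        return 0;
}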
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h index 36293061f4e..7cbfd10ecc3 100644 --- a/include/asm-x86_64/swiotlb.h +++ b/include/asm-x86_64/swiotlb.h | |||
@@ -27,7 +27,7 @@ extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | |||
27 | int nents, int direction); | 27 | int nents, int direction); |
28 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); | 28 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); |
29 | extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size, | 29 | extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size, |
30 | dma_addr_t *dma_handle, int flags); | 30 | dma_addr_t *dma_handle, gfp_t flags); |
31 | extern void swiotlb_free_coherent (struct device *hwdev, size_t size, | 31 | extern void swiotlb_free_coherent (struct device *hwdev, size_t size, |
32 | void *vaddr, dma_addr_t dma_handle); | 32 | void *vaddr, dma_addr_t dma_handle); |
33 | 33 | ||
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index 8606e170a7d..85348e02ad2 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h | |||
@@ -188,7 +188,7 @@ static inline void write_cr4(unsigned long val) | |||
188 | 188 | ||
189 | #define __xg(x) ((volatile long *)(x)) | 189 | #define __xg(x) ((volatile long *)(x)) |
190 | 190 | ||
191 | extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | 191 | static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) |
192 | { | 192 | { |
193 | *ptr = val; | 193 | *ptr = val; |
194 | } | 194 | } |
@@ -253,19 +253,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
253 | case 2: | 253 | case 2: |
254 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | 254 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" |
255 | : "=a"(prev) | 255 | : "=a"(prev) |
256 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 256 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
257 | : "memory"); | 257 | : "memory"); |
258 | return prev; | 258 | return prev; |
259 | case 4: | 259 | case 4: |
260 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" | 260 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" |
261 | : "=a"(prev) | 261 | : "=a"(prev) |
262 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 262 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
263 | : "memory"); | 263 | : "memory"); |
264 | return prev; | 264 | return prev; |
265 | case 8: | 265 | case 8: |
266 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" | 266 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" |
267 | : "=a"(prev) | 267 | : "=a"(prev) |
268 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 268 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
269 | : "memory"); | 269 | : "memory"); |
270 | return prev; | 270 | return prev; |
271 | } | 271 | } |
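The system.h hunk only loosens the inline-asm constraint for the new value from "q" to "r", presumably so GCC is not needlessly restricted in register choice for the 16/32/64-bit cmpxchg cases; the return-the-previous-value semantics are unchanged. As a userspace model of those semantics (illustration only, using the __atomic builtin rather than the kernel's locked cmpxchg asm):

/* sketch_cmpxchg returns the previous value, so callers compare it against
 * 'old' to see whether the swap actually happened. */
#include <stdio.h>

static unsigned long sketch_cmpxchg(volatile unsigned long *ptr,
                                    unsigned long old, unsigned long newval)
{
        unsigned long prev = old;
        __atomic_compare_exchange_n(ptr, &prev, newval, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return prev;            /* previous value, like the "=a"(prev) output */
}

int main(void)
{
        volatile unsigned long v = 5;
        unsigned long prev;

        prev = sketch_cmpxchg(&v, 5, 7);
        printf("cmpxchg 5->7: prev=%lu v=%lu\n", prev, (unsigned long)v); /* prev=5, v=7 */
        prev = sketch_cmpxchg(&v, 5, 9);
        printf("cmpxchg 5->9: prev=%lu v=%lu\n", prev, (unsigned long)v); /* prev=7, v=7 */
        return 0;
}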
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h index 24ecf6a637c..f971f45d6d7 100644 --- a/include/asm-x86_64/timex.h +++ b/include/asm-x86_64/timex.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #ifndef _ASMx8664_TIMEX_H | 6 | #ifndef _ASMx8664_TIMEX_H |
7 | #define _ASMx8664_TIMEX_H | 7 | #define _ASMx8664_TIMEX_H |
8 | 8 | ||
9 | #include <linux/config.h> | ||
10 | #include <asm/8253pit.h> | 9 | #include <asm/8253pit.h> |
11 | #include <asm/msr.h> | 10 | #include <asm/msr.h> |
12 | #include <asm/vsyscall.h> | 11 | #include <asm/vsyscall.h> |
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h index 505b0cf906d..4a9c20ea9b1 100644 --- a/include/asm-x86_64/tlbflush.h +++ b/include/asm-x86_64/tlbflush.h | |||
@@ -109,6 +109,10 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st | |||
109 | #define TLBSTATE_OK 1 | 109 | #define TLBSTATE_OK 1 |
110 | #define TLBSTATE_LAZY 2 | 110 | #define TLBSTATE_LAZY 2 |
111 | 111 | ||
112 | /* Roughly an IPI every 20MB with 4k pages for freeing page table | ||
113 | ranges. Cost is about 42k of memory for each CPU. */ | ||
114 | #define ARCH_FREE_PTE_NR 5350 | ||
115 | |||
112 | #endif | 116 | #endif |
113 | 117 | ||
114 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() | 118 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() |
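The figures in the new ARCH_FREE_PTE_NR comment follow directly from the constant, assuming (as the comment implies) that it sizes the per-CPU page batch used when freeing page-table ranges:

5350 entries x 4096 bytes/page    ~= 21.9 MB of mappings freed per flush IPI ("roughly an IPI every 20MB")
5350 entries x 8 bytes/pointer    ~= 42.8 KB of batch storage per CPU ("about 42k of memory for each CPU")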
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index c1bc3fad482..1c603cd7e4d 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h | |||
@@ -13,7 +13,6 @@ | |||
13 | extern cpumask_t cpu_online_map; | 13 | extern cpumask_t cpu_online_map; |
14 | 14 | ||
15 | extern unsigned char cpu_to_node[]; | 15 | extern unsigned char cpu_to_node[]; |
16 | extern unsigned char pci_bus_to_node[]; | ||
17 | extern cpumask_t node_to_cpumask[]; | 16 | extern cpumask_t node_to_cpumask[]; |
18 | 17 | ||
19 | #ifdef CONFIG_ACPI_NUMA | 18 | #ifdef CONFIG_ACPI_NUMA |
@@ -26,7 +25,7 @@ extern int __node_distance(int, int); | |||
26 | #define parent_node(node) (node) | 25 | #define parent_node(node) (node) |
27 | #define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) | 26 | #define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) |
28 | #define node_to_cpumask(node) (node_to_cpumask[node]) | 27 | #define node_to_cpumask(node) (node_to_cpumask[node]) |
29 | #define pcibus_to_node(bus) pci_bus_to_node[(bus)->number] | 28 | #define pcibus_to_node(bus) ((long)(bus->sysdata)) |
30 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); | 29 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); |
31 | 30 | ||
32 | /* sched_domains SD_NODE_INIT for x86_64 machines */ | 31 | /* sched_domains SD_NODE_INIT for x86_64 machines */ |
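The topology.h hunk drops the pci_bus_to_node[] lookup table; pcibus_to_node() now presumably expects the PCI probing code to have stored the NUMA node number directly in the bus' sysdata pointer, encoded as an integer. A hypothetical userspace illustration of that pointer-as-integer encoding (names invented, not kernel API):

#include <stdio.h>

struct sketch_pci_bus {
        void *sysdata;                  /* node number smuggled in the pointer */
};

#define sketch_pcibus_to_node(bus) ((long)((bus)->sysdata))

int main(void)
{
        struct sketch_pci_bus bus = { .sysdata = (void *)2L };  /* "node 2" */
        printf("bus is on node %ld\n", sketch_pcibus_to_node(&bus));
        return 0;
}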
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index 2872da23fc7..438a3f52f83 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h | |||
@@ -29,7 +29,6 @@ enum vsyscall_num { | |||
29 | 29 | ||
30 | struct vxtime_data { | 30 | struct vxtime_data { |
31 | long hpet_address; /* HPET base address */ | 31 | long hpet_address; /* HPET base address */ |
32 | unsigned long hz; /* HPET clocks / sec */ | ||
33 | int last; | 32 | int last; |
34 | unsigned long last_tsc; | 33 | unsigned long last_tsc; |
35 | long quot; | 34 | long quot; |