Diffstat (limited to 'include/asm-i386')
29 files changed, 406 insertions, 352 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 20f523954218..6016632d032f 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -131,21 +131,7 @@ static inline void disable_acpi(void)
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int skip_ioapic_setup;
 extern int acpi_skip_timer_override;
-
-static inline void disable_ioapic_setup(void)
-{
-        skip_ioapic_setup = 1;
-}
-
-static inline int ioapic_setup_disabled(void)
-{
-        return skip_ioapic_setup;
-}
-
-#else
-static inline void disable_ioapic_setup(void) { }
 #endif
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i
new file mode 100644
index 000000000000..6c47e3b9484b
--- /dev/null
+++ b/include/asm-i386/alternative-asm.i
@@ -0,0 +1,14 @@
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+        .macro LOCK_PREFIX
+1:      lock
+        .section .smp_locks,"a"
+        .align 4
+        .long 1b
+        .previous
+        .endm
+#else
+        .macro LOCK_PREFIX
+        .endm
+#endif
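On SMP this macro emits a lock prefix and records its address in the .smp_locks section, so a kernel booted on a single CPU can patch the prefixes out. For comparison, a sketch of what the C-side twin in <asm/alternative.h> looks like; the exact string below is an assumption for illustration, not a quote from that header:

/* Assumed shape of the C-side LOCK_PREFIX: emit "lock" at a local
 * label and record that label's address in .smp_locks so the
 * alternatives code can NOP it out on uniprocessor boots. */
#define LOCK_PREFIX \
        ".section .smp_locks,\"a\"\n" \
        "  .align 4\n" \
        "  .long 661f\n" /* address of the lock prefix below */ \
        ".previous\n" \
        "661:\n\tlock; "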
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 2c1e371cebb6..3a42b7d6fc92 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -16,20 +16,8 @@
 #define APIC_VERBOSE 1
 #define APIC_DEBUG   2
 
-extern int enable_local_apic;
 extern int apic_verbosity;
 
-static inline void lapic_disable(void)
-{
-        enable_local_apic = -1;
-        clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-}
-
-static inline void lapic_enable(void)
-{
-        enable_local_apic = 1;
-}
-
 /*
  * Define the default level of output to be very little
  * This can be turned up by using apic=verbose for more
@@ -42,6 +30,8 @@ static inline void lapic_enable(void)
 } while (0)
 
 
+extern void generic_apic_probe(void);
+
 #ifdef CONFIG_X86_LOCAL_APIC
 
 /*
@@ -117,8 +107,6 @@ extern void enable_APIC_timer(void);
 
 extern void enable_NMI_through_LVT0 (void * dummy);
 
-extern int disable_timer_pin_1;
-
 void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
 void switch_APIC_timer_to_ipi(void *cpumask);
 void switch_ipi_to_APIC_timer(void *cpumask);
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 89b8b82c82b3..5874ef119ffd 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -33,50 +33,99 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
         return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct desc_struct idt_table[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+static inline void pack_descriptor(__u32 *a, __u32 *b,
+        unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
+{
+        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
+        *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
+                (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
+}
+
+static inline void pack_gate(__u32 *a, __u32 *b,
+        unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
+{
+        *a = (seg << 16) | (base & 0xffff);
+        *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
+}
+
+#define DESCTYPE_LDT    0x82    /* present, system, DPL-0, LDT */
+#define DESCTYPE_TSS    0x89    /* present, system, DPL-0, 32-bit TSS */
+#define DESCTYPE_TASK   0x85    /* present, system, DPL-0, task gate */
+#define DESCTYPE_INT    0x8e    /* present, system, DPL-0, interrupt gate */
+#define DESCTYPE_TRAP   0x8f    /* present, system, DPL-0, trap gate */
+#define DESCTYPE_DPL3   0x60    /* DPL-3 */
+#define DESCTYPE_S      0x10    /* !system */
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
 #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
 #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
+#if TLS_SIZE != 24
+# error update this code.
+#endif
 
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-        "movw %w1,2(%2)\n\t" \
-        "rorl $16,%1\n\t" \
-        "movb %b1,4(%2)\n\t" \
-        "movb %4,5(%2)\n\t" \
-        "movb $0,6(%2)\n\t" \
-        "movb %h1,7(%2)\n\t" \
-        "rorl $16,%1" \
-        : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-        _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-                offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+        C(0); C(1); C(2);
+#undef C
 }
 
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+{
+        __u32 *lp = (__u32 *)((char *)dt + entry*8);
+        *lp = entry_a;
+        *(lp+1) = entry_b;
+}
+
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+        __u32 a, b;
+        pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+        write_idt_entry(idt_table, gate, a, b);
+}
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
 {
-        _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+        __u32 a, b;
+        pack_descriptor(&a, &b, (unsigned long)addr,
+                offsetof(struct tss_struct, __cacheline_filler) - 1,
+                DESCTYPE_TSS, 0);
+        write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+{
+        __u32 a, b;
+        pack_descriptor(&a, &b, (unsigned long)addr,
+                entries * sizeof(struct desc_struct) - 1,
+                DESCTYPE_LDT, 0);
+        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
 #define LDT_entry_a(info) \
         ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 
@@ -102,24 +151,6 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
         (info)->seg_not_present == 1 && \
         (info)->useable == 0 )
 
-static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-{
-        __u32 *lp = (__u32 *)((char *)ldt + entry*8);
-        *lp = entry_a;
-        *(lp+1) = entry_b;
-}
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-        C(0); C(1); C(2);
-#undef C
-}
-
 static inline void clear_LDT(void)
 {
         int cpu = get_cpu();
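The open-coded _set_tssldt_desc() asm is gone in favor of pack_descriptor() plus write_gdt_entry(). A standalone userspace re-implementation shows the two descriptor words the helper produces; the base and limit values below are arbitrary example inputs:

#include <stdio.h>

typedef unsigned int u32;

/* Same bit-packing as the pack_descriptor() added in the diff above. */
static void pack_descriptor(u32 *a, u32 *b, unsigned long base,
                            unsigned long limit, unsigned char type,
                            unsigned char flags)
{
        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
        *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
             (limit & 0x000f0000) | ((type & 0xff) << 8) |
             ((flags & 0xf) << 20);
}

int main(void)
{
        u32 a, b;
        /* 0x89 = DESCTYPE_TSS: present, system, DPL-0, 32-bit TSS */
        pack_descriptor(&a, &b, 0xc0101000UL, 0x67, 0x89, 0);
        printf("GDT words: low=%08x high=%08x\n", a, b);
        return 0;
}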
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
index 2280f6272f80..6d66398a307d 100644
--- a/include/asm-i386/dwarf2.h
+++ b/include/asm-i386/dwarf2.h
@@ -1,8 +1,6 @@
 #ifndef _DWARF2_H
 #define _DWARF2_H
 
-#include <linux/config.h>
-
 #ifndef __ASSEMBLY__
 #warning "asm/dwarf2.h should be only included in pure assembly files"
 #endif
@@ -28,6 +26,13 @@
 #define CFI_RESTORE .cfi_restore
 #define CFI_REMEMBER_STATE .cfi_remember_state
 #define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME .cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
 
 #else
 
@@ -48,6 +53,8 @@
 #define CFI_RESTORE ignore
 #define CFI_REMEMBER_STATE ignore
 #define CFI_RESTORE_STATE ignore
+#define CFI_UNDEFINED ignore
+#define CFI_SIGNAL_FRAME ignore
 
 #endif
 
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index ca82acb8cb1f..f7514fb6e8e4 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -18,7 +18,7 @@
 
 #define E820_RAM        1
 #define E820_RESERVED   2
-#define E820_ACPI       3 /* usable as RAM once ACPI tables have been read */
+#define E820_ACPI       3
 #define E820_NVS        4
 
 #define HIGH_MEMORY     (1024*1024)
diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i
new file mode 100644
index 000000000000..4d68ddce18b6
--- /dev/null
+++ b/include/asm-i386/frame.i
@@ -0,0 +1,24 @@
+#include <linux/config.h>
+#include <asm/dwarf2.h>
+
+/* The annotation hides the frame from the unwinder and makes it look
+   like a ordinary ebp save/restore. This avoids some special cases for
+   frame pointer later */
+#ifdef CONFIG_FRAME_POINTER
+        .macro FRAME
+        pushl %ebp
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET ebp,0
+        movl %esp,%ebp
+        .endm
+        .macro ENDFRAME
+        popl %ebp
+        CFI_ADJUST_CFA_OFFSET -4
+        CFI_RESTORE ebp
+        .endm
+#else
+        .macro FRAME
+        .endm
+        .macro ENDFRAME
+        .endm
+#endif
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index b3783a32abee..8ffbb0f07457 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_GENAPIC_H
 #define _ASM_GENAPIC_H 1
 
+#include <asm/mpspec.h>
+
 /*
  * Generic APIC driver interface.
  *
@@ -63,14 +65,25 @@ struct genapic {
         unsigned (*get_apic_id)(unsigned long x);
         unsigned long apic_id_mask;
         unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
 
+#ifdef CONFIG_SMP
         /* ipi */
         void (*send_IPI_mask)(cpumask_t mask, int vector);
         void (*send_IPI_allbutself)(int vector);
         void (*send_IPI_all)(int vector);
+#endif
 };
 
-#define APICFUNC(x) .x = x
+#define APICFUNC(x) .x = x,
+
+/* More functions could be probably marked IPIFUNC and save some space
+   in UP GENERICARCH kernels, but I don't have the nerve right now
+   to untangle this mess. -AK */
+#ifdef CONFIG_SMP
+#define IPIFUNC(x) APICFUNC(x)
+#else
+#define IPIFUNC(x)
+#endif
 
 #define APIC_INIT(aname, aprobe) { \
         .name = aname, \
@@ -80,33 +93,33 @@ struct genapic {
         .no_balance_irq = NO_BALANCE_IRQ, \
         .ESR_DISABLE = esr_disable, \
         .apic_destination_logical = APIC_DEST_LOGICAL, \
-        APICFUNC(apic_id_registered), \
-        APICFUNC(target_cpus), \
-        APICFUNC(check_apicid_used), \
-        APICFUNC(check_apicid_present), \
-        APICFUNC(init_apic_ldr), \
-        APICFUNC(ioapic_phys_id_map), \
-        APICFUNC(clustered_apic_check), \
-        APICFUNC(multi_timer_check), \
-        APICFUNC(apicid_to_node), \
-        APICFUNC(cpu_to_logical_apicid), \
-        APICFUNC(cpu_present_to_apicid), \
-        APICFUNC(apicid_to_cpu_present), \
-        APICFUNC(mpc_apic_id), \
-        APICFUNC(setup_portio_remap), \
-        APICFUNC(check_phys_apicid_present), \
-        APICFUNC(mpc_oem_bus_info), \
-        APICFUNC(mpc_oem_pci_bus), \
-        APICFUNC(mps_oem_check), \
-        APICFUNC(get_apic_id), \
+        APICFUNC(apic_id_registered) \
+        APICFUNC(target_cpus) \
+        APICFUNC(check_apicid_used) \
+        APICFUNC(check_apicid_present) \
+        APICFUNC(init_apic_ldr) \
+        APICFUNC(ioapic_phys_id_map) \
+        APICFUNC(clustered_apic_check) \
+        APICFUNC(multi_timer_check) \
+        APICFUNC(apicid_to_node) \
+        APICFUNC(cpu_to_logical_apicid) \
+        APICFUNC(cpu_present_to_apicid) \
+        APICFUNC(apicid_to_cpu_present) \
+        APICFUNC(mpc_apic_id) \
+        APICFUNC(setup_portio_remap) \
+        APICFUNC(check_phys_apicid_present) \
+        APICFUNC(mpc_oem_bus_info) \
+        APICFUNC(mpc_oem_pci_bus) \
+        APICFUNC(mps_oem_check) \
+        APICFUNC(get_apic_id) \
         .apic_id_mask = APIC_ID_MASK, \
-        APICFUNC(cpu_mask_to_apicid), \
-        APICFUNC(acpi_madt_oem_check), \
-        APICFUNC(send_IPI_mask), \
-        APICFUNC(send_IPI_allbutself), \
-        APICFUNC(send_IPI_all), \
-        APICFUNC(enable_apic_mode), \
-        APICFUNC(phys_pkg_id), \
+        APICFUNC(cpu_mask_to_apicid) \
+        APICFUNC(acpi_madt_oem_check) \
+        IPIFUNC(send_IPI_mask) \
+        IPIFUNC(send_IPI_allbutself) \
+        IPIFUNC(send_IPI_all) \
+        APICFUNC(enable_apic_mode) \
+        APICFUNC(phys_pkg_id) \
 }
 
 extern struct genapic *genapic;
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
index 134ea9cc5283..b52cd60a075b 100644
--- a/include/asm-i386/intel_arch_perfmon.h
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -14,6 +14,18 @@
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL   (0x3c)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+        (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+union cpuid10_eax {
+        struct {
+                unsigned int version_id:8;
+                unsigned int num_counters:8;
+                unsigned int bit_width:8;
+                unsigned int mask_length:8;
+        } split;
+        unsigned int full;
+};
 
 #endif /* X86_INTEL_ARCH_PERFMON_H */
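The new union decodes EAX of CPUID leaf 0xA, the architectural performance monitoring leaf. A userspace sketch of reading it; the cpuid inline asm is the illustration's own, and it assumes a CPU that implements leaf 0xA:

#include <stdio.h>

union cpuid10_eax {            /* as defined in the header above */
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

int main(void)
{
        union cpuid10_eax eax;
        unsigned int ebx, ecx, edx;

        /* CPUID leaf 0xA: architectural performance monitoring */
        asm volatile("cpuid"
                     : "=a" (eax.full), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     : "a" (0xa));
        printf("perfmon version %u, %u counters, %u bits wide\n",
               eax.split.version_id, eax.split.num_counters,
               eax.split.bit_width);
        return 0;
}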
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 5092e819b8a2..5d309275a1dc 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -188,6 +188,16 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
+static inline void disable_ioapic_setup(void)
+{
+        skip_ioapic_setup = 1;
+}
+
+static inline int ioapic_setup_disabled(void)
+{
+        return skip_ioapic_setup;
+}
+
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -206,6 +216,7 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 
 #else /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
+static inline void disable_ioapic_setup(void) { }
 #endif
 
 extern int assign_irq_vector(int irq);
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 53f0e06672dc..4dfc9f5ed031 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -1,6 +1,26 @@
 #ifndef _I386_KEXEC_H
 #define _I386_KEXEC_H
 
+#define PA_CONTROL_PAGE  0
+#define VA_CONTROL_PAGE  1
+#define PA_PGD           2
+#define VA_PGD           3
+#define PA_PTE_0         4
+#define VA_PTE_0         5
+#define PA_PTE_1         6
+#define VA_PTE_1         7
+#ifdef CONFIG_X86_PAE
+#define PA_PMD_0         8
+#define VA_PMD_0         9
+#define PA_PMD_1         10
+#define VA_PMD_1         11
+#define PAGES_NR         12
+#else
+#define PAGES_NR         8
+#endif
+
+#ifndef __ASSEMBLY__
+
 #include <asm/fixmap.h>
 #include <asm/ptrace.h>
 #include <asm/string.h>
@@ -72,5 +92,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
                 newregs->eip = (unsigned long)current_text_addr();
         }
 }
+asmlinkage NORET_TYPE void
+relocate_kernel(unsigned long indirection_page,
+                unsigned long control_page,
+                unsigned long start_address,
+                unsigned int has_pae) ATTRIB_NORET;
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _I386_KEXEC_H */
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index b5f3f0d0b2bc..26333685a7fb 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -123,9 +123,13 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
+#ifdef CONFIG_SMP
         if (cpu >= NR_CPUS)
                 return BAD_APICID;
         return (int)cpu_2_logical_apicid[cpu];
+#else
+        return logical_smp_processor_id();
+#endif
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 9fd073286289..a81b05961595 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -46,10 +46,12 @@ extern u8 cpu_2_logical_apicid[];
 static inline void init_apic_ldr(void)
 {
         unsigned long val, id;
-        int i, count;
-        u8 lid;
+        int count = 0;
         u8 my_id = (u8)hard_smp_processor_id();
         u8 my_cluster = (u8)apicid_cluster(my_id);
+#ifdef CONFIG_SMP
+        u8 lid;
+        int i;
 
         /* Create logical APIC IDs by counting CPUs already in cluster. */
         for (count = 0, i = NR_CPUS; --i >= 0; ) {
@@ -57,6 +59,7 @@ static inline void init_apic_ldr(void)
                 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
                         ++count;
         }
+#endif
         /* We only have a 4 wide bitmap in cluster mode.  If a deranged
          * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
         BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
@@ -91,9 +94,13 @@ static inline int apicid_to_node(int logical_apicid)
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
+#ifdef CONFIG_SMP
         if (cpu >= NR_CPUS)
                 return BAD_APICID;
         return (int)cpu_2_logical_apicid[cpu];
+#else
+        return logical_smp_processor_id();
+#endif
 }
 
 static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 05a538531229..7a17d9e58ad6 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -30,14 +30,10 @@ do { \
                                                                 \
         __asm__ __volatile__(                                   \
                 LOCK_PREFIX "   decl (%%eax)    \n"             \
-                "   js 2f               \n"                     \
+                "   jns 1f              \n"                     \
+                "   call "#fail_fn"     \n"                     \
                 "1:                     \n"                     \
                                                                 \
-                LOCK_SECTION_START("")                          \
-                "2: call "#fail_fn"     \n"                     \
-                "   jmp 1b              \n"                     \
-                LOCK_SECTION_END                                \
-                                                                \
                 :"=a" (dummy)                                   \
                 : "a" (count)                                   \
                 : "memory", "ecx", "edx");                      \
@@ -86,14 +82,10 @@ do { \
                                                                 \
         __asm__ __volatile__(                                   \
                 LOCK_PREFIX "   incl (%%eax)    \n"             \
-                "   jle 2f              \n"                     \
+                "   jg 1f               \n"                     \
+                "   call "#fail_fn"     \n"                     \
                 "1:                     \n"                     \
                                                                 \
-                LOCK_SECTION_START("")                          \
-                "2: call "#fail_fn"     \n"                     \
-                "   jmp 1b              \n"                     \
-                LOCK_SECTION_END                                \
-                                                                \
                 :"=a" (dummy)                                   \
                 : "a" (count)                                   \
                 : "memory", "ecx", "edx");                      \
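The rewrite drops the out-of-line LOCK_SECTION slow path: the uncontended path now falls through a conditional branch straight over an inline call. A 32-bit userspace sketch of the same shape, where my_slowpath is a hypothetical stand-in for the fail_fn hook and not part of any kernel API:

#include <stdio.h>

static int count = 1;

void my_slowpath(void)          /* hypothetical stand-in for fail_fn */
{
        puts("contended: slow path taken");
}

static void sketch_lock(void)
{
        /* Same pattern as the new fastpath above: atomic decrement,
         * branch over the call when the count stayed non-negative. */
        asm volatile(
                "lock; decl %0\n\t"
                "jns 1f\n\t"            /* uncontended: skip the call */
                "call my_slowpath\n"    /* contended: inline slow path */
                "1:"
                : "+m" (count) : : "memory", "eax", "ecx", "edx");
}

int main(void)
{
        sketch_lock();          /* 1 -> 0: fast path, no output */
        sketch_lock();          /* 0 -> -1: prints the message */
        return 0;
}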
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 67d994799999..303bcd4592bb 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -6,32 +6,29 @@
 
 #include <linux/pm.h>
 
-struct pt_regs;
-
-typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-
 /**
- * set_nmi_callback
+ * do_nmi_callback
  *
- * Set a handler for an NMI. Only one handler may be
- * set. Return 1 if the NMI was handled.
+ * Check to see if a callback exists and execute it.  Return 1
+ * if the handler exists and was handled successfully.
  */
-void set_nmi_callback(nmi_callback_t callback);
+int do_nmi_callback(struct pt_regs *regs, int cpu);
 
-/**
- * unset_nmi_callback
- *
- * Remove the handler previously set.
- */
-void unset_nmi_callback(void);
-
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
+extern int nmi_watchdog_enabled;
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+extern void setup_apic_nmi_watchdog (void *);
+extern void stop_apic_nmi_watchdog (void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
+extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
 
+extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
 #define NMI_DEFAULT     -1
 #define NMI_NONE        0
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 0dc051a8078b..541b3e234335 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -411,8 +411,6 @@ extern pte_t *lookup_address(unsigned long address);
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
 #endif
 
-extern void noexec_setup(const char *str);
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
         ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 1910880fcd40..a4a0e5207db5 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -27,6 +27,7 @@ struct pt_regs {
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
+#include <asm/segment.h>
 
 struct task_struct;
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
@@ -40,18 +41,14 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro
  */
 static inline int user_mode(struct pt_regs *regs)
 {
-        return (regs->xcs & 3) != 0;
+        return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
 }
 static inline int user_mode_vm(struct pt_regs *regs)
 {
-        return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
+        return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
 }
 #define instruction_pointer(regs) ((regs)->eip)
-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 87c069ccba08..c3e5db32fa48 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -20,52 +20,6 @@
 #define RW_LOCK_BIAS             0x01000000
 #define RW_LOCK_BIAS_STR        "0x01000000"
 
-#define __build_read_lock_ptr(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \
-                     "jns 1f\n" \
-                     "call " helper "\n\t" \
-                     "1:\n" \
-                     ::"a" (rw) : "memory")
-
-#define __build_read_lock_const(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $1,%0\n\t" \
-                     "jns 1f\n" \
-                     "pushl %%eax\n\t" \
-                     "leal %0,%%eax\n\t" \
-                     "call " helper "\n\t" \
-                     "popl %%eax\n\t" \
-                     "1:\n" \
-                     :"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_read_lock(rw, helper) do { \
-                if (__builtin_constant_p(rw)) \
-                        __build_read_lock_const(rw, helper); \
-                else \
-                        __build_read_lock_ptr(rw, helper); \
-        } while (0)
-
-#define __build_write_lock_ptr(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                     "jz 1f\n" \
-                     "call " helper "\n\t" \
-                     "1:\n" \
-                     ::"a" (rw) : "memory")
-
-#define __build_write_lock_const(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
-                     "jz 1f\n" \
-                     "pushl %%eax\n\t" \
-                     "leal %0,%%eax\n\t" \
-                     "call " helper "\n\t" \
-                     "popl %%eax\n\t" \
-                     "1:\n" \
-                     :"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_write_lock(rw, helper) do { \
-                if (__builtin_constant_p(rw)) \
-                        __build_write_lock_const(rw, helper); \
-                else \
-                        __build_write_lock_ptr(rw, helper); \
-        } while (0)
+/* Code is in asm-i386/spinlock.h */
 
 #endif
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 43113f5608eb..bc598d6388e3 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -99,17 +99,9 @@ static inline void __down_read(struct rw_semaphore *sem)
         __asm__ __volatile__(
                 "# beginning down_read\n\t"
 LOCK_PREFIX     "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-                "  js        2f\n\t" /* jump if we weren't granted the lock */
+                "  jns        1f\n"
+                "  call call_rwsem_down_read_failed\n"
                 "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                "  pushl     %%ecx\n\t"
-                "  pushl     %%edx\n\t"
-                "  call      rwsem_down_read_failed\n\t"
-                "  popl      %%edx\n\t"
-                "  popl      %%ecx\n\t"
-                "  jmp       1b\n"
-                LOCK_SECTION_END
                 "# ending down_read\n\t"
                 : "+m" (sem->count)
                 : "a" (sem)
@@ -151,15 +143,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
                 "# beginning down_write\n\t"
 LOCK_PREFIX     "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
                 "  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
-                "  jnz       2f\n\t" /* jump if we weren't granted the lock */
-                "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                "  pushl     %%ecx\n\t"
-                "  call      rwsem_down_write_failed\n\t"
-                "  popl      %%ecx\n\t"
-                "  jmp       1b\n"
-                LOCK_SECTION_END
+                "  jz        1f\n"
+                "  call call_rwsem_down_write_failed\n"
+                "1:\n"
                 "# ending down_write"
                 : "+m" (sem->count), "=d" (tmp)
                 : "a" (sem), "1" (tmp)
@@ -193,17 +179,9 @@ static inline void __up_read(struct rw_semaphore *sem)
         __asm__ __volatile__(
                 "# beginning __up_read\n\t"
 LOCK_PREFIX     "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
-                "  js        2f\n\t" /* jump if the lock is being waited upon */
-                "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                "  decw      %%dx\n\t" /* do nothing if still outstanding active readers */
-                "  jnz       1b\n\t"
-                "  pushl     %%ecx\n\t"
-                "  call      rwsem_wake\n\t"
-                "  popl      %%ecx\n\t"
-                "  jmp       1b\n"
-                LOCK_SECTION_END
+                "  jns        1f\n\t"
+                "  call call_rwsem_wake\n"
+                "1:\n"
                 "# ending __up_read\n"
                 : "+m" (sem->count), "=d" (tmp)
                 : "a" (sem), "1" (tmp)
@@ -219,17 +197,9 @@ static inline void __up_write(struct rw_semaphore *sem)
                 "# beginning __up_write\n\t"
                 "  movl      %2,%%edx\n\t"
 LOCK_PREFIX     "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-                "  jnz       2f\n\t" /* jump if the lock is being waited upon */
+                "  jz       1f\n"
+                "  call call_rwsem_wake\n"
                 "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                "  decw      %%dx\n\t" /* did the active count reduce to 0? */
-                "  jnz       1b\n\t" /* jump back if not */
-                "  pushl     %%ecx\n\t"
-                "  call      rwsem_wake\n\t"
-                "  popl      %%ecx\n\t"
-                "  jmp       1b\n"
-                LOCK_SECTION_END
                 "# ending __up_write\n"
                 : "+m" (sem->count)
                 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
@@ -244,17 +214,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
         __asm__ __volatile__(
                 "# beginning __downgrade_write\n\t"
 LOCK_PREFIX     "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-                "  js        2f\n\t" /* jump if the lock is being waited upon */
+                "  jns       1f\n\t"
+                "  call call_rwsem_downgrade_wake\n"
                 "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                "  pushl     %%ecx\n\t"
-                "  pushl     %%edx\n\t"
-                "  call      rwsem_downgrade_wake\n\t"
-                "  popl      %%edx\n\t"
-                "  popl      %%ecx\n\t"
-                "  jmp       1b\n"
-                LOCK_SECTION_END
                 "# ending __downgrade_write\n"
                 : "+m" (sem->count)
                 : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index faf995307b9e..b7ab59685ba7 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -83,6 +83,11 @@
 
 #define GDT_SIZE (GDT_ENTRIES * 8)
 
+/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
+#define SEGMENT_IS_FLAT_CODE(x)  (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
+/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+
 /* Simple and small GDT entries for booting only */
 
 #define GDT_ENTRY_BOOT_CS               2
@@ -112,4 +117,16 @@
  */
 #define IDT_ENTRIES 256
 
+/* Bottom two bits of selector give the ring privilege level */
+#define SEGMENT_RPL_MASK        0x3
+/* Bit 2 is table indicator (LDT/GDT) */
+#define SEGMENT_TI_MASK         0x4
+
+/* User mode is privilege level 3 */
+#define USER_RPL                0x3
+/* LDT segment has TI set, GDT has it cleared */
+#define SEGMENT_LDT             0x4
+#define SEGMENT_GDT             0x0
+
+#define get_kernel_rpl()        0
 #endif
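These masks let user_mode() in ptrace.h test a selector's requested privilege level instead of hard-coding "& 3". A small userspace demonstration of decoding a selector with them; the selector value is an arbitrary example:

#include <stdio.h>

#define SEGMENT_RPL_MASK        0x3     /* as defined above */
#define SEGMENT_TI_MASK         0x4
#define USER_RPL                0x3

int main(void)
{
        unsigned short cs = 0x73;       /* example user-mode %cs value */
        printf("index %u, table %s, RPL %u, user: %s\n",
               cs >> 3,
               (cs & SEGMENT_TI_MASK) ? "LDT" : "GDT",
               cs & SEGMENT_RPL_MASK,
               (cs & SEGMENT_RPL_MASK) == USER_RPL ? "yes" : "no");
        return 0;
}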
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index d51e800acf29..e63b6a68f04c 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -100,13 +100,10 @@ static inline void down(struct semaphore * sem)
         __asm__ __volatile__(
                 "# atomic down operation\n\t"
                 LOCK_PREFIX "decl %0\n\t"       /* --sem->count */
-                "js 2f\n"
-                "1:\n"
-                LOCK_SECTION_START("")
-                "2:\tlea %0,%%eax\n\t"
-                "call __down_failed\n\t"
-                "jmp 1b\n"
-                LOCK_SECTION_END
+                "jns 2f\n"
+                "\tlea %0,%%eax\n\t"
+                "call __down_failed\n"
+                "2:"
                 :"+m" (sem->count)
                 :
                 :"memory","ax");
@@ -123,15 +120,12 @@ static inline int down_interruptible(struct semaphore * sem)
         might_sleep();
         __asm__ __volatile__(
                 "# atomic interruptible down operation\n\t"
+                "xorl %0,%0\n\t"
                 LOCK_PREFIX "decl %1\n\t"       /* --sem->count */
-                "js 2f\n\t"
-                "xorl %0,%0\n"
-                "1:\n"
-                LOCK_SECTION_START("")
-                "2:\tlea %1,%%eax\n\t"
-                "call __down_failed_interruptible\n\t"
-                "jmp 1b\n"
-                LOCK_SECTION_END
+                "jns 2f\n\t"
+                "lea %1,%%eax\n\t"
+                "call __down_failed_interruptible\n"
+                "2:"
                 :"=a" (result), "+m" (sem->count)
                 :
                 :"memory");
@@ -148,15 +142,12 @@ static inline int down_trylock(struct semaphore * sem)
 
         __asm__ __volatile__(
                 "# atomic interruptible down operation\n\t"
+                "xorl %0,%0\n\t"
                 LOCK_PREFIX "decl %1\n\t"       /* --sem->count */
-                "js 2f\n\t"
-                "xorl %0,%0\n"
-                "1:\n"
-                LOCK_SECTION_START("")
-                "2:\tlea %1,%%eax\n\t"
+                "jns 2f\n\t"
+                "lea %1,%%eax\n\t"
                 "call __down_failed_trylock\n\t"
-                "jmp 1b\n"
-                LOCK_SECTION_END
+                "2:\n"
                 :"=a" (result), "+m" (sem->count)
                 :
                 :"memory");
@@ -166,22 +157,16 @@ static inline int down_trylock(struct semaphore * sem)
 /*
  * Note! This is subtle. We jump to wake people up only if
  * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
  */
 static inline void up(struct semaphore * sem)
 {
         __asm__ __volatile__(
                 "# atomic up operation\n\t"
                 LOCK_PREFIX "incl %0\n\t"       /* ++sem->count */
-                "jle 2f\n"
-                "1:\n"
-                LOCK_SECTION_START("")
-                "2:\tlea %0,%%eax\n\t"
-                "call __up_wakeup\n\t"
-                "jmp 1b\n"
-                LOCK_SECTION_END
-                ".subsection 0\n"
+                "jg 1f\n\t"
+                "lea %0,%%eax\n\t"
+                "call __up_wakeup\n"
+                "1:"
                 :"+m" (sem->count)
                 :
                 :"memory","ax");
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 142d10e34ade..32ac8c91d5c5 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -80,17 +80,12 @@ static inline int hard_smp_processor_id(void)
         return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
 }
 #endif
-
-static __inline int logical_smp_processor_id(void)
-{
-        /* we don't want to mark this access volatile - bad code generation */
-        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-}
-
 #endif
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
+extern unsigned int num_processors;
+
 #endif /* !__ASSEMBLY__ */
 
 #else /* CONFIG_SMP */
@@ -100,4 +95,15 @@ extern void __cpu_die(unsigned int cpu);
 #define NO_PROC_ID              0xFF            /* No processor magic marker */
 
 #endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_LOCAL_APIC
+static __inline int logical_smp_processor_id(void)
+{
+        /* we don't want to mark this access volatile - bad code generation */
+        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+#endif
+
 #endif
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d1020363c41a..b0b3043f05e1 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -4,8 +4,12 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <linux/compiler.h>
 
+#define CLI_STRING      "cli"
+#define STI_STRING      "sti"
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
@@ -17,67 +21,64 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-                (*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-        "\n1:\t" \
-        LOCK_PREFIX " ; decb %0\n\t" \
-        "jns 3f\n" \
-        "2:\t" \
-        "rep;nop\n\t" \
-        "cmpb $0,%0\n\t" \
-        "jle 2b\n\t" \
-        "jmp 1b\n" \
-        "3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
-        "\n1:\t" \
-        LOCK_PREFIX " ; decb %0\n\t" \
-        "jns 5f\n" \
-        "2:\t" \
-        "testl $0x200, %1\n\t" \
-        "jz 4f\n\t" \
-        "sti\n" \
-        "3:\t" \
-        "rep;nop\n\t" \
-        "cmpb $0, %0\n\t" \
-        "jle 3b\n\t" \
-        "cli\n\t" \
-        "jmp 1b\n" \
-        "4:\t" \
-        "rep;nop\n\t" \
-        "cmpb $0, %0\n\t" \
-        "jg 1b\n\t" \
-        "jmp 4b\n" \
-        "5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+        return *(volatile signed char *)(&(x)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-        asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+        asm volatile("\n1:\t"
+                     LOCK_PREFIX " ; decb %0\n\t"
+                     "jns 3f\n"
+                     "2:\t"
+                     "rep;nop\n\t"
+                     "cmpb $0,%0\n\t"
+                     "jle 2b\n\t"
+                     "jmp 1b\n"
+                     "3:\n\t"
+                     : "+m" (lock->slock) : : "memory");
 }
 
 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-        asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+        asm volatile(
+                "\n1:\t"
+                LOCK_PREFIX " ; decb %0\n\t"
+                "jns 5f\n"
+                "2:\t"
+                "testl $0x200, %1\n\t"
+                "jz 4f\n\t"
+                STI_STRING "\n"
+                "3:\t"
+                "rep;nop\n\t"
+                "cmpb $0, %0\n\t"
+                "jle 3b\n\t"
+                CLI_STRING "\n\t"
+                "jmp 1b\n"
+                "4:\t"
+                "rep;nop\n\t"
+                "cmpb $0, %0\n\t"
+                "jg 1b\n\t"
+                "jmp 4b\n"
+                "5:\n\t"
+                : "+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
         char oldval;
-        __asm__ __volatile__(
+        asm volatile(
                 "xchgb %b0,%1"
                 :"=q" (oldval), "+m" (lock->slock)
                 :"0" (0) : "memory");
@@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define __raw_spin_unlock_string \
-        "movb $1,%0" \
-                :"+m" (lock->slock) : : "memory"
-
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-        __asm__ __volatile__(
-                __raw_spin_unlock_string
-        );
+        asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
 }
 
 #else
 
-#define __raw_spin_unlock_string \
-        "xchgb %b0, %1" \
-                :"=q" (oldval), "+m" (lock->slock) \
-                :"0" (oldval) : "memory"
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
         char oldval = 1;
 
-        __asm__ __volatile__(
-                __raw_spin_unlock_string
-        );
+        asm volatile("xchgb %b0, %1"
+                     : "=q" (oldval), "+m" (lock->slock)
+                     : "0" (oldval) : "memory");
 }
 
 #endif
 
-#define __raw_spin_unlock_wait(lock) \
-        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+        while (__raw_spin_is_locked(lock))
+                cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)          ((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+        return (int)(x)->lock > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)         ((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+        return (x)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-        __build_read_lock(rw, "__read_lock_failed");
+        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+                     "jns 1f\n"
+                     "call __read_lock_failed\n\t"
+                     "1:\n"
+                     ::"a" (rw) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-        __build_write_lock(rw, "__write_lock_failed");
+        asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+                     "jz 1f\n"
+                     "call __write_lock_failed\n\t"
+                     "1:\n"
+                     ::"a" (rw) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
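The former string macros are now inline functions with the asm inlined. The trylock fast path is a single xchgb, and that sequence can be exercised verbatim in userspace:

#include <stdio.h>

static volatile char slock = 1;         /* 1 = unlocked, <= 0 = locked */

/* Same xchgb sequence as __raw_spin_trylock() above. */
static int trylock(void)
{
        char oldval;
        asm volatile("xchgb %b0,%1"
                     : "=q" (oldval), "+m" (slock)
                     : "0" (0) : "memory");
        return oldval > 0;              /* acquired iff it was unlocked */
}

int main(void)
{
        printf("first trylock:  %d\n", trylock());      /* 1: acquired */
        printf("second trylock: %d\n", trylock());      /* 0: already held */
        return 0;
}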
diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h
new file mode 100644
index 000000000000..7d1f6a5cbfca
--- /dev/null
+++ b/include/asm-i386/stacktrace.h
@@ -0,0 +1 @@
+#include <asm-x86_64/stacktrace.h>
diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h
new file mode 100644
index 000000000000..399bf6026b16
--- /dev/null
+++ b/include/asm-i386/therm_throt.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_I386_THERM_THROT_H__
+#define __ASM_I386_THERM_THROT_H__ 1
+
+#include <asm/atomic.h>
+
+extern atomic_t therm_throt_en;
+int therm_throt_process(int curr);
+
+#endif /* __ASM_I386_THERM_THROT_H__ */
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index d57ca5c540b6..360648b0f2b3 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -36,8 +36,6 @@
                 : "memory");                                            \
 } while (0)
 
-extern unsigned long pgkern_mask;
-
 # define __flush_tlb_all()                                              \
         do {                                                            \
                 if (cpu_has_pge)                                        \
@@ -49,7 +47,7 @@ extern unsigned long pgkern_mask;
 #define cpu_has_invlpg  (boot_cpu_data.x86 > 3)
 
 #define __flush_tlb_single(addr) \
-        __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+        __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
 
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 97b828ce31e0..c13933185c1c 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -6,7 +6,6 @@
 #ifndef _ASM_i386_TSC_H
 #define _ASM_i386_TSC_H
 
-#include <linux/config.h>
 #include <asm/processor.h>
 
 /*
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fc1c8ddae149..565d0897b205 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -323,10 +323,11 @@
 #define __NR_tee                315
 #define __NR_vmsplice           316
 #define __NR_move_pages         317
+#define __NR_getcpu             318
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 318
+#define NR_syscalls 319
 
 /*
  * user-visible error numbers are in the range -1 - -128:  see
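__NR_getcpu is new syscall 318, hence NR_syscalls growing to 319. Until libc grows a wrapper it can be invoked by number; a sketch for i386, where the third argument is the (unused) getcpu_cache pointer:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned int cpu = 0, node = 0;

        /* 318 == __NR_getcpu on i386, per the diff above */
        if (syscall(318, &cpu, &node, NULL) == 0)
                printf("running on cpu %u, node %u\n", cpu, node);
        else
                perror("getcpu");
        return 0;
}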
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 4c1a0b968569..5031d693b89d 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -18,6 +18,7 @@ struct unwind_frame_info
 {
         struct pt_regs regs;
         struct task_struct *task;
+        unsigned call_frame:1;
 };
 
 #define UNW_PC(frame) (frame)->regs.eip
@@ -28,6 +29,8 @@ struct unwind_frame_info
 #define FRAME_LINK_OFFSET 0
 #define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0)
 #define STACK_TOP(tsk) ((tsk)->thread.esp0)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
 #endif
 #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
 
@@ -42,6 +45,10 @@ struct unwind_frame_info
         PTREGS_INFO(edi), \
         PTREGS_INFO(eip)
 
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+        ((raItem).where == Memory && \
+         !((raItem).value * (dataAlign) + 4))
+
 static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
                                             /*const*/ struct pt_regs *regs)
 {
@@ -88,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 
 #define UNW_PC(frame) ((void)(frame), 0)
 #define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
 
 static inline int arch_unw_user_mode(const void *info)
 {