Diffstat (limited to 'include/asm-i386')
40 files changed, 713 insertions, 456 deletions
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index b75a348d0c1c..147e4ac1ebf0 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -3,6 +3,7 @@ include include/asm-generic/Kbuild.asm
 header-y += boot.h
 header-y += debugreg.h
 header-y += ldt.h
+header-y += ptrace-abi.h
 header-y += ucontext.h
 
 unifdef-y += mtrr.h
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 20f523954218..6016632d032f 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -131,21 +131,7 @@ static inline void disable_acpi(void)
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int skip_ioapic_setup;
 extern int acpi_skip_timer_override;
-
-static inline void disable_ioapic_setup(void)
-{
-        skip_ioapic_setup = 1;
-}
-
-static inline int ioapic_setup_disabled(void)
-{
-        return skip_ioapic_setup;
-}
-
-#else
-static inline void disable_ioapic_setup(void) { }
 #endif
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i
new file mode 100644
index 000000000000..6c47e3b9484b
--- /dev/null
+++ b/include/asm-i386/alternative-asm.i
@@ -0,0 +1,14 @@
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+        .macro LOCK_PREFIX
+1:      lock
+        .section .smp_locks,"a"
+        .align 4
+        .long 1b
+        .previous
+        .endm
+#else
+        .macro LOCK_PREFIX
+        .endm
+#endif
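
The `.smp_locks` section built by this macro records the address of every `lock` prefix it emits. A minimal C sketch of the consumer side, assuming the 2.6.18-era convention that a UP boot rewrites each recorded prefix byte to a one-byte NOP (the function name and boundary arguments are illustrative, not from this patch):

    /* Walk the .smp_locks pointer array; each entry addresses one
     * 0xf0 lock-prefix byte recorded by the LOCK_PREFIX macro above. */
    static void smp_unlock_sketch(unsigned char **start, unsigned char **end)
    {
            unsigned char **ptr;

            for (ptr = start; ptr < end; ptr++)
                    **ptr = 0x90;   /* lock -> nop on a UP kernel */
    }
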
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 2c1e371cebb6..3a42b7d6fc92 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -16,20 +16,8 @@
 #define APIC_VERBOSE 1
 #define APIC_DEBUG 2
 
-extern int enable_local_apic;
 extern int apic_verbosity;
 
-static inline void lapic_disable(void)
-{
-        enable_local_apic = -1;
-        clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-}
-
-static inline void lapic_enable(void)
-{
-        enable_local_apic = 1;
-}
-
 /*
  * Define the default level of output to be very little
  * This can be turned up by using apic=verbose for more
@@ -42,6 +30,8 @@ static inline void lapic_enable(void)
 } while (0)
 
 
+extern void generic_apic_probe(void);
+
 #ifdef CONFIG_X86_LOCAL_APIC
 
 /*
@@ -117,8 +107,6 @@ extern void enable_APIC_timer(void);
 
 extern void enable_NMI_through_LVT0 (void * dummy);
 
-extern int disable_timer_pin_1;
-
 void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
 void switch_APIC_timer_to_ipi(void *cpumask);
 void switch_ipi_to_APIC_timer(void *cpumask);
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 89b8b82c82b3..5874ef119ffd 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -33,50 +33,99 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
         return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct desc_struct idt_table[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+static inline void pack_descriptor(__u32 *a, __u32 *b,
+        unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
+{
+        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
+        *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
+                (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
+}
+
+static inline void pack_gate(__u32 *a, __u32 *b,
+        unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
+{
+        *a = (seg << 16) | (base & 0xffff);
+        *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
+}
+
+#define DESCTYPE_LDT    0x82    /* present, system, DPL-0, LDT */
+#define DESCTYPE_TSS    0x89    /* present, system, DPL-0, 32-bit TSS */
+#define DESCTYPE_TASK   0x85    /* present, system, DPL-0, task gate */
+#define DESCTYPE_INT    0x8e    /* present, system, DPL-0, interrupt gate */
+#define DESCTYPE_TRAP   0x8f    /* present, system, DPL-0, trap gate */
+#define DESCTYPE_DPL3   0x60    /* DPL-3 */
+#define DESCTYPE_S      0x10    /* !system */
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
 #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
 #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
+#if TLS_SIZE != 24
+# error update this code.
+#endif
 
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-        "movw %w1,2(%2)\n\t" \
-        "rorl $16,%1\n\t" \
-        "movb %b1,4(%2)\n\t" \
-        "movb %4,5(%2)\n\t" \
-        "movb $0,6(%2)\n\t" \
-        "movb %h1,7(%2)\n\t" \
-        "rorl $16,%1" \
-        : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-        _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-                offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+        C(0); C(1); C(2);
+#undef C
 }
 
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+{
+        __u32 *lp = (__u32 *)((char *)dt + entry*8);
+        *lp = entry_a;
+        *(lp+1) = entry_b;
+}
+
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+        __u32 a, b;
+        pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+        write_idt_entry(idt_table, gate, a, b);
+}
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
 {
-        _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+        __u32 a, b;
+        pack_descriptor(&a, &b, (unsigned long)addr,
+                        offsetof(struct tss_struct, __cacheline_filler) - 1,
+                        DESCTYPE_TSS, 0);
+        write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+{
+        __u32 a, b;
+        pack_descriptor(&a, &b, (unsigned long)addr,
+                        entries * sizeof(struct desc_struct) - 1,
+                        DESCTYPE_LDT, 0);
+        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
 #define LDT_entry_a(info) \
         ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 
@@ -102,24 +151,6 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
          (info)->seg_not_present == 1 && \
          (info)->useable == 0 )
 
-static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-{
-        __u32 *lp = (__u32 *)((char *)ldt + entry*8);
-        *lp = entry_a;
-        *(lp+1) = entry_b;
-}
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-        C(0); C(1); C(2);
-#undef C
-}
-
 static inline void clear_LDT(void)
 {
         int cpu = get_cpu();
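
A worked example of the new descriptor packing, following the bit layout encoded in pack_descriptor() above (the base, limit, and resulting words are illustrative values, not from the patch):

    static void pack_descriptor_example(void)
    {
            __u32 a, b;

            pack_descriptor(&a, &b, 0xc0101000UL, 0x1f, DESCTYPE_LDT, 0);
            /* a == 0x1000001f: base bits 15..0 in the high half,
             *                  limit bits 15..0 in the low half.
             * b == 0xc0008210: base bits 31..24 (0xc0) in the top byte,
             *                  type byte 0x82 at bits 15..8, base bits
             *                  23..16 (0x10) at bits 7..0; limit bits
             *                  19..16 and the flags nibble are zero here. */
    }
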
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 9cf20cacf76e..576ae01d71c8 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -21,8 +21,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction direction)
 {
-        if (direction == DMA_NONE)
-                BUG();
+        BUG_ON(direction == DMA_NONE);
         WARN_ON(size == 0);
         flush_write_buffers();
         return virt_to_phys(ptr);
@@ -32,8 +31,7 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                  enum dma_data_direction direction)
 {
-        if (direction == DMA_NONE)
-                BUG();
+        BUG_ON(direction == DMA_NONE);
 }
 
 static inline int
@@ -42,8 +40,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
         int i;
 
-        if (direction == DMA_NONE)
-                BUG();
+        BUG_ON(direction == DMA_NONE);
         WARN_ON(nents == 0 || sg[0].length == 0);
 
         for (i = 0; i < nents; i++ ) {
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
index 2280f6272f80..6d66398a307d 100644
--- a/include/asm-i386/dwarf2.h
+++ b/include/asm-i386/dwarf2.h
@@ -1,8 +1,6 @@
 #ifndef _DWARF2_H
 #define _DWARF2_H
 
-#include <linux/config.h>
-
 #ifndef __ASSEMBLY__
 #warning "asm/dwarf2.h should be only included in pure assembly files"
 #endif
@@ -28,6 +26,13 @@
 #define CFI_RESTORE .cfi_restore
 #define CFI_REMEMBER_STATE .cfi_remember_state
 #define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME .cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
 
 #else
 
@@ -48,6 +53,8 @@
 #define CFI_RESTORE ignore
 #define CFI_REMEMBER_STATE ignore
 #define CFI_RESTORE_STATE ignore
+#define CFI_UNDEFINED ignore
+#define CFI_SIGNAL_FRAME ignore
 
 #endif
 
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index ca82acb8cb1f..f7514fb6e8e4 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -18,7 +18,7 @@
 
 #define E820_RAM 1
 #define E820_RESERVED 2
-#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
+#define E820_ACPI 3
 #define E820_NVS 4
 
 #define HIGH_MEMORY (1024*1024)
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index a48cc3f7ccc6..02428cb36621 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -19,7 +19,11 @@
  * Leave one empty page between vmalloc'ed areas and
  * the start of the fixmap.
  */
-#define __FIXADDR_TOP 0xfffff000
+#ifndef CONFIG_COMPAT_VDSO
+extern unsigned long __FIXADDR_TOP;
+#else
+#define __FIXADDR_TOP 0xfffff000
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
@@ -93,6 +97,7 @@ enum fixed_addresses {
 
 extern void __set_fixmap (enum fixed_addresses idx,
                                         unsigned long phys, pgprot_t flags);
+extern void reserve_top_address(unsigned long reserve);
 
 #define set_fixmap(idx, phys) \
                 __set_fixmap(idx, phys, PAGE_KERNEL)
diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i
new file mode 100644
index 000000000000..4d68ddce18b6
--- /dev/null
+++ b/include/asm-i386/frame.i
@@ -0,0 +1,24 @@
+#include <linux/config.h>
+#include <asm/dwarf2.h>
+
+/* The annotation hides the frame from the unwinder and makes it look
+   like a ordinary ebp save/restore. This avoids some special cases for
+   frame pointer later */
+#ifdef CONFIG_FRAME_POINTER
+        .macro FRAME
+        pushl %ebp
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET ebp,0
+        movl %esp,%ebp
+        .endm
+        .macro ENDFRAME
+        popl %ebp
+        CFI_ADJUST_CFA_OFFSET -4
+        CFI_RESTORE ebp
+        .endm
+#else
+        .macro FRAME
+        .endm
+        .macro ENDFRAME
+        .endm
+#endif
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index b3783a32abee..8ffbb0f07457 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_GENAPIC_H
 #define _ASM_GENAPIC_H 1
 
+#include <asm/mpspec.h>
+
 /*
  * Generic APIC driver interface.
  *
@@ -63,14 +65,25 @@ struct genapic {
         unsigned (*get_apic_id)(unsigned long x);
         unsigned long apic_id_mask;
         unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
 
+#ifdef CONFIG_SMP
         /* ipi */
         void (*send_IPI_mask)(cpumask_t mask, int vector);
         void (*send_IPI_allbutself)(int vector);
         void (*send_IPI_all)(int vector);
+#endif
 };
 
-#define APICFUNC(x) .x = x
+#define APICFUNC(x) .x = x,
+
+/* More functions could be probably marked IPIFUNC and save some space
+   in UP GENERICARCH kernels, but I don't have the nerve right now
+   to untangle this mess. -AK */
+#ifdef CONFIG_SMP
+#define IPIFUNC(x) APICFUNC(x)
+#else
+#define IPIFUNC(x)
+#endif
 
 #define APIC_INIT(aname, aprobe) { \
         .name = aname, \
@@ -80,33 +93,33 @@ struct genapic {
         .no_balance_irq = NO_BALANCE_IRQ, \
         .ESR_DISABLE = esr_disable, \
         .apic_destination_logical = APIC_DEST_LOGICAL, \
-        APICFUNC(apic_id_registered), \
-        APICFUNC(target_cpus), \
-        APICFUNC(check_apicid_used), \
-        APICFUNC(check_apicid_present), \
-        APICFUNC(init_apic_ldr), \
-        APICFUNC(ioapic_phys_id_map), \
-        APICFUNC(clustered_apic_check), \
-        APICFUNC(multi_timer_check), \
-        APICFUNC(apicid_to_node), \
-        APICFUNC(cpu_to_logical_apicid), \
-        APICFUNC(cpu_present_to_apicid), \
-        APICFUNC(apicid_to_cpu_present), \
-        APICFUNC(mpc_apic_id), \
-        APICFUNC(setup_portio_remap), \
-        APICFUNC(check_phys_apicid_present), \
-        APICFUNC(mpc_oem_bus_info), \
-        APICFUNC(mpc_oem_pci_bus), \
-        APICFUNC(mps_oem_check), \
-        APICFUNC(get_apic_id), \
+        APICFUNC(apic_id_registered) \
+        APICFUNC(target_cpus) \
+        APICFUNC(check_apicid_used) \
+        APICFUNC(check_apicid_present) \
+        APICFUNC(init_apic_ldr) \
+        APICFUNC(ioapic_phys_id_map) \
+        APICFUNC(clustered_apic_check) \
+        APICFUNC(multi_timer_check) \
+        APICFUNC(apicid_to_node) \
+        APICFUNC(cpu_to_logical_apicid) \
+        APICFUNC(cpu_present_to_apicid) \
+        APICFUNC(apicid_to_cpu_present) \
+        APICFUNC(mpc_apic_id) \
+        APICFUNC(setup_portio_remap) \
+        APICFUNC(check_phys_apicid_present) \
+        APICFUNC(mpc_oem_bus_info) \
+        APICFUNC(mpc_oem_pci_bus) \
+        APICFUNC(mps_oem_check) \
+        APICFUNC(get_apic_id) \
         .apic_id_mask = APIC_ID_MASK, \
-        APICFUNC(cpu_mask_to_apicid), \
-        APICFUNC(acpi_madt_oem_check), \
-        APICFUNC(send_IPI_mask), \
-        APICFUNC(send_IPI_allbutself), \
-        APICFUNC(send_IPI_all), \
-        APICFUNC(enable_apic_mode), \
-        APICFUNC(phys_pkg_id), \
+        APICFUNC(cpu_mask_to_apicid) \
+        APICFUNC(acpi_madt_oem_check) \
+        IPIFUNC(send_IPI_mask) \
+        IPIFUNC(send_IPI_allbutself) \
+        IPIFUNC(send_IPI_all) \
+        APICFUNC(enable_apic_mode) \
+        APICFUNC(phys_pkg_id) \
 }
 
 extern struct genapic *genapic;
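
With the trailing comma folded into APICFUNC(), each APIC_INIT() entry expands to a designated initializer (`.apic_id_registered = apic_id_registered,`), and IPIFUNC() lets the three send_IPI_* initializers vanish on UP builds. A self-contained C sketch of the same pattern (all names here are illustrative, not from the kernel):

    /* A comma inside the macro lets optional entries compile away. */
    struct ops { int (*probe)(void); int (*setup)(void); };

    static int probe(void) { return 0; }
    static int setup(void) { return 0; }

    #define OPFUNC(x) .x = x,
    #ifdef CONFIG_SMP
    #define SMPFUNC(x) OPFUNC(x)
    #else
    #define SMPFUNC(x)
    #endif

    static struct ops example_ops = {
            OPFUNC(probe)
            SMPFUNC(setup)  /* dropped entirely on UP builds */
    };
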
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
index 134ea9cc5283..b52cd60a075b 100644
--- a/include/asm-i386/intel_arch_perfmon.h
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -14,6 +14,18 @@
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+        (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+union cpuid10_eax {
+        struct {
+                unsigned int version_id:8;
+                unsigned int num_counters:8;
+                unsigned int bit_width:8;
+                unsigned int mask_length:8;
+        } split;
+        unsigned int full;
+};
 
 #endif /* X86_INTEL_ARCH_PERFMON_H */
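
The new union maps the packed fields of CPUID leaf 0xa's %eax. A hedged usage sketch, using the cpuid() helper from asm/processor.h (the function name here is illustrative):

    static unsigned int arch_perfmon_counter_width(void)
    {
            union cpuid10_eax eax;
            unsigned int ebx, ecx, edx;

            cpuid(0xa, &eax.full, &ebx, &ecx, &edx); /* arch perfmon leaf */
            return eax.split.bit_width;              /* counter width, bits */
    }
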
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 5092e819b8a2..5d309275a1dc 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -188,6 +188,16 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
+static inline void disable_ioapic_setup(void)
+{
+        skip_ioapic_setup = 1;
+}
+
+static inline int ioapic_setup_disabled(void)
+{
+        return skip_ioapic_setup;
+}
+
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -206,6 +216,7 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 
 #else /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
+static inline void disable_ioapic_setup(void) { }
 #endif
 
 extern int assign_irq_vector(int irq);
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 53f0e06672dc..4dfc9f5ed031 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -1,6 +1,26 @@
 #ifndef _I386_KEXEC_H
 #define _I386_KEXEC_H
 
+#define PA_CONTROL_PAGE  0
+#define VA_CONTROL_PAGE  1
+#define PA_PGD           2
+#define VA_PGD           3
+#define PA_PTE_0         4
+#define VA_PTE_0         5
+#define PA_PTE_1         6
+#define VA_PTE_1         7
+#ifdef CONFIG_X86_PAE
+#define PA_PMD_0         8
+#define VA_PMD_0         9
+#define PA_PMD_1         10
+#define VA_PMD_1         11
+#define PAGES_NR         12
+#else
+#define PAGES_NR         8
+#endif
+
+#ifndef __ASSEMBLY__
+
 #include <asm/fixmap.h>
 #include <asm/ptrace.h>
 #include <asm/string.h>
@@ -72,5 +92,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
                 newregs->eip = (unsigned long)current_text_addr();
         }
 }
+asmlinkage NORET_TYPE void
+relocate_kernel(unsigned long indirection_page,
+                unsigned long control_page,
+                unsigned long start_address,
+                unsigned int has_pae) ATTRIB_NORET;
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _I386_KEXEC_H */
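
The PA_*/VA_* constants index the page_list[] array that machine_kexec() hands to relocate_kernel(), one physical/virtual address pair per transition page-table page. A hedged sketch of how the first slots get filled (names follow arch/i386/kernel/machine_kexec.c of this era, but treat the details as illustrative):

    static void fill_page_list_sketch(unsigned long *page_list,
                                      void *control_page, pgd_t *pgd)
    {
            page_list[PA_CONTROL_PAGE] = __pa(control_page);
            page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
            page_list[PA_PGD] = __pa(pgd);
            page_list[VA_PGD] = (unsigned long)pgd;
            /* PA_PTE_0/VA_PTE_0 and the PAE PMD slots follow the
             * same physical/virtual pattern. */
    }
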
diff --git a/include/asm-i386/libata-portmap.h b/include/asm-i386/libata-portmap.h
deleted file mode 100644
index 75484ef0c743..000000000000
--- a/include/asm-i386/libata-portmap.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/libata-portmap.h>
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index b5f3f0d0b2bc..26333685a7fb 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -123,9 +123,13 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
+#ifdef CONFIG_SMP
         if (cpu >= NR_CPUS)
                 return BAD_APICID;
         return (int)cpu_2_logical_apicid[cpu];
+#else
+        return logical_smp_processor_id();
+#endif
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 9fd073286289..a81b05961595 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -46,10 +46,12 @@ extern u8 cpu_2_logical_apicid[];
 static inline void init_apic_ldr(void)
 {
         unsigned long val, id;
-        int i, count;
-        u8 lid;
+        int count = 0;
         u8 my_id = (u8)hard_smp_processor_id();
         u8 my_cluster = (u8)apicid_cluster(my_id);
+#ifdef CONFIG_SMP
+        u8 lid;
+        int i;
 
         /* Create logical APIC IDs by counting CPUs already in cluster. */
         for (count = 0, i = NR_CPUS; --i >= 0; ) {
@@ -57,6 +59,7 @@ static inline void init_apic_ldr(void)
                 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
                         ++count;
         }
+#endif
         /* We only have a 4 wide bitmap in cluster mode.  If a deranged
          * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
         BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
@@ -91,9 +94,13 @@ static inline int apicid_to_node(int logical_apicid)
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
+#ifdef CONFIG_SMP
         if (cpu >= NR_CPUS)
                 return BAD_APICID;
         return (int)cpu_2_logical_apicid[cpu];
+#else
+        return logical_smp_processor_id();
+#endif
 }
 
 static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 22cb07cc8f32..61b073322006 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -38,10 +38,16 @@ static inline void get_memcfg_numa(void)
 }
 
 extern int early_pfn_to_nid(unsigned long pfn);
+extern void numa_kva_reserve(void);
 
 #else /* !CONFIG_NUMA */
+
 #define get_memcfg_numa get_memcfg_numa_flat
 #define get_zholes_size(n) (0)
+
+static inline void numa_kva_reserve(void)
+{
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 05a538531229..7a17d9e58ad6 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -30,14 +30,10 @@ do { \
 \
         __asm__ __volatile__( \
                 LOCK_PREFIX " decl (%%eax) \n" \
-                        " js 2f \n" \
+                        " jns 1f \n" \
+                        " call "#fail_fn" \n" \
                 "1: \n" \
 \
-                LOCK_SECTION_START("") \
-                "2: call "#fail_fn" \n" \
-                " jmp 1b \n" \
-                LOCK_SECTION_END \
-\
                 :"=a" (dummy) \
                 : "a" (count) \
                 : "memory", "ecx", "edx"); \
@@ -86,14 +82,10 @@ do { \
 \
         __asm__ __volatile__( \
                 LOCK_PREFIX " incl (%%eax) \n" \
-                        " jle 2f \n" \
+                        " jg 1f \n" \
+                        " call "#fail_fn" \n" \
                 "1: \n" \
 \
-                LOCK_SECTION_START("") \
-                "2: call "#fail_fn" \n" \
-                " jmp 1b \n" \
-                LOCK_SECTION_END \
-\
                 :"=a" (dummy) \
                 : "a" (count) \
                 : "memory", "ecx", "edx"); \
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 67d994799999..303bcd4592bb 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -6,32 +6,29 @@
 
 #include <linux/pm.h>
 
-struct pt_regs;
-
-typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-
 /**
- * set_nmi_callback
+ * do_nmi_callback
  *
- * Set a handler for an NMI. Only one handler may be
- * set. Return 1 if the NMI was handled.
+ * Check to see if a callback exists and execute it.  Return 1
+ * if the handler exists and was handled successfully.
  */
-void set_nmi_callback(nmi_callback_t callback);
+int do_nmi_callback(struct pt_regs *regs, int cpu);
 
-/**
- * unset_nmi_callback
- *
- * Remove the handler previously set.
- */
-void unset_nmi_callback(void);
-
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
+extern int nmi_watchdog_enabled;
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+extern void setup_apic_nmi_watchdog (void *);
+extern void stop_apic_nmi_watchdog (void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
+extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
 
+extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
 #define NMI_DEFAULT -1
 #define NMI_NONE 0
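
The reservation functions return nonzero on success. A hedged sketch of how a profiling driver might claim a counter/event-select MSR pair so it cannot collide with the NMI watchdog (the function name is illustrative):

    static int claim_perfctr_sketch(unsigned int perfctr_msr,
                                    unsigned int evntsel_msr)
    {
            if (!reserve_perfctr_nmi(perfctr_msr))
                    return 0;
            if (!reserve_evntsel_nmi(evntsel_msr)) {
                    release_perfctr_nmi(perfctr_msr);
                    return 0;
            }
            return 1; /* both MSRs reserved; safe to program */
    }
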
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 2756d4b04c27..201c86a6711e 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -21,8 +21,9 @@
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
-#define pte_same(a, b) ((a).pte_low == (b).pte_low)
+
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 #define pte_none(x) (!(x).pte_low)
 #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index dccb1b3337ad..0d899173232e 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -77,7 +77,7 @@ static inline void pud_clear (pud_t * pud) { }
 #define pud_page(pud) \
 ((struct page *) __va(pud_val(pud) & PAGE_MASK))
 
-#define pud_page_kernel(pud) \
+#define pud_page_vaddr(pud) \
 ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 
 
@@ -105,6 +105,7 @@ static inline void pmd_clear(pmd_t *pmd)
         *(tmp + 1) = 0;
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_t res;
@@ -117,6 +118,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
         return res;
 }
 
+#define __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t a, pte_t b)
 {
         return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 09697fec3d2b..541b3e234335 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,6 +246,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
+do { \
+        if (dirty) { \
+                (ptep)->pte_low = (entry).pte_low; \
+                flush_tlb_page(vma, address); \
+        } \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
         if (!pte_dirty(*ptep))
@@ -253,6 +270,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
         return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
         if (!pte_young(*ptep))
@@ -260,6 +278,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
         return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
         pte_t pte;
@@ -272,6 +291,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
         return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
@@ -364,11 +384,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_index(address) \
         (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) \
-        ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 
 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 
-#define pmd_page_kernel(pmd) \
+#define pmd_page_vaddr(pmd) \
         ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 /*
@@ -391,8 +411,6 @@ extern pte_t *lookup_address(unsigned long address);
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
 #endif
 
-extern void noexec_setup(const char *str);
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
         ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
@@ -411,23 +429,8 @@ extern void noexec_setup(const char *str);
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
 */
 #define update_mmu_cache(vma,address,pte) do { } while (0)
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-        do { \
-                if (__dirty) { \
-                        (__ptep)->pte_low = (__entry).pte_low; \
-                        flush_tlb_page(__vma, __address); \
-                } \
-        } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
@@ -441,12 +444,6 @@ extern void noexec_setup(const char *str);
 #define GET_IOSPACE(pfn) 0
 #define GET_PFN(pfn) (pfn)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
 
 #endif /* _I386_PGTABLE_H */
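
pmd_page_kernel()/pud_page_kernel() are renamed to *_page_vaddr() to say what they return: the kernel virtual address of the table that a pmd/pud entry points at. A hedged page-walk sketch using the renamed helper (the function itself is illustrative; pud_offset is the folded no-pud variant on i386):

    static pte_t *walk_to_pte_sketch(pgd_t *pgd_base, unsigned long addr)
    {
            pgd_t *pgd = pgd_base + pgd_index(addr);
            pud_t *pud = pud_offset(pgd, addr);  /* folded on i386 */
            pmd_t *pmd = pmd_offset(pud, addr);

            return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
    }
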
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index b32346d62e10..2277127696d2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -143,6 +143,18 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
 
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+                           unsigned int *ecx, unsigned int *edx)
+{
+        /* ecx is often an input as well as an output. */
+        __asm__("cpuid"
+                : "=a" (*eax),
+                  "=b" (*ebx),
+                  "=c" (*ecx),
+                  "=d" (*edx)
+                : "0" (*eax), "2" (*ecx));
+}
+
 /*
  * Generic CPUID function
  * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
@@ -150,24 +162,18 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 */
 static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
 {
-        __asm__("cpuid"
-                : "=a" (*eax),
-                  "=b" (*ebx),
-                  "=c" (*ecx),
-                  "=d" (*edx)
-                : "0" (op), "c"(0));
+        *eax = op;
+        *ecx = 0;
+        __cpuid(eax, ebx, ecx, edx);
 }
 
 /* Some CPUID calls want 'count' to be placed in ecx */
 static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
                int *edx)
 {
-        __asm__("cpuid"
-                : "=a" (*eax),
-                  "=b" (*ebx),
-                  "=c" (*ecx),
-                  "=d" (*edx)
-                : "0" (op), "c" (count));
+        *eax = op;
+        *ecx = count;
+        __cpuid(eax, ebx, ecx, edx);
 }
 
 /*
@@ -175,42 +181,30 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
 */
 static inline unsigned int cpuid_eax(unsigned int op)
 {
-        unsigned int eax;
+        unsigned int eax, ebx, ecx, edx;
 
-        __asm__("cpuid"
-                : "=a" (eax)
-                : "0" (op)
-                : "bx", "cx", "dx");
+        cpuid(op, &eax, &ebx, &ecx, &edx);
         return eax;
 }
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
-        unsigned int eax, ebx;
+        unsigned int eax, ebx, ecx, edx;
 
-        __asm__("cpuid"
-                : "=a" (eax), "=b" (ebx)
-                : "0" (op)
-                : "cx", "dx" );
+        cpuid(op, &eax, &ebx, &ecx, &edx);
         return ebx;
 }
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
-        unsigned int eax, ecx;
+        unsigned int eax, ebx, ecx, edx;
 
-        __asm__("cpuid"
-                : "=a" (eax), "=c" (ecx)
-                : "0" (op)
-                : "bx", "dx" );
+        cpuid(op, &eax, &ebx, &ecx, &edx);
         return ecx;
 }
 static inline unsigned int cpuid_edx(unsigned int op)
 {
-        unsigned int eax, edx;
+        unsigned int eax, ebx, ecx, edx;
 
-        __asm__("cpuid"
-                : "=a" (eax), "=d" (edx)
-                : "0" (op)
-                : "bx", "cx");
+        cpuid(op, &eax, &ebx, &ecx, &edx);
         return edx;
 }
 
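
With all four registers routed through __cpuid(), the cpuid_*() helpers no longer clobber registers behind the compiler's back; they simply discard the outputs they do not need. A short usage sketch (the function name is illustrative):

    static unsigned int max_basic_cpuid_leaf(void)
    {
            return cpuid_eax(0); /* leaf 0: %eax = highest basic leaf */
    }
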
diff --git a/include/asm-i386/ptrace-abi.h b/include/asm-i386/ptrace-abi.h
new file mode 100644
index 000000000000..a44901817a26
--- /dev/null
+++ b/include/asm-i386/ptrace-abi.h
@@ -0,0 +1,39 @@
+#ifndef I386_PTRACE_ABI_H
+#define I386_PTRACE_ABI_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+#define FRAME_SIZE 17
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+#define PTRACE_SYSEMU 31
+#define PTRACE_SYSEMU_SINGLESTEP 32
+
+#endif
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index f324c53b6f9a..a4a0e5207db5 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -1,24 +1,7 @@
 #ifndef _I386_PTRACE_H
 #define _I386_PTRACE_H
 
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS 13
-#define EFL 14
-#define UESP 15
-#define SS 16
-#define FRAME_SIZE 17
+#include <asm/ptrace-abi.h>
 
 /* this struct defines the way the registers are stored on the
    stack during a system call. */
@@ -41,25 +24,10 @@ struct pt_regs {
         int xss;
 };
 
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-#define PTRACE_OLDSETOPTIONS 21
-
-#define PTRACE_GET_THREAD_AREA 25
-#define PTRACE_SET_THREAD_AREA 26
-
-#define PTRACE_SYSEMU 31
-#define PTRACE_SYSEMU_SINGLESTEP 32
-
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
+#include <asm/segment.h>
 
 struct task_struct;
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
@@ -73,18 +41,14 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro
 */
 static inline int user_mode(struct pt_regs *regs)
 {
-        return (regs->xcs & 3) != 0;
+        return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
 }
 static inline int user_mode_vm(struct pt_regs *regs)
 {
-        return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
+        return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
 }
 #define instruction_pointer(regs) ((regs)->eip)
-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 #endif /* __KERNEL__ */
 
 #endif
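
The user_mode checks now compare the saved %cs RPL against USER_RPL rather than merely testing for nonzero, so only true ring-3 frames count; user_mode_vm() additionally accepts VM86 frames, whose eflags VM bit dominates the comparison. A hedged sketch, assuming asm/segment.h defines SEGMENT_RPL_MASK as 3 and USER_RPL as 3:

    static int user_frame_sketch(unsigned int xcs, unsigned int eflags)
    {
            /* VM_MASK (0x00020000) makes the OR'ed value >= 3 even
             * though a vm86 frame saves an RPL-0 %cs. */
            return ((xcs & 3) | (eflags & 0x00020000)) >= 3;
    }
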
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 87c069ccba08..c3e5db32fa48 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -20,52 +20,6 @@
 #define RW_LOCK_BIAS 0x01000000
 #define RW_LOCK_BIAS_STR "0x01000000"
 
-#define __build_read_lock_ptr(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \
-                     "jns 1f\n" \
-                     "call " helper "\n\t" \
-                     "1:\n" \
-                     ::"a" (rw) : "memory")
-
-#define __build_read_lock_const(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $1,%0\n\t" \
-                     "jns 1f\n" \
-                     "pushl %%eax\n\t" \
-                     "leal %0,%%eax\n\t" \
-                     "call " helper "\n\t" \
-                     "popl %%eax\n\t" \
-                     "1:\n" \
-                     :"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_read_lock(rw, helper) do { \
-                if (__builtin_constant_p(rw)) \
-                        __build_read_lock_const(rw, helper); \
-                else \
-                        __build_read_lock_ptr(rw, helper); \
-        } while (0)
-
-#define __build_write_lock_ptr(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                     "jz 1f\n" \
-                     "call " helper "\n\t" \
-                     "1:\n" \
-                     ::"a" (rw) : "memory")
-
-#define __build_write_lock_const(rw, helper) \
-        asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
-                     "jz 1f\n" \
-                     "pushl %%eax\n\t" \
-                     "leal %0,%%eax\n\t" \
-                     "call " helper "\n\t" \
-                     "popl %%eax\n\t" \
-                     "1:\n" \
-                     :"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_write_lock(rw, helper) do { \
-                if (__builtin_constant_p(rw)) \
-                        __build_write_lock_const(rw, helper); \
-                else \
-                        __build_write_lock_ptr(rw, helper); \
-        } while (0)
+/* Code is in asm-i386/spinlock.h */
 
 #endif
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 43113f5608eb..bc598d6388e3 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -99,17 +99,9 @@ static inline void __down_read(struct rw_semaphore *sem)
         __asm__ __volatile__(
                 "# beginning down_read\n\t"
                 LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-                " js 2f\n\t" /* jump if we weren't granted the lock */
+                " jns 1f\n"
+                " call call_rwsem_down_read_failed\n"
                 "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                " pushl %%ecx\n\t"
-                " pushl %%edx\n\t"
-                " call rwsem_down_read_failed\n\t"
-                " popl %%edx\n\t"
-                " popl %%ecx\n\t"
-                " jmp 1b\n"
-                LOCK_SECTION_END
                 "# ending down_read\n\t"
                 : "+m" (sem->count)
                 : "a" (sem)
@@ -151,15 +143,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
                 "# beginning down_write\n\t"
                 LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
                 " testl %%edx,%%edx\n\t" /* was the count 0 before? */
-                " jnz 2f\n\t" /* jump if we weren't granted the lock */
-                "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                " pushl %%ecx\n\t"
-                " call rwsem_down_write_failed\n\t"
-                " popl %%ecx\n\t"
-                " jmp 1b\n"
-                LOCK_SECTION_END
+                " jz 1f\n"
+                " call call_rwsem_down_write_failed\n"
+                "1:\n"
                 "# ending down_write"
                 : "+m" (sem->count), "=d" (tmp)
                 : "a" (sem), "1" (tmp)
@@ -193,17 +179,9 @@ static inline void __up_read(struct rw_semaphore *sem)
         __asm__ __volatile__(
                 "# beginning __up_read\n\t"
                 LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
-                " js 2f\n\t" /* jump if the lock is being waited upon */
-                "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                " decw %%dx\n\t" /* do nothing if still outstanding active readers */
-                " jnz 1b\n\t"
-                " pushl %%ecx\n\t"
-                " call rwsem_wake\n\t"
-                " popl %%ecx\n\t"
-                " jmp 1b\n"
-                LOCK_SECTION_END
+                " jns 1f\n\t"
+                " call call_rwsem_wake\n"
+                "1:\n"
                 "# ending __up_read\n"
                 : "+m" (sem->count), "=d" (tmp)
                 : "a" (sem), "1" (tmp)
@@ -219,17 +197,9 @@ static inline void __up_write(struct rw_semaphore *sem)
                 "# beginning __up_write\n\t"
                 " movl %2,%%edx\n\t"
                 LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-                " jnz 2f\n\t" /* jump if the lock is being waited upon */
+                " jz 1f\n"
+                " call call_rwsem_wake\n"
                 "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                " decw %%dx\n\t" /* did the active count reduce to 0? */
-                " jnz 1b\n\t" /* jump back if not */
-                " pushl %%ecx\n\t"
-                " call rwsem_wake\n\t"
-                " popl %%ecx\n\t"
-                " jmp 1b\n"
-                LOCK_SECTION_END
                 "# ending __up_write\n"
                 : "+m" (sem->count)
                 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
@@ -244,17 +214,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
         __asm__ __volatile__(
                 "# beginning __downgrade_write\n\t"
                 LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-                " js 2f\n\t" /* jump if the lock is being waited upon */
+                " jns 1f\n\t"
+                " call call_rwsem_downgrade_wake\n"
                 "1:\n\t"
-                LOCK_SECTION_START("")
-                "2:\n\t"
-                " pushl %%ecx\n\t"
-                " pushl %%edx\n\t"
-                " call rwsem_downgrade_wake\n\t"
254 | " popl %%edx\n\t" | ||
255 | " popl %%ecx\n\t" | ||
256 | " jmp 1b\n" | ||
257 | LOCK_SECTION_END | ||
258 | "# ending __downgrade_write\n" | 220 | "# ending __downgrade_write\n" |
259 | : "+m" (sem->count) | 221 | : "+m" (sem->count) |
260 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) | 222 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) |
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h index faf995307b9e..b7ab59685ba7 100644 --- a/include/asm-i386/segment.h +++ b/include/asm-i386/segment.h | |||
@@ -83,6 +83,11 @@ | |||
83 | 83 | ||
84 | #define GDT_SIZE (GDT_ENTRIES * 8) | 84 | #define GDT_SIZE (GDT_ENTRIES * 8) |
85 | 85 | ||
86 | /* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ | ||
87 | #define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) | ||
88 | /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ | ||
89 | #define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) | ||
90 | |||
86 | /* Simple and small GDT entries for booting only */ | 91 | /* Simple and small GDT entries for booting only */ |
87 | 92 | ||
88 | #define GDT_ENTRY_BOOT_CS 2 | 93 | #define GDT_ENTRY_BOOT_CS 2 |
@@ -112,4 +117,16 @@ | |||
112 | */ | 117 | */ |
113 | #define IDT_ENTRIES 256 | 118 | #define IDT_ENTRIES 256 |
114 | 119 | ||
120 | /* Bottom two bits of selector give the ring privilege level */ | ||
121 | #define SEGMENT_RPL_MASK 0x3 | ||
122 | /* Bit 2 is table indicator (LDT/GDT) */ | ||
123 | #define SEGMENT_TI_MASK 0x4 | ||
124 | |||
125 | /* User mode is privilege level 3 */ | ||
126 | #define USER_RPL 0x3 | ||
127 | /* LDT segment has TI set, GDT has it cleared */ | ||
128 | #define SEGMENT_LDT 0x4 | ||
129 | #define SEGMENT_GDT 0x0 | ||
130 | |||
131 | #define get_kernel_rpl() 0 | ||
115 | #endif | 132 | #endif |
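The new selector macros decompose an x86 selector as index:13 | TI:1 | RPL:2. A hypothetical helper showing how they compose (not part of the patch itself):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3	/* bits 0-1: requested privilege level */
#define SEGMENT_TI_MASK  0x4	/* bit 2: table indicator, 1 = LDT */
#define USER_RPL         0x3
#define SEGMENT_LDT      0x4

static int selector_is_user_ldt(unsigned short sel)
{
	return (sel & SEGMENT_RPL_MASK) == USER_RPL &&
	       (sel & SEGMENT_TI_MASK) == SEGMENT_LDT;
}

int main(void)
{
	printf("%d\n", selector_is_user_ldt(0x0007));	/* LDT, RPL 3 -> 1 */
	printf("%d\n", selector_is_user_ldt(0x0060));	/* GDT, RPL 0 -> 0 */
	return 0;
}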
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h index d51e800acf29..4e34a468c383 100644 --- a/include/asm-i386/semaphore.h +++ b/include/asm-i386/semaphore.h | |||
@@ -100,13 +100,10 @@ static inline void down(struct semaphore * sem) | |||
100 | __asm__ __volatile__( | 100 | __asm__ __volatile__( |
101 | "# atomic down operation\n\t" | 101 | "# atomic down operation\n\t" |
102 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ | 102 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ |
103 | "js 2f\n" | 103 | "jns 2f\n" |
104 | "1:\n" | 104 | "\tlea %0,%%eax\n\t" |
105 | LOCK_SECTION_START("") | 105 | "call __down_failed\n" |
106 | "2:\tlea %0,%%eax\n\t" | 106 | "2:" |
107 | "call __down_failed\n\t" | ||
108 | "jmp 1b\n" | ||
109 | LOCK_SECTION_END | ||
110 | :"+m" (sem->count) | 107 | :"+m" (sem->count) |
111 | : | 108 | : |
112 | :"memory","ax"); | 109 | :"memory","ax"); |
@@ -123,16 +120,13 @@ static inline int down_interruptible(struct semaphore * sem) | |||
123 | might_sleep(); | 120 | might_sleep(); |
124 | __asm__ __volatile__( | 121 | __asm__ __volatile__( |
125 | "# atomic interruptible down operation\n\t" | 122 | "# atomic interruptible down operation\n\t" |
123 | "xorl %0,%0\n\t" | ||
126 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | 124 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ |
127 | "js 2f\n\t" | 125 | "jns 2f\n\t" |
128 | "xorl %0,%0\n" | 126 | "lea %1,%%eax\n\t" |
129 | "1:\n" | 127 | "call __down_failed_interruptible\n" |
130 | LOCK_SECTION_START("") | 128 | "2:" |
131 | "2:\tlea %1,%%eax\n\t" | 129 | :"=&a" (result), "+m" (sem->count) |
132 | "call __down_failed_interruptible\n\t" | ||
133 | "jmp 1b\n" | ||
134 | LOCK_SECTION_END | ||
135 | :"=a" (result), "+m" (sem->count) | ||
136 | : | 130 | : |
137 | :"memory"); | 131 | :"memory"); |
138 | return result; | 132 | return result; |
@@ -148,16 +142,13 @@ static inline int down_trylock(struct semaphore * sem) | |||
148 | 142 | ||
149 | __asm__ __volatile__( | 143 | __asm__ __volatile__( |
150 | "# atomic interruptible down operation\n\t" | 144 | "# atomic interruptible down operation\n\t" |
145 | "xorl %0,%0\n\t" | ||
151 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | 146 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ |
152 | "js 2f\n\t" | 147 | "jns 2f\n\t" |
153 | "xorl %0,%0\n" | 148 | "lea %1,%%eax\n\t" |
154 | "1:\n" | ||
155 | LOCK_SECTION_START("") | ||
156 | "2:\tlea %1,%%eax\n\t" | ||
157 | "call __down_failed_trylock\n\t" | 149 | "call __down_failed_trylock\n\t" |
158 | "jmp 1b\n" | 150 | "2:\n" |
159 | LOCK_SECTION_END | 151 | :"=&a" (result), "+m" (sem->count) |
160 | :"=a" (result), "+m" (sem->count) | ||
161 | : | 152 | : |
162 | :"memory"); | 153 | :"memory"); |
163 | return result; | 154 | return result; |
@@ -166,22 +157,16 @@ static inline int down_trylock(struct semaphore * sem) | |||
166 | /* | 157 | /* |
167 | * Note! This is subtle. We jump to wake people up only if | 158 | * Note! This is subtle. We jump to wake people up only if |
168 | * the semaphore was negative (== somebody was waiting on it). | 159 | * the semaphore was negative (== somebody was waiting on it). |
169 | * The default case (no contention) will result in NO | ||
170 | * jumps for both down() and up(). | ||
171 | */ | 160 | */ |
172 | static inline void up(struct semaphore * sem) | 161 | static inline void up(struct semaphore * sem) |
173 | { | 162 | { |
174 | __asm__ __volatile__( | 163 | __asm__ __volatile__( |
175 | "# atomic up operation\n\t" | 164 | "# atomic up operation\n\t" |
176 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ | 165 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ |
177 | "jle 2f\n" | 166 | "jg 1f\n\t" |
178 | "1:\n" | 167 | "lea %0,%%eax\n\t" |
179 | LOCK_SECTION_START("") | 168 | "call __up_wakeup\n" |
180 | "2:\tlea %0,%%eax\n\t" | 169 | "1:" |
181 | "call __up_wakeup\n\t" | ||
182 | "jmp 1b\n" | ||
183 | LOCK_SECTION_END | ||
184 | ".subsection 0\n" | ||
185 | :"+m" (sem->count) | 170 | :"+m" (sem->count) |
186 | : | 171 | : |
187 | :"memory","ax"); | 172 | :"memory","ax"); |
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index 142d10e34ade..32ac8c91d5c5 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h | |||
@@ -80,17 +80,12 @@ static inline int hard_smp_processor_id(void) | |||
80 | return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); | 80 | return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); |
81 | } | 81 | } |
82 | #endif | 82 | #endif |
83 | |||
84 | static __inline int logical_smp_processor_id(void) | ||
85 | { | ||
86 | /* we don't want to mark this access volatile - bad code generation */ | ||
87 | return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); | ||
88 | } | ||
89 | |||
90 | #endif | 83 | #endif |
91 | 84 | ||
92 | extern int __cpu_disable(void); | 85 | extern int __cpu_disable(void); |
93 | extern void __cpu_die(unsigned int cpu); | 86 | extern void __cpu_die(unsigned int cpu); |
87 | extern unsigned int num_processors; | ||
88 | |||
94 | #endif /* !__ASSEMBLY__ */ | 89 | #endif /* !__ASSEMBLY__ */ |
95 | 90 | ||
96 | #else /* CONFIG_SMP */ | 91 | #else /* CONFIG_SMP */ |
@@ -100,4 +95,15 @@ extern void __cpu_die(unsigned int cpu); | |||
100 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | 95 | #define NO_PROC_ID 0xFF /* No processor magic marker */ |
101 | 96 | ||
102 | #endif | 97 | #endif |
98 | |||
99 | #ifndef __ASSEMBLY__ | ||
100 | #ifdef CONFIG_X86_LOCAL_APIC | ||
101 | static __inline int logical_smp_processor_id(void) | ||
102 | { | ||
103 | /* we don't want to mark this access volatile - bad code generation */ | ||
104 | return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); | ||
105 | } | ||
106 | #endif | ||
107 | #endif | ||
108 | |||
103 | #endif | 109 | #endif |
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index d1020363c41a..b0b3043f05e1 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h | |||
@@ -4,8 +4,12 @@ | |||
4 | #include <asm/atomic.h> | 4 | #include <asm/atomic.h> |
5 | #include <asm/rwlock.h> | 5 | #include <asm/rwlock.h> |
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | #include <asm/processor.h> | ||
7 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
8 | 9 | ||
10 | #define CLI_STRING "cli" | ||
11 | #define STI_STRING "sti" | ||
12 | |||
9 | /* | 13 | /* |
10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
11 | * | 15 | * |
@@ -17,67 +21,64 @@ | |||
17 | * (the type definitions are in asm/spinlock_types.h) | 21 | * (the type definitions are in asm/spinlock_types.h) |
18 | */ | 22 | */ |
19 | 23 | ||
20 | #define __raw_spin_is_locked(x) \ | 24 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) |
21 | (*(volatile signed char *)(&(x)->slock) <= 0) | 25 | { |
22 | 26 | return *(volatile signed char *)(&(x)->slock) <= 0; | |
23 | #define __raw_spin_lock_string \ | 27 | } |
24 | "\n1:\t" \ | ||
25 | LOCK_PREFIX " ; decb %0\n\t" \ | ||
26 | "jns 3f\n" \ | ||
27 | "2:\t" \ | ||
28 | "rep;nop\n\t" \ | ||
29 | "cmpb $0,%0\n\t" \ | ||
30 | "jle 2b\n\t" \ | ||
31 | "jmp 1b\n" \ | ||
32 | "3:\n\t" | ||
33 | |||
34 | /* | ||
35 | * NOTE: there's an irqs-on section here, which normally would have to be | ||
36 | * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use | ||
37 | * __raw_spin_lock_string_flags(). | ||
38 | */ | ||
39 | #define __raw_spin_lock_string_flags \ | ||
40 | "\n1:\t" \ | ||
41 | LOCK_PREFIX " ; decb %0\n\t" \ | ||
42 | "jns 5f\n" \ | ||
43 | "2:\t" \ | ||
44 | "testl $0x200, %1\n\t" \ | ||
45 | "jz 4f\n\t" \ | ||
46 | "sti\n" \ | ||
47 | "3:\t" \ | ||
48 | "rep;nop\n\t" \ | ||
49 | "cmpb $0, %0\n\t" \ | ||
50 | "jle 3b\n\t" \ | ||
51 | "cli\n\t" \ | ||
52 | "jmp 1b\n" \ | ||
53 | "4:\t" \ | ||
54 | "rep;nop\n\t" \ | ||
55 | "cmpb $0, %0\n\t" \ | ||
56 | "jg 1b\n\t" \ | ||
57 | "jmp 4b\n" \ | ||
58 | "5:\n\t" | ||
59 | 28 | ||
60 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 29 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
61 | { | 30 | { |
62 | asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory"); | 31 | asm volatile("\n1:\t" |
32 | LOCK_PREFIX " ; decb %0\n\t" | ||
33 | "jns 3f\n" | ||
34 | "2:\t" | ||
35 | "rep;nop\n\t" | ||
36 | "cmpb $0,%0\n\t" | ||
37 | "jle 2b\n\t" | ||
38 | "jmp 1b\n" | ||
39 | "3:\n\t" | ||
40 | : "+m" (lock->slock) : : "memory"); | ||
63 | } | 41 | } |
64 | 42 | ||
65 | /* | 43 | /* |
66 | * It is easier for the lock validator if interrupts are not re-enabled | 44 | * It is easier for the lock validator if interrupts are not re-enabled |
67 | * in the middle of a lock-acquire. This is a performance feature anyway | 45 | * in the middle of a lock-acquire. This is a performance feature anyway |
68 | * so we turn it off: | 46 | * so we turn it off: |
47 | * | ||
48 | * NOTE: there's an irqs-on section here, which normally would have to be | ||
49 | * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant. | ||
69 | */ | 50 | */ |
70 | #ifndef CONFIG_PROVE_LOCKING | 51 | #ifndef CONFIG_PROVE_LOCKING |
71 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 52 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
72 | { | 53 | { |
73 | asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory"); | 54 | asm volatile( |
55 | "\n1:\t" | ||
56 | LOCK_PREFIX " ; decb %0\n\t" | ||
57 | "jns 5f\n" | ||
58 | "2:\t" | ||
59 | "testl $0x200, %1\n\t" | ||
60 | "jz 4f\n\t" | ||
61 | STI_STRING "\n" | ||
62 | "3:\t" | ||
63 | "rep;nop\n\t" | ||
64 | "cmpb $0, %0\n\t" | ||
65 | "jle 3b\n\t" | ||
66 | CLI_STRING "\n\t" | ||
67 | "jmp 1b\n" | ||
68 | "4:\t" | ||
69 | "rep;nop\n\t" | ||
70 | "cmpb $0, %0\n\t" | ||
71 | "jg 1b\n\t" | ||
72 | "jmp 4b\n" | ||
73 | "5:\n\t" | ||
74 | : "+m" (lock->slock) : "r" (flags) : "memory"); | ||
74 | } | 75 | } |
75 | #endif | 76 | #endif |
76 | 77 | ||
77 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 78 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
78 | { | 79 | { |
79 | char oldval; | 80 | char oldval; |
80 | __asm__ __volatile__( | 81 | asm volatile( |
81 | "xchgb %b0,%1" | 82 | "xchgb %b0,%1" |
82 | :"=q" (oldval), "+m" (lock->slock) | 83 | :"=q" (oldval), "+m" (lock->slock) |
83 | :"0" (0) : "memory"); | 84 | :"0" (0) : "memory"); |
@@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
93 | 94 | ||
94 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | 95 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) |
95 | 96 | ||
96 | #define __raw_spin_unlock_string \ | ||
97 | "movb $1,%0" \ | ||
98 | :"+m" (lock->slock) : : "memory" | ||
99 | |||
100 | |||
101 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 97 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
102 | { | 98 | { |
103 | __asm__ __volatile__( | 99 | asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory"); |
104 | __raw_spin_unlock_string | ||
105 | ); | ||
106 | } | 100 | } |
107 | 101 | ||
108 | #else | 102 | #else |
109 | 103 | ||
110 | #define __raw_spin_unlock_string \ | ||
111 | "xchgb %b0, %1" \ | ||
112 | :"=q" (oldval), "+m" (lock->slock) \ | ||
113 | :"0" (oldval) : "memory" | ||
114 | |||
115 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 104 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
116 | { | 105 | { |
117 | char oldval = 1; | 106 | char oldval = 1; |
118 | 107 | ||
119 | __asm__ __volatile__( | 108 | asm volatile("xchgb %b0, %1" |
120 | __raw_spin_unlock_string | 109 | : "=q" (oldval), "+m" (lock->slock) |
121 | ); | 110 | : "0" (oldval) : "memory"); |
122 | } | 111 | } |
123 | 112 | ||
124 | #endif | 113 | #endif |
125 | 114 | ||
126 | #define __raw_spin_unlock_wait(lock) \ | 115 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) |
127 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 116 | { |
117 | while (__raw_spin_is_locked(lock)) | ||
118 | cpu_relax(); | ||
119 | } | ||
128 | 120 | ||
129 | /* | 121 | /* |
130 | * Read-write spinlocks, allowing multiple readers | 122 | * Read-write spinlocks, allowing multiple readers |
@@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
151 | * read_can_lock - would read_trylock() succeed? | 143 | * read_can_lock - would read_trylock() succeed? |
152 | * @lock: the rwlock in question. | 144 | * @lock: the rwlock in question. |
153 | */ | 145 | */ |
154 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) | 146 | static inline int __raw_read_can_lock(raw_rwlock_t *x) |
147 | { | ||
148 | return (int)(x)->lock > 0; | ||
149 | } | ||
155 | 150 | ||
156 | /** | 151 | /** |
157 | * write_can_lock - would write_trylock() succeed? | 152 | * write_can_lock - would write_trylock() succeed? |
158 | * @lock: the rwlock in question. | 153 | * @lock: the rwlock in question. |
159 | */ | 154 | */ |
160 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 155 | static inline int __raw_write_can_lock(raw_rwlock_t *x) |
156 | { | ||
157 | return (x)->lock == RW_LOCK_BIAS; | ||
158 | } | ||
161 | 159 | ||
162 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 160 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
163 | { | 161 | { |
164 | __build_read_lock(rw, "__read_lock_failed"); | 162 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" |
163 | "jns 1f\n" | ||
164 | "call __read_lock_failed\n\t" | ||
165 | "1:\n" | ||
166 | ::"a" (rw) : "memory"); | ||
165 | } | 167 | } |
166 | 168 | ||
167 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 169 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
168 | { | 170 | { |
169 | __build_write_lock(rw, "__write_lock_failed"); | 171 | asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" |
172 | "jz 1f\n" | ||
173 | "call __write_lock_failed\n\t" | ||
174 | "1:\n" | ||
175 | ::"a" (rw) : "memory"); | ||
170 | } | 176 | } |
171 | 177 | ||
172 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | 178 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
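Converting the string macros to inline functions gives type checking on the lock argument without changing the generated code. A portable model of the __raw_spin_lock loop, using a GCC builtin in place of the locked decb (user-space sketch, x86-only because of rep;nop):

#include <stdio.h>

typedef struct { volatile signed char slock; } spin_model_t;

static void cpu_relax_model(void)
{
	__asm__ __volatile__("rep;nop" ::: "memory");	/* PAUSE */
}

static void model_spin_lock(spin_model_t *lock)
{
	for (;;) {
		/* LOCK "decb %0; jns 3f": non-negative -> lock acquired */
		if (__sync_sub_and_fetch(&lock->slock, 1) >= 0)
			return;
		/* "cmpb $0,%0; jle 2b": spin read-only, no locked ops */
		while (lock->slock <= 0)
			cpu_relax_model();
	}
}

static void model_spin_unlock(spin_model_t *lock)
{
	lock->slock = 1;	/* "movb $1,%0" */
}

int main(void)
{
	spin_model_t l = { 1 };
	model_spin_lock(&l);
	model_spin_unlock(&l);
	printf("ok\n");
	return 0;
}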
diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h new file mode 100644 index 000000000000..7d1f6a5cbfca --- /dev/null +++ b/include/asm-i386/stacktrace.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-x86_64/stacktrace.h> | |||
diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h new file mode 100644 index 000000000000..c94d51c993ee --- /dev/null +++ b/include/asm-i386/sync_bitops.h | |||
@@ -0,0 +1,156 @@ | |||
1 | #ifndef _I386_SYNC_BITOPS_H | ||
2 | #define _I386_SYNC_BITOPS_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 1992, Linus Torvalds. | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * These have to be done with inline assembly: that way the bit-setting | ||
10 | * is guaranteed to be atomic. All bit operations return 0 if the bit | ||
11 | * was cleared before the operation and != 0 if it was not. | ||
12 | * | ||
13 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | ||
14 | */ | ||
15 | |||
16 | #define ADDR (*(volatile long *) addr) | ||
17 | |||
18 | /** | ||
19 | * sync_set_bit - Atomically set a bit in memory | ||
20 | * @nr: the bit to set | ||
21 | * @addr: the address to start counting from | ||
22 | * | ||
23 | * This function is atomic and may not be reordered. See __set_bit() | ||
24 | * if you do not require the atomic guarantees. | ||
25 | * | ||
26 | * Note: there are no guarantees that this function will not be reordered | ||
27 | * on non-x86 architectures, so if you are writing portable code, | ||
28 | * make sure not to rely on its reordering guarantees. | ||
29 | * | ||
30 | * Note that @nr may be almost arbitrarily large; this function is not | ||
31 | * restricted to acting on a single-word quantity. | ||
32 | */ | ||
33 | static inline void sync_set_bit(int nr, volatile unsigned long * addr) | ||
34 | { | ||
35 | __asm__ __volatile__("lock; btsl %1,%0" | ||
36 | :"+m" (ADDR) | ||
37 | :"Ir" (nr) | ||
38 | : "memory"); | ||
39 | } | ||
40 | |||
41 | /** | ||
42 | * sync_clear_bit - Clears a bit in memory | ||
43 | * @nr: Bit to clear | ||
44 | * @addr: Address to start counting from | ||
45 | * | ||
46 | * sync_clear_bit() is atomic and may not be reordered. However, it does | ||
47 | * not contain a memory barrier, so if it is used for locking purposes, | ||
48 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
49 | * in order to ensure changes are visible on other processors. | ||
50 | */ | ||
51 | static inline void sync_clear_bit(int nr, volatile unsigned long * addr) | ||
52 | { | ||
53 | __asm__ __volatile__("lock; btrl %1,%0" | ||
54 | :"+m" (ADDR) | ||
55 | :"Ir" (nr) | ||
56 | : "memory"); | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * sync_change_bit - Toggle a bit in memory | ||
61 | * @nr: Bit to change | ||
62 | * @addr: Address to start counting from | ||
63 | * | ||
64 | * sync_change_bit() is atomic and may not be reordered. It may be | ||
65 | * reordered on architectures other than x86. | ||
66 | * Note that @nr may be almost arbitrarily large; this function is not | ||
67 | * restricted to acting on a single-word quantity. | ||
68 | */ | ||
69 | static inline void sync_change_bit(int nr, volatile unsigned long * addr) | ||
70 | { | ||
71 | __asm__ __volatile__("lock; btcl %1,%0" | ||
72 | :"+m" (ADDR) | ||
73 | :"Ir" (nr) | ||
74 | : "memory"); | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * sync_test_and_set_bit - Set a bit and return its old value | ||
79 | * @nr: Bit to set | ||
80 | * @addr: Address to count from | ||
81 | * | ||
82 | * This operation is atomic and cannot be reordered. | ||
83 | * It may be reordered on architectures other than x86. | ||
84 | * It also implies a memory barrier. | ||
85 | */ | ||
86 | static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) | ||
87 | { | ||
88 | int oldbit; | ||
89 | |||
90 | __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" | ||
91 | :"=r" (oldbit),"+m" (ADDR) | ||
92 | :"Ir" (nr) : "memory"); | ||
93 | return oldbit; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * sync_test_and_clear_bit - Clear a bit and return its old value | ||
98 | * @nr: Bit to clear | ||
99 | * @addr: Address to count from | ||
100 | * | ||
101 | * This operation is atomic and cannot be reordered. | ||
102 | * It may be reordered on architectures other than x86. | ||
103 | * It also implies a memory barrier. | ||
104 | */ | ||
105 | static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) | ||
106 | { | ||
107 | int oldbit; | ||
108 | |||
109 | __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" | ||
110 | :"=r" (oldbit),"+m" (ADDR) | ||
111 | :"Ir" (nr) : "memory"); | ||
112 | return oldbit; | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * sync_test_and_change_bit - Change a bit and return its old value | ||
117 | * @nr: Bit to change | ||
118 | * @addr: Address to count from | ||
119 | * | ||
120 | * This operation is atomic and cannot be reordered. | ||
121 | * It also implies a memory barrier. | ||
122 | */ | ||
123 | static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) | ||
124 | { | ||
125 | int oldbit; | ||
126 | |||
127 | __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" | ||
128 | :"=r" (oldbit),"+m" (ADDR) | ||
129 | :"Ir" (nr) : "memory"); | ||
130 | return oldbit; | ||
131 | } | ||
132 | |||
133 | static __always_inline int sync_const_test_bit(int nr, const volatile unsigned long *addr) | ||
134 | { | ||
135 | return ((1UL << (nr & 31)) & | ||
136 | (((const volatile unsigned int *)addr)[nr >> 5])) != 0; | ||
137 | } | ||
138 | |||
139 | static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr) | ||
140 | { | ||
141 | int oldbit; | ||
142 | |||
143 | __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" | ||
144 | :"=r" (oldbit) | ||
145 | :"m" (ADDR),"Ir" (nr)); | ||
146 | return oldbit; | ||
147 | } | ||
148 | |||
149 | #define sync_test_bit(nr,addr) \ | ||
150 | (__builtin_constant_p(nr) ? \ | ||
151 | sync_const_test_bit((nr),(addr)) : \ | ||
152 | sync_var_test_bit((nr),(addr))) | ||
153 | |||
154 | #undef ADDR | ||
155 | |||
156 | #endif /* _I386_SYNC_BITOPS_H */ | ||
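The point of duplicating these bitops is that their lock prefix is unconditional; the regular set_bit()/test_and_set_bit() drop it on non-SMP builds, which is unsafe for memory shared with a hypervisor. A hypothetical caller, assuming this header is included (the event-port naming is illustrative):

/* "pending" stands in for a page shared with the hypervisor, where the
 * lock prefix must survive UP builds. */
static unsigned long pending[4];	/* 128 event bits */

static int mark_event_pending(int port)
{
	/* nonzero means the other side had already set the bit */
	return sync_test_and_set_bit(port, pending);
}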
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 098bcee94e38..a6dabbcd6e6a 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h | |||
@@ -267,6 +267,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
267 | #define cmpxchg(ptr,o,n)\ | 267 | #define cmpxchg(ptr,o,n)\ |
268 | ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ | 268 | ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ |
269 | (unsigned long)(n),sizeof(*(ptr)))) | 269 | (unsigned long)(n),sizeof(*(ptr)))) |
270 | #define sync_cmpxchg(ptr,o,n)\ | ||
271 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\ | ||
272 | (unsigned long)(n),sizeof(*(ptr)))) | ||
270 | #endif | 273 | #endif |
271 | 274 | ||
272 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 275 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, |
@@ -296,6 +299,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
296 | return old; | 299 | return old; |
297 | } | 300 | } |
298 | 301 | ||
302 | /* | ||
303 | * Always use locked operations when touching memory shared with a | ||
304 | * hypervisor, since the system may be SMP even if the guest kernel | ||
305 | * isn't. | ||
306 | */ | ||
307 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | ||
308 | unsigned long old, | ||
309 | unsigned long new, int size) | ||
310 | { | ||
311 | unsigned long prev; | ||
312 | switch (size) { | ||
313 | case 1: | ||
314 | __asm__ __volatile__("lock; cmpxchgb %b1,%2" | ||
315 | : "=a"(prev) | ||
316 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
317 | : "memory"); | ||
318 | return prev; | ||
319 | case 2: | ||
320 | __asm__ __volatile__("lock; cmpxchgw %w1,%2" | ||
321 | : "=a"(prev) | ||
322 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
323 | : "memory"); | ||
324 | return prev; | ||
325 | case 4: | ||
326 | __asm__ __volatile__("lock; cmpxchgl %1,%2" | ||
327 | : "=a"(prev) | ||
328 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
329 | : "memory"); | ||
330 | return prev; | ||
331 | } | ||
332 | return old; | ||
333 | } | ||
334 | |||
299 | #ifndef CONFIG_X86_CMPXCHG | 335 | #ifndef CONFIG_X86_CMPXCHG |
300 | /* | 336 | /* |
301 | * Building a kernel capable of running on 80386. It may be necessary to | 337 | * Building a kernel capable of running on 80386. It may be necessary to |
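sync_cmpxchg() has the same shape as cmpxchg() but always emits the lock prefix via __sync_cmpxchg above. A hypothetical use on hypervisor-shared memory (names illustrative; assumes this header):

/* u16 works because __sync_cmpxchg handles the 1-, 2- and 4-byte
 * cases; this compiles to "lock cmpxchgw". */
static inline int claim_grant(u16 *flags, u16 owner)
{
	/* succeed only if nobody owned the slot */
	return sync_cmpxchg(flags, (u16)0, owner) == 0;
}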
diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h new file mode 100644 index 000000000000..399bf6026b16 --- /dev/null +++ b/include/asm-i386/therm_throt.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef __ASM_I386_THERM_THROT_H__ | ||
2 | #define __ASM_I386_THERM_THROT_H__ 1 | ||
3 | |||
4 | #include <asm/atomic.h> | ||
5 | |||
6 | extern atomic_t therm_throt_en; | ||
7 | int therm_throt_process(int curr); | ||
8 | |||
9 | #endif /* __ASM_I386_THERM_THROT_H__ */ | ||
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h index d57ca5c540b6..360648b0f2b3 100644 --- a/include/asm-i386/tlbflush.h +++ b/include/asm-i386/tlbflush.h | |||
@@ -36,8 +36,6 @@ | |||
36 | : "memory"); \ | 36 | : "memory"); \ |
37 | } while (0) | 37 | } while (0) |
38 | 38 | ||
39 | extern unsigned long pgkern_mask; | ||
40 | |||
41 | # define __flush_tlb_all() \ | 39 | # define __flush_tlb_all() \ |
42 | do { \ | 40 | do { \ |
43 | if (cpu_has_pge) \ | 41 | if (cpu_has_pge) \ |
@@ -49,7 +47,7 @@ extern unsigned long pgkern_mask; | |||
49 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) | 47 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
50 | 48 | ||
51 | #define __flush_tlb_single(addr) \ | 49 | #define __flush_tlb_single(addr) \ |
52 | __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) | 50 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") |
53 | 51 | ||
54 | #ifdef CONFIG_X86_INVLPG | 52 | #ifdef CONFIG_X86_INVLPG |
55 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) | 53 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) |
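The invlpg change swaps the "m" constraint on *(char *)addr for an address passed in a register plus an explicit memory clobber. For comparison, the two operand forms side by side (invlpg is a privileged instruction, so this only illustrates the constraint change, and cannot run in user space):

static inline void flush_one_old(unsigned long addr)
{
	/* compiler picks the addressing mode for the dummy memory operand */
	__asm__ __volatile__("invlpg %0" : : "m" (*(char *)addr));
}

static inline void flush_one_new(unsigned long addr)
{
	/* address in a register; "memory" keeps accesses from migrating
	 * across the flush */
	__asm__ __volatile__("invlpg (%0)" : : "r" (addr) : "memory");
}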
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h index 97b828ce31e0..c13933185c1c 100644 --- a/include/asm-i386/tsc.h +++ b/include/asm-i386/tsc.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #ifndef _ASM_i386_TSC_H | 6 | #ifndef _ASM_i386_TSC_H |
7 | #define _ASM_i386_TSC_H | 7 | #define _ASM_i386_TSC_H |
8 | 8 | ||
9 | #include <linux/config.h> | ||
10 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
11 | 10 | ||
12 | /* | 11 | /* |
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index fc1c8ddae149..bd9987087adc 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h | |||
@@ -323,18 +323,20 @@ | |||
323 | #define __NR_tee 315 | 323 | #define __NR_tee 315 |
324 | #define __NR_vmsplice 316 | 324 | #define __NR_vmsplice 316 |
325 | #define __NR_move_pages 317 | 325 | #define __NR_move_pages 317 |
326 | #define __NR_getcpu 318 | ||
326 | 327 | ||
327 | #ifdef __KERNEL__ | 328 | #ifdef __KERNEL__ |
328 | 329 | ||
329 | #define NR_syscalls 318 | 330 | #define NR_syscalls 319 |
331 | #include <linux/err.h> | ||
330 | 332 | ||
331 | /* | 333 | /* |
332 | * user-visible error numbers are in the range -1 - -128: see | 334 | * user-visible error numbers are in the range -1 - -MAX_ERRNO: see |
333 | * <asm-i386/errno.h> | 335 | * <asm-i386/errno.h> |
334 | */ | 336 | */ |
335 | #define __syscall_return(type, res) \ | 337 | #define __syscall_return(type, res) \ |
336 | do { \ | 338 | do { \ |
337 | if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \ | 339 | if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ |
338 | errno = -(res); \ | 340 | errno = -(res); \ |
339 | res = -1; \ | 341 | res = -1; \ |
340 | } \ | 342 | } \ |
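With MAX_ERRNO from <linux/err.h>, the error window becomes [-4095, -1], so large success values (mmap addresses, for instance) are not misreported as errors. A hypothetical hand-rolled wrapper showing the same check __syscall_return performs (i386-only; __NR_getpid is 20):

#include <errno.h>
#include <stdio.h>

static long my_getpid(void)
{
	long res;
	__asm__ __volatile__("int $0x80" : "=a" (res) : "0" (20) : "memory");
	if ((unsigned long)res >= (unsigned long)(-4095)) { /* -MAX_ERRNO */
		errno = -res;	/* kernel returned -errno in eax */
		return -1;
	}
	return res;
}

int main(void)
{
	printf("pid=%ld\n", my_getpid());
	return 0;
}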
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index 4c1a0b968569..5031d693b89d 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h | |||
@@ -18,6 +18,7 @@ struct unwind_frame_info | |||
18 | { | 18 | { |
19 | struct pt_regs regs; | 19 | struct pt_regs regs; |
20 | struct task_struct *task; | 20 | struct task_struct *task; |
21 | unsigned call_frame:1; | ||
21 | }; | 22 | }; |
22 | 23 | ||
23 | #define UNW_PC(frame) (frame)->regs.eip | 24 | #define UNW_PC(frame) (frame)->regs.eip |
@@ -28,6 +29,8 @@ struct unwind_frame_info | |||
28 | #define FRAME_LINK_OFFSET 0 | 29 | #define FRAME_LINK_OFFSET 0 |
29 | #define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0) | 30 | #define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0) |
30 | #define STACK_TOP(tsk) ((tsk)->thread.esp0) | 31 | #define STACK_TOP(tsk) ((tsk)->thread.esp0) |
32 | #else | ||
33 | #define UNW_FP(frame) ((void)(frame), 0) | ||
31 | #endif | 34 | #endif |
32 | #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) | 35 | #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) |
33 | 36 | ||
@@ -42,6 +45,10 @@ struct unwind_frame_info | |||
42 | PTREGS_INFO(edi), \ | 45 | PTREGS_INFO(edi), \ |
43 | PTREGS_INFO(eip) | 46 | PTREGS_INFO(eip) |
44 | 47 | ||
48 | #define UNW_DEFAULT_RA(raItem, dataAlign) \ | ||
49 | ((raItem).where == Memory && \ | ||
50 | !((raItem).value * (dataAlign) + 4)) | ||
51 | |||
45 | static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, | 52 | static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, |
46 | /*const*/ struct pt_regs *regs) | 53 | /*const*/ struct pt_regs *regs) |
47 | { | 54 | { |
@@ -88,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) | |||
88 | 95 | ||
89 | #define UNW_PC(frame) ((void)(frame), 0) | 96 | #define UNW_PC(frame) ((void)(frame), 0) |
90 | #define UNW_SP(frame) ((void)(frame), 0) | 97 | #define UNW_SP(frame) ((void)(frame), 0) |
98 | #define UNW_FP(frame) ((void)(frame), 0) | ||
91 | 99 | ||
92 | static inline int arch_unw_user_mode(const void *info) | 100 | static inline int arch_unw_user_mode(const void *info) |
93 | { | 101 | { |
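The ((void)(frame), 0) stubs evaluate and type-check their argument before yielding a constant, so callers compile unchanged whether or not the unwinder is configured. The idiom in isolation (hypothetical demo, not from the patch):

#include <stdio.h>

#define STUB_VALUE(x) ((void)(x), 0)	/* evaluate x for its type, yield 0 */

int main(void)
{
	int frame = 42;
	printf("%d\n", STUB_VALUE(frame));	/* prints 0; no unused warning */
	return 0;
}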