author	Robert Richter <robert.richter@amd.com>	2010-10-25 10:58:34 -0400
committer	Robert Richter <robert.richter@amd.com>	2010-10-25 10:58:34 -0400
commit	4cafc4b8d7219b70e15f22e4a51b3ce847810caf (patch)
tree	8051ea3f36f0682d08f47df8e35e14ca7eb7a5d7 /arch
parent	b47fad3bfb5940cc3e28a1c69716f6dc44e4b7e6 (diff)
parent	dbd1e66e04558a582e673bc4a9cd933ce0228d93 (diff)
Merge branch 'oprofile/core' into oprofile/x86
Conflicts:
arch/x86/oprofile/op_model_amd.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch')
210 files changed, 3359 insertions, 4018 deletions
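
Most of this merge replaces the per-architecture set_perf_event_pending()/perf_event_do_pending() hooks with the generic irq_work layer, which the architectures below now opt into via HAVE_IRQ_WORK. A minimal sketch of how a client queues deferred work with that API, assuming the 2.6.37-era interface (the function and variable names are illustrative, not from this diff):

#include <linux/irq_work.h>

/* Runs shortly after being queued, in a context with IRQs enabled. */
static void example_irq_work_func(struct irq_work *work)
{
	/* e.g. wake up waiters, deliver signals */
}

static struct irq_work example_work = {
	.func = example_irq_work_func,
};

/* Safe from hard-IRQ or NMI context: the core either raises a
 * self-interrupt or, on architectures without one, sets a per-CPU
 * flag that the timer tick polls (see the alpha conversion below). */
static void raise_deferred_work(void)
{
	irq_work_queue(&example_work);
}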
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b9647bb66d13..d04ccd73af45 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
index 4157cd3c44a9..fe792ca818f6 100644
--- a/arch/alpha/include/asm/perf_event.h
+++ b/arch/alpha/include/asm/perf_event.h
@@ -1,11 +1,6 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 396af1799ea4..0f1d8493cfca 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()  __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()      0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()      0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifndef CONFIG_SMP
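
The alpha hunks above are the no-self-IPI fallback in full: queuing work makes the irq_work core call the arch hook, which only sets a per-CPU flag, and the next timer tick drains the queue. A condensed sketch of that handshake, using the names from the hunks (the core-side call into set_irq_work_pending() is assumed plumbing in kernel/irq_work.c):

#ifdef CONFIG_IRQ_WORK
DEFINE_PER_CPU(u8, irq_work_pending);

void set_irq_work_pending(void)		/* invoked when work is queued */
{
	__get_cpu_var(irq_work_pending) = 1;
}
#endif

/* ...and in timer_interrupt(): */
if (test_irq_work_pending()) {
	clear_irq_work_pending();
	irq_work_run();			/* process the per-CPU queue */
}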
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 88c97bc7a6f5..9103904b3dab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_LZMA
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
@@ -1101,6 +1102,20 @@ config ARM_ERRATA_720789
 	  invalidated are not, resulting in an incoherency in the system page
 	  tables. The workaround changes the TLB flushing routines to invalidate
 	  entries regardless of the ASID.
+
+config ARM_ERRATA_743622
+	bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for the 743622 Cortex-A9
+	  (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+	  optimisation in the Cortex-A9 Store Buffer may lead to data
+	  corruption. This workaround sets a specific bit in the diagnostic
+	  register of the Cortex-A9 which disables the Store Buffer
+	  optimisation, preventing the defect from occurring. This has no
+	  visible impact on the overall performance or power consumption of the
+	  processor.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index 90831f6f5f5c..5586b7c8ef6f 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags);
 #define IRQF_PROBE	(1 << 1)
 #define IRQF_NOAUTOEN	(1 << 2)
 
+#define ARCH_IRQ_INIT_FLAGS	(IRQ_NOREQUEST | IRQ_NOPROBE)
+
 #endif
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index b5799a3b7117..c4aa4e8c6af9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c0d5c3b3a760..36ad3be4692a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	for (irq = 0; irq < nr_irqs; irq++) {
-		desc = irq_to_desc_alloc_node(irq, 0);
-		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-	}
-
 	init_arch_irq();
 }
 
@@ -169,7 +161,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
 	nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-	return 0;
+	return nr_irqs;
 }
 #endif
 
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8bccbfa693ff..2c1f0050c9c4 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
 	/*
 	 * MSR   : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
-	 * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+	 * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
 	 * ALU op with S bit and Rd == 15 :
 	 *	 cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
 	 */
-	if ((insn & 0x0f900000) == 0x03200000 ||	/* MSR & Undef */
+	if ((insn & 0x0fb00000) == 0x03200000 ||	/* MSR */
+	    (insn & 0x0ff00000) == 0x03400000 ||	/* Undef */
 	    (insn & 0x0e10f000) == 0x0210f000)		/* ALU s-bit, R15 */
 		return INSN_REJECTED;
 
@@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 	 * *S (bit 20) updates condition codes
 	 * ADC/SBC/RSC reads the C flag
 	 */
-	insn &= 0xfff00fff;	/* Rn = r0, Rd = r0 */
+	insn &= 0xffff0fff;	/* Rd = r0 */
 	asi->insn[0] = insn;
 	asi->insn_handler = (insn & (1 << 20)) ?	/* S-bit */
 			emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
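
The rejection test above is plain mask/compare decoding of the 32-bit ARM instruction space; the fix replaces one over-broad mask (0x0f900000) with a precise MSR match plus a separate match for the undefined 0x03400000 region. A hedged restatement in freestanding C (the helper is invented for illustration; the constants are the ones from the hunk):

#include <stdbool.h>
#include <stdint.h>

static bool insn_is(uint32_t insn, uint32_t mask, uint32_t match)
{
	return (insn & mask) == match;
}

/* Encodings the kprobes decoder must reject after the fix. */
static bool must_reject(uint32_t insn)
{
	return insn_is(insn, 0x0fb00000, 0x03200000) ||	/* MSR immediate */
	       insn_is(insn, 0x0ff00000, 0x03400000) ||	/* undefined space */
	       insn_is(insn, 0x0e10f000, 0x0210f000);	/* ALU S-bit, Rd == 15 */
}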
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 6cc6521881aa..49643b1467e6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1092,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num,
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();
 
 	return IRQ_HANDLED;
 }
@@ -2068,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();
 
 	return IRQ_HANDLED;
 }
@@ -2436,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		armpmu->disable(hwc, idx);
 	}
 
-	perf_event_do_pending();
+	irq_work_run();
 
 	/*
 	 * Re-enable the PMU.
@@ -2763,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		armpmu->disable(hwc, idx);
 	}
 
-	perf_event_do_pending();
+	irq_work_run();
 
 	/*
 	 * Re-enable the PMU.
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index c80e090b3670..ee8db152592e 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -28,17 +28,16 @@
 
 static inline void arch_idle(void)
 {
-#ifndef CONFIG_DEBUG_KERNEL
 	/*
 	 * Disable the processor clock. The processor will be automatically
 	 * re-enabled by an interrupt or by a reset.
 	 */
 	at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-#else
+#ifndef CONFIG_CPU_ARM920T
 	/*
 	 * Set the processor (CP15) into 'Wait for Interrupt' mode.
-	 * Unlike disabling the processor clock via the PMC (above)
-	 * this allows the processor to be woken via JTAG.
+	 * Post-RM9200 processors need this in conjunction with the above
+	 * to save power when idle.
 	 */
 	cpu_do_idle();
 #endif
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 29c0a911df26..77eb35c89cd0 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -691,7 +691,7 @@ int dma_init(void)
 
 	memset(&gDMA, 0, sizeof(gDMA));
 
-	init_MUTEX_LOCKED(&gDMA.lock);
+	sema_init(&gDMA.lock, 0);
 	init_waitqueue_head(&gDMA.freeChannelQ);
 
 	/* Initialize the Hardware */
@@ -1574,7 +1574,7 @@ int dma_init_mem_map(DMA_MemMap_t *memMap)
 {
 	memset(memMap, 0, sizeof(*memMap));
 
-	init_MUTEX(&memMap->lock);
+	sema_init(&memMap->lock, 1);
 
 	return 0;
 }
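
init_MUTEX() and init_MUTEX_LOCKED() were on their way out of the semaphore API in this release cycle; the replacement is sema_init() with an explicit initial count, exactly as the two hunks above apply it. A minimal equivalence sketch (variable name is illustrative):

#include <linux/semaphore.h>

static struct semaphore example_lock;

static void example_init(void)
{
	sema_init(&example_lock, 1);	/* was init_MUTEX():        starts unlocked */
	sema_init(&example_lock, 0);	/* was init_MUTEX_LOCKED(): starts locked   */
}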
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index dc1c4939b0ce..e3152631eb37 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
 }
 
 static struct irq_chip bcmring_irq0_chip = {
-	.typename = "ARM-INTC0",
+	.name = "ARM-INTC0",
 	.ack = bcmring_mask_irq0,
 	.mask = bcmring_mask_irq0,	/* mask a specific interrupt, blocking its delivery. */
 	.unmask = bcmring_unmask_irq0,	/* unmaks an interrupt */
 };
 
 static struct irq_chip bcmring_irq1_chip = {
-	.typename = "ARM-INTC1",
+	.name = "ARM-INTC1",
 	.ack = bcmring_mask_irq1,
 	.mask = bcmring_mask_irq1,
 	.unmask = bcmring_unmask_irq1,
 };
 
 static struct irq_chip bcmring_irq2_chip = {
-	.typename = "ARM-SINTC",
+	.name = "ARM-SINTC",
 	.ack = bcmring_mask_irq2,
 	.mask = bcmring_mask_irq2,
 	.unmask = bcmring_unmask_irq2,
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
index 8904ca4e2e24..a696d354b1f8 100644
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ b/arch/arm/mach-ep93xx/dma-m2p.c
@@ -276,7 +276,7 @@ static void channel_disable(struct m2p_channel *ch)
 	v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
 	m2p_set_control(ch, v);
 
-	while (m2p_channel_state(ch) == STATE_ON)
+	while (m2p_channel_state(ch) >= STATE_ON)
 		cpu_relax();
 
 	m2p_set_control(ch, 0x0);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c5c0369bb481..2f7e2728970d 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -122,6 +122,7 @@ config MACH_CPUIMX27
 	select IMX_HAVE_PLATFORM_IMX_I2C
 	select IMX_HAVE_PLATFORM_IMX_UART
 	select IMX_HAVE_PLATFORM_MXC_NAND
+	select MXC_ULPI if USB_ULPI
 	help
 	  Include support for Eukrea CPUIMX27 platform. This includes
 	  specific configurations for the module and its peripherals.
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 339150ab0ea5..6830afd1d2ba 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -259,7 +259,7 @@ static void __init eukrea_cpuimx27_init(void)
 	i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
 				ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
 
-	imx27_add_i2c_imx1(&cpuimx27_i2c1_data);
+	imx27_add_i2c_imx0(&cpuimx27_i2c1_data);
 
 	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
 
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index f34b0ed80630..7149fcc16c8a 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq)
 static struct irq_chip iop13xx_msi_chip = {
 	.name = "PCI-MSI",
 	.ack = iop13xx_msi_nop,
-	.enable = unmask_msi_irq,
-	.disable = mask_msi_irq,
-	.mask = mask_msi_irq,
-	.unmask = unmask_msi_irq,
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
 };
 
 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
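
This chip (like the ia64 MSI chips later in the diff) is being moved to the new irq_chip callback names, which are prefixed irq_ and, in their final form, take a struct irq_data rather than a bare interrupt number; mask_msi_irq()/unmask_msi_irq() had just been converted to that signature in this kernel generation. A hedged sketch of the fully converted shape (the chip and ack names are illustrative):

#include <linux/irq.h>
#include <linux/msi.h>

static void example_msi_ack(struct irq_data *d)
{
	/* nothing to ack on this controller */
}

static struct irq_chip example_msi_chip = {
	.name		= "PCI-MSI",
	.irq_ack	= example_msi_ack,
	.irq_mask	= mask_msi_irq,		/* generic MSI helpers with the */
	.irq_unmask	= unmask_msi_irq,	/* new irq_data signature       */
};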
diff --git a/arch/arm/mach-s5p6440/cpu.c b/arch/arm/mach-s5p6440/cpu.c
index 526f33adb31d..ec592e866054 100644
--- a/arch/arm/mach-s5p6440/cpu.c
+++ b/arch/arm/mach-s5p6440/cpu.c
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
index a48fb553fd01..70ac681af72b 100644
--- a/arch/arm/mach-s5p6442/cpu.c
+++ b/arch/arm/mach-s5p6442/cpu.c
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/cpu.c
index 251c92ac5b22..cd1afbce83e2 100644
--- a/arch/arm/mach-s5pc100/cpu.c
+++ b/arch/arm/mach-s5pc100/cpu.c
@@ -21,6 +21,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index cfecd70657cb..d562670e1b0b 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -173,11 +173,6 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
 	return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
 }
 
-static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
-{
-	return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
-}
-
 static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
 {
 	return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index 77f456c91ad3..245b82b53df4 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/sysdev.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index efb127022d42..71fb17349520 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -68,7 +68,7 @@ static void __init ct_ca9x4_init_irq(void)
 }
 
 #if 0
-static void ct_ca9x4_timer_init(void)
+static void __init ct_ca9x4_timer_init(void)
 {
 	writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
 	writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
@@ -222,7 +222,7 @@ static struct platform_device pmu_device = {
 	.resource	= pmu_resources,
 };
 
-static void ct_ca9x4_init(void)
+static void __init ct_ca9x4_init(void)
 {
 	int i;
 
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 817f0ad38a0b..7eaa232180a5 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -48,7 +48,7 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
 }
 
 
-static void v2m_timer_init(void)
+static void __init v2m_timer_init(void)
 {
 	writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
 	writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d3..17e7b0b57e49 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
 	 */
-	if (WARN_ON(pfn_valid(pfn)))
-		return NULL;
+	if (pfn_valid(pfn)) {
+		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
+		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+		       KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+		WARN_ON(1);
+	}
 
 	type = get_mem_type(mtype);
 	if (!type)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6a3a2d0cd6db..e8ed9dc461fe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -248,7 +248,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+				L_PTE_WRITE | L_PTE_EXEC,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
@@ -259,7 +259,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_NONCACHED] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+				L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7563ff0141bd..197f21bed5e9 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -253,6 +253,14 @@ __v7_setup:
 	orreq	r10, r10, #1 << 22		@ set bit #22
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
 #endif
+#ifdef CONFIG_ARM_ERRATA_743622
+	teq	r6, #0x20			@ present in r2p0
+	teqne	r6, #0x21			@ present in r2p1
+	teqne	r6, #0x22			@ present in r2p2
+	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orreq	r10, r10, #1 << 6		@ set bit #6
+	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
 
 3:	mov	r10, #0
 #ifdef HARVARD_CACHE
@@ -365,7 +373,7 @@ __v7_ca9mp_proc_info:
 	b	__v7_ca9mp_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_v7_name
 	.long	v7_processor_functions
 	.long	v7wbi_tlb_fns
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index a202a2ce6e3d..6cd151b31bc5 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -320,6 +320,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
 		if ((start <= da) && (da < start + bytes)) {
 			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
 				__func__, start, da, bytes);
+			iotlb_load_cr(obj, &cr);
 			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 		}
 	}
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 04d9521ddc9f..e8f2be2d67f2 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -435,7 +435,6 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
 static int s3c_adc_resume(struct platform_device *pdev)
 {
 	struct adc_device *adc = platform_get_drvdata(pdev);
-	unsigned long flags;
 
 	clk_enable(adc->clk);
 	enable_irq(adc->irq);
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 90a20512d68d..e8d20b0bc50e 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -48,6 +48,9 @@
 #include <plat/clock.h>
 #include <plat/cpu.h>
 
+#include <linux/serial_core.h>
+#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
+
 /* clock information */
 
 static LIST_HEAD(clocks);
@@ -65,6 +68,28 @@ static int clk_null_enable(struct clk *clk, int enable)
 	return 0;
 }
 
+static int dev_is_s3c_uart(struct device *dev)
+{
+	struct platform_device **pdev = s3c24xx_uart_devs;
+	int i;
+	for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
+		if (*pdev && dev == &(*pdev)->dev)
+			return 1;
+	return 0;
+}
+
+/*
+ * Serial drivers call get_clock() very early, before platform bus
+ * has been set up, this requires a special check to let them get
+ * a proper clock
+ */
+
+static int dev_is_platform_device(struct device *dev)
+{
+	return dev->bus == &platform_bus_type ||
+	       (dev->bus == NULL && dev_is_s3c_uart(dev));
+}
+
 /* Clock API calls */
 
 struct clk *clk_get(struct device *dev, const char *id)
@@ -73,7 +98,7 @@ struct clk *clk_get(struct device *dev, const char *id)
 	struct clk *clk = ERR_PTR(-ENOENT);
 	int idno;
 
-	if (dev == NULL || dev->bus != &platform_bus_type)
+	if (dev == NULL || !dev_is_platform_device(dev))
 		idno = -1;
 	else
 		idno = to_platform_device(dev)->id;
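
The effect of the clock.c change is that clk_get() can resolve a device id for the S3C UARTs even before the platform bus has bound them. Callers do not change; a hedged sketch of the usual lookup a driver performs (the "uart" id string is illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_get_uart_clock(struct platform_device *pdev)
{
	struct clk *clk;

	clk = clk_get(&pdev->dev, "uart");	/* now also resolves pre-binding */
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);
	return 0;
}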
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 16399bd24993..0f2417df6323 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,6 +7,7 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 
 config ZONE_DMA
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index f4709756d0d9..4ff2fb1e6b16 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
diff --git a/arch/frv/lib/perf_event.c b/arch/frv/lib/perf_event.c
deleted file mode 100644
index 9ac5acfd2e91..000000000000
--- a/arch/frv/lib/perf_event.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Performance event handling
- *
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/perf_event.h>
-
-/*
- * mark the performance event as pending
- */
-void set_perf_event_pending(void)
-{
-}
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index d514cd9edb49..8fb7d33a661f 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -6,12 +6,6 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-#include <asm/processor.h>
-
 /*
  * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
  */
@@ -20,6 +14,11 @@
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
 
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 9f342a574ce8..dd028f2b13b3 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -272,10 +272,6 @@ void cpu_idle_wait(void);
 
 void default_idle(void);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 4a746ea838ff..00b19a416eab 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
  */
 static struct irq_chip ia64_msi_chip = {
 	.name		= "PCI-MSI",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
 	.ack		= ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
 	.set_affinity	= ia64_set_msi_irq_affinity,
@@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 static struct irq_chip dmar_msi_type = {
 	.name = "DMAR_MSI",
-	.unmask = dmar_msi_unmask,
-	.mask = dmar_msi_mask,
+	.irq_unmask = dmar_msi_unmask,
+	.irq_mask = dmar_msi_mask,
 	.ack = ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = dmar_msi_set_affinity,
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 0c72dd463831..a5e500f02853 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq)
 
 static struct irq_chip sn_msi_chip = {
 	.name = "PCI-MSI",
-	.mask = mask_msi_irq,
-	.unmask = unmask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
 	.ack = sn_ack_msi_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = sn_set_msi_irq_affinity,
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 2f85412ef730..b8da7d0574d2 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -82,9 +82,9 @@ typedef elf_fpreg_t elf_fpregset_t;
  * These are used to set parameters in the core dumps.
  */
 #define ELF_CLASS	ELFCLASS32
-#if defined(__LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN__)
 #define ELF_DATA	ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
+#elif defined(__BIG_ENDIAN__)
 #define ELF_DATA	ELFDATA2MSB
 #else
 #error no endian defined
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/m32r/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 3c71f776872c..7db26f1f082d 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 7bbe38645ed5..a08697f0886d 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,6 +28,8 @@
 
 #define DEBUG_SIG 0
 
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r2, unsigned long r3, unsigned long r4,
@@ -254,7 +256,7 @@ give_sigsegv:
 static int prev_insn(struct pt_regs *regs)
 {
 	u16 inst;
-	if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
+	if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
 		return -EFAULT;
 	if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
 		regs->bpc -= 2;
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 922fdfdadeaa..402a59d7219b 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
 
 static struct irq_chip m32104ut_irq_type =
 {
-	.typename = "M32104UT-IRQ",
+	.name = "M32104UT-IRQ",
 	.startup = startup_m32104ut_irq,
 	.shutdown = shutdown_m32104ut_irq,
 	.enable = enable_m32104ut_irq,
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 9c1bc7487c1e..80b1a026795a 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_irq_type =
 {
-	.typename = "M32700UT-IRQ",
+	.name = "M32700UT-IRQ",
 	.startup = startup_m32700ut_irq,
 	.shutdown = shutdown_m32700ut_irq,
 	.enable = enable_m32700ut_irq,
@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.typename = "M32700UT-PLD-IRQ",
+	.name = "M32700UT-PLD-IRQ",
 	.startup = startup_m32700ut_pld_irq,
 	.shutdown = shutdown_m32700ut_pld_irq,
 	.enable = enable_m32700ut_pld_irq,
@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_lanpld_irq_type =
 {
-	.typename = "M32700UT-PLD-LAN-IRQ",
+	.name = "M32700UT-PLD-LAN-IRQ",
 	.startup = startup_m32700ut_lanpld_irq,
 	.shutdown = shutdown_m32700ut_lanpld_irq,
 	.enable = enable_m32700ut_lanpld_irq,
@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_lcdpld_irq_type =
 {
-	.typename = "M32700UT-PLD-LCD-IRQ",
+	.name = "M32700UT-PLD-LCD-IRQ",
 	.startup = startup_m32700ut_lcdpld_irq,
 	.shutdown = shutdown_m32700ut_lcdpld_irq,
 	.enable = enable_m32700ut_lcdpld_irq,
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index fb4b17799b66..ea00c84d6b1b 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq)
 
 static struct irq_chip mappi_irq_type =
 {
-	.typename = "MAPPI-IRQ",
+	.name = "MAPPI-IRQ",
 	.startup = startup_mappi_irq,
 	.shutdown = shutdown_mappi_irq,
 	.enable = enable_mappi_irq,
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index 6a65eda0a056..c049376d0270 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
 
 static struct irq_chip mappi2_irq_type =
 {
-	.typename = "MAPPI2-IRQ",
+	.name = "MAPPI2-IRQ",
 	.startup = startup_mappi2_irq,
 	.shutdown = shutdown_mappi2_irq,
 	.enable = enable_mappi2_irq,
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 9c337aeac94b..882de25c6e8c 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
 
 static struct irq_chip mappi3_irq_type =
 {
-	.typename = "MAPPI3-IRQ",
+	.name = "MAPPI3-IRQ",
 	.startup = startup_mappi3_irq,
 	.shutdown = shutdown_mappi3_irq,
 	.enable = enable_mappi3_irq,
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index ed865741c38d..d11d93bf74f5 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
 
 static struct irq_chip oaks32r_irq_type =
 {
-	.typename = "OAKS32R-IRQ",
+	.name = "OAKS32R-IRQ",
 	.startup = startup_oaks32r_irq,
 	.shutdown = shutdown_oaks32r_irq,
 	.enable = enable_oaks32r_irq,
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 80d680657019..5f3402a2fbaf 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq)
 
 static struct irq_chip opsput_irq_type =
 {
-	.typename = "OPSPUT-IRQ",
+	.name = "OPSPUT-IRQ",
 	.startup = startup_opsput_irq,
 	.shutdown = shutdown_opsput_irq,
 	.enable = enable_opsput_irq,
@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
 
 static struct irq_chip opsput_pld_irq_type =
 {
-	.typename = "OPSPUT-PLD-IRQ",
+	.name = "OPSPUT-PLD-IRQ",
 	.startup = startup_opsput_pld_irq,
 	.shutdown = shutdown_opsput_pld_irq,
 	.enable = enable_opsput_pld_irq,
@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
 
 static struct irq_chip opsput_lanpld_irq_type =
 {
-	.typename = "OPSPUT-PLD-LAN-IRQ",
+	.name = "OPSPUT-PLD-LAN-IRQ",
 	.startup = startup_opsput_lanpld_irq,
 	.shutdown = shutdown_opsput_lanpld_irq,
 	.enable = enable_opsput_lanpld_irq,
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 757302660af8..1beac7a51ed4 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
 
 static struct irq_chip mappi_irq_type =
 {
-	.typename = "M32700-IRQ",
+	.name = "M32700-IRQ",
 	.startup = startup_mappi_irq,
 	.shutdown = shutdown_mappi_irq,
 	.enable = enable_mappi_irq,
@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.typename = "USRV-PLD-IRQ",
+	.name = "USRV-PLD-IRQ",
 	.startup = startup_m32700ut_pld_irq,
 	.shutdown = shutdown_m32700ut_pld_irq,
 	.enable = enable_m32700ut_pld_irq,
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index e322d65f33a4..7dd65cfae837 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -7,6 +7,10 @@ subdir-ccflags-y := -Werror
 include arch/mips/Kbuild.platforms
 obj-y := $(platform-y)
 
+# make clean traverses $(obj-) without having included .config, so
+# everything ends up here
+obj- := $(platform-)
+
 # mips object files
 # The object files are linked as core-y files would be linked
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5526faabfc21..4c9f402295dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -881,11 +881,15 @@ config NO_IOPORT
 config GENERIC_ISA_DMA
 	bool
 	select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
+	select ISA_DMA_API
 
 config GENERIC_ISA_DMA_SUPPORT_BROKEN
 	bool
 	select GENERIC_ISA_DMA
 
+config ISA_DMA_API
+	bool
+
 config GENERIC_GPIO
 	bool
 
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 5fd7f7a58b7e..5042d51b0512 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -105,4 +105,4 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
 vmlinuz.srec: vmlinuz
 	$(call cmd,objcopy)
 
-clean-files := $(objtree)/vmlinuz.*
+clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
diff --git a/arch/mips/dec/Platform b/arch/mips/dec/Platform
index 3adbcbd95db1..cf55a6f4e720 100644
--- a/arch/mips/dec/Platform
+++ b/arch/mips/dec/Platform
@@ -1,7 +1,7 @@
 #
 # DECstation family
 #
-platform-$(CONFIG_MACH_DECSTATION) = dec/
+platform-$(CONFIG_MACH_DECSTATION) += dec/
 cflags-$(CONFIG_MACH_DECSTATION) += \
 		-I$(srctree)/arch/mips/include/asm/mach-dec
 libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/
diff --git a/arch/mips/include/asm/fcntl.h b/arch/mips/include/asm/fcntl.h
index e482fe90fe88..75eddedcfc3e 100644
--- a/arch/mips/include/asm/fcntl.h
+++ b/arch/mips/include/asm/fcntl.h
@@ -56,6 +56,7 @@
  */
 
 #ifdef CONFIG_32BIT
+#include <linux/types.h>
 
 struct flock {
 	short	l_type;
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
index 96e28f18dad1..1ca64b4d33d9 100644
--- a/arch/mips/include/asm/siginfo.h
+++ b/arch/mips/include/asm/siginfo.h
@@ -88,6 +88,7 @@ typedef struct siginfo {
 #ifdef __ARCH_SI_TRAPNO
 		int _trapno;	/* TRAP # which caused the signal */
 #endif
+		short _addr_lsb;
 	} _sigfault;
 
 	/* SIGPOLL, SIGXFSZ (To do ...) */
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 6a97230e3d05..ba91be9c21ef 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,3 +1,3 @@
-core-$(CONFIG_MACH_JZ4740)	+= arch/mips/jz4740/
+platform-$(CONFIG_MACH_JZ4740)	+= jz4740/
 cflags-$(CONFIG_MACH_JZ4740)	+= -I$(srctree)/arch/mips/include/asm/mach-jz4740
 load-$(CONFIG_MACH_JZ4740)	+= 0xffffffff80010000
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 0176ed015c89..32103cc2a257 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs)
 		return -EFAULT;
 	}
 
-	regs->regs[0] = 0;
 	switch (insn.i_format.opcode) {
 	/*
 	 * jr and jalr are in r_format format.
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 2340f11dc29c..9a526ba6f257 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
 		goto out_unlock;
 
-	retval = security_task_setscheduler(p, 0, NULL);
+	retval = security_task_setscheduler(p)
 	if (retval)
 		goto out_unlock;
 
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c51b95ff8644..c8777333e198 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -536,7 +536,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 	/* do the secure computing check first */
 	if (!entryexit)
-		secure_computing(regs->regs[0]);
+		secure_computing(regs->regs[2]);
 
 	if (unlikely(current->audit_context) && entryexit)
 		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +565,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(audit_arch(), regs->regs[0],
+		audit_syscall_entry(audit_arch(), regs->regs[2],
 				    regs->regs[4], regs->regs[5],
 				    regs->regs[6], regs->regs[7]);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 584415eef8c9..fbaabad0e6e2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -63,9 +63,9 @@ stack_done: | |||
63 | sw t0, PT_R7(sp) # set error flag | 63 | sw t0, PT_R7(sp) # set error flag |
64 | beqz t0, 1f | 64 | beqz t0, 1f |
65 | 65 | ||
66 | lw t1, PT_R2(sp) # syscall number | ||
66 | negu v0 # error | 67 | negu v0 # error |
67 | sw v0, PT_R0(sp) # set flag for syscall | 68 | sw t1, PT_R0(sp) # save it for syscall restarting |
68 | # restarting | ||
69 | 1: sw v0, PT_R2(sp) # result | 69 | 1: sw v0, PT_R2(sp) # result |
70 | 70 | ||
71 | o32_syscall_exit: | 71 | o32_syscall_exit: |
@@ -104,9 +104,9 @@ syscall_trace_entry: | |||
104 | sw t0, PT_R7(sp) # set error flag | 104 | sw t0, PT_R7(sp) # set error flag |
105 | beqz t0, 1f | 105 | beqz t0, 1f |
106 | 106 | ||
107 | lw t1, PT_R2(sp) # syscall number | ||
107 | negu v0 # error | 108 | negu v0 # error |
108 | sw v0, PT_R0(sp) # set flag for syscall | 109 | sw t1, PT_R0(sp) # save it for syscall restarting |
109 | # restarting | ||
110 | 1: sw v0, PT_R2(sp) # result | 110 | 1: sw v0, PT_R2(sp) # result |
111 | 111 | ||
112 | j syscall_exit | 112 | j syscall_exit |
@@ -169,8 +169,7 @@ stackargs: | |||
169 | * We probably should handle this case a bit more drastic. | 169 | * We probably should handle this case a bit more drastic. |
170 | */ | 170 | */ |
171 | bad_stack: | 171 | bad_stack: |
172 | negu v0 # error | 172 | li v0, EFAULT |
173 | sw v0, PT_R0(sp) | ||
174 | sw v0, PT_R2(sp) | 173 | sw v0, PT_R2(sp) |
175 | li t0, 1 # set error flag | 174 | li t0, 1 # set error flag |
176 | sw t0, PT_R7(sp) | 175 | sw t0, PT_R7(sp) |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 5573f8e4e326..3f4179283207 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
66 | sd t0, PT_R7(sp) # set error flag | 66 | sd t0, PT_R7(sp) # set error flag |
67 | beqz t0, 1f | 67 | beqz t0, 1f |
68 | 68 | ||
69 | ld t1, PT_R2(sp) # syscall number | ||
69 | dnegu v0 # error | 70 | dnegu v0 # error |
70 | sd v0, PT_R0(sp) # set flag for syscall | 71 | sd t1, PT_R0(sp) # save it for syscall restarting |
71 | # restarting | ||
72 | 1: sd v0, PT_R2(sp) # result | 72 | 1: sd v0, PT_R2(sp) # result |
73 | 73 | ||
74 | n64_syscall_exit: | 74 | n64_syscall_exit: |
@@ -109,8 +109,9 @@ syscall_trace_entry: | |||
109 | sd t0, PT_R7(sp) # set error flag | 109 | sd t0, PT_R7(sp) # set error flag |
110 | beqz t0, 1f | 110 | beqz t0, 1f |
111 | 111 | ||
112 | ld t1, PT_R2(sp) # syscall number | ||
112 | dnegu v0 # error | 113 | dnegu v0 # error |
113 | sd v0, PT_R0(sp) # set flag for syscall restarting | 114 | sd t1, PT_R0(sp) # save it for syscall restarting |
114 | 1: sd v0, PT_R2(sp) # result | 115 | 1: sd v0, PT_R2(sp) # result |
115 | 116 | ||
116 | j syscall_exit | 117 | j syscall_exit |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 1e38ec97672e..f08ece6d8acc 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp) | |||
65 | sd t0, PT_R7(sp) # set error flag | 65 | sd t0, PT_R7(sp) # set error flag |
66 | beqz t0, 1f | 66 | beqz t0, 1f |
67 | 67 | ||
68 | ld t1, PT_R2(sp) # syscall number | ||
68 | dnegu v0 # error | 69 | dnegu v0 # error |
69 | sd v0, PT_R0(sp) # set flag for syscall restarting | 70 | sd t1, PT_R0(sp) # save it for syscall restarting |
70 | 1: sd v0, PT_R2(sp) # result | 71 | 1: sd v0, PT_R2(sp) # result |
71 | 72 | ||
72 | local_irq_disable # make sure need_resched and | 73 | local_irq_disable # make sure need_resched and |
@@ -106,8 +107,9 @@ n32_syscall_trace_entry: | |||
106 | sd t0, PT_R7(sp) # set error flag | 107 | sd t0, PT_R7(sp) # set error flag |
107 | beqz t0, 1f | 108 | beqz t0, 1f |
108 | 109 | ||
110 | ld t1, PT_R2(sp) # syscall number | ||
109 | dnegu v0 # error | 111 | dnegu v0 # error |
110 | sd v0, PT_R0(sp) # set flag for syscall restarting | 112 | sd t1, PT_R0(sp) # save it for syscall restarting |
111 | 1: sd v0, PT_R2(sp) # result | 113 | 1: sd v0, PT_R2(sp) # result |
112 | 114 | ||
113 | j syscall_exit | 115 | j syscall_exit |
@@ -320,10 +322,10 @@ EXPORT(sysn32_call_table) | |||
320 | PTR sys_cacheflush | 322 | PTR sys_cacheflush |
321 | PTR sys_cachectl | 323 | PTR sys_cachectl |
322 | PTR sys_sysmips | 324 | PTR sys_sysmips |
323 | PTR sys_io_setup /* 6200 */ | 325 | PTR compat_sys_io_setup /* 6200 */ |
324 | PTR sys_io_destroy | 326 | PTR sys_io_destroy |
325 | PTR sys_io_getevents | 327 | PTR compat_sys_io_getevents |
326 | PTR sys_io_submit | 328 | PTR compat_sys_io_submit |
327 | PTR sys_io_cancel | 329 | PTR sys_io_cancel |
328 | PTR sys_exit_group /* 6205 */ | 330 | PTR sys_exit_group /* 6205 */ |
329 | PTR sys_lookup_dcookie | 331 | PTR sys_lookup_dcookie |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 171979fc98e5..78d768a3e19d 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp) | |||
93 | sd t0, PT_R7(sp) # set error flag | 93 | sd t0, PT_R7(sp) # set error flag |
94 | beqz t0, 1f | 94 | beqz t0, 1f |
95 | 95 | ||
96 | ld t1, PT_R2(sp) # syscall number | ||
96 | dnegu v0 # error | 97 | dnegu v0 # error |
97 | sd v0, PT_R0(sp) # flag for syscall restarting | 98 | sd t1, PT_R0(sp) # save it for syscall restarting |
98 | 1: sd v0, PT_R2(sp) # result | 99 | 1: sd v0, PT_R2(sp) # result |
99 | 100 | ||
100 | o32_syscall_exit: | 101 | o32_syscall_exit: |
@@ -142,8 +143,9 @@ trace_a_syscall: | |||
142 | sd t0, PT_R7(sp) # set error flag | 143 | sd t0, PT_R7(sp) # set error flag |
143 | beqz t0, 1f | 144 | beqz t0, 1f |
144 | 145 | ||
146 | ld t1, PT_R2(sp) # syscall number | ||
145 | dnegu v0 # error | 147 | dnegu v0 # error |
146 | sd v0, PT_R0(sp) # set flag for syscall restarting | 148 | sd t1, PT_R0(sp) # save it for syscall restarting |
147 | 1: sd v0, PT_R2(sp) # result | 149 | 1: sd v0, PT_R2(sp) # result |
148 | 150 | ||
149 | j syscall_exit | 151 | j syscall_exit |
@@ -154,8 +156,7 @@ trace_a_syscall: | |||
154 | * The stackpointer for a call with more than 4 arguments is bad. | 156 | * The stackpointer for a call with more than 4 arguments is bad. |
155 | */ | 157 | */ |
156 | bad_stack: | 158 | bad_stack: |
157 | dnegu v0 # error | 159 | li v0, EFAULT |
158 | sd v0, PT_R0(sp) | ||
159 | sd v0, PT_R2(sp) | 160 | sd v0, PT_R2(sp) |
160 | li t0, 1 # set error flag | 161 | li t0, 1 # set error flag |
161 | sd t0, PT_R7(sp) | 162 | sd t0, PT_R7(sp) |
@@ -444,10 +445,10 @@ sys_call_table: | |||
444 | PTR compat_sys_futex | 445 | PTR compat_sys_futex |
445 | PTR compat_sys_sched_setaffinity | 446 | PTR compat_sys_sched_setaffinity |
446 | PTR compat_sys_sched_getaffinity /* 4240 */ | 447 | PTR compat_sys_sched_getaffinity /* 4240 */ |
447 | PTR sys_io_setup | 448 | PTR compat_sys_io_setup |
448 | PTR sys_io_destroy | 449 | PTR sys_io_destroy |
449 | PTR sys_io_getevents | 450 | PTR compat_sys_io_getevents |
450 | PTR sys_io_submit | 451 | PTR compat_sys_io_submit |
451 | PTR sys_io_cancel /* 4245 */ | 452 | PTR sys_io_cancel /* 4245 */ |
452 | PTR sys_exit_group | 453 | PTR sys_exit_group |
453 | PTR sys32_lookup_dcookie | 454 | PTR sys32_lookup_dcookie |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 2099d5a4c4b7..5922342bca39 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
390 | { | 390 | { |
391 | struct rt_sigframe __user *frame; | 391 | struct rt_sigframe __user *frame; |
392 | sigset_t set; | 392 | sigset_t set; |
393 | stack_t st; | ||
394 | int sig; | 393 | int sig; |
395 | 394 | ||
396 | frame = (struct rt_sigframe __user *) regs.regs[29]; | 395 | frame = (struct rt_sigframe __user *) regs.regs[29]; |
@@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
411 | else if (sig) | 410 | else if (sig) |
412 | force_sig(sig, current); | 411 | force_sig(sig, current); |
413 | 412 | ||
414 | if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) | ||
415 | goto badframe; | ||
416 | /* It is more difficult to avoid calling this function than to | 413 | /* It is more difficult to avoid calling this function than to |
417 | call it and ignore errors. */ | 414 | call it and ignore errors. */ |
418 | do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); | 415 | do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]); |
419 | 416 | ||
420 | /* | 417 | /* |
421 | * Don't let your children do this ... | 418 | * Don't let your children do this ... |
@@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
550 | struct mips_abi *abi = current->thread.abi; | 547 | struct mips_abi *abi = current->thread.abi; |
551 | void *vdso = current->mm->context.vdso; | 548 | void *vdso = current->mm->context.vdso; |
552 | 549 | ||
553 | switch(regs->regs[0]) { | 550 | if (regs->regs[0]) { |
554 | case ERESTART_RESTARTBLOCK: | 551 | switch(regs->regs[2]) { |
555 | case ERESTARTNOHAND: | 552 | case ERESTART_RESTARTBLOCK: |
556 | regs->regs[2] = EINTR; | 553 | case ERESTARTNOHAND: |
557 | break; | ||
558 | case ERESTARTSYS: | ||
559 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
560 | regs->regs[2] = EINTR; | 554 | regs->regs[2] = EINTR; |
561 | break; | 555 | break; |
556 | case ERESTARTSYS: | ||
557 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
558 | regs->regs[2] = EINTR; | ||
559 | break; | ||
560 | } | ||
561 | /* fallthrough */ | ||
562 | case ERESTARTNOINTR: | ||
563 | regs->regs[7] = regs->regs[26]; | ||
564 | regs->regs[2] = regs->regs[0]; | ||
565 | regs->cp0_epc -= 4; | ||
562 | } | 566 | } |
563 | /* fallthrough */ | ||
564 | case ERESTARTNOINTR: /* Userland will reload $v0. */ | ||
565 | regs->regs[7] = regs->regs[26]; | ||
566 | regs->cp0_epc -= 8; | ||
567 | } | ||
568 | 567 | ||
569 | regs->regs[0] = 0; /* Don't deal with this again. */ | 568 | regs->regs[0] = 0; /* Don't deal with this again. */ |
569 | } | ||
570 | 570 | ||
571 | if (sig_uses_siginfo(ka)) | 571 | if (sig_uses_siginfo(ka)) |
572 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, | 572 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, |
@@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
575 | ret = abi->setup_frame(vdso + abi->signal_return_offset, | 575 | ret = abi->setup_frame(vdso + abi->signal_return_offset, |
576 | ka, regs, sig, oldset); | 576 | ka, regs, sig, oldset); |
577 | 577 | ||
578 | if (ret) | ||
579 | return ret; | ||
580 | |||
578 | spin_lock_irq(&current->sighand->siglock); | 581 | spin_lock_irq(&current->sighand->siglock); |
579 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); | 582 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); |
580 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 583 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
@@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs) | |||
622 | return; | 625 | return; |
623 | } | 626 | } |
624 | 627 | ||
625 | /* | ||
626 | * Who's code doesn't conform to the restartable syscall convention | ||
627 | * dies here!!! The li instruction, a single machine instruction, | ||
628 | * must directly be followed by the syscall instruction. | ||
629 | */ | ||
630 | if (regs->regs[0]) { | 628 | if (regs->regs[0]) { |
631 | if (regs->regs[2] == ERESTARTNOHAND || | 629 | if (regs->regs[2] == ERESTARTNOHAND || |
632 | regs->regs[2] == ERESTARTSYS || | 630 | regs->regs[2] == ERESTARTSYS || |
633 | regs->regs[2] == ERESTARTNOINTR) { | 631 | regs->regs[2] == ERESTARTNOINTR) { |
632 | regs->regs[2] = regs->regs[0]; | ||
634 | regs->regs[7] = regs->regs[26]; | 633 | regs->regs[7] = regs->regs[26]; |
635 | regs->cp0_epc -= 8; | 634 | regs->cp0_epc -= 4; |
636 | } | 635 | } |
637 | if (regs->regs[2] == ERESTART_RESTARTBLOCK) { | 636 | if (regs->regs[2] == ERESTART_RESTARTBLOCK) { |
638 | regs->regs[2] = current->thread.abi->restart; | 637 | regs->regs[2] = current->thread.abi->restart; |
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 2c5df818c65a..ee24d814d5b9 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
109 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | 109 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
110 | { | 110 | { |
111 | struct rt_sigframe_n32 __user *frame; | 111 | struct rt_sigframe_n32 __user *frame; |
112 | mm_segment_t old_fs; | ||
112 | sigset_t set; | 113 | sigset_t set; |
113 | stack_t st; | 114 | stack_t st; |
114 | s32 sp; | 115 | s32 sp; |
@@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
143 | 144 | ||
144 | /* It is more difficult to avoid calling this function than to | 145 | /* It is more difficult to avoid calling this function than to |
145 | call it and ignore errors. */ | 146 | call it and ignore errors. */ |
147 | old_fs = get_fs(); | ||
148 | set_fs(KERNEL_DS); | ||
146 | do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); | 149 | do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); |
150 | set_fs(old_fs); | ||
151 | |||
147 | 152 | ||
148 | /* | 153 | /* |
149 | * Don't let your children do this ... | 154 | * Don't let your children do this ... |
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 69b039ca8d83..33d5a5ce4a29 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -109,8 +109,6 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
109 | unsigned long value; | 109 | unsigned long value; |
110 | unsigned int res; | 110 | unsigned int res; |
111 | 111 | ||
112 | regs->regs[0] = 0; | ||
113 | |||
114 | /* | 112 | /* |
115 | * This load never faults. | 113 | * This load never faults. |
116 | */ | 114 | */ |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 907417d187e1..79a04a9394d5 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -16,6 +16,7 @@ config PARISC | |||
16 | select RTC_DRV_GENERIC | 16 | select RTC_DRV_GENERIC |
17 | select INIT_ALL_POSSIBLE | 17 | select INIT_ALL_POSSIBLE |
18 | select BUG | 18 | select BUG |
19 | select HAVE_IRQ_WORK | ||
19 | select HAVE_PERF_EVENTS | 20 | select HAVE_PERF_EVENTS |
20 | select GENERIC_ATOMIC64 if !64BIT | 21 | select GENERIC_ATOMIC64 if !64BIT |
21 | help | 22 | help |
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h index cc146427d8f9..1e0fd8ba6c03 100644 --- a/arch/parisc/include/asm/perf_event.h +++ b/arch/parisc/include/asm/perf_event.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef __ASM_PARISC_PERF_EVENT_H | 1 | #ifndef __ASM_PARISC_PERF_EVENT_H |
2 | #define __ASM_PARISC_PERF_EVENT_H | 2 | #define __ASM_PARISC_PERF_EVENT_H |
3 | 3 | ||
4 | /* parisc only supports software events through this interface. */ | 4 | /* Empty, just to avoid compiling error */ |
5 | static inline void set_perf_event_pending(void) { } | ||
6 | 5 | ||
7 | #endif /* __ASM_PARISC_PERF_EVENT_H */ | 6 | #endif /* __ASM_PARISC_PERF_EVENT_H */ |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 631e5a0fb6ab..4b1e521d966f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -138,6 +138,7 @@ config PPC | |||
138 | select HAVE_OPROFILE | 138 | select HAVE_OPROFILE |
139 | select HAVE_SYSCALL_WRAPPERS if PPC64 | 139 | select HAVE_SYSCALL_WRAPPERS if PPC64 |
140 | select GENERIC_ATOMIC64 if PPC32 | 140 | select GENERIC_ATOMIC64 if PPC32 |
141 | select HAVE_IRQ_WORK | ||
141 | select HAVE_PERF_EVENTS | 142 | select HAVE_PERF_EVENTS |
142 | select HAVE_REGS_AND_STACK_ACCESS_API | 143 | select HAVE_REGS_AND_STACK_ACCESS_API |
143 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 | 144 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 |
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 1ff6662f7faf..9b287fdd8ea3 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -129,7 +129,7 @@ struct paca_struct { | |||
129 | u8 soft_enabled; /* irq soft-enable flag */ | 129 | u8 soft_enabled; /* irq soft-enable flag */ |
130 | u8 hard_enabled; /* set if irqs are enabled in MSR */ | 130 | u8 hard_enabled; /* set if irqs are enabled in MSR */ |
131 | u8 io_sync; /* writel() needs spin_unlock sync */ | 131 | u8 io_sync; /* writel() needs spin_unlock sync */ |
132 | u8 perf_event_pending; /* PM interrupt while soft-disabled */ | 132 | u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ |
133 | 133 | ||
134 | /* Stuff for accurate time accounting */ | 134 | /* Stuff for accurate time accounting */ |
135 | u64 user_time; /* accumulated usermode TB ticks */ | 135 | u64 user_time; /* accumulated usermode TB ticks */ |
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index 6c294acac848..9c3d160670b4 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h | |||
@@ -542,10 +542,6 @@ extern void reloc_got2(unsigned long); | |||
542 | 542 | ||
543 | #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) | 543 | #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) |
544 | 544 | ||
545 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
546 | extern void account_system_vtime(struct task_struct *); | ||
547 | #endif | ||
548 | |||
549 | extern struct dentry *powerpc_debugfs_root; | 545 | extern struct dentry *powerpc_debugfs_root; |
550 | 546 | ||
551 | #endif /* __KERNEL__ */ | 547 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 9cb4924b6c07..3129c855933c 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -1092,7 +1092,7 @@ static int power_pmu_event_init(struct perf_event *event) | |||
1092 | * XXX we should check if the task is an idle task. | 1092 | * XXX we should check if the task is an idle task. |
1093 | */ | 1093 | */ |
1094 | flags = 0; | 1094 | flags = 0; |
1095 | if (event->ctx->task) | 1095 | if (event->attach_state & PERF_ATTACH_TASK) |
1096 | flags |= PPMU_ONLY_COUNT_RUN; | 1096 | flags |= PPMU_ONLY_COUNT_RUN; |
1097 | 1097 | ||
1098 | /* | 1098 | /* |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 8533b3b83f5d..54888eb10c3b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #include <linux/posix-timers.h> | 53 | #include <linux/posix-timers.h> |
54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
55 | #include <linux/delay.h> | 55 | #include <linux/delay.h> |
56 | #include <linux/perf_event.h> | 56 | #include <linux/irq_work.h> |
57 | #include <asm/trace.h> | 57 | #include <asm/trace.h> |
58 | 58 | ||
59 | #include <asm/io.h> | 59 | #include <asm/io.h> |
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void) | |||
493 | } | 493 | } |
494 | #endif /* CONFIG_PPC_ISERIES */ | 494 | #endif /* CONFIG_PPC_ISERIES */ |
495 | 495 | ||
496 | #ifdef CONFIG_PERF_EVENTS | 496 | #ifdef CONFIG_IRQ_WORK |
497 | 497 | ||
498 | /* | 498 | /* |
499 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... | 499 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... |
500 | */ | 500 | */ |
501 | #ifdef CONFIG_PPC64 | 501 | #ifdef CONFIG_PPC64 |
502 | static inline unsigned long test_perf_event_pending(void) | 502 | static inline unsigned long test_irq_work_pending(void) |
503 | { | 503 | { |
504 | unsigned long x; | 504 | unsigned long x; |
505 | 505 | ||
506 | asm volatile("lbz %0,%1(13)" | 506 | asm volatile("lbz %0,%1(13)" |
507 | : "=r" (x) | 507 | : "=r" (x) |
508 | : "i" (offsetof(struct paca_struct, perf_event_pending))); | 508 | : "i" (offsetof(struct paca_struct, irq_work_pending))); |
509 | return x; | 509 | return x; |
510 | } | 510 | } |
511 | 511 | ||
512 | static inline void set_perf_event_pending_flag(void) | 512 | static inline void set_irq_work_pending_flag(void) |
513 | { | 513 | { |
514 | asm volatile("stb %0,%1(13)" : : | 514 | asm volatile("stb %0,%1(13)" : : |
515 | "r" (1), | 515 | "r" (1), |
516 | "i" (offsetof(struct paca_struct, perf_event_pending))); | 516 | "i" (offsetof(struct paca_struct, irq_work_pending))); |
517 | } | 517 | } |
518 | 518 | ||
519 | static inline void clear_perf_event_pending(void) | 519 | static inline void clear_irq_work_pending(void) |
520 | { | 520 | { |
521 | asm volatile("stb %0,%1(13)" : : | 521 | asm volatile("stb %0,%1(13)" : : |
522 | "r" (0), | 522 | "r" (0), |
523 | "i" (offsetof(struct paca_struct, perf_event_pending))); | 523 | "i" (offsetof(struct paca_struct, irq_work_pending))); |
524 | } | 524 | } |
525 | 525 | ||
526 | #else /* 32-bit */ | 526 | #else /* 32-bit */ |
527 | 527 | ||
528 | DEFINE_PER_CPU(u8, perf_event_pending); | 528 | DEFINE_PER_CPU(u8, irq_work_pending); |
529 | 529 | ||
530 | #define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 | 530 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 |
531 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) | 531 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) |
532 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 | 532 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 |
533 | 533 | ||
534 | #endif /* 32 vs 64 bit */ | 534 | #endif /* 32 vs 64 bit */ |
535 | 535 | ||
536 | void set_perf_event_pending(void) | 536 | void set_irq_work_pending(void) |
537 | { | 537 | { |
538 | preempt_disable(); | 538 | preempt_disable(); |
539 | set_perf_event_pending_flag(); | 539 | set_irq_work_pending_flag(); |
540 | set_dec(1); | 540 | set_dec(1); |
541 | preempt_enable(); | 541 | preempt_enable(); |
542 | } | 542 | } |
543 | 543 | ||
544 | #else /* CONFIG_PERF_EVENTS */ | 544 | #else /* CONFIG_IRQ_WORK */ |
545 | 545 | ||
546 | #define test_perf_event_pending() 0 | 546 | #define test_irq_work_pending() 0 |
547 | #define clear_perf_event_pending() | 547 | #define clear_irq_work_pending() |
548 | 548 | ||
549 | #endif /* CONFIG_PERF_EVENTS */ | 549 | #endif /* CONFIG_IRQ_WORK */ |
550 | 550 | ||
551 | /* | 551 | /* |
552 | * For iSeries shared processors, we have to let the hypervisor | 552 | * For iSeries shared processors, we have to let the hypervisor |
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs) | |||
587 | 587 | ||
588 | calculate_steal_time(); | 588 | calculate_steal_time(); |
589 | 589 | ||
590 | if (test_perf_event_pending()) { | 590 | if (test_irq_work_pending()) { |
591 | clear_perf_event_pending(); | 591 | clear_irq_work_pending(); |
592 | perf_event_do_pending(); | 592 | irq_work_run(); |
593 | } | 593 | } |
594 | 594 | ||
595 | #ifdef CONFIG_PPC_ISERIES | 595 | #ifdef CONFIG_PPC_ISERIES |
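Note: the perf_event_pending plumbing above (and the matching alpha/sparc hunks) is being generalized into the irq_work facility. A minimal consumer-side sketch, assuming the 2.6.37 <linux/irq_work.h> interface; all my_* names are hypothetical:

	#include <linux/irq_work.h>

	/* Deferred callback: runs from irq_work_run(), i.e. from the next
	 * timer/self interrupt after the raising context returns. */
	static void my_deferred(struct irq_work *work)
	{
		/* ... work that is unsafe to do from NMI context ... */
	}

	static struct irq_work my_work;

	static void my_init(void)
	{
		init_irq_work(&my_work, my_deferred);
	}

	/* Callable from NMI: marks the entry pending and calls the arch
	 * hook arch_irq_work_raise() -- on ppc64 set_dec(1) as above, on
	 * sparc a softint (see arch/sparc/kernel/pcr.c further down). */
	static void my_nmi_path(void)
	{
		irq_work_queue(&my_work);
	}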
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 97085530aa63..e3e379c6caa7 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) | |||
310 | } | 310 | } |
311 | 311 | ||
312 | static struct irq_chip msic_irq_chip = { | 312 | static struct irq_chip msic_irq_chip = { |
313 | .mask = mask_msi_irq, | 313 | .irq_mask = mask_msi_irq, |
314 | .unmask = unmask_msi_irq, | 314 | .irq_unmask = unmask_msi_irq, |
315 | .shutdown = unmask_msi_irq, | 315 | .irq_shutdown = mask_msi_irq, |
316 | .name = "AXON-MSI", | 316 | .name = "AXON-MSI", |
317 | }; | 317 | }; |
318 | 318 | ||
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 93834b0d8272..67e2c4bdac8f 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq) | |||
243 | * at that level, so we do it here by hand. | 243 | * at that level, so we do it here by hand. |
244 | */ | 244 | */ |
245 | if (irq_to_desc(virq)->msi_desc) | 245 | if (irq_to_desc(virq)->msi_desc) |
246 | unmask_msi_irq(virq); | 246 | unmask_msi_irq(irq_get_irq_data(virq)); |
247 | 247 | ||
248 | /* unmask it */ | 248 | /* unmask it */ |
249 | xics_unmask_irq(virq); | 249 | xics_unmask_irq(virq); |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 87991d3abbab..bdbd896c89d8 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | static struct irq_chip fsl_msi_chip = { | 53 | static struct irq_chip fsl_msi_chip = { |
54 | .mask = mask_msi_irq, | 54 | .irq_mask = mask_msi_irq, |
55 | .unmask = unmask_msi_irq, | 55 | .irq_unmask = unmask_msi_irq, |
56 | .ack = fsl_msi_end_irq, | 56 | .ack = fsl_msi_end_irq, |
57 | .name = "FSL-MSI", | 57 | .name = "FSL-MSI", |
58 | }; | 58 | }; |
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 3b6a9a43718f..320ad5a9a25d 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c | |||
@@ -39,24 +39,24 @@ | |||
39 | static struct mpic *msi_mpic; | 39 | static struct mpic *msi_mpic; |
40 | 40 | ||
41 | 41 | ||
42 | static void mpic_pasemi_msi_mask_irq(unsigned int irq) | 42 | static void mpic_pasemi_msi_mask_irq(struct irq_data *data) |
43 | { | 43 | { |
44 | pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); | 44 | pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); |
45 | mask_msi_irq(irq); | 45 | mask_msi_irq(data); |
46 | mpic_mask_irq(irq); | 46 | mpic_mask_irq(data->irq); |
47 | } | 47 | } |
48 | 48 | ||
49 | static void mpic_pasemi_msi_unmask_irq(unsigned int irq) | 49 | static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) |
50 | { | 50 | { |
51 | pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); | 51 | pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); |
52 | mpic_unmask_irq(irq); | 52 | mpic_unmask_irq(data->irq); |
53 | unmask_msi_irq(irq); | 53 | unmask_msi_irq(data); |
54 | } | 54 | } |
55 | 55 | ||
56 | static struct irq_chip mpic_pasemi_msi_chip = { | 56 | static struct irq_chip mpic_pasemi_msi_chip = { |
57 | .shutdown = mpic_pasemi_msi_mask_irq, | 57 | .irq_shutdown = mpic_pasemi_msi_mask_irq, |
58 | .mask = mpic_pasemi_msi_mask_irq, | 58 | .irq_mask = mpic_pasemi_msi_mask_irq, |
59 | .unmask = mpic_pasemi_msi_unmask_irq, | 59 | .irq_unmask = mpic_pasemi_msi_unmask_irq, |
60 | .eoi = mpic_end_irq, | 60 | .eoi = mpic_end_irq, |
61 | .set_type = mpic_set_irq_type, | 61 | .set_type = mpic_set_irq_type, |
62 | .set_affinity = mpic_set_affinity, | 62 | .set_affinity = mpic_set_affinity, |
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index bcbfe79c704b..a2b028b4a202 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c | |||
@@ -23,22 +23,22 @@ | |||
23 | /* A bit ugly, can we get this from the pci_dev somehow? */ | 23 | /* A bit ugly, can we get this from the pci_dev somehow? */ |
24 | static struct mpic *msi_mpic; | 24 | static struct mpic *msi_mpic; |
25 | 25 | ||
26 | static void mpic_u3msi_mask_irq(unsigned int irq) | 26 | static void mpic_u3msi_mask_irq(struct irq_data *data) |
27 | { | 27 | { |
28 | mask_msi_irq(irq); | 28 | mask_msi_irq(data); |
29 | mpic_mask_irq(irq); | 29 | mpic_mask_irq(data->irq); |
30 | } | 30 | } |
31 | 31 | ||
32 | static void mpic_u3msi_unmask_irq(unsigned int irq) | 32 | static void mpic_u3msi_unmask_irq(struct irq_data *data) |
33 | { | 33 | { |
34 | mpic_unmask_irq(irq); | 34 | mpic_unmask_irq(data->irq); |
35 | unmask_msi_irq(irq); | 35 | unmask_msi_irq(data); |
36 | } | 36 | } |
37 | 37 | ||
38 | static struct irq_chip mpic_u3msi_chip = { | 38 | static struct irq_chip mpic_u3msi_chip = { |
39 | .shutdown = mpic_u3msi_mask_irq, | 39 | .irq_shutdown = mpic_u3msi_mask_irq, |
40 | .mask = mpic_u3msi_mask_irq, | 40 | .irq_mask = mpic_u3msi_mask_irq, |
41 | .unmask = mpic_u3msi_unmask_irq, | 41 | .irq_unmask = mpic_u3msi_unmask_irq, |
42 | .eoi = mpic_end_irq, | 42 | .eoi = mpic_end_irq, |
43 | .set_type = mpic_set_irq_type, | 43 | .set_type = mpic_set_irq_type, |
44 | .set_affinity = mpic_set_affinity, | 44 | .set_affinity = mpic_set_affinity, |
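Note: the MSI irq_chip hunks above (axon_msi, fsl_msi, mpic_pasemi_msi, mpic_u3msi; sparc pci_msi.c follows the same pattern below) are conversions to the struct irq_data based chip methods introduced by the 2.6.37 genirq rework. A minimal sketch of the new-style chip, with hypothetical my_* names:

	#include <linux/irq.h>
	#include <linux/msi.h>

	static void my_msi_mask(struct irq_data *data)
	{
		mask_msi_irq(data);	/* new signature takes irq_data */
	}

	static void my_msi_unmask(struct irq_data *data)
	{
		unmask_msi_irq(data);
	}

	static struct irq_chip my_msi_chip = {
		.name       = "MY-MSI",
		.irq_mask   = my_msi_mask,
		.irq_unmask = my_msi_unmask,
	};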
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f0777a47e3a5..75976a141947 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -95,6 +95,7 @@ config S390 | |||
95 | select HAVE_KVM if 64BIT | 95 | select HAVE_KVM if 64BIT |
96 | select HAVE_ARCH_TRACEHOOK | 96 | select HAVE_ARCH_TRACEHOOK |
97 | select INIT_ALL_POSSIBLE | 97 | select INIT_ALL_POSSIBLE |
98 | select HAVE_IRQ_WORK | ||
98 | select HAVE_PERF_EVENTS | 99 | select HAVE_PERF_EVENTS |
99 | select HAVE_KERNEL_GZIP | 100 | select HAVE_KERNEL_GZIP |
100 | select HAVE_KERNEL_BZIP2 | 101 | select HAVE_KERNEL_BZIP2 |
@@ -198,6 +199,13 @@ config HOTPLUG_CPU | |||
198 | can be controlled through /sys/devices/system/cpu/cpu#. | 199 | can be controlled through /sys/devices/system/cpu/cpu#. |
199 | Say N if you want to disable CPU hotplug. | 200 | Say N if you want to disable CPU hotplug. |
200 | 201 | ||
202 | config SCHED_BOOK | ||
203 | bool "Book scheduler support" | ||
204 | depends on SMP | ||
205 | help | ||
206 | Book scheduler support improves the CPU scheduler's decision making | ||
207 | when dealing with machines that have several books. | ||
208 | |||
201 | config MATHEMU | 209 | config MATHEMU |
202 | bool "IEEE FPU emulation" | 210 | bool "IEEE FPU emulation" |
203 | depends on MARCH_G5 | 211 | depends on MARCH_G5 |
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 498bc3892385..881d94590aeb 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h | |||
@@ -12,10 +12,6 @@ | |||
12 | #ifndef __ASM_HARDIRQ_H | 12 | #ifndef __ASM_HARDIRQ_H |
13 | #define __ASM_HARDIRQ_H | 13 | #define __ASM_HARDIRQ_H |
14 | 14 | ||
15 | #include <linux/threads.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/cache.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <asm/lowcore.h> | 15 | #include <asm/lowcore.h> |
20 | 16 | ||
21 | #define local_softirq_pending() (S390_lowcore.softirq_pending) | 17 | #define local_softirq_pending() (S390_lowcore.softirq_pending) |
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 3840cbe77637..a75f168d2718 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h | |||
@@ -4,7 +4,6 @@ | |||
4 | * Copyright 2009 Martin Schwidefsky, IBM Corporation. | 4 | * Copyright 2009 Martin Schwidefsky, IBM Corporation. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | static inline void set_perf_event_pending(void) {} | 7 | /* Empty, just to avoid compiling error */ |
8 | static inline void clear_perf_event_pending(void) {} | ||
9 | 8 | ||
10 | #define PERF_EVENT_INDEX_OFFSET 0 | 9 | #define PERF_EVENT_INDEX_OFFSET 0 |
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index cef66210c846..38ddd8a9a9e8 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs) | |||
97 | 97 | ||
98 | extern void account_vtime(struct task_struct *, struct task_struct *); | 98 | extern void account_vtime(struct task_struct *, struct task_struct *); |
99 | extern void account_tick_vtime(struct task_struct *); | 99 | extern void account_tick_vtime(struct task_struct *); |
100 | extern void account_system_vtime(struct task_struct *); | ||
101 | 100 | ||
102 | #ifdef CONFIG_PFAULT | 101 | #ifdef CONFIG_PFAULT |
103 | extern void pfault_irq_init(void); | 102 | extern void pfault_irq_init(void); |
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 831bd033ea77..051107a2c5e2 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h | |||
@@ -3,15 +3,32 @@ | |||
3 | 3 | ||
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | 5 | ||
6 | #define mc_capable() (1) | ||
7 | |||
8 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu); | ||
9 | |||
10 | extern unsigned char cpu_core_id[NR_CPUS]; | 6 | extern unsigned char cpu_core_id[NR_CPUS]; |
11 | extern cpumask_t cpu_core_map[NR_CPUS]; | 7 | extern cpumask_t cpu_core_map[NR_CPUS]; |
12 | 8 | ||
9 | static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | ||
10 | { | ||
11 | return &cpu_core_map[cpu]; | ||
12 | } | ||
13 | |||
13 | #define topology_core_id(cpu) (cpu_core_id[cpu]) | 14 | #define topology_core_id(cpu) (cpu_core_id[cpu]) |
14 | #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) | 15 | #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) |
16 | #define mc_capable() (1) | ||
17 | |||
18 | #ifdef CONFIG_SCHED_BOOK | ||
19 | |||
20 | extern unsigned char cpu_book_id[NR_CPUS]; | ||
21 | extern cpumask_t cpu_book_map[NR_CPUS]; | ||
22 | |||
23 | static inline const struct cpumask *cpu_book_mask(unsigned int cpu) | ||
24 | { | ||
25 | return &cpu_book_map[cpu]; | ||
26 | } | ||
27 | |||
28 | #define topology_book_id(cpu) (cpu_book_id[cpu]) | ||
29 | #define topology_book_cpumask(cpu) (&cpu_book_map[cpu]) | ||
30 | |||
31 | #endif /* CONFIG_SCHED_BOOK */ | ||
15 | 32 | ||
16 | int topology_set_cpu_management(int fc); | 33 | int topology_set_cpu_management(int fc); |
17 | void topology_schedule_update(void); | 34 | void topology_schedule_update(void); |
@@ -30,6 +47,8 @@ static inline void s390_init_cpu_topology(void) | |||
30 | }; | 47 | }; |
31 | #endif | 48 | #endif |
32 | 49 | ||
50 | #define SD_BOOK_INIT SD_CPU_INIT | ||
51 | |||
33 | #include <asm-generic/topology.h> | 52 | #include <asm-generic/topology.h> |
34 | 53 | ||
35 | #endif /* _ASM_S390_TOPOLOGY_H */ | 54 | #endif /* _ASM_S390_TOPOLOGY_H */ |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index bcef00766a64..13559c993847 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -57,8 +57,8 @@ struct tl_info { | |||
57 | union tl_entry tle[0]; | 57 | union tl_entry tle[0]; |
58 | }; | 58 | }; |
59 | 59 | ||
60 | struct core_info { | 60 | struct mask_info { |
61 | struct core_info *next; | 61 | struct mask_info *next; |
62 | unsigned char id; | 62 | unsigned char id; |
63 | cpumask_t mask; | 63 | cpumask_t mask; |
64 | }; | 64 | }; |
@@ -66,7 +66,6 @@ struct core_info { | |||
66 | static int topology_enabled; | 66 | static int topology_enabled; |
67 | static void topology_work_fn(struct work_struct *work); | 67 | static void topology_work_fn(struct work_struct *work); |
68 | static struct tl_info *tl_info; | 68 | static struct tl_info *tl_info; |
69 | static struct core_info core_info; | ||
70 | static int machine_has_topology; | 69 | static int machine_has_topology; |
71 | static struct timer_list topology_timer; | 70 | static struct timer_list topology_timer; |
72 | static void set_topology_timer(void); | 71 | static void set_topology_timer(void); |
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn); | |||
74 | /* topology_lock protects the core linked list */ | 73 | /* topology_lock protects the core linked list */ |
75 | static DEFINE_SPINLOCK(topology_lock); | 74 | static DEFINE_SPINLOCK(topology_lock); |
76 | 75 | ||
76 | static struct mask_info core_info; | ||
77 | cpumask_t cpu_core_map[NR_CPUS]; | 77 | cpumask_t cpu_core_map[NR_CPUS]; |
78 | unsigned char cpu_core_id[NR_CPUS]; | 78 | unsigned char cpu_core_id[NR_CPUS]; |
79 | 79 | ||
80 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | 80 | #ifdef CONFIG_SCHED_BOOK |
81 | static struct mask_info book_info; | ||
82 | cpumask_t cpu_book_map[NR_CPUS]; | ||
83 | unsigned char cpu_book_id[NR_CPUS]; | ||
84 | #endif | ||
85 | |||
86 | static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) | ||
81 | { | 87 | { |
82 | struct core_info *core = &core_info; | ||
83 | unsigned long flags; | ||
84 | cpumask_t mask; | 88 | cpumask_t mask; |
85 | 89 | ||
86 | cpus_clear(mask); | 90 | cpus_clear(mask); |
87 | if (!topology_enabled || !machine_has_topology) | 91 | if (!topology_enabled || !machine_has_topology) |
88 | return cpu_possible_map; | 92 | return cpu_possible_map; |
89 | spin_lock_irqsave(&topology_lock, flags); | 93 | while (info) { |
90 | while (core) { | 94 | if (cpu_isset(cpu, info->mask)) { |
91 | if (cpu_isset(cpu, core->mask)) { | 95 | mask = info->mask; |
92 | mask = core->mask; | ||
93 | break; | 96 | break; |
94 | } | 97 | } |
95 | core = core->next; | 98 | info = info->next; |
96 | } | 99 | } |
97 | spin_unlock_irqrestore(&topology_lock, flags); | ||
98 | if (cpus_empty(mask)) | 100 | if (cpus_empty(mask)) |
99 | mask = cpumask_of_cpu(cpu); | 101 | mask = cpumask_of_cpu(cpu); |
100 | return mask; | 102 | return mask; |
101 | } | 103 | } |
102 | 104 | ||
103 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | 105 | static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book, |
104 | { | 106 | struct mask_info *core) |
105 | return &cpu_core_map[cpu]; | ||
106 | } | ||
107 | |||
108 | static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) | ||
109 | { | 107 | { |
110 | unsigned int cpu; | 108 | unsigned int cpu; |
111 | 109 | ||
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) | |||
117 | 115 | ||
118 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; | 116 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; |
119 | for_each_present_cpu(lcpu) { | 117 | for_each_present_cpu(lcpu) { |
120 | if (cpu_logical_map(lcpu) == rcpu) { | 118 | if (cpu_logical_map(lcpu) != rcpu) |
121 | cpu_set(lcpu, core->mask); | 119 | continue; |
122 | cpu_core_id[lcpu] = core->id; | 120 | #ifdef CONFIG_SCHED_BOOK |
123 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | 121 | cpu_set(lcpu, book->mask); |
124 | } | 122 | cpu_book_id[lcpu] = book->id; |
123 | #endif | ||
124 | cpu_set(lcpu, core->mask); | ||
125 | cpu_core_id[lcpu] = core->id; | ||
126 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | ||
125 | } | 127 | } |
126 | } | 128 | } |
127 | } | 129 | } |
128 | 130 | ||
129 | static void clear_cores(void) | 131 | static void clear_masks(void) |
130 | { | 132 | { |
131 | struct core_info *core = &core_info; | 133 | struct mask_info *info; |
132 | 134 | ||
133 | while (core) { | 135 | info = &core_info; |
134 | cpus_clear(core->mask); | 136 | while (info) { |
135 | core = core->next; | 137 | cpus_clear(info->mask); |
138 | info = info->next; | ||
139 | } | ||
140 | #ifdef CONFIG_SCHED_BOOK | ||
141 | info = &book_info; | ||
142 | while (info) { | ||
143 | cpus_clear(info->mask); | ||
144 | info = info->next; | ||
136 | } | 145 | } |
146 | #endif | ||
137 | } | 147 | } |
138 | 148 | ||
139 | static union tl_entry *next_tle(union tl_entry *tle) | 149 | static union tl_entry *next_tle(union tl_entry *tle) |
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle) | |||
146 | 156 | ||
147 | static void tl_to_cores(struct tl_info *info) | 157 | static void tl_to_cores(struct tl_info *info) |
148 | { | 158 | { |
159 | #ifdef CONFIG_SCHED_BOOK | ||
160 | struct mask_info *book = &book_info; | ||
161 | #else | ||
162 | struct mask_info *book = NULL; | ||
163 | #endif | ||
164 | struct mask_info *core = &core_info; | ||
149 | union tl_entry *tle, *end; | 165 | union tl_entry *tle, *end; |
150 | struct core_info *core = &core_info; | 166 | |
151 | 167 | ||
152 | spin_lock_irq(&topology_lock); | 168 | spin_lock_irq(&topology_lock); |
153 | clear_cores(); | 169 | clear_masks(); |
154 | tle = info->tle; | 170 | tle = info->tle; |
155 | end = (union tl_entry *)((unsigned long)info + info->length); | 171 | end = (union tl_entry *)((unsigned long)info + info->length); |
156 | while (tle < end) { | 172 | while (tle < end) { |
157 | switch (tle->nl) { | 173 | switch (tle->nl) { |
158 | case 5: | 174 | #ifdef CONFIG_SCHED_BOOK |
159 | case 4: | ||
160 | case 3: | ||
161 | case 2: | 175 | case 2: |
176 | book = book->next; | ||
177 | book->id = tle->container.id; | ||
162 | break; | 178 | break; |
179 | #endif | ||
163 | case 1: | 180 | case 1: |
164 | core = core->next; | 181 | core = core->next; |
165 | core->id = tle->container.id; | 182 | core->id = tle->container.id; |
166 | break; | 183 | break; |
167 | case 0: | 184 | case 0: |
168 | add_cpus_to_core(&tle->cpu, core); | 185 | add_cpus_to_mask(&tle->cpu, book, core); |
169 | break; | 186 | break; |
170 | default: | 187 | default: |
171 | clear_cores(); | 188 | clear_masks(); |
172 | machine_has_topology = 0; | 189 | machine_has_topology = 0; |
173 | goto out; | 190 | goto out; |
174 | } | 191 | } |
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc) | |||
221 | 238 | ||
222 | static void update_cpu_core_map(void) | 239 | static void update_cpu_core_map(void) |
223 | { | 240 | { |
241 | unsigned long flags; | ||
224 | int cpu; | 242 | int cpu; |
225 | 243 | ||
226 | for_each_possible_cpu(cpu) | 244 | spin_lock_irqsave(&topology_lock, flags); |
227 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | 245 | for_each_possible_cpu(cpu) { |
246 | cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); | ||
247 | #ifdef CONFIG_SCHED_BOOK | ||
248 | cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); | ||
249 | #endif | ||
250 | } | ||
251 | spin_unlock_irqrestore(&topology_lock, flags); | ||
252 | } | ||
253 | |||
254 | static void store_topology(struct tl_info *info) | ||
255 | { | ||
256 | #ifdef CONFIG_SCHED_BOOK | ||
257 | int rc; | ||
258 | |||
259 | rc = stsi(info, 15, 1, 3); | ||
260 | if (rc != -ENOSYS) | ||
261 | return; | ||
262 | #endif | ||
263 | stsi(info, 15, 1, 2); | ||
228 | } | 264 | } |
229 | 265 | ||
230 | int arch_update_cpu_topology(void) | 266 | int arch_update_cpu_topology(void) |
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void) | |||
238 | topology_update_polarization_simple(); | 274 | topology_update_polarization_simple(); |
239 | return 0; | 275 | return 0; |
240 | } | 276 | } |
241 | stsi(info, 15, 1, 2); | 277 | store_topology(info); |
242 | tl_to_cores(info); | 278 | tl_to_cores(info); |
243 | update_cpu_core_map(); | 279 | update_cpu_core_map(); |
244 | for_each_online_cpu(cpu) { | 280 | for_each_online_cpu(cpu) { |
@@ -299,12 +335,24 @@ out: | |||
299 | } | 335 | } |
300 | __initcall(init_topology_update); | 336 | __initcall(init_topology_update); |
301 | 337 | ||
338 | static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset) | ||
339 | { | ||
340 | int i, nr_masks; | ||
341 | |||
342 | nr_masks = info->mag[NR_MAG - offset]; | ||
343 | for (i = 0; i < info->mnest - offset; i++) | ||
344 | nr_masks *= info->mag[NR_MAG - offset - 1 - i]; | ||
345 | nr_masks = max(nr_masks, 1); | ||
346 | for (i = 0; i < nr_masks; i++) { | ||
347 | mask->next = alloc_bootmem(sizeof(struct mask_info)); | ||
348 | mask = mask->next; | ||
349 | } | ||
350 | } | ||
351 | |||
302 | void __init s390_init_cpu_topology(void) | 352 | void __init s390_init_cpu_topology(void) |
303 | { | 353 | { |
304 | unsigned long long facility_bits; | 354 | unsigned long long facility_bits; |
305 | struct tl_info *info; | 355 | struct tl_info *info; |
306 | struct core_info *core; | ||
307 | int nr_cores; | ||
308 | int i; | 356 | int i; |
309 | 357 | ||
310 | if (stfle(&facility_bits, 1) <= 0) | 358 | if (stfle(&facility_bits, 1) <= 0) |
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void) | |||
315 | 363 | ||
316 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 364 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
317 | info = tl_info; | 365 | info = tl_info; |
318 | stsi(info, 15, 1, 2); | 366 | store_topology(info); |
319 | |||
320 | nr_cores = info->mag[NR_MAG - 2]; | ||
321 | for (i = 0; i < info->mnest - 2; i++) | ||
322 | nr_cores *= info->mag[NR_MAG - 3 - i]; | ||
323 | |||
324 | pr_info("The CPU configuration topology of the machine is:"); | 367 | pr_info("The CPU configuration topology of the machine is:"); |
325 | for (i = 0; i < NR_MAG; i++) | 368 | for (i = 0; i < NR_MAG; i++) |
326 | printk(" %d", info->mag[i]); | 369 | printk(" %d", info->mag[i]); |
327 | printk(" / %d\n", info->mnest); | 370 | printk(" / %d\n", info->mnest); |
328 | 371 | alloc_masks(info, &core_info, 2); | |
329 | core = &core_info; | 372 | #ifdef CONFIG_SCHED_BOOK |
330 | for (i = 0; i < nr_cores; i++) { | 373 | alloc_masks(info, &book_info, 3); |
331 | core->next = alloc_bootmem(sizeof(struct core_info)); | 374 | #endif |
332 | core = core->next; | ||
333 | if (!core) | ||
334 | goto error; | ||
335 | } | ||
336 | return; | ||
337 | error: | ||
338 | machine_has_topology = 0; | ||
339 | } | 375 | } |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 35b6c3f85173..35b6879628a0 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -16,6 +16,7 @@ config SUPERH | |||
16 | select HAVE_ARCH_TRACEHOOK | 16 | select HAVE_ARCH_TRACEHOOK |
17 | select HAVE_DMA_API_DEBUG | 17 | select HAVE_DMA_API_DEBUG |
18 | select HAVE_DMA_ATTRS | 18 | select HAVE_DMA_ATTRS |
19 | select HAVE_IRQ_WORK | ||
19 | select HAVE_PERF_EVENTS | 20 | select HAVE_PERF_EVENTS |
20 | select PERF_USE_VMALLOC | 21 | select PERF_USE_VMALLOC |
21 | select HAVE_KERNEL_GZIP | 22 | select HAVE_KERNEL_GZIP |
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 3d0c9f36d150..14308bed7ea5 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h | |||
@@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *); | |||
26 | extern int reserve_pmc_hardware(void); | 26 | extern int reserve_pmc_hardware(void); |
27 | extern void release_pmc_hardware(void); | 27 | extern void release_pmc_hardware(void); |
28 | 28 | ||
29 | static inline void set_perf_event_pending(void) | ||
30 | { | ||
31 | /* Nothing to see here, move along. */ | ||
32 | } | ||
33 | |||
34 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
35 | |||
36 | #endif /* __ASM_SH_PERF_EVENT_H */ | 29 | #endif /* __ASM_SH_PERF_EVENT_H */ |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 257de1f0692b..ae5bac39b896 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -290,7 +290,7 @@ void __init init_IRQ(void) | |||
290 | int __init arch_probe_nr_irqs(void) | 290 | int __init arch_probe_nr_irqs(void) |
291 | { | 291 | { |
292 | nr_irqs = sh_mv.mv_nr_irqs; | 292 | nr_irqs = sh_mv.mv_nr_irqs; |
293 | return 0; | 293 | return NR_IRQS_LEGACY; |
294 | } | 294 | } |
295 | #endif | 295 | #endif |
296 | 296 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9212cd42a832..3e9d31401fb2 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -26,6 +26,7 @@ config SPARC | |||
26 | select ARCH_WANT_OPTIONAL_GPIOLIB | 26 | select ARCH_WANT_OPTIONAL_GPIOLIB |
27 | select RTC_CLASS | 27 | select RTC_CLASS |
28 | select RTC_DRV_M48T59 | 28 | select RTC_DRV_M48T59 |
29 | select HAVE_IRQ_WORK | ||
29 | select HAVE_PERF_EVENTS | 30 | select HAVE_PERF_EVENTS |
30 | select PERF_USE_VMALLOC | 31 | select PERF_USE_VMALLOC |
31 | select HAVE_DMA_ATTRS | 32 | select HAVE_DMA_ATTRS |
@@ -54,6 +55,7 @@ config SPARC64 | |||
54 | select RTC_DRV_BQ4802 | 55 | select RTC_DRV_BQ4802 |
55 | select RTC_DRV_SUN4V | 56 | select RTC_DRV_SUN4V |
56 | select RTC_DRV_STARFIRE | 57 | select RTC_DRV_STARFIRE |
58 | select HAVE_IRQ_WORK | ||
57 | select HAVE_PERF_EVENTS | 59 | select HAVE_PERF_EVENTS |
58 | select PERF_USE_VMALLOC | 60 | select PERF_USE_VMALLOC |
59 | 61 | ||
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h index 727af70646cb..6e8bfa1786da 100644 --- a/arch/sparc/include/asm/perf_event.h +++ b/arch/sparc/include/asm/perf_event.h | |||
@@ -1,10 +1,6 @@ | |||
1 | #ifndef __ASM_SPARC_PERF_EVENT_H | 1 | #ifndef __ASM_SPARC_PERF_EVENT_H |
2 | #define __ASM_SPARC_PERF_EVENT_H | 2 | #define __ASM_SPARC_PERF_EVENT_H |
3 | 3 | ||
4 | extern void set_perf_event_pending(void); | ||
5 | |||
6 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
7 | |||
8 | #ifdef CONFIG_PERF_EVENTS | 4 | #ifdef CONFIG_PERF_EVENTS |
9 | #include <asm/ptrace.h> | 5 | #include <asm/ptrace.h> |
10 | 6 | ||
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 548b8ca9c210..b210416ace7b 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c | |||
@@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) | |||
114 | 114 | ||
115 | static struct irq_chip msi_irq = { | 115 | static struct irq_chip msi_irq = { |
116 | .name = "PCI-MSI", | 116 | .name = "PCI-MSI", |
117 | .mask = mask_msi_irq, | 117 | .irq_mask = mask_msi_irq, |
118 | .unmask = unmask_msi_irq, | 118 | .irq_unmask = unmask_msi_irq, |
119 | .enable = unmask_msi_irq, | 119 | .irq_enable = unmask_msi_irq, |
120 | .disable = mask_msi_irq, | 120 | .irq_disable = mask_msi_irq, |
121 | /* XXX affinity XXX */ | 121 | /* XXX affinity XXX */ |
122 | }; | 122 | }; |
123 | 123 | ||
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index c4a6a50b4849..b87873c0e8ea 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
9 | 9 | ||
10 | #include <linux/perf_event.h> | 10 | #include <linux/irq_work.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | 12 | ||
13 | #include <asm/pil.h> | 13 | #include <asm/pil.h> |
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) | |||
43 | 43 | ||
44 | old_regs = set_irq_regs(regs); | 44 | old_regs = set_irq_regs(regs); |
45 | irq_enter(); | 45 | irq_enter(); |
46 | #ifdef CONFIG_PERF_EVENTS | 46 | #ifdef CONFIG_IRQ_WORK |
47 | perf_event_do_pending(); | 47 | irq_work_run(); |
48 | #endif | 48 | #endif |
49 | irq_exit(); | 49 | irq_exit(); |
50 | set_irq_regs(old_regs); | 50 | set_irq_regs(old_regs); |
51 | } | 51 | } |
52 | 52 | ||
53 | void set_perf_event_pending(void) | 53 | void arch_irq_work_raise(void) |
54 | { | 54 | { |
55 | set_softint(1 << PIL_DEFERRED_PCR_WORK); | 55 | set_softint(1 << PIL_DEFERRED_PCR_WORK); |
56 | } | 56 | } |
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 596c60086930..9a27d563fc30 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c | |||
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq) | |||
208 | } | 208 | } |
209 | 209 | ||
210 | static struct irq_chip tile_irq_chip = { | 210 | static struct irq_chip tile_irq_chip = { |
211 | .typename = "tile_irq_chip", | 211 | .name = "tile_irq_chip", |
212 | .ack = tile_irq_chip_ack, | 212 | .ack = tile_irq_chip_ack, |
213 | .eoi = tile_irq_chip_eoi, | 213 | .eoi = tile_irq_chip_eoi, |
214 | .mask = tile_irq_chip_mask, | 214 | .mask = tile_irq_chip_mask, |
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
288 | for_each_online_cpu(j) | 288 | for_each_online_cpu(j) |
289 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 289 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
290 | #endif | 290 | #endif |
291 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 291 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
292 | seq_printf(p, " %s", action->name); | 292 | seq_printf(p, " %s", action->name); |
293 | 293 | ||
294 | for (action = action->next; action; action = action->next) | 294 | for (action = action->next; action; action = action->next) |
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c index 0c46e398cd8f..63c740a85b4c 100644 --- a/arch/um/drivers/hostaudio_kern.c +++ b/arch/um/drivers/hostaudio_kern.c | |||
@@ -40,6 +40,11 @@ static char *mixer = HOSTAUDIO_DEV_MIXER; | |||
40 | " This is used to specify the host mixer device to the hostaudio driver.\n"\ | 40 | " This is used to specify the host mixer device to the hostaudio driver.\n"\ |
41 | " The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n" | 41 | " The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n" |
42 | 42 | ||
43 | module_param(dsp, charp, 0644); | ||
44 | MODULE_PARM_DESC(dsp, DSP_HELP); | ||
45 | module_param(mixer, charp, 0644); | ||
46 | MODULE_PARM_DESC(mixer, MIXER_HELP); | ||
47 | |||
43 | #ifndef MODULE | 48 | #ifndef MODULE |
44 | static int set_dsp(char *name, int *add) | 49 | static int set_dsp(char *name, int *add) |
45 | { | 50 | { |
@@ -56,15 +61,6 @@ static int set_mixer(char *name, int *add) | |||
56 | } | 61 | } |
57 | 62 | ||
58 | __uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP); | 63 | __uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP); |
59 | |||
60 | #else /*MODULE*/ | ||
61 | |||
62 | module_param(dsp, charp, 0644); | ||
63 | MODULE_PARM_DESC(dsp, DSP_HELP); | ||
64 | |||
65 | module_param(mixer, charp, 0644); | ||
66 | MODULE_PARM_DESC(mixer, MIXER_HELP); | ||
67 | |||
68 | #endif | 64 | #endif |
69 | 65 | ||
70 | /* /dev/dsp file operations */ | 66 | /* /dev/dsp file operations */ |
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 1bcd208c459f..9734994cba1e 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
@@ -163,6 +163,7 @@ struct ubd { | |||
163 | struct scatterlist sg[MAX_SG]; | 163 | struct scatterlist sg[MAX_SG]; |
164 | struct request *request; | 164 | struct request *request; |
165 | int start_sg, end_sg; | 165 | int start_sg, end_sg; |
166 | sector_t rq_pos; | ||
166 | }; | 167 | }; |
167 | 168 | ||
168 | #define DEFAULT_COW { \ | 169 | #define DEFAULT_COW { \ |
@@ -187,6 +188,7 @@ struct ubd { | |||
187 | .request = NULL, \ | 188 | .request = NULL, \ |
188 | .start_sg = 0, \ | 189 | .start_sg = 0, \ |
189 | .end_sg = 0, \ | 190 | .end_sg = 0, \ |
191 | .rq_pos = 0, \ | ||
190 | } | 192 | } |
191 | 193 | ||
192 | /* Protected by ubd_lock */ | 194 | /* Protected by ubd_lock */ |
@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q) | |||
1228 | { | 1230 | { |
1229 | struct io_thread_req *io_req; | 1231 | struct io_thread_req *io_req; |
1230 | struct request *req; | 1232 | struct request *req; |
1231 | sector_t sector; | ||
1232 | int n; | 1233 | int n; |
1233 | 1234 | ||
1234 | while(1){ | 1235 | while(1){ |
@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q) | |||
1239 | return; | 1240 | return; |
1240 | 1241 | ||
1241 | dev->request = req; | 1242 | dev->request = req; |
1243 | dev->rq_pos = blk_rq_pos(req); | ||
1242 | dev->start_sg = 0; | 1244 | dev->start_sg = 0; |
1243 | dev->end_sg = blk_rq_map_sg(q, req, dev->sg); | 1245 | dev->end_sg = blk_rq_map_sg(q, req, dev->sg); |
1244 | } | 1246 | } |
1245 | 1247 | ||
1246 | req = dev->request; | 1248 | req = dev->request; |
1247 | sector = blk_rq_pos(req); | ||
1248 | while(dev->start_sg < dev->end_sg){ | 1249 | while(dev->start_sg < dev->end_sg){ |
1249 | struct scatterlist *sg = &dev->sg[dev->start_sg]; | 1250 | struct scatterlist *sg = &dev->sg[dev->start_sg]; |
1250 | 1251 | ||
@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q) | |||
1256 | return; | 1257 | return; |
1257 | } | 1258 | } |
1258 | prepare_request(req, io_req, | 1259 | prepare_request(req, io_req, |
1259 | (unsigned long long)sector << 9, | 1260 | (unsigned long long)dev->rq_pos << 9, |
1260 | sg->offset, sg->length, sg_page(sg)); | 1261 | sg->offset, sg->length, sg_page(sg)); |
1261 | 1262 | ||
1262 | sector += sg->length >> 9; | ||
1263 | n = os_write_file(thread_fd, &io_req, | 1263 | n = os_write_file(thread_fd, &io_req, |
1264 | sizeof(struct io_thread_req *)); | 1264 | sizeof(struct io_thread_req *)); |
1265 | if(n != sizeof(struct io_thread_req *)){ | 1265 | if(n != sizeof(struct io_thread_req *)){ |
@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q) | |||
1272 | return; | 1272 | return; |
1273 | } | 1273 | } |
1274 | 1274 | ||
1275 | dev->rq_pos += sg->length >> 9; | ||
1275 | dev->start_sg++; | 1276 | dev->start_sg++; |
1276 | } | 1277 | } |
1277 | dev->end_sg = 0; | 1278 | dev->end_sg = 0; |
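The ubd change above fixes a subtle re-entrancy bug: do_ubd_request() can bail out mid-request when the I/O-thread pipe fills up and be re-entered later, so a sector position recomputed from blk_rq_pos() at entry silently rewinds work already submitted. Persisting the cursor in the per-device state is the fix. A minimal sketch of the idiom, with hypothetical names throughout:

/* Hedged sketch of the "keep the cursor in device state" idiom applied
 * above; dev_state, segment and submit_io() are all hypothetical.
 */
typedef unsigned long long sector_t;

struct segment { unsigned int len; };            /* byte length */

struct dev_state {
	sector_t pos;                            /* survives early returns */
	int cur, nsegs;
};

int submit_io(struct segment *seg, sector_t pos); /* <0 if queue is full */

static void service(struct dev_state *d, struct segment *seg)
{
	while (d->cur < d->nsegs) {
		if (submit_io(&seg[d->cur], d->pos) < 0)
			return;                  /* resume later; pos intact */
		d->pos += seg[d->cur].len >> 9;  /* advance in 512B sectors */
		d->cur++;
	}
}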
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index a3f0b04d7101..a746e3037a5b 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c | |||
@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
46 | for_each_online_cpu(j) | 46 | for_each_online_cpu(j) |
47 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 47 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
48 | #endif | 48 | #endif |
49 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 49 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
50 | seq_printf(p, " %s", action->name); | 50 | seq_printf(p, " %s", action->name); |
51 | 51 | ||
52 | for (action=action->next; action; action = action->next) | 52 | for (action=action->next; action; action = action->next) |
@@ -369,7 +369,7 @@ static void dummy(unsigned int irq) | |||
369 | 369 | ||
370 | /* This is used for everything other than the timer. */ | 370 | /* This is used for everything other than the timer. */ |
371 | static struct irq_chip normal_irq_type = { | 371 | static struct irq_chip normal_irq_type = { |
372 | .typename = "SIGIO", | 372 | .name = "SIGIO", |
373 | .release = free_irq_by_irq_and_dev, | 373 | .release = free_irq_by_irq_and_dev, |
374 | .disable = dummy, | 374 | .disable = dummy, |
375 | .enable = dummy, | 375 | .enable = dummy, |
@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = { | |||
378 | }; | 378 | }; |
379 | 379 | ||
380 | static struct irq_chip SIGVTALRM_irq_type = { | 380 | static struct irq_chip SIGVTALRM_irq_type = { |
381 | .typename = "SIGVTALRM", | 381 | .name = "SIGVTALRM", |
382 | .release = free_irq_by_irq_and_dev, | 382 | .release = free_irq_by_irq_and_dev, |
383 | .shutdown = dummy, /* never called */ | 383 | .shutdown = dummy, /* never called */ |
384 | .disable = dummy, | 384 | .disable = dummy, |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9815221976a7..7ab9db88ab6a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -25,6 +25,7 @@ config X86 | |||
25 | select HAVE_IDE | 25 | select HAVE_IDE |
26 | select HAVE_OPROFILE | 26 | select HAVE_OPROFILE |
27 | select HAVE_PERF_EVENTS if (!M386 && !M486) | 27 | select HAVE_PERF_EVENTS if (!M386 && !M486) |
28 | select HAVE_IRQ_WORK | ||
28 | select HAVE_IOREMAP_PROT | 29 | select HAVE_IOREMAP_PROT |
29 | select HAVE_KPROBES | 30 | select HAVE_KPROBES |
30 | select ARCH_WANT_OPTIONAL_GPIOLIB | 31 | select ARCH_WANT_OPTIONAL_GPIOLIB |
@@ -62,6 +63,10 @@ config X86 | |||
62 | select HAVE_USER_RETURN_NOTIFIER | 63 | select HAVE_USER_RETURN_NOTIFIER |
63 | select HAVE_ARCH_JUMP_LABEL | 64 | select HAVE_ARCH_JUMP_LABEL |
64 | select HAVE_TEXT_POKE_SMP | 65 | select HAVE_TEXT_POKE_SMP |
66 | select HAVE_GENERIC_HARDIRQS | ||
67 | select HAVE_SPARSE_IRQ | ||
68 | select GENERIC_IRQ_PROBE | ||
69 | select GENERIC_PENDING_IRQ if SMP | ||
65 | 70 | ||
66 | config INSTRUCTION_DECODER | 71 | config INSTRUCTION_DECODER |
67 | def_bool (KPROBES || PERF_EVENTS) | 72 | def_bool (KPROBES || PERF_EVENTS) |
@@ -203,20 +208,6 @@ config HAVE_INTEL_TXT | |||
203 | def_bool y | 208 | def_bool y |
204 | depends on EXPERIMENTAL && DMAR && ACPI | 209 | depends on EXPERIMENTAL && DMAR && ACPI |
205 | 210 | ||
206 | # Use the generic interrupt handling code in kernel/irq/: | ||
207 | config GENERIC_HARDIRQS | ||
208 | def_bool y | ||
209 | |||
210 | config GENERIC_HARDIRQS_NO__DO_IRQ | ||
211 | def_bool y | ||
212 | |||
213 | config GENERIC_IRQ_PROBE | ||
214 | def_bool y | ||
215 | |||
216 | config GENERIC_PENDING_IRQ | ||
217 | def_bool y | ||
218 | depends on GENERIC_HARDIRQS && SMP | ||
219 | |||
220 | config USE_GENERIC_SMP_HELPERS | 211 | config USE_GENERIC_SMP_HELPERS |
221 | def_bool y | 212 | def_bool y |
222 | depends on SMP | 213 | depends on SMP |
@@ -299,23 +290,6 @@ config X86_X2APIC | |||
299 | 290 | ||
300 | If you don't know what to do here, say N. | 291 | If you don't know what to do here, say N. |
301 | 292 | ||
302 | config SPARSE_IRQ | ||
303 | bool "Support sparse irq numbering" | ||
304 | depends on PCI_MSI || HT_IRQ | ||
305 | ---help--- | ||
306 | This enables support for sparse irqs. This is useful for distro | ||
307 | kernels that want to define a high CONFIG_NR_CPUS value but still | ||
308 | want to have low kernel memory footprint on smaller machines. | ||
309 | |||
310 | ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread | ||
311 | out the irq_desc[] array in a more NUMA-friendly way. ) | ||
312 | |||
313 | If you don't know what to do here, say N. | ||
314 | |||
315 | config NUMA_IRQ_DESC | ||
316 | def_bool y | ||
317 | depends on SPARSE_IRQ && NUMA | ||
318 | |||
319 | config X86_MPPARSE | 293 | config X86_MPPARSE |
320 | bool "Enable MPS table" if ACPI | 294 | bool "Enable MPS table" if ACPI |
321 | default y | 295 | default y |
@@ -520,25 +494,6 @@ if PARAVIRT_GUEST | |||
520 | 494 | ||
521 | source "arch/x86/xen/Kconfig" | 495 | source "arch/x86/xen/Kconfig" |
522 | 496 | ||
523 | config VMI | ||
524 | bool "VMI Guest support (DEPRECATED)" | ||
525 | select PARAVIRT | ||
526 | depends on X86_32 | ||
527 | ---help--- | ||
528 | VMI provides a paravirtualized interface to the VMware ESX server | ||
529 | (it could be used by other hypervisors in theory too, but is not | ||
530 | at the moment), by linking the kernel to a GPL-ed ROM module | ||
531 | provided by the hypervisor. | ||
532 | |||
533 | As of September 2009, VMware has started a phased retirement | ||
534 | of this feature from VMware's products. Please see | ||
535 | feature-removal-schedule.txt for details. If you are | ||
536 | planning to enable this option, please note that you cannot | ||
537 | live migrate a VMI enabled VM to a future VMware product, | ||
538 | which doesn't support VMI. So if you expect your kernel to | ||
539 | seamlessly migrate to newer VMware products, keep this | ||
540 | disabled. | ||
541 | |||
542 | config KVM_CLOCK | 497 | config KVM_CLOCK |
543 | bool "KVM paravirtualized clock" | 498 | bool "KVM paravirtualized clock" |
544 | select PARAVIRT | 499 | select PARAVIRT |
@@ -673,7 +628,7 @@ config GART_IOMMU | |||
673 | bool "GART IOMMU support" if EMBEDDED | 628 | bool "GART IOMMU support" if EMBEDDED |
674 | default y | 629 | default y |
675 | select SWIOTLB | 630 | select SWIOTLB |
676 | depends on X86_64 && PCI && K8_NB | 631 | depends on X86_64 && PCI && AMD_NB |
677 | ---help--- | 632 | ---help--- |
678 | Support for full DMA access of devices with 32bit memory access only | 633 | Support for full DMA access of devices with 32bit memory access only |
679 | on systems with more than 3GB. This is usually needed for USB, | 634 | on systems with more than 3GB. This is usually needed for USB, |
@@ -798,6 +753,17 @@ config SCHED_MC | |||
798 | making when dealing with multi-core CPU chips at a cost of slightly | 753 | making when dealing with multi-core CPU chips at a cost of slightly |
799 | increased overhead in some places. If unsure say N here. | 754 | increased overhead in some places. If unsure say N here. |
800 | 755 | ||
756 | config IRQ_TIME_ACCOUNTING | ||
757 | bool "Fine granularity task level IRQ time accounting" | ||
758 | default n | ||
759 | ---help--- | ||
760 | Select this option to enable fine granularity task irq time | ||
761 | accounting. This is done by reading a timestamp on each | ||
762 | transition between softirq and hardirq state, so there can be a | ||
763 | small performance impact. | ||
764 | |||
765 | If in doubt, say N here. | ||
766 | |||
801 | source "kernel/Kconfig.preempt" | 767 | source "kernel/Kconfig.preempt" |
802 | 768 | ||
803 | config X86_UP_APIC | 769 | config X86_UP_APIC |
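The IRQ_TIME_ACCOUNTING help text above compresses the mechanism into one sentence; the idea is to timestamp every hardirq (and softirq) entry and exit and charge the delta to a per-CPU bucket rather than to the interrupted task. A hedged sketch of that bookkeeping — the names are illustrative, not the kernel's:

/* Illustrative per-CPU IRQ time bookkeeping, not the kernel's actual
 * implementation; sched_clock() stands in for a cheap monotonic clock.
 */
unsigned long long sched_clock(void);

static unsigned long long irq_start_time;        /* per-CPU in reality */
static unsigned long long cpu_hardirq_time;      /* accumulated ns */

void account_irq_enter(void)
{
	irq_start_time = sched_clock();          /* timestamp the transition */
}

void account_irq_exit(void)
{
	/* Charge the elapsed time to the CPU, not to current's runtime. */
	cpu_hardirq_time += sched_clock() - irq_start_time;
}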
@@ -1151,6 +1117,9 @@ config X86_PAE | |||
1151 | config ARCH_PHYS_ADDR_T_64BIT | 1117 | config ARCH_PHYS_ADDR_T_64BIT |
1152 | def_bool X86_64 || X86_PAE | 1118 | def_bool X86_64 || X86_PAE |
1153 | 1119 | ||
1120 | config ARCH_DMA_ADDR_T_64BIT | ||
1121 | def_bool X86_64 || HIGHMEM64G | ||
1122 | |||
1154 | config DIRECT_GBPAGES | 1123 | config DIRECT_GBPAGES |
1155 | bool "Enable 1GB pages for kernel pagetables" if EMBEDDED | 1124 | bool "Enable 1GB pages for kernel pagetables" if EMBEDDED |
1156 | default y | 1125 | default y |
@@ -1329,25 +1298,34 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK | |||
1329 | Set whether the default state of memory_corruption_check is | 1298 | Set whether the default state of memory_corruption_check is |
1330 | on or off. | 1299 | on or off. |
1331 | 1300 | ||
1332 | config X86_RESERVE_LOW_64K | 1301 | config X86_RESERVE_LOW |
1333 | bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen" | 1302 | int "Amount of low memory, in kilobytes, to reserve for the BIOS" |
1334 | default y | 1303 | default 64 |
1304 | range 4 640 | ||
1335 | ---help--- | 1305 | ---help--- |
1336 | Reserve the first 64K of physical RAM on BIOSes that are known | 1306 | Specify the amount of low memory to reserve for the BIOS. |
1337 | to potentially corrupt that memory range. A number of BIOSes are | 1307 | |
1338 | known to utilize this area during suspend/resume, so it must not | 1308 | The first page contains BIOS data structures that the kernel |
1339 | be used by the kernel. | 1309 | must not use, so that page must always be reserved. |
1340 | 1310 | ||
1341 | Set this to N if you are absolutely sure that you trust the BIOS | 1311 | By default we reserve the first 64K of physical RAM, as a |
1342 | to get all its memory reservations and usages right. | 1312 | number of BIOSes are known to corrupt that memory range |
1313 | during events such as suspend/resume or monitor cable | ||
1314 | insertion, so it must not be used by the kernel. | ||
1343 | 1315 | ||
1344 | If you have doubts about the BIOS (e.g. suspend/resume does not | 1316 | You can set this to 4 if you are absolutely sure that you |
1345 | work or there's kernel crashes after certain hardware hotplug | 1317 | trust the BIOS to get all its memory reservations and usages |
1346 | events) and it's not AMI or Phoenix, then you might want to enable | 1318 | right. If you know your BIOS have problems beyond the |
1347 | X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical | 1319 | default 64K area, you can set this to 640 to avoid using the |
1348 | corruption patterns. | 1320 | entire low memory range. |
1349 | 1321 | ||
1350 | Say Y if unsure. | 1322 | If you have doubts about the BIOS (e.g. suspend/resume does |
1323 | not work or there are kernel crashes after certain hardware | ||
1324 | hotplug events) then you might want to enable | ||
1325 | X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check | ||
1326 | typical corruption patterns. | ||
1327 | |||
1328 | Leave this to the default value of 64 if you are unsure. | ||
1351 | 1329 | ||
1352 | config MATH_EMULATION | 1330 | config MATH_EMULATION |
1353 | bool | 1331 | bool |
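Turning X86_RESERVE_LOW into an int means the reservation now reaches setup code as a kilobyte count instead of a yes/no flag. A hedged sketch of how such a value is typically consumed at boot; the helper name and the memblock call are illustrative assumptions, not code from this patch:

/* Illustrative boot-time consumer of a kilobyte-count Kconfig symbol. */
#define CONFIG_X86_RESERVE_LOW 64                /* from Kconfig, range 4..640 */

void memblock_reserve(unsigned long long base, unsigned long long size);

static void reserve_bios_low_memory(void)
{
	unsigned long long size = CONFIG_X86_RESERVE_LOW << 10; /* KiB -> B */

	/* Page 0 always carries BIOS data structures; the rest is policy. */
	memblock_reserve(0, size);
}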
@@ -1903,7 +1881,7 @@ config PCI_GODIRECT | |||
1903 | bool "Direct" | 1881 | bool "Direct" |
1904 | 1882 | ||
1905 | config PCI_GOOLPC | 1883 | config PCI_GOOLPC |
1906 | bool "OLPC" | 1884 | bool "OLPC XO-1" |
1907 | depends on OLPC | 1885 | depends on OLPC |
1908 | 1886 | ||
1909 | config PCI_GOANY | 1887 | config PCI_GOANY |
@@ -2064,14 +2042,21 @@ config SCx200HR_TIMER | |||
2064 | config OLPC | 2042 | config OLPC |
2065 | bool "One Laptop Per Child support" | 2043 | bool "One Laptop Per Child support" |
2066 | select GPIOLIB | 2044 | select GPIOLIB |
2045 | select OLPC_OPENFIRMWARE | ||
2067 | ---help--- | 2046 | ---help--- |
2068 | Add support for detecting the unique features of the OLPC | 2047 | Add support for detecting the unique features of the OLPC |
2069 | XO hardware. | 2048 | XO hardware. |
2070 | 2049 | ||
2050 | config OLPC_XO1 | ||
2051 | tristate "OLPC XO-1 support" | ||
2052 | depends on OLPC && PCI | ||
2053 | ---help--- | ||
2054 | Add support for non-essential features of the OLPC XO-1 laptop. | ||
2055 | |||
2071 | config OLPC_OPENFIRMWARE | 2056 | config OLPC_OPENFIRMWARE |
2072 | bool "Support for OLPC's Open Firmware" | 2057 | bool "Support for OLPC's Open Firmware" |
2073 | depends on !X86_64 && !X86_PAE | 2058 | depends on !X86_64 && !X86_PAE |
2074 | default y if OLPC | 2059 | default n |
2075 | help | 2060 | help |
2076 | This option adds support for the implementation of Open Firmware | 2061 | This option adds support for the implementation of Open Firmware |
2077 | that is used on the OLPC XO-1 Children's Machine. | 2062 | that is used on the OLPC XO-1 Children's Machine. |
@@ -2079,7 +2064,7 @@ config OLPC_OPENFIRMWARE | |||
2079 | 2064 | ||
2080 | endif # X86_32 | 2065 | endif # X86_32 |
2081 | 2066 | ||
2082 | config K8_NB | 2067 | config AMD_NB |
2083 | def_bool y | 2068 | def_bool y |
2084 | depends on CPU_SUP_AMD && PCI | 2069 | depends on CPU_SUP_AMD && PCI |
2085 | 2070 | ||
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 75085080b63e..e5bb96b10f1a 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
@@ -43,6 +43,10 @@ config EARLY_PRINTK | |||
43 | with klogd/syslogd or the X server. You should normally say N here, | 43 | with klogd/syslogd or the X server. You should normally say N here, |
44 | unless you want to debug such a crash. | 44 | unless you want to debug such a crash. |
45 | 45 | ||
46 | config EARLY_PRINTK_MRST | ||
47 | bool "Early printk for MRST platform support" | ||
48 | depends on EARLY_PRINTK && X86_MRST | ||
49 | |||
46 | config EARLY_PRINTK_DBGP | 50 | config EARLY_PRINTK_DBGP |
47 | bool "Early printk via EHCI debug port" | 51 | bool "Early printk via EHCI debug port" |
48 | depends on EARLY_PRINTK && PCI | 52 | depends on EARLY_PRINTK && PCI |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index e8c8881351b3..b02e509072a7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -96,8 +96,12 @@ cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_en | |||
96 | # is .cfi_signal_frame supported too? | 96 | # is .cfi_signal_frame supported too? |
97 | cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) | 97 | cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) |
98 | cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) | 98 | cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) |
99 | KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) | 99 | |
100 | KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) | 100 | # does binutils support specific instructions? |
101 | asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) | ||
102 | |||
103 | KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) | ||
104 | KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) | ||
101 | 105 | ||
102 | LDFLAGS := -m elf_$(UTS_MACHINE) | 106 | LDFLAGS := -m elf_$(UTS_MACHINE) |
103 | 107 | ||
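The new asinstr probe test-assembles `fxsaveq (%rax)` and, on success, feeds -DCONFIG_AS_FXSAVEQ=1 into both AFLAGS and CFLAGS, so C and assembly alike can branch on assembler capability at build time. The i387.h hunk further down is the real consumer; the shape of such a consumer is roughly this (the struct is a stand-in):

/* Shape of a CONFIG_AS_FXSAVEQ consumer; compare the i387.h hunk below. */
struct fxsave_area { unsigned char data[512]; } __attribute__((aligned(16)));

static inline void do_fxsave(struct fxsave_area *fx)
{
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("fxsaveq %0" : "=m" (*fx));    /* gas >= 2.16 */
#else
	asm volatile("rex64/fxsave (%[fx])"         /* older binutils */
		     : "=m" (*fx) : [fx] "R" (fx));
#endif
}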
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 0350311906ae..2d93bdbc9ac0 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <asm/ia32.h> | 34 | #include <asm/ia32.h> |
35 | 35 | ||
36 | #undef WARN_OLD | 36 | #undef WARN_OLD |
37 | #undef CORE_DUMP /* probably broken */ | 37 | #undef CORE_DUMP /* definitely broken */ |
38 | 38 | ||
39 | static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs); | 39 | static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs); |
40 | static int load_aout_library(struct file *); | 40 | static int load_aout_library(struct file *); |
@@ -131,21 +131,15 @@ static void set_brk(unsigned long start, unsigned long end) | |||
131 | * macros to write out all the necessary info. | 131 | * macros to write out all the necessary info. |
132 | */ | 132 | */ |
133 | 133 | ||
134 | static int dump_write(struct file *file, const void *addr, int nr) | 134 | #include <linux/coredump.h> |
135 | { | ||
136 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | ||
137 | } | ||
138 | 135 | ||
139 | #define DUMP_WRITE(addr, nr) \ | 136 | #define DUMP_WRITE(addr, nr) \ |
140 | if (!dump_write(file, (void *)(addr), (nr))) \ | 137 | if (!dump_write(file, (void *)(addr), (nr))) \ |
141 | goto end_coredump; | 138 | goto end_coredump; |
142 | 139 | ||
143 | #define DUMP_SEEK(offset) \ | 140 | #define DUMP_SEEK(offset) \ |
144 | if (file->f_op->llseek) { \ | 141 | if (!dump_seek(file, offset)) \ |
145 | if (file->f_op->llseek(file, (offset), 0) != (offset)) \ | 142 | goto end_coredump; |
146 | goto end_coredump; \ | ||
147 | } else \ | ||
148 | file->f_pos = (offset) | ||
149 | 143 | ||
150 | #define START_DATA() (u.u_tsize << PAGE_SHIFT) | 144 | #define START_DATA() (u.u_tsize << PAGE_SHIFT) |
151 | #define START_STACK(u) (u.start_stack) | 145 | #define START_STACK(u) (u.start_stack) |
@@ -217,12 +211,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, | |||
217 | dump_size = dump.u_ssize << PAGE_SHIFT; | 211 | dump_size = dump.u_ssize << PAGE_SHIFT; |
218 | DUMP_WRITE(dump_start, dump_size); | 212 | DUMP_WRITE(dump_start, dump_size); |
219 | } | 213 | } |
220 | /* | ||
221 | * Finally dump the task struct. Not be used by gdb, but | ||
222 | * could be useful | ||
223 | */ | ||
224 | set_fs(KERNEL_DS); | ||
225 | DUMP_WRITE(current, sizeof(*current)); | ||
226 | end_coredump: | 214 | end_coredump: |
227 | set_fs(fs); | 215 | set_fs(fs); |
228 | return has_dumped; | 216 | return has_dumped; |
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index 5af2982133b5..f16a2caca1e0 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index cb030374b90a..916bc8111a01 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 08616180deaf..e3509fc303bf 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -416,13 +416,22 @@ struct amd_iommu { | |||
416 | struct dma_ops_domain *default_dom; | 416 | struct dma_ops_domain *default_dom; |
417 | 417 | ||
418 | /* | 418 | /* |
419 | * This array is required to work around a potential BIOS bug. | 419 | * We can't rely on the BIOS to restore all values on reinit, so we |
420 | * The BIOS may miss to restore parts of the PCI configuration | 420 | * need to stash them |
421 | * space when the system resumes from S3. The result is that the | ||
422 | * IOMMU does not execute commands anymore which leads to system | ||
423 | * failure. | ||
424 | */ | 421 | */ |
425 | u32 cache_cfg[4]; | 422 | |
423 | /* The iommu BAR */ | ||
424 | u32 stored_addr_lo; | ||
425 | u32 stored_addr_hi; | ||
426 | |||
427 | /* | ||
428 | * Each iommu has 6 l1s, each of which is documented as having 0x12 | ||
429 | * registers | ||
430 | */ | ||
431 | u32 stored_l1[6][0x12]; | ||
432 | |||
433 | /* The l2 indirect registers */ | ||
434 | u32 stored_l2[0x83]; | ||
426 | }; | 435 | }; |
427 | 436 | ||
428 | /* | 437 | /* |
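The stored_addr_lo/hi, stored_l1 and stored_l2 fields exist so the driver can replay IOMMU register state after S3 instead of trusting the BIOS to do it. A hedged sketch of the replay loop over the six L1 banks; the iommu_write_l1() accessor is an assumption, not the driver's real name:

/* Hedged sketch of the post-S3 replay the new fields enable. */
#define NR_L1_BANKS 6
#define NR_L1_REGS  0x12

struct iommu_state {
	unsigned int stored_l1[NR_L1_BANKS][NR_L1_REGS];
};

void iommu_write_l1(struct iommu_state *s, int bank, int reg,
		    unsigned int val);           /* indirect PCI write */

static void iommu_replay_l1(struct iommu_state *s)
{
	int bank, reg;

	/* The BIOS may not restore these on resume, so replay them all. */
	for (bank = 0; bank < NR_L1_BANKS; bank++)
		for (reg = 0; reg < NR_L1_REGS; reg++)
			iommu_write_l1(s, bank, reg,
				       s->stored_l1[bank][reg]);
}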
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/amd_nb.h index af00bd1d2089..c8517f81b21e 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/amd_nb.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_K8_H | 1 | #ifndef _ASM_X86_AMD_NB_H |
2 | #define _ASM_X86_K8_H | 2 | #define _ASM_X86_AMD_NB_H |
3 | 3 | ||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | 5 | ||
@@ -7,24 +7,27 @@ extern struct pci_device_id k8_nb_ids[]; | |||
7 | struct bootnode; | 7 | struct bootnode; |
8 | 8 | ||
9 | extern int early_is_k8_nb(u32 value); | 9 | extern int early_is_k8_nb(u32 value); |
10 | extern struct pci_dev **k8_northbridges; | ||
11 | extern int num_k8_northbridges; | ||
12 | extern int cache_k8_northbridges(void); | 10 | extern int cache_k8_northbridges(void); |
13 | extern void k8_flush_garts(void); | 11 | extern void k8_flush_garts(void); |
14 | extern int k8_get_nodes(struct bootnode *nodes); | 12 | extern int k8_get_nodes(struct bootnode *nodes); |
15 | extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); | 13 | extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); |
16 | extern int k8_scan_nodes(void); | 14 | extern int k8_scan_nodes(void); |
17 | 15 | ||
18 | #ifdef CONFIG_K8_NB | 16 | struct k8_northbridge_info { |
19 | extern int num_k8_northbridges; | 17 | u16 num; |
18 | u8 gart_supported; | ||
19 | struct pci_dev **nb_misc; | ||
20 | }; | ||
21 | extern struct k8_northbridge_info k8_northbridges; | ||
22 | |||
23 | #ifdef CONFIG_AMD_NB | ||
20 | 24 | ||
21 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 25 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
22 | { | 26 | { |
23 | return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; | 27 | return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL; |
24 | } | 28 | } |
25 | 29 | ||
26 | #else | 30 | #else |
27 | #define num_k8_northbridges 0 | ||
28 | 31 | ||
29 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 32 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
30 | { | 33 | { |
@@ -33,4 +36,4 @@ static inline struct pci_dev *node_to_k8_nb_misc(int node) | |||
33 | #endif | 36 | #endif |
34 | 37 | ||
35 | 38 | ||
36 | #endif /* _ASM_X86_K8_H */ | 39 | #endif /* _ASM_X86_AMD_NB_H */ |
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h index a69b1ac9eaf8..2fefa501d3ba 100644 --- a/arch/x86/include/asm/apb_timer.h +++ b/arch/x86/include/asm/apb_timer.h | |||
@@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event; | |||
54 | extern unsigned long apbt_quick_calibrate(void); | 54 | extern unsigned long apbt_quick_calibrate(void); |
55 | extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); | 55 | extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); |
56 | extern void apbt_setup_secondary_clock(void); | 56 | extern void apbt_setup_secondary_clock(void); |
57 | extern unsigned int boot_cpu_id; | ||
58 | 57 | ||
59 | extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); | 58 | extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); |
60 | extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); | 59 | extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 1fa03e04ae44..286de34b0ed6 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void) | |||
252 | } | 252 | } |
253 | #endif | 253 | #endif |
254 | 254 | ||
255 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); | 255 | extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); |
256 | extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); | ||
257 | |||
258 | 256 | ||
259 | #else /* !CONFIG_X86_LOCAL_APIC */ | 257 | #else /* !CONFIG_X86_LOCAL_APIC */ |
260 | static inline void lapic_shutdown(void) { } | 258 | static inline void lapic_shutdown(void) { } |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 7fe3b3060f08..a859ca461fb0 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -131,6 +131,7 @@ | |||
131 | #define APIC_EILVTn(n) (0x500 + 0x10 * n) | 131 | #define APIC_EILVTn(n) (0x500 + 0x10 * n) |
132 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ | 132 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ |
133 | #define APIC_EILVT_NR_AMD_10H 4 | 133 | #define APIC_EILVT_NR_AMD_10H 4 |
134 | #define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H | ||
134 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) | 135 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) |
135 | #define APIC_EILVT_MSG_FIX 0x0 | 136 | #define APIC_EILVT_MSG_FIX 0x0 |
136 | #define APIC_EILVT_MSG_SMI 0x2 | 137 | #define APIC_EILVT_MSG_SMI 0x2 |
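Folding the per-subsystem setters into a single setup_APIC_eilvt() makes the extended-LVT offset an explicit argument, bounded by the new APIC_EILVT_NR_MAX. A hedged caller sketch; the offset value and the 0-on-success convention are assumptions on my part:

/* Hedged caller of the unified EILVT setter; offset 1 is illustrative. */
#define APIC_EILVT_MSG_NMI 0x4                   /* NMI delivery mode */

int setup_APIC_eilvt(unsigned char lvt_off, unsigned char vector,
		     unsigned char msg_type, unsigned char mask);

static int enable_profiling_nmi(void)
{
	/* Route a profiling interrupt through extended LVT entry 1 as NMI;
	 * NMI delivery ignores the vector, so 0 is passed. */
	return setup_APIC_eilvt(1, 0, APIC_EILVT_MSG_NMI, 0);
}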
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index b185091bf19c..4fab24de26b1 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h | |||
@@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int); | |||
32 | 32 | ||
33 | DECLARE_PER_CPU(int, cpu_state); | 33 | DECLARE_PER_CPU(int, cpu_state); |
34 | 34 | ||
35 | extern unsigned int boot_cpu_id; | ||
36 | 35 | ||
37 | #endif /* _ASM_X86_CPU_H */ | 36 | #endif /* _ASM_X86_CPU_H */ |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 3f76523589af..220e2ea08e80 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -152,10 +152,14 @@ | |||
152 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ | 152 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ |
153 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ | 153 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ |
154 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ | 154 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ |
155 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ | 155 | #define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ |
156 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | 156 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
157 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | 157 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
158 | #define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */ | ||
159 | #define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */ | ||
158 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ | 160 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ |
161 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ | ||
162 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ | ||
159 | 163 | ||
160 | /* | 164 | /* |
161 | * Auxiliary flags: Linux defined - For features scattered in various | 165 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -180,6 +184,13 @@ | |||
180 | #define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */ | 184 | #define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */ |
181 | #define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ | 185 | #define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ |
182 | #define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ | 186 | #define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ |
187 | #define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ | ||
188 | #define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ | ||
189 | #define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */ | ||
190 | #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ | ||
191 | #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ | ||
192 | #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ | ||
193 | |||
183 | 194 | ||
184 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ | 195 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
185 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ | 196 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ |
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 733f7e91e7a9..326099199318 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h | |||
@@ -89,6 +89,16 @@ | |||
89 | CFI_ADJUST_CFA_OFFSET -8 | 89 | CFI_ADJUST_CFA_OFFSET -8 |
90 | .endm | 90 | .endm |
91 | 91 | ||
92 | .macro pushfq_cfi | ||
93 | pushfq | ||
94 | CFI_ADJUST_CFA_OFFSET 8 | ||
95 | .endm | ||
96 | |||
97 | .macro popfq_cfi | ||
98 | popfq | ||
99 | CFI_ADJUST_CFA_OFFSET -8 | ||
100 | .endm | ||
101 | |||
92 | .macro movq_cfi reg offset=0 | 102 | .macro movq_cfi reg offset=0 |
93 | movq %\reg, \offset(%rsp) | 103 | movq %\reg, \offset(%rsp) |
94 | CFI_REL_OFFSET \reg, \offset | 104 | CFI_REL_OFFSET \reg, \offset |
@@ -109,6 +119,16 @@ | |||
109 | CFI_ADJUST_CFA_OFFSET -4 | 119 | CFI_ADJUST_CFA_OFFSET -4 |
110 | .endm | 120 | .endm |
111 | 121 | ||
122 | .macro pushfl_cfi | ||
123 | pushfl | ||
124 | CFI_ADJUST_CFA_OFFSET 4 | ||
125 | .endm | ||
126 | |||
127 | .macro popfl_cfi | ||
128 | popfl | ||
129 | CFI_ADJUST_CFA_OFFSET -4 | ||
130 | .endm | ||
131 | |||
112 | .macro movl_cfi reg offset=0 | 132 | .macro movl_cfi reg offset=0 |
113 | movl %\reg, \offset(%esp) | 133 | movl %\reg, \offset(%esp) |
114 | CFI_REL_OFFSET \reg, \offset | 134 | CFI_REL_OFFSET \reg, \offset |
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 8e8ec663a98f..b8e96a18676b 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h | |||
@@ -49,8 +49,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) | |||
49 | BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) | 49 | BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) |
50 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) | 50 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) |
51 | 51 | ||
52 | #ifdef CONFIG_PERF_EVENTS | 52 | #ifdef CONFIG_IRQ_WORK |
53 | BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) | 53 | BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR) |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_X86_THERMAL_VECTOR | 56 | #ifdef CONFIG_X86_THERMAL_VECTOR |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index d07b44f7d1dc..4d293dced62f 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -214,5 +214,20 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) | |||
214 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | 214 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); |
215 | return __virt_to_fix(vaddr); | 215 | return __virt_to_fix(vaddr); |
216 | } | 216 | } |
217 | |||
218 | /* Return a pointer with the offset calculated */ | ||
219 | static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx, | ||
220 | phys_addr_t phys, pgprot_t flags) | ||
221 | { | ||
222 | __set_fixmap(idx, phys, flags); | ||
223 | return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); | ||
224 | } | ||
225 | |||
226 | #define set_fixmap_offset(idx, phys) \ | ||
227 | __set_fixmap_offset(idx, phys, PAGE_KERNEL) | ||
228 | |||
229 | #define set_fixmap_offset_nocache(idx, phys) \ | ||
230 | __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE) | ||
231 | |||
217 | #endif /* !__ASSEMBLY__ */ | 232 | #endif /* !__ASSEMBLY__ */ |
218 | #endif /* _ASM_X86_FIXMAP_H */ | 233 | #endif /* _ASM_X86_FIXMAP_H */ |
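__set_fixmap_offset() folds the recurring "map the page, then re-add the sub-page offset" sequence into one call, so callers of the two new macros get back a usable address even for unaligned physical addresses. A hedged usage sketch; FIX_EXAMPLE_IDX is a placeholder slot, not a real fixmap index:

#include <asm/fixmap.h>

/* Hedged usage of set_fixmap_offset_nocache(); the slot is a placeholder. */
static void __iomem *map_device_regs(phys_addr_t phys)
{
	/* phys may be unaligned; the sub-page offset survives the mapping. */
	unsigned long vaddr = set_fixmap_offset_nocache(FIX_EXAMPLE_IDX, phys);

	return (void __iomem *)vaddr;
}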
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 4ac5b0f33fc1..bf357f9b25f0 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h | |||
@@ -17,6 +17,7 @@ extern int fix_aperture; | |||
17 | #define GARTEN (1<<0) | 17 | #define GARTEN (1<<0) |
18 | #define DISGARTCPU (1<<4) | 18 | #define DISGARTCPU (1<<4) |
19 | #define DISGARTIO (1<<5) | 19 | #define DISGARTIO (1<<5) |
20 | #define DISTLBWALKPRB (1<<6) | ||
20 | 21 | ||
21 | /* GART cache control register bits. */ | 22 | /* GART cache control register bits. */ |
22 | #define INVGART (1<<0) | 23 | #define INVGART (1<<0) |
@@ -27,7 +28,6 @@ extern int fix_aperture; | |||
27 | #define AMD64_GARTAPERTUREBASE 0x94 | 28 | #define AMD64_GARTAPERTUREBASE 0x94 |
28 | #define AMD64_GARTTABLEBASE 0x98 | 29 | #define AMD64_GARTTABLEBASE 0x98 |
29 | #define AMD64_GARTCACHECTL 0x9c | 30 | #define AMD64_GARTCACHECTL 0x9c |
30 | #define AMD64_GARTEN (1<<0) | ||
31 | 31 | ||
32 | #ifdef CONFIG_GART_IOMMU | 32 | #ifdef CONFIG_GART_IOMMU |
33 | extern int gart_iommu_aperture; | 33 | extern int gart_iommu_aperture; |
@@ -57,6 +57,19 @@ static inline void gart_iommu_hole_init(void) | |||
57 | 57 | ||
58 | extern int agp_amd64_init(void); | 58 | extern int agp_amd64_init(void); |
59 | 59 | ||
60 | static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order) | ||
61 | { | ||
62 | u32 ctl; | ||
63 | |||
64 | /* | ||
65 | * Don't enable translation but enable GART IO and CPU accesses. | ||
66 | * Also, set DISTLBWALKPRB since GART table memory is UC. | ||
67 | */ | ||
68 | ctl = DISTLBWALKPRB | order << 1; | ||
69 | |||
70 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); | ||
71 | } | ||
72 | |||
60 | static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) | 73 | static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) |
61 | { | 74 | { |
62 | u32 tmp, ctl; | 75 | u32 tmp, ctl; |
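gart_set_size_and_enable() programs the aperture size while leaving translation disabled, and sets DISTLBWALKPRB because the GART tables live in uncached memory. A hedged caller sketch; the 32MB-<<-order encoding of the size field is my reading of the AMD convention, so treat it as an assumption:

#include <linux/pci.h>
#include <asm/gart.h>

/* Hedged caller: derive the aperture "order" from a size in MB, assuming
 * the usual aperture_size = 32MB << order encoding.
 */
static void setup_gart_aperture(struct pci_dev *nb, unsigned int aper_mb)
{
	unsigned int order = 0;

	while ((32U << order) < aper_mb)
		order++;

	gart_set_size_and_enable(nb, order);     /* translation stays off here */
}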
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index aeab29aee617..55e4de613f0e 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h | |||
@@ -14,7 +14,7 @@ typedef struct { | |||
14 | #endif | 14 | #endif |
15 | unsigned int x86_platform_ipis; /* arch dependent */ | 15 | unsigned int x86_platform_ipis; /* arch dependent */ |
16 | unsigned int apic_perf_irqs; | 16 | unsigned int apic_perf_irqs; |
17 | unsigned int apic_pending_irqs; | 17 | unsigned int apic_irq_work_irqs; |
18 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
19 | unsigned int irq_resched_count; | 19 | unsigned int irq_resched_count; |
20 | unsigned int irq_call_count; | 20 | unsigned int irq_call_count; |
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 1d5c08a1bdfd..2c392d663dce 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h | |||
@@ -74,10 +74,12 @@ extern void hpet_disable(void); | |||
74 | extern unsigned int hpet_readl(unsigned int a); | 74 | extern unsigned int hpet_readl(unsigned int a); |
75 | extern void force_hpet_resume(void); | 75 | extern void force_hpet_resume(void); |
76 | 76 | ||
77 | extern void hpet_msi_unmask(unsigned int irq); | 77 | struct irq_data; |
78 | extern void hpet_msi_mask(unsigned int irq); | 78 | extern void hpet_msi_unmask(struct irq_data *data); |
79 | extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); | 79 | extern void hpet_msi_mask(struct irq_data *data); |
80 | extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); | 80 | struct hpet_dev; |
81 | extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); | ||
82 | extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); | ||
81 | 83 | ||
82 | #ifdef CONFIG_PCI_MSI | 84 | #ifdef CONFIG_PCI_MSI |
83 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); | 85 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 46c0fe05f230..0274ec5a7e62 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -29,7 +29,7 @@ | |||
29 | extern void apic_timer_interrupt(void); | 29 | extern void apic_timer_interrupt(void); |
30 | extern void x86_platform_ipi(void); | 30 | extern void x86_platform_ipi(void); |
31 | extern void error_interrupt(void); | 31 | extern void error_interrupt(void); |
32 | extern void perf_pending_interrupt(void); | 32 | extern void irq_work_interrupt(void); |
33 | 33 | ||
34 | extern void spurious_interrupt(void); | 34 | extern void spurious_interrupt(void); |
35 | extern void thermal_interrupt(void); | 35 | extern void thermal_interrupt(void); |
@@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
78 | irq_attr->polarity = polarity; | 78 | irq_attr->polarity = polarity; |
79 | } | 79 | } |
80 | 80 | ||
81 | struct irq_2_iommu { | ||
82 | struct intel_iommu *iommu; | ||
83 | u16 irte_index; | ||
84 | u16 sub_handle; | ||
85 | u8 irte_mask; | ||
86 | }; | ||
87 | |||
81 | /* | 88 | /* |
82 | * This is performance-critical, we want to do it O(1) | 89 | * This is performance-critical, we want to do it O(1) |
83 | * | 90 | * |
@@ -89,15 +96,17 @@ struct irq_cfg { | |||
89 | cpumask_var_t old_domain; | 96 | cpumask_var_t old_domain; |
90 | u8 vector; | 97 | u8 vector; |
91 | u8 move_in_progress : 1; | 98 | u8 move_in_progress : 1; |
99 | #ifdef CONFIG_INTR_REMAP | ||
100 | struct irq_2_iommu irq_2_iommu; | ||
101 | #endif | ||
92 | }; | 102 | }; |
93 | 103 | ||
94 | extern struct irq_cfg *irq_cfg(unsigned int); | ||
95 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | 104 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); |
96 | extern void send_cleanup_vector(struct irq_cfg *); | 105 | extern void send_cleanup_vector(struct irq_cfg *); |
97 | 106 | ||
98 | struct irq_desc; | 107 | struct irq_data; |
99 | extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, | 108 | int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, |
100 | unsigned int *dest_id); | 109 | unsigned int *dest_id); |
101 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | 110 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); |
102 | extern void setup_ioapic_dest(void); | 111 | extern void setup_ioapic_dest(void); |
103 | 112 | ||
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index a73a8d5a5e69..4aa2bb3b242a 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h | |||
@@ -55,6 +55,12 @@ extern int save_i387_xstate_ia32(void __user *buf); | |||
55 | extern int restore_i387_xstate_ia32(void __user *buf); | 55 | extern int restore_i387_xstate_ia32(void __user *buf); |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_MATH_EMULATION | ||
59 | extern void finit_soft_fpu(struct i387_soft_struct *soft); | ||
60 | #else | ||
61 | static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} | ||
62 | #endif | ||
63 | |||
58 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | 64 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ |
59 | 65 | ||
60 | static __always_inline __pure bool use_xsaveopt(void) | 66 | static __always_inline __pure bool use_xsaveopt(void) |
@@ -67,6 +73,11 @@ static __always_inline __pure bool use_xsave(void) | |||
67 | return static_cpu_has(X86_FEATURE_XSAVE); | 73 | return static_cpu_has(X86_FEATURE_XSAVE); |
68 | } | 74 | } |
69 | 75 | ||
76 | static __always_inline __pure bool use_fxsr(void) | ||
77 | { | ||
78 | return static_cpu_has(X86_FEATURE_FXSR); | ||
79 | } | ||
80 | |||
70 | extern void __sanitize_i387_state(struct task_struct *); | 81 | extern void __sanitize_i387_state(struct task_struct *); |
71 | 82 | ||
72 | static inline void sanitize_i387_state(struct task_struct *tsk) | 83 | static inline void sanitize_i387_state(struct task_struct *tsk) |
@@ -77,19 +88,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk) | |||
77 | } | 88 | } |
78 | 89 | ||
79 | #ifdef CONFIG_X86_64 | 90 | #ifdef CONFIG_X86_64 |
80 | |||
81 | /* Ignore delayed exceptions from user space */ | ||
82 | static inline void tolerant_fwait(void) | ||
83 | { | ||
84 | asm volatile("1: fwait\n" | ||
85 | "2:\n" | ||
86 | _ASM_EXTABLE(1b, 2b)); | ||
87 | } | ||
88 | |||
89 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | 91 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) |
90 | { | 92 | { |
91 | int err; | 93 | int err; |
92 | 94 | ||
95 | /* See comment in fxsave() below. */ | ||
93 | asm volatile("1: rex64/fxrstor (%[fx])\n\t" | 96 | asm volatile("1: rex64/fxrstor (%[fx])\n\t" |
94 | "2:\n" | 97 | "2:\n" |
95 | ".section .fixup,\"ax\"\n" | 98 | ".section .fixup,\"ax\"\n" |
@@ -98,44 +101,10 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | |||
98 | ".previous\n" | 101 | ".previous\n" |
99 | _ASM_EXTABLE(1b, 3b) | 102 | _ASM_EXTABLE(1b, 3b) |
100 | : [err] "=r" (err) | 103 | : [err] "=r" (err) |
101 | #if 0 /* See comment in fxsave() below. */ | 104 | : [fx] "R" (fx), "m" (*fx), "0" (0)); |
102 | : [fx] "r" (fx), "m" (*fx), "0" (0)); | ||
103 | #else | ||
104 | : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); | ||
105 | #endif | ||
106 | return err; | 105 | return err; |
107 | } | 106 | } |
108 | 107 | ||
109 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
110 | is pending. Clear the x87 state here by setting it to fixed | ||
111 | values. The kernel data segment can be sometimes 0 and sometimes | ||
112 | new user value. Both should be ok. | ||
113 | Use the PDA as safe address because it should be already in L1. */ | ||
114 | static inline void fpu_clear(struct fpu *fpu) | ||
115 | { | ||
116 | struct xsave_struct *xstate = &fpu->state->xsave; | ||
117 | struct i387_fxsave_struct *fx = &fpu->state->fxsave; | ||
118 | |||
119 | /* | ||
120 | * xsave header may indicate the init state of the FP. | ||
121 | */ | ||
122 | if (use_xsave() && | ||
123 | !(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
124 | return; | ||
125 | |||
126 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
127 | asm volatile("fnclex"); | ||
128 | alternative_input(ASM_NOP8 ASM_NOP2, | ||
129 | " emms\n" /* clear stack tags */ | ||
130 | " fildl %%gs:0", /* load to clear state */ | ||
131 | X86_FEATURE_FXSAVE_LEAK); | ||
132 | } | ||
133 | |||
134 | static inline void clear_fpu_state(struct task_struct *tsk) | ||
135 | { | ||
136 | fpu_clear(&tsk->thread.fpu); | ||
137 | } | ||
138 | |||
139 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) | 108 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) |
140 | { | 109 | { |
141 | int err; | 110 | int err; |
@@ -149,6 +118,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) | |||
149 | if (unlikely(err)) | 118 | if (unlikely(err)) |
150 | return -EFAULT; | 119 | return -EFAULT; |
151 | 120 | ||
121 | /* See comment in fxsave() below. */ | ||
152 | asm volatile("1: rex64/fxsave (%[fx])\n\t" | 122 | asm volatile("1: rex64/fxsave (%[fx])\n\t" |
153 | "2:\n" | 123 | "2:\n" |
154 | ".section .fixup,\"ax\"\n" | 124 | ".section .fixup,\"ax\"\n" |
@@ -157,11 +127,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) | |||
157 | ".previous\n" | 127 | ".previous\n" |
158 | _ASM_EXTABLE(1b, 3b) | 128 | _ASM_EXTABLE(1b, 3b) |
159 | : [err] "=r" (err), "=m" (*fx) | 129 | : [err] "=r" (err), "=m" (*fx) |
160 | #if 0 /* See comment in fxsave() below. */ | 130 | : [fx] "R" (fx), "0" (0)); |
161 | : [fx] "r" (fx), "0" (0)); | ||
162 | #else | ||
163 | : [fx] "cdaSDb" (fx), "0" (0)); | ||
164 | #endif | ||
165 | if (unlikely(err) && | 131 | if (unlikely(err) && |
166 | __clear_user(fx, sizeof(struct i387_fxsave_struct))) | 132 | __clear_user(fx, sizeof(struct i387_fxsave_struct))) |
167 | err = -EFAULT; | 133 | err = -EFAULT; |
@@ -175,56 +141,29 @@ static inline void fpu_fxsave(struct fpu *fpu) | |||
175 | uses any extended registers for addressing, a second REX prefix | 141 | uses any extended registers for addressing, a second REX prefix |
176 | will be generated (to the assembler, rex64 followed by semicolon | 142 | will be generated (to the assembler, rex64 followed by semicolon |
177 | is a separate instruction), and hence the 64-bitness is lost. */ | 143 | is a separate instruction), and hence the 64-bitness is lost. */ |
178 | #if 0 | 144 | |
145 | #ifdef CONFIG_AS_FXSAVEQ | ||
179 | /* Using "fxsaveq %0" would be the ideal choice, but is only supported | 146 | /* Using "fxsaveq %0" would be the ideal choice, but is only supported |
180 | starting with gas 2.16. */ | 147 | starting with gas 2.16. */ |
181 | __asm__ __volatile__("fxsaveq %0" | 148 | __asm__ __volatile__("fxsaveq %0" |
182 | : "=m" (fpu->state->fxsave)); | 149 | : "=m" (fpu->state->fxsave)); |
183 | #elif 0 | 150 | #else |
184 | /* Using, as a workaround, the properly prefixed form below isn't | 151 | /* Using, as a workaround, the properly prefixed form below isn't |
185 | accepted by any binutils version so far released, complaining that | 152 | accepted by any binutils version so far released, complaining that |
186 | the same type of prefix is used twice if an extended register is | 153 | the same type of prefix is used twice if an extended register is |
187 | needed for addressing (fix submitted to mainline 2005-11-21). */ | 154 | needed for addressing (fix submitted to mainline 2005-11-21). |
188 | __asm__ __volatile__("rex64/fxsave %0" | 155 | asm volatile("rex64/fxsave %0" |
189 | : "=m" (fpu->state->fxsave)); | 156 | : "=m" (fpu->state->fxsave)); |
190 | #else | 157 | This, however, we can work around by forcing the compiler to select |
191 | /* This, however, we can work around by forcing the compiler to select | ||
192 | an addressing mode that doesn't require extended registers. */ | 158 | an addressing mode that doesn't require extended registers. */ |
193 | __asm__ __volatile__("rex64/fxsave (%1)" | 159 | asm volatile("rex64/fxsave (%[fx])" |
194 | : "=m" (fpu->state->fxsave) | 160 | : "=m" (fpu->state->fxsave) |
195 | : "cdaSDb" (&fpu->state->fxsave)); | 161 | : [fx] "R" (&fpu->state->fxsave)); |
196 | #endif | 162 | #endif |
197 | } | 163 | } |
198 | 164 | ||
199 | static inline void fpu_save_init(struct fpu *fpu) | ||
200 | { | ||
201 | if (use_xsave()) | ||
202 | fpu_xsave(fpu); | ||
203 | else | ||
204 | fpu_fxsave(fpu); | ||
205 | |||
206 | fpu_clear(fpu); | ||
207 | } | ||
208 | |||
209 | static inline void __save_init_fpu(struct task_struct *tsk) | ||
210 | { | ||
211 | fpu_save_init(&tsk->thread.fpu); | ||
212 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
213 | } | ||
214 | |||
215 | #else /* CONFIG_X86_32 */ | 165 | #else /* CONFIG_X86_32 */ |
216 | 166 | ||
217 | #ifdef CONFIG_MATH_EMULATION | ||
218 | extern void finit_soft_fpu(struct i387_soft_struct *soft); | ||
219 | #else | ||
220 | static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} | ||
221 | #endif | ||
222 | |||
223 | static inline void tolerant_fwait(void) | ||
224 | { | ||
225 | asm volatile("fnclex ; fwait"); | ||
226 | } | ||
227 | |||
228 | /* perform fxrstor iff the processor has extended states, otherwise frstor */ | 167 | /* perform fxrstor iff the processor has extended states, otherwise frstor */ |
229 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | 168 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) |
230 | { | 169 | { |
@@ -241,6 +180,14 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | |||
241 | return 0; | 180 | return 0; |
242 | } | 181 | } |
243 | 182 | ||
183 | static inline void fpu_fxsave(struct fpu *fpu) | ||
184 | { | ||
185 | asm volatile("fxsave %[fx]" | ||
186 | : [fx] "=m" (fpu->state->fxsave)); | ||
187 | } | ||
188 | |||
189 | #endif /* CONFIG_X86_64 */ | ||
190 | |||
244 | /* We need a safe address that is cheap to find and that is already | 191 | /* We need a safe address that is cheap to find and that is already |
245 | in L1 during context switch. The best choices are unfortunately | 192 | in L1 during context switch. The best choices are unfortunately |
246 | different for UP and SMP */ | 193 | different for UP and SMP */ |
@@ -256,47 +203,33 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | |||
256 | static inline void fpu_save_init(struct fpu *fpu) | 203 | static inline void fpu_save_init(struct fpu *fpu) |
257 | { | 204 | { |
258 | if (use_xsave()) { | 205 | if (use_xsave()) { |
259 | struct xsave_struct *xstate = &fpu->state->xsave; | ||
260 | struct i387_fxsave_struct *fx = &fpu->state->fxsave; | ||
261 | |||
262 | fpu_xsave(fpu); | 206 | fpu_xsave(fpu); |
263 | 207 | ||
264 | /* | 208 | /* |
265 | * xsave header may indicate the init state of the FP. | 209 | * xsave header may indicate the init state of the FP. |
266 | */ | 210 | */ |
267 | if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | 211 | if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP)) |
268 | goto end; | 212 | return; |
269 | 213 | } else if (use_fxsr()) { | |
270 | if (unlikely(fx->swd & X87_FSW_ES)) | 214 | fpu_fxsave(fpu); |
271 | asm volatile("fnclex"); | 215 | } else { |
272 | 216 | asm volatile("fsave %[fx]; fwait" | |
273 | /* | 217 | : [fx] "=m" (fpu->state->fsave)); |
274 | * we can do a simple return here or be paranoid :) | 218 | return; |
275 | */ | ||
276 | goto clear_state; | ||
277 | } | 219 | } |
278 | 220 | ||
279 | /* Use more nops than strictly needed in case the compiler | 221 | if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) |
280 | varies code */ | 222 | asm volatile("fnclex"); |
281 | alternative_input( | 223 | |
282 | "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, | ||
283 | "fxsave %[fx]\n" | ||
284 | "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", | ||
285 | X86_FEATURE_FXSR, | ||
286 | [fx] "m" (fpu->state->fxsave), | ||
287 | [fsw] "m" (fpu->state->fxsave.swd) : "memory"); | ||
288 | clear_state: | ||
289 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 224 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
290 | is pending. Clear the x87 state here by setting it to fixed | 225 | is pending. Clear the x87 state here by setting it to fixed |
291 | values. safe_address is a random variable that should be in L1 */ | 226 | values. safe_address is a random variable that should be in L1 */ |
292 | alternative_input( | 227 | alternative_input( |
293 | GENERIC_NOP8 GENERIC_NOP2, | 228 | ASM_NOP8 ASM_NOP2, |
294 | "emms\n\t" /* clear stack tags */ | 229 | "emms\n\t" /* clear stack tags */ |
295 | "fildl %[addr]", /* set F?P to defined value */ | 230 | "fildl %P[addr]", /* set F?P to defined value */ |
296 | X86_FEATURE_FXSAVE_LEAK, | 231 | X86_FEATURE_FXSAVE_LEAK, |
297 | [addr] "m" (safe_address)); | 232 | [addr] "m" (safe_address)); |
298 | end: | ||
299 | ; | ||
300 | } | 233 | } |
301 | 234 | ||
302 | static inline void __save_init_fpu(struct task_struct *tsk) | 235 | static inline void __save_init_fpu(struct task_struct *tsk) |
@@ -305,9 +238,6 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
305 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 238 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
306 | } | 239 | } |
307 | 240 | ||
308 | |||
309 | #endif /* CONFIG_X86_64 */ | ||
310 | |||
311 | static inline int fpu_fxrstor_checking(struct fpu *fpu) | 241 | static inline int fpu_fxrstor_checking(struct fpu *fpu) |
312 | { | 242 | { |
313 | return fxrstor_checking(&fpu->state->fxsave); | 243 | return fxrstor_checking(&fpu->state->fxsave); |
@@ -344,7 +274,10 @@ static inline void __unlazy_fpu(struct task_struct *tsk) | |||
344 | static inline void __clear_fpu(struct task_struct *tsk) | 274 | static inline void __clear_fpu(struct task_struct *tsk) |
345 | { | 275 | { |
346 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 276 | if (task_thread_info(tsk)->status & TS_USEDFPU) { |
347 | tolerant_fwait(); | 277 | /* Ignore delayed exceptions from user space */ |
278 | asm volatile("1: fwait\n" | ||
279 | "2:\n" | ||
280 | _ASM_EXTABLE(1b, 2b)); | ||
348 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 281 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
349 | stts(); | 282 | stts(); |
350 | } | 283 | } |
@@ -405,19 +338,6 @@ static inline void irq_ts_restore(int TS_state) | |||
405 | stts(); | 338 | stts(); |
406 | } | 339 | } |
407 | 340 | ||
408 | #ifdef CONFIG_X86_64 | ||
409 | |||
410 | static inline void save_init_fpu(struct task_struct *tsk) | ||
411 | { | ||
412 | __save_init_fpu(tsk); | ||
413 | stts(); | ||
414 | } | ||
415 | |||
416 | #define unlazy_fpu __unlazy_fpu | ||
417 | #define clear_fpu __clear_fpu | ||
418 | |||
419 | #else /* CONFIG_X86_32 */ | ||
420 | |||
421 | /* | 341 | /* |
422 | * These disable preemption on their own and are safe | 342 | * These disable preemption on their own and are safe |
423 | */ | 343 | */ |
@@ -443,8 +363,6 @@ static inline void clear_fpu(struct task_struct *tsk) | |||
443 | preempt_enable(); | 363 | preempt_enable(); |
444 | } | 364 | } |
445 | 365 | ||
446 | #endif /* CONFIG_X86_64 */ | ||
447 | |||
448 | /* | 366 | /* |
449 | * i387 state interaction | 367 | * i387 state interaction |
450 | */ | 368 | */ |
@@ -508,7 +426,4 @@ extern void fpu_finit(struct fpu *fpu); | |||
508 | 426 | ||
509 | #endif /* __ASSEMBLY__ */ | 427 | #endif /* __ASSEMBLY__ */ |
510 | 428 | ||
511 | #define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5 | ||
512 | #define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5 | ||
513 | |||
514 | #endif /* _ASM_X86_I387_H */ | 429 | #endif /* _ASM_X86_I387_H */ |
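Worth restating outside the diff noise: the workaround that fpu_save_init() keeps applying exists because AMD K7/K8 parts only save and restore FDP/FIP/FOP when an unmasked x87 exception is pending, so stale instruction and data pointers can leak between tasks. Stripped of the alternative_input() runtime patching (an intentional simplification on my part):

/* Simplified restatement of the FXSAVE leak workaround; the real code
 * patches this in via alternative_input() only on affected CPUs.
 */
#define X87_FSW_ES (1 << 7)                      /* exception summary */

struct fx_state { unsigned short swd; /* 512-byte area elided */ };

static void clear_leaky_fpu_state(struct fx_state *fx,
				  const unsigned long *safe_address)
{
	if (fx->swd & X87_FSW_ES)
		asm volatile("fnclex");          /* clear pending exceptions */

	asm volatile("emms\n\t"                  /* reset FP stack tags */
		     "fildl %0"                  /* dummy load rewrites FIP/FDP */
		     : : "m" (*safe_address));
}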
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 1655147646aa..a20365953bf8 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h | |||
@@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip; | |||
55 | struct legacy_pic { | 55 | struct legacy_pic { |
56 | int nr_legacy_irqs; | 56 | int nr_legacy_irqs; |
57 | struct irq_chip *chip; | 57 | struct irq_chip *chip; |
58 | void (*mask)(unsigned int irq); | ||
59 | void (*unmask)(unsigned int irq); | ||
58 | void (*mask_all)(void); | 60 | void (*mask_all)(void); |
59 | void (*restore_mask)(void); | 61 | void (*restore_mask)(void); |
60 | void (*init)(int auto_eoi); | 62 | void (*init)(int auto_eoi); |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 30a3e9776123..6a45ec41ec26 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) | |||
206 | 206 | ||
207 | extern void iounmap(volatile void __iomem *addr); | 207 | extern void iounmap(volatile void __iomem *addr); |
208 | 208 | ||
209 | extern void set_iounmap_nonlazy(void); | ||
209 | 210 | ||
210 | #ifdef __KERNEL__ | 211 | #ifdef __KERNEL__ |
211 | 212 | ||
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 9cb2edb87c2f..c8be4566c3d2 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); | |||
170 | 170 | ||
171 | extern void probe_nr_irqs_gsi(void); | 171 | extern void probe_nr_irqs_gsi(void); |
172 | 172 | ||
173 | extern int setup_ioapic_entry(int apic, int irq, | ||
174 | struct IO_APIC_route_entry *entry, | ||
175 | unsigned int destination, int trigger, | ||
176 | int polarity, int vector, int pin); | ||
177 | extern void ioapic_write_entry(int apic, int pin, | ||
178 | struct IO_APIC_route_entry e); | ||
179 | extern void setup_ioapic_ids_from_mpc(void); | 173 | extern void setup_ioapic_ids_from_mpc(void); |
180 | 174 | ||
181 | struct mp_ioapic_gsi{ | 175 | struct mp_ioapic_gsi{ |
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index f275e2244505..1c23360fb2d8 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h | |||
@@ -3,4 +3,39 @@ | |||
3 | 3 | ||
4 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) | 4 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) |
5 | 5 | ||
6 | #ifdef CONFIG_INTR_REMAP | ||
7 | static inline void prepare_irte(struct irte *irte, int vector, | ||
8 | unsigned int dest) | ||
9 | { | ||
10 | memset(irte, 0, sizeof(*irte)); | ||
11 | |||
12 | irte->present = 1; | ||
13 | irte->dst_mode = apic->irq_dest_mode; | ||
14 | /* | ||
15 | * Trigger mode in the IRTE will always be edge, and for IO-APIC, the | ||
16 | * actual level or edge trigger will be set up in the IO-APIC | ||
17 | * RTE. This will help simplify level triggered irq migration. | ||
18 | * For more details, see the comments (in io_apic.c) explaining IO-APIC | ||
19 | * irq migration in the presence of interrupt-remapping. | ||
20 | */ | ||
21 | irte->trigger_mode = 0; | ||
22 | irte->dlvry_mode = apic->irq_delivery_mode; | ||
23 | irte->vector = vector; | ||
24 | irte->dest_id = IRTE_DEST(dest); | ||
25 | irte->redir_hint = 1; | ||
26 | } | ||
27 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
28 | { | ||
29 | return cfg->irq_2_iommu.iommu != NULL; | ||
30 | } | ||
31 | #else | ||
32 | static inline void prepare_irte(struct irte *irte, int vector, unsigned int dest) | ||
33 | { | ||
34 | } | ||
35 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
36 | { | ||
37 | return false; | ||
38 | } | ||
39 | #endif | ||
40 | |||
6 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ | 41 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ |
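prepare_irte() deliberately encodes edge trigger in the IRTE and leaves the real trigger mode to the IO-APIC RTE. A sketch of a caller under CONFIG_INTR_REMAP; the allocation of the entry is elided, and modify_irte() is the existing interrupt-remapping entry point (declared in linux/dmar.h), so treat the surrounding shape as an assumption:

	/* Illustrative only: program a remapped vector for an irq whose
	 * irte has already been allocated (irq_2_iommu set up elsewhere). */
	static int example_program_irte(int irq, struct irq_cfg *cfg,
					int vector, unsigned int dest_apicid)
	{
		struct irte irte;

		if (!irq_remapped(cfg))
			return -EINVAL;	/* not behind a remapping IOMMU */

		prepare_irte(&irte, vector, dest_apicid);
		return modify_irte(irq, &irte);	/* push entry to the IOMMU */
	}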
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index e2ca30092557..6af0894dafb4 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -114,9 +114,9 @@ | |||
114 | #define X86_PLATFORM_IPI_VECTOR 0xed | 114 | #define X86_PLATFORM_IPI_VECTOR 0xed |
115 | 115 | ||
116 | /* | 116 | /* |
117 | * Performance monitoring pending work vector: | 117 | * IRQ work vector: |
118 | */ | 118 | */ |
119 | #define LOCAL_PENDING_VECTOR 0xec | 119 | #define IRQ_WORK_VECTOR 0xec |
120 | 120 | ||
121 | #define UV_BAU_MESSAGE 0xea | 121 | #define UV_BAU_MESSAGE 0xea |
122 | 122 | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 502e53f999cf..c52e2eb40a1e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | |||
652 | return (struct kvm_mmu_page *)page_private(page); | 652 | return (struct kvm_mmu_page *)page_private(page); |
653 | } | 653 | } |
654 | 654 | ||
655 | static inline u16 kvm_read_fs(void) | ||
656 | { | ||
657 | u16 seg; | ||
658 | asm("mov %%fs, %0" : "=g"(seg)); | ||
659 | return seg; | ||
660 | } | ||
661 | |||
662 | static inline u16 kvm_read_gs(void) | ||
663 | { | ||
664 | u16 seg; | ||
665 | asm("mov %%gs, %0" : "=g"(seg)); | ||
666 | return seg; | ||
667 | } | ||
668 | |||
669 | static inline u16 kvm_read_ldt(void) | 655 | static inline u16 kvm_read_ldt(void) |
670 | { | 656 | { |
671 | u16 ldt; | 657 | u16 ldt; |
@@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void) | |||
673 | return ldt; | 659 | return ldt; |
674 | } | 660 | } |
675 | 661 | ||
676 | static inline void kvm_load_fs(u16 sel) | ||
677 | { | ||
678 | asm("mov %0, %%fs" : : "rm"(sel)); | ||
679 | } | ||
680 | |||
681 | static inline void kvm_load_gs(u16 sel) | ||
682 | { | ||
683 | asm("mov %0, %%gs" : : "rm"(sel)); | ||
684 | } | ||
685 | |||
686 | static inline void kvm_load_ldt(u16 sel) | 662 | static inline void kvm_load_ldt(u16 sel) |
687 | { | 663 | { |
688 | asm("lldt %0" : : "rm"(sel)); | 664 | asm("lldt %0" : : "rm"(sel)); |
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index 16350740edf6..4a711a684b17 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h | |||
@@ -10,6 +10,9 @@ | |||
10 | */ | 10 | */ |
11 | #ifndef _ASM_X86_MRST_H | 11 | #ifndef _ASM_X86_MRST_H |
12 | #define _ASM_X86_MRST_H | 12 | #define _ASM_X86_MRST_H |
13 | |||
14 | #include <linux/sfi.h> | ||
15 | |||
13 | extern int pci_mrst_init(void); | 16 | extern int pci_mrst_init(void); |
14 | int __init sfi_parse_mrtc(struct sfi_table_header *table); | 17 | int __init sfi_parse_mrtc(struct sfi_table_header *table); |
15 | 18 | ||
@@ -26,7 +29,7 @@ enum mrst_cpu_type { | |||
26 | }; | 29 | }; |
27 | 30 | ||
28 | extern enum mrst_cpu_type __mrst_cpu_chip; | 31 | extern enum mrst_cpu_type __mrst_cpu_chip; |
29 | static enum mrst_cpu_type mrst_identify_cpu(void) | 32 | static inline enum mrst_cpu_type mrst_identify_cpu(void) |
30 | { | 33 | { |
31 | return __mrst_cpu_chip; | 34 | return __mrst_cpu_chip; |
32 | } | 35 | } |
@@ -42,4 +45,9 @@ extern enum mrst_timer_options mrst_timer_options; | |||
42 | #define SFI_MTMR_MAX_NUM 8 | 45 | #define SFI_MTMR_MAX_NUM 8 |
43 | #define SFI_MRTC_MAX 8 | 46 | #define SFI_MRTC_MAX 8 |
44 | 47 | ||
48 | extern struct console early_mrst_console; | ||
49 | extern void mrst_early_console_init(void); | ||
50 | |||
51 | extern struct console early_hsu_console; | ||
52 | extern void hsu_early_console_init(void); | ||
45 | #endif /* _ASM_X86_MRST_H */ | 53 | #endif /* _ASM_X86_MRST_H */ |
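The mrst_identify_cpu() change from static to static inline matters because the function lives in a header: a plain static definition gives every includer its own copy and a -Wunused-function warning wherever it goes unused, while a static inline can be discarded silently. A contrived illustration of the difference:

	/* In some header included by many .c files: */
	static int bad_helper(void)         { return 1; } /* warns when unused  */
	static inline int good_helper(void) { return 1; } /* silently discarded */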
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h new file mode 100644 index 000000000000..bcdff997668c --- /dev/null +++ b/arch/x86/include/asm/mwait.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _ASM_X86_MWAIT_H | ||
2 | #define _ASM_X86_MWAIT_H | ||
3 | |||
4 | #define MWAIT_SUBSTATE_MASK 0xf | ||
5 | #define MWAIT_CSTATE_MASK 0xf | ||
6 | #define MWAIT_SUBSTATE_SIZE 4 | ||
7 | #define MWAIT_MAX_NUM_CSTATES 8 | ||
8 | |||
9 | #define CPUID_MWAIT_LEAF 5 | ||
10 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 | ||
11 | #define CPUID5_ECX_INTERRUPT_BREAK 0x2 | ||
12 | |||
13 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 | ||
14 | |||
15 | #endif /* _ASM_X86_MWAIT_H */ | ||
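The new header gathers the MWAIT CPUID constants previously duplicated in acpi/cstate.c. A sketch of the decode they support, following the pattern used by acpi_processor_ffh_cstate_probe_cpu(); the helper name is illustrative:

	#include <asm/mwait.h>
	#include <asm/processor.h>

	/* Hypothetical helper: MWAIT sub-state count for C-state 'cstate'. */
	static unsigned int example_mwait_substates(unsigned int cstate)
	{
		unsigned int eax, ebx, ecx, edx;

		if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
			return 0;

		cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

		/* edx packs 4 bits of sub-state count per C-state */
		return (edx >> (cstate * MWAIT_SUBSTATE_SIZE)) & MWAIT_SUBSTATE_MASK;
	}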
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index 08fde475cb3b..2a8478140bb3 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h | |||
@@ -21,10 +21,14 @@ extern void olpc_ofw_detect(void); | |||
21 | /* install OFW's pde permanently into the kernel's pgtable */ | 21 | /* install OFW's pde permanently into the kernel's pgtable */ |
22 | extern void setup_olpc_ofw_pgd(void); | 22 | extern void setup_olpc_ofw_pgd(void); |
23 | 23 | ||
24 | /* check if OFW was detected during boot */ | ||
25 | extern bool olpc_ofw_present(void); | ||
26 | |||
24 | #else /* !CONFIG_OLPC_OPENFIRMWARE */ | 27 | #else /* !CONFIG_OLPC_OPENFIRMWARE */ |
25 | 28 | ||
26 | static inline void olpc_ofw_detect(void) { } | 29 | static inline void olpc_ofw_detect(void) { } |
27 | static inline void setup_olpc_ofw_pgd(void) { } | 30 | static inline void setup_olpc_ofw_pgd(void) { } |
31 | static inline bool olpc_ofw_present(void) { return false; } | ||
28 | 32 | ||
29 | #endif /* !CONFIG_OLPC_OPENFIRMWARE */ | 33 | #endif /* !CONFIG_OLPC_OPENFIRMWARE */ |
30 | 34 | ||
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index a667f24c7254..1df66211fd1b 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 8 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
9 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 9 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
10 | 10 | ||
11 | #define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) | 11 | #define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) |
12 | #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) | 12 | #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) |
13 | 13 | ||
14 | /* Cast PAGE_MASK to a signed type so that it is sign-extended if | 14 | /* Cast PAGE_MASK to a signed type so that it is sign-extended if |
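The __PHYSICAL_MASK change is a parenthesization fix: a cast binds tighter than binary '-', so the old form truncated the 64-bit shift result to phys_addr_t before subtracting, while the new form computes the whole mask in 64 bits and narrows once. Sketched with a hypothetical 32-bit phys_addr_t and a shift of 32:

	/* old: ((u32)(1ULL << 32)) - 1  -> truncates to 0, then wraps to ~0u
	 * new: (u32)((1ULL << 32) - 1) -> 0xffffffff computed in 64 bits
	 * Both happen to agree in this case, but the new form keeps the
	 * subtraction in the wide type, so the result never depends on
	 * unsigned wraparound in the narrow one. */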
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5653f43d90e5..edecb4ed2210 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -416,11 +416,6 @@ static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) | |||
416 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); | 416 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); |
417 | } | 417 | } |
418 | 418 | ||
419 | static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn, | ||
420 | unsigned long start, unsigned long count) | ||
421 | { | ||
422 | PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); | ||
423 | } | ||
424 | static inline void paravirt_release_pmd(unsigned long pfn) | 419 | static inline void paravirt_release_pmd(unsigned long pfn) |
425 | { | 420 | { |
426 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); | 421 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index db9ef5532341..b82bac975250 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -255,7 +255,6 @@ struct pv_mmu_ops { | |||
255 | */ | 255 | */ |
256 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); | 256 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); |
257 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); | 257 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); |
258 | void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); | ||
259 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); | 258 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); |
260 | void (*release_pte)(unsigned long pfn); | 259 | void (*release_pte)(unsigned long pfn); |
261 | void (*release_pmd)(unsigned long pfn); | 260 | void (*release_pmd)(unsigned long pfn); |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a34c785c5a63..ada823a13c7c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -28,6 +28,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
28 | extern spinlock_t pgd_lock; | 28 | extern spinlock_t pgd_lock; |
29 | extern struct list_head pgd_list; | 29 | extern struct list_head pgd_list; |
30 | 30 | ||
31 | extern struct mm_struct *pgd_page_get_mm(struct page *page); | ||
32 | |||
31 | #ifdef CONFIG_PARAVIRT | 33 | #ifdef CONFIG_PARAVIRT |
32 | #include <asm/paravirt.h> | 34 | #include <asm/paravirt.h> |
33 | #else /* !CONFIG_PARAVIRT */ | 35 | #else /* !CONFIG_PARAVIRT */ |
@@ -603,6 +605,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, | |||
603 | pte_update(mm, addr, ptep); | 605 | pte_update(mm, addr, ptep); |
604 | } | 606 | } |
605 | 607 | ||
608 | #define flush_tlb_fix_spurious_fault(vma, address) | ||
609 | |||
606 | /* | 610 | /* |
607 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | 611 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); |
608 | * | 612 | * |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 076052cd62be..f96ac9bedf75 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd) | |||
102 | native_set_pgd(pgd, native_make_pgd(0)); | 102 | native_set_pgd(pgd, native_make_pgd(0)); |
103 | } | 103 | } |
104 | 104 | ||
105 | extern void sync_global_pgds(unsigned long start, unsigned long end); | ||
106 | |||
105 | /* | 107 | /* |
106 | * Conversion functions: convert a page and protection to a page entry, | 108 | * Conversion functions: convert a page and protection to a page entry, |
107 | * and a page entry and page directory to the page they refer to. | 109 | * and a page entry and page directory to the page they refer to. |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 325b7bdbebaa..cae9c3cb95cf 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -110,6 +110,8 @@ struct cpuinfo_x86 { | |||
110 | u16 phys_proc_id; | 110 | u16 phys_proc_id; |
111 | /* Core id: */ | 111 | /* Core id: */ |
112 | u16 cpu_core_id; | 112 | u16 cpu_core_id; |
113 | /* Compute unit id */ | ||
114 | u8 compute_unit_id; | ||
113 | /* Index into per_cpu list: */ | 115 | /* Index into per_cpu list: */ |
114 | u16 cpu_index; | 116 | u16 cpu_index; |
115 | #endif | 117 | #endif |
@@ -602,7 +604,7 @@ extern unsigned long mmu_cr4_features; | |||
602 | 604 | ||
603 | static inline void set_in_cr4(unsigned long mask) | 605 | static inline void set_in_cr4(unsigned long mask) |
604 | { | 606 | { |
605 | unsigned cr4; | 607 | unsigned long cr4; |
606 | 608 | ||
607 | mmu_cr4_features |= mask; | 609 | mmu_cr4_features |= mask; |
608 | cr4 = read_cr4(); | 610 | cr4 = read_cr4(); |
@@ -612,7 +614,7 @@ static inline void set_in_cr4(unsigned long mask) | |||
612 | 614 | ||
613 | static inline void clear_in_cr4(unsigned long mask) | 615 | static inline void clear_in_cr4(unsigned long mask) |
614 | { | 616 | { |
615 | unsigned cr4; | 617 | unsigned long cr4; |
616 | 618 | ||
617 | mmu_cr4_features &= ~mask; | 619 | mmu_cr4_features &= ~mask; |
618 | cr4 = read_cr4(); | 620 | cr4 = read_cr4(); |
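Both temporaries widen from unsigned to unsigned long. On x86-64 CR4 is a 64-bit register, so a 32-bit temporary would zero bits 32..63 on the write-back; no such bit was architecturally assigned at the time, which makes this a defensive-correctness fix. A sketch of the corrected pattern:

	/* Why the temporary must be 'unsigned long' on x86-64. */
	static inline void example_or_into_cr4(unsigned long mask)
	{
		unsigned long cr4 = read_cr4();	/* full-width read */

		cr4 |= mask;
		write_cr4(cr4);			/* no high-bit truncation */
	}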
@@ -764,29 +766,6 @@ extern unsigned long idle_halt; | |||
764 | extern unsigned long idle_nomwait; | 766 | extern unsigned long idle_nomwait; |
765 | extern bool c1e_detected; | 767 | extern bool c1e_detected; |
766 | 768 | ||
767 | /* | ||
768 | * on systems with caches, caches must be flushed as the absolute | ||
769 | * last instruction before going into a suspended halt. Otherwise, | ||
770 | * dirty data can linger in the cache and become stale on resume, | ||
771 | * leading to strange errors. | ||
772 | * | ||
773 | * perform a variety of operations to guarantee that the compiler | ||
774 | * will not reorder instructions. wbinvd itself is serializing | ||
775 | * so the processor will not reorder. | ||
776 | * | ||
777 | * Systems without cache can just go into halt. | ||
778 | */ | ||
779 | static inline void wbinvd_halt(void) | ||
780 | { | ||
781 | mb(); | ||
782 | /* check for clflush to determine if wbinvd is legal */ | ||
783 | if (cpu_has_clflush) | ||
784 | asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); | ||
785 | else | ||
786 | while (1) | ||
787 | halt(); | ||
788 | } | ||
789 | |||
790 | extern void enable_sep_cpu(void); | 769 | extern void enable_sep_cpu(void); |
791 | extern int sysenter_setup(void); | 770 | extern int sysenter_setup(void); |
792 | 771 | ||
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ef292c792d74..d6763b139a84 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -93,6 +93,11 @@ void *extend_brk(size_t size, size_t align); | |||
93 | : : "i" (sz)); \ | 93 | : : "i" (sz)); \ |
94 | } | 94 | } |
95 | 95 | ||
96 | /* Helper for reserving space for arrays of things */ | ||
97 | #define RESERVE_BRK_ARRAY(type, name, entries) \ | ||
98 | type *name; \ | ||
99 | RESERVE_BRK(name, sizeof(type) * entries) | ||
100 | |||
96 | #ifdef __i386__ | 101 | #ifdef __i386__ |
97 | 102 | ||
98 | void __init i386_start_kernel(void); | 103 | void __init i386_start_kernel(void); |
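RESERVE_BRK_ARRAY pairs a pointer declaration with a matching brk reservation; the pointer itself must still be aimed at the reservation via extend_brk() during early boot. A hedged usage sketch with illustrative names:

	/* Illustrative: reserve early-boot space for 256 entries. */
	struct example_entry { unsigned long val; };

	RESERVE_BRK_ARRAY(struct example_entry, example_table, 256);

	static void __init example_early_init(void)
	{
		example_table = extend_brk(sizeof(*example_table) * 256,
					   sizeof(*example_table));
	}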
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h deleted file mode 100644 index 61e08c0a2907..000000000000 --- a/arch/x86/include/asm/vmi.h +++ /dev/null | |||
@@ -1,269 +0,0 @@ | |||
1 | /* | ||
2 | * VMI interface definition | ||
3 | * | ||
4 | * Copyright (C) 2005, VMware, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | * Maintained by: Zachary Amsden zach@vmware.com | ||
22 | * | ||
23 | */ | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | /* | ||
27 | *--------------------------------------------------------------------- | ||
28 | * | ||
29 | * VMI Option ROM API | ||
30 | * | ||
31 | *--------------------------------------------------------------------- | ||
32 | */ | ||
33 | #define VMI_SIGNATURE 0x696d5663 /* "cVmi" */ | ||
34 | |||
35 | #define PCI_VENDOR_ID_VMWARE 0x15AD | ||
36 | #define PCI_DEVICE_ID_VMWARE_VMI 0x0801 | ||
37 | |||
38 | /* | ||
39 | * We use two version numbers for compatibility, with the major | ||
40 | * number signifying interface breakages, and the minor number | ||
41 | * interface extensions. | ||
42 | */ | ||
43 | #define VMI_API_REV_MAJOR 3 | ||
44 | #define VMI_API_REV_MINOR 0 | ||
45 | |||
46 | #define VMI_CALL_CPUID 0 | ||
47 | #define VMI_CALL_WRMSR 1 | ||
48 | #define VMI_CALL_RDMSR 2 | ||
49 | #define VMI_CALL_SetGDT 3 | ||
50 | #define VMI_CALL_SetLDT 4 | ||
51 | #define VMI_CALL_SetIDT 5 | ||
52 | #define VMI_CALL_SetTR 6 | ||
53 | #define VMI_CALL_GetGDT 7 | ||
54 | #define VMI_CALL_GetLDT 8 | ||
55 | #define VMI_CALL_GetIDT 9 | ||
56 | #define VMI_CALL_GetTR 10 | ||
57 | #define VMI_CALL_WriteGDTEntry 11 | ||
58 | #define VMI_CALL_WriteLDTEntry 12 | ||
59 | #define VMI_CALL_WriteIDTEntry 13 | ||
60 | #define VMI_CALL_UpdateKernelStack 14 | ||
61 | #define VMI_CALL_SetCR0 15 | ||
62 | #define VMI_CALL_SetCR2 16 | ||
63 | #define VMI_CALL_SetCR3 17 | ||
64 | #define VMI_CALL_SetCR4 18 | ||
65 | #define VMI_CALL_GetCR0 19 | ||
66 | #define VMI_CALL_GetCR2 20 | ||
67 | #define VMI_CALL_GetCR3 21 | ||
68 | #define VMI_CALL_GetCR4 22 | ||
69 | #define VMI_CALL_WBINVD 23 | ||
70 | #define VMI_CALL_SetDR 24 | ||
71 | #define VMI_CALL_GetDR 25 | ||
72 | #define VMI_CALL_RDPMC 26 | ||
73 | #define VMI_CALL_RDTSC 27 | ||
74 | #define VMI_CALL_CLTS 28 | ||
75 | #define VMI_CALL_EnableInterrupts 29 | ||
76 | #define VMI_CALL_DisableInterrupts 30 | ||
77 | #define VMI_CALL_GetInterruptMask 31 | ||
78 | #define VMI_CALL_SetInterruptMask 32 | ||
79 | #define VMI_CALL_IRET 33 | ||
80 | #define VMI_CALL_SYSEXIT 34 | ||
81 | #define VMI_CALL_Halt 35 | ||
82 | #define VMI_CALL_Reboot 36 | ||
83 | #define VMI_CALL_Shutdown 37 | ||
84 | #define VMI_CALL_SetPxE 38 | ||
85 | #define VMI_CALL_SetPxELong 39 | ||
86 | #define VMI_CALL_UpdatePxE 40 | ||
87 | #define VMI_CALL_UpdatePxELong 41 | ||
88 | #define VMI_CALL_MachineToPhysical 42 | ||
89 | #define VMI_CALL_PhysicalToMachine 43 | ||
90 | #define VMI_CALL_AllocatePage 44 | ||
91 | #define VMI_CALL_ReleasePage 45 | ||
92 | #define VMI_CALL_InvalPage 46 | ||
93 | #define VMI_CALL_FlushTLB 47 | ||
94 | #define VMI_CALL_SetLinearMapping 48 | ||
95 | |||
96 | #define VMI_CALL_SetIOPLMask 61 | ||
97 | #define VMI_CALL_SetInitialAPState 62 | ||
98 | #define VMI_CALL_APICWrite 63 | ||
99 | #define VMI_CALL_APICRead 64 | ||
100 | #define VMI_CALL_IODelay 65 | ||
101 | #define VMI_CALL_SetLazyMode 73 | ||
102 | |||
103 | /* | ||
104 | *--------------------------------------------------------------------- | ||
105 | * | ||
106 | * MMU operation flags | ||
107 | * | ||
108 | *--------------------------------------------------------------------- | ||
109 | */ | ||
110 | |||
111 | /* Flags used by VMI_{Allocate|Release}Page call */ | ||
112 | #define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */ | ||
113 | #define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */ | ||
114 | #define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */ | ||
115 | |||
116 | |||
117 | /* Flags shared by Allocate|Release Page and PTE updates */ | ||
118 | #define VMI_PAGE_PT 0x01 | ||
119 | #define VMI_PAGE_PD 0x02 | ||
120 | #define VMI_PAGE_PDP 0x04 | ||
121 | #define VMI_PAGE_PML4 0x08 | ||
122 | |||
123 | #define VMI_PAGE_NORMAL 0x00 /* for debugging */ | ||
124 | |||
125 | /* Flags used by PTE updates */ | ||
126 | #define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */ | ||
127 | #define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */ | ||
128 | #define VMI_PAGE_VA_MASK 0xfffff000 | ||
129 | |||
130 | #ifdef CONFIG_X86_PAE | ||
131 | #define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED) | ||
132 | #define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED) | ||
133 | #else | ||
134 | #define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED) | ||
135 | #define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED) | ||
136 | #endif | ||
137 | |||
138 | /* Flags used by VMI_FlushTLB call */ | ||
139 | #define VMI_FLUSH_TLB 0x01 | ||
140 | #define VMI_FLUSH_GLOBAL 0x02 | ||
141 | |||
142 | /* | ||
143 | *--------------------------------------------------------------------- | ||
144 | * | ||
145 | * VMI relocation definitions for ROM call get_reloc | ||
146 | * | ||
147 | *--------------------------------------------------------------------- | ||
148 | */ | ||
149 | |||
150 | /* VMI Relocation types */ | ||
151 | #define VMI_RELOCATION_NONE 0 | ||
152 | #define VMI_RELOCATION_CALL_REL 1 | ||
153 | #define VMI_RELOCATION_JUMP_REL 2 | ||
154 | #define VMI_RELOCATION_NOP 3 | ||
155 | |||
156 | #ifndef __ASSEMBLY__ | ||
157 | struct vmi_relocation_info { | ||
158 | unsigned char *eip; | ||
159 | unsigned char type; | ||
160 | unsigned char reserved[3]; | ||
161 | }; | ||
162 | #endif | ||
163 | |||
164 | |||
165 | /* | ||
166 | *--------------------------------------------------------------------- | ||
167 | * | ||
168 | * Generic ROM structures and definitions | ||
169 | * | ||
170 | *--------------------------------------------------------------------- | ||
171 | */ | ||
172 | |||
173 | #ifndef __ASSEMBLY__ | ||
174 | |||
175 | struct vrom_header { | ||
176 | u16 rom_signature; /* option ROM signature */ | ||
177 | u8 rom_length; /* ROM length in 512 byte chunks */ | ||
178 | u8 rom_entry[4]; /* 16-bit code entry point */ | ||
179 | u8 rom_pad0; /* 4-byte align pad */ | ||
180 | u32 vrom_signature; /* VROM identification signature */ | ||
181 | u8 api_version_min;/* Minor version of API */ | ||
182 | u8 api_version_maj;/* Major version of API */ | ||
183 | u8 jump_slots; /* Number of jump slots */ | ||
184 | u8 reserved1; /* Reserved for expansion */ | ||
185 | u32 virtual_top; /* Hypervisor virtual address start */ | ||
186 | u16 reserved2; /* Reserved for expansion */ | ||
187 | u16 license_offs; /* Offset to License string */ | ||
188 | u16 pci_header_offs;/* Offset to PCI OPROM header */ | ||
189 | u16 pnp_header_offs;/* Offset to PnP OPROM header */ | ||
190 | u32 rom_pad3; /* PnP reserved / VMI reserved */ | ||
191 | u8 reserved[96]; /* Reserved for headers */ | ||
192 | char vmi_init[8]; /* VMI_Init jump point */ | ||
193 | char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ | ||
194 | } __attribute__((packed)); | ||
195 | |||
196 | struct pnp_header { | ||
197 | char sig[4]; | ||
198 | char rev; | ||
199 | char size; | ||
200 | short next; | ||
201 | short res; | ||
202 | long devID; | ||
203 | unsigned short manufacturer_offset; | ||
204 | unsigned short product_offset; | ||
205 | } __attribute__((packed)); | ||
206 | |||
207 | struct pci_header { | ||
208 | char sig[4]; | ||
209 | short vendorID; | ||
210 | short deviceID; | ||
211 | short vpdData; | ||
212 | short size; | ||
213 | char rev; | ||
214 | char class; | ||
215 | char subclass; | ||
216 | char interface; | ||
217 | short chunks; | ||
218 | char rom_version_min; | ||
219 | char rom_version_maj; | ||
220 | char codetype; | ||
221 | char lastRom; | ||
222 | short reserved; | ||
223 | } __attribute__((packed)); | ||
224 | |||
225 | /* Function prototypes for bootstrapping */ | ||
226 | #ifdef CONFIG_VMI | ||
227 | extern void vmi_init(void); | ||
228 | extern void vmi_activate(void); | ||
229 | extern void vmi_bringup(void); | ||
230 | #else | ||
231 | static inline void vmi_init(void) {} | ||
232 | static inline void vmi_activate(void) {} | ||
233 | static inline void vmi_bringup(void) {} | ||
234 | #endif | ||
235 | |||
236 | /* State needed to start an application processor in an SMP system. */ | ||
237 | struct vmi_ap_state { | ||
238 | u32 cr0; | ||
239 | u32 cr2; | ||
240 | u32 cr3; | ||
241 | u32 cr4; | ||
242 | |||
243 | u64 efer; | ||
244 | |||
245 | u32 eip; | ||
246 | u32 eflags; | ||
247 | u32 eax; | ||
248 | u32 ebx; | ||
249 | u32 ecx; | ||
250 | u32 edx; | ||
251 | u32 esp; | ||
252 | u32 ebp; | ||
253 | u32 esi; | ||
254 | u32 edi; | ||
255 | u16 cs; | ||
256 | u16 ss; | ||
257 | u16 ds; | ||
258 | u16 es; | ||
259 | u16 fs; | ||
260 | u16 gs; | ||
261 | u16 ldtr; | ||
262 | |||
263 | u16 gdtr_limit; | ||
264 | u32 gdtr_base; | ||
265 | u32 idtr_base; | ||
266 | u16 idtr_limit; | ||
267 | }; | ||
268 | |||
269 | #endif | ||
diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h deleted file mode 100644 index c6e0bee93e3c..000000000000 --- a/arch/x86/include/asm/vmi_time.h +++ /dev/null | |||
@@ -1,98 +0,0 @@ | |||
1 | /* | ||
2 | * VMI Time wrappers | ||
3 | * | ||
4 | * Copyright (C) 2006, VMware, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | * Send feedback to dhecht@vmware.com | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef _ASM_X86_VMI_TIME_H | ||
26 | #define _ASM_X86_VMI_TIME_H | ||
27 | |||
28 | /* | ||
29 | * Raw VMI call indices for timer functions | ||
30 | */ | ||
31 | #define VMI_CALL_GetCycleFrequency 66 | ||
32 | #define VMI_CALL_GetCycleCounter 67 | ||
33 | #define VMI_CALL_SetAlarm 68 | ||
34 | #define VMI_CALL_CancelAlarm 69 | ||
35 | #define VMI_CALL_GetWallclockTime 70 | ||
36 | #define VMI_CALL_WallclockUpdated 71 | ||
37 | |||
38 | /* Cached VMI timer operations */ | ||
39 | extern struct vmi_timer_ops { | ||
40 | u64 (*get_cycle_frequency)(void); | ||
41 | u64 (*get_cycle_counter)(int); | ||
42 | u64 (*get_wallclock)(void); | ||
43 | int (*wallclock_updated)(void); | ||
44 | void (*set_alarm)(u32 flags, u64 expiry, u64 period); | ||
45 | void (*cancel_alarm)(u32 flags); | ||
46 | } vmi_timer_ops; | ||
47 | |||
48 | /* Prototypes */ | ||
49 | extern void __init vmi_time_init(void); | ||
50 | extern unsigned long vmi_get_wallclock(void); | ||
51 | extern int vmi_set_wallclock(unsigned long now); | ||
52 | extern unsigned long long vmi_sched_clock(void); | ||
53 | extern unsigned long vmi_tsc_khz(void); | ||
54 | |||
55 | #ifdef CONFIG_X86_LOCAL_APIC | ||
56 | extern void __devinit vmi_time_bsp_init(void); | ||
57 | extern void __devinit vmi_time_ap_init(void); | ||
58 | #endif | ||
59 | |||
60 | /* | ||
61 | * When run under a hypervisor, a vcpu is always in one of three states: | ||
62 | * running, halted, or ready. The vcpu is in the 'running' state if it | ||
63 | * is executing. When the vcpu executes the halt interface, the vcpu | ||
64 | * enters the 'halted' state and remains halted until there is some work | ||
65 | * pending for the vcpu (e.g. an alarm expires, host I/O completes on | ||
66 | * behalf of virtual I/O). At this point, the vcpu enters the 'ready' | ||
67 | * state (waiting for the hypervisor to reschedule it). Finally, at any | ||
68 | * time when the vcpu is in neither the 'running' state nor the 'halted' | ||
69 | * state, it is in the 'ready' state. | ||
70 | * | ||
71 | * Real time advances while the vcpu is 'running', 'ready', or | ||
72 | * 'halted'. Stolen time is the time in which the vcpu is in the | ||
73 | * 'ready' state. Available time is the remaining time -- the vcpu is | ||
74 | * either 'running' or 'halted'. | ||
75 | * | ||
76 | * All three views of time are accessible through the VMI cycle | ||
77 | * counters. | ||
78 | */ | ||
79 | |||
80 | /* The cycle counters. */ | ||
81 | #define VMI_CYCLES_REAL 0 | ||
82 | #define VMI_CYCLES_AVAILABLE 1 | ||
83 | #define VMI_CYCLES_STOLEN 2 | ||
84 | |||
85 | /* The alarm interface 'flags' bits */ | ||
86 | #define VMI_ALARM_COUNTERS 2 | ||
87 | |||
88 | #define VMI_ALARM_COUNTER_MASK 0x000000ff | ||
89 | |||
90 | #define VMI_ALARM_WIRED_IRQ0 0x00000000 | ||
91 | #define VMI_ALARM_WIRED_LVTT 0x00010000 | ||
92 | |||
93 | #define VMI_ALARM_IS_ONESHOT 0x00000000 | ||
94 | #define VMI_ALARM_IS_PERIODIC 0x00000100 | ||
95 | |||
96 | #define CONFIG_VMI_ALARM_HZ 100 | ||
97 | |||
98 | #endif /* _ASM_X86_VMI_TIME_H */ | ||
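By the deleted header's own definitions, real time splits into available plus stolen cycles, each readable through get_cycle_counter(). For reference, a sketch of how the removed interface exposed stolen time; this is equivalent to reading the VMI_CYCLES_STOLEN counter directly:

	/* Against the now-removed VMI ops: cycles during which the
	 * hypervisor ran something else while this vcpu sat 'ready'. */
	static u64 example_stolen_cycles(void)
	{
		u64 real      = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
		u64 available = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);

		return real - available;
	}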
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9d3f485e5dd0..80a93dc99076 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -35,6 +35,7 @@ obj-y := process_$(BITS).o signal.o entry_$(BITS).o | |||
35 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | 35 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
36 | obj-y += time.o ioport.o ldt.o dumpstack.o | 36 | obj-y += time.o ioport.o ldt.o dumpstack.o |
37 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o | 37 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o |
38 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | ||
38 | obj-$(CONFIG_X86_VISWS) += visws_quirks.o | 39 | obj-$(CONFIG_X86_VISWS) += visws_quirks.o |
39 | obj-$(CONFIG_X86_32) += probe_roms_32.o | 40 | obj-$(CONFIG_X86_32) += probe_roms_32.o |
40 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | 41 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o |
@@ -85,15 +86,15 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | |||
85 | obj-$(CONFIG_KGDB) += kgdb.o | 86 | obj-$(CONFIG_KGDB) += kgdb.o |
86 | obj-$(CONFIG_VM86) += vm86_32.o | 87 | obj-$(CONFIG_VM86) += vm86_32.o |
87 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 88 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
89 | obj-$(CONFIG_EARLY_PRINTK_MRST) += early_printk_mrst.o | ||
88 | 90 | ||
89 | obj-$(CONFIG_HPET_TIMER) += hpet.o | 91 | obj-$(CONFIG_HPET_TIMER) += hpet.o |
90 | obj-$(CONFIG_APB_TIMER) += apb_timer.o | 92 | obj-$(CONFIG_APB_TIMER) += apb_timer.o |
91 | 93 | ||
92 | obj-$(CONFIG_K8_NB) += k8.o | 94 | obj-$(CONFIG_AMD_NB) += amd_nb.o |
93 | obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o | 95 | obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o |
94 | obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o | 96 | obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o |
95 | 97 | ||
96 | obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o | ||
97 | obj-$(CONFIG_KVM_GUEST) += kvm.o | 98 | obj-$(CONFIG_KVM_GUEST) += kvm.o |
98 | obj-$(CONFIG_KVM_CLOCK) += kvmclock.o | 99 | obj-$(CONFIG_KVM_CLOCK) += kvmclock.o |
99 | obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o | 100 | obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o |
@@ -106,6 +107,7 @@ obj-$(CONFIG_SCx200) += scx200.o | |||
106 | scx200-y += scx200_32.o | 107 | scx200-y += scx200_32.o |
107 | 108 | ||
108 | obj-$(CONFIG_OLPC) += olpc.o | 109 | obj-$(CONFIG_OLPC) += olpc.o |
110 | obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o | ||
109 | obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o | 111 | obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o |
110 | obj-$(CONFIG_X86_MRST) += mrst.o | 112 | obj-$(CONFIG_X86_MRST) += mrst.o |
111 | 113 | ||
@@ -122,7 +124,6 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | |||
122 | # 64 bit specific files | 124 | # 64 bit specific files |
123 | ifeq ($(CONFIG_X86_64),y) | 125 | ifeq ($(CONFIG_X86_64),y) |
124 | obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o | 126 | obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o |
125 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o | ||
126 | obj-$(CONFIG_AUDIT) += audit_64.o | 127 | obj-$(CONFIG_AUDIT) += audit_64.o |
127 | 128 | ||
128 | obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o | 129 | obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index fb16f17e59be..5812404a0d4c 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <acpi/processor.h> | 14 | #include <acpi/processor.h> |
15 | #include <asm/acpi.h> | 15 | #include <asm/acpi.h> |
16 | #include <asm/mwait.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Initialize bm_flags based on the CPU cache properties | 19 | * Initialize bm_flags based on the CPU cache properties |
@@ -65,16 +66,6 @@ static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ | |||
65 | 66 | ||
66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 67 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
67 | 68 | ||
68 | #define MWAIT_SUBSTATE_MASK (0xf) | ||
69 | #define MWAIT_CSTATE_MASK (0xf) | ||
70 | #define MWAIT_SUBSTATE_SIZE (4) | ||
71 | |||
72 | #define CPUID_MWAIT_LEAF (5) | ||
73 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) | ||
74 | #define CPUID5_ECX_INTERRUPT_BREAK (0x2) | ||
75 | |||
76 | #define MWAIT_ECX_INTERRUPT_BREAK (0x1) | ||
77 | |||
78 | #define NATIVE_CSTATE_BEYOND_HALT (2) | 69 | #define NATIVE_CSTATE_BEYOND_HALT (2) |
79 | 70 | ||
80 | static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) | 71 | static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 679b6450382b..d2fdb0826df2 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 5a170cbbbed8..3cb482e123de 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -194,6 +194,39 @@ static inline unsigned long tbl_size(int entry_size) | |||
194 | return 1UL << shift; | 194 | return 1UL << shift; |
195 | } | 195 | } |
196 | 196 | ||
197 | /* Access to l1 and l2 indexed register spaces */ | ||
198 | |||
199 | static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) | ||
200 | { | ||
201 | u32 val; | ||
202 | |||
203 | pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); | ||
204 | pci_read_config_dword(iommu->dev, 0xfc, &val); | ||
205 | return val; | ||
206 | } | ||
207 | |||
208 | static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) | ||
209 | { | ||
210 | pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); | ||
211 | pci_write_config_dword(iommu->dev, 0xfc, val); | ||
212 | pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); | ||
213 | } | ||
214 | |||
215 | static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) | ||
216 | { | ||
217 | u32 val; | ||
218 | |||
219 | pci_write_config_dword(iommu->dev, 0xf0, address); | ||
220 | pci_read_config_dword(iommu->dev, 0xf4, &val); | ||
221 | return val; | ||
222 | } | ||
223 | |||
224 | static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) | ||
225 | { | ||
226 | pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); | ||
227 | pci_write_config_dword(iommu->dev, 0xf4, val); | ||
228 | } | ||
229 | |||
197 | /**************************************************************************** | 230 | /**************************************************************************** |
198 | * | 231 | * |
199 | * AMD IOMMU MMIO register space handling functions | 232 | * AMD IOMMU MMIO register space handling functions |
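The four accessors drive a classic index/data pair in the IOMMU's PCI config space: the address (with the L1 bank in the upper bits, plus a write-enable bit for stores) goes into one dword, the data moves through its partner, and writes clear the enable bit again afterwards. A small illustrative consumer:

	/* Sketch: dump one L1 bank's 0x12 indirect registers (bounds as in
	 * the init/resume loops in this file). */
	static void __init example_dump_l1_bank(struct amd_iommu *iommu, u16 l1)
	{
		u8 addr;

		for (addr = 0; addr < 0x12; addr++)
			pr_debug("AMD-Vi: l1[%u][%#x] = %#010x\n",
				 l1, addr, iommu_read_l1(iommu, l1, addr));
	}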
@@ -619,6 +652,7 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) | |||
619 | { | 652 | { |
620 | int cap_ptr = iommu->cap_ptr; | 653 | int cap_ptr = iommu->cap_ptr; |
621 | u32 range, misc; | 654 | u32 range, misc; |
655 | int i, j; | ||
622 | 656 | ||
623 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, | 657 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
624 | &iommu->cap); | 658 | &iommu->cap); |
@@ -633,12 +667,29 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) | |||
633 | MMIO_GET_LD(range)); | 667 | MMIO_GET_LD(range)); |
634 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); | 668 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); |
635 | 669 | ||
636 | if (is_rd890_iommu(iommu->dev)) { | 670 | if (!is_rd890_iommu(iommu->dev)) |
637 | pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); | 671 | return; |
638 | pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); | 672 | |
639 | pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); | 673 | /* |
640 | pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); | 674 | * Some rd890 systems may not be fully reconfigured by the BIOS, so |
641 | } | 675 | * it's necessary for us to store this information so it can be |
676 | * reprogrammed on resume | ||
677 | */ | ||
678 | |||
679 | pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, | ||
680 | &iommu->stored_addr_lo); | ||
681 | pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, | ||
682 | &iommu->stored_addr_hi); | ||
683 | |||
684 | /* Low bit locks writes to configuration space */ | ||
685 | iommu->stored_addr_lo &= ~1; | ||
686 | |||
687 | for (i = 0; i < 6; i++) | ||
688 | for (j = 0; j < 0x12; j++) | ||
689 | iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); | ||
690 | |||
691 | for (i = 0; i < 0x83; i++) | ||
692 | iommu->stored_l2[i] = iommu_read_l2(iommu, i); | ||
642 | } | 693 | } |
643 | 694 | ||
644 | /* | 695 | /* |
@@ -1127,14 +1178,53 @@ static void iommu_init_flags(struct amd_iommu *iommu) | |||
1127 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | 1178 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); |
1128 | } | 1179 | } |
1129 | 1180 | ||
1130 | static void iommu_apply_quirks(struct amd_iommu *iommu) | 1181 | static void iommu_apply_resume_quirks(struct amd_iommu *iommu) |
1131 | { | 1182 | { |
1132 | if (is_rd890_iommu(iommu->dev)) { | 1183 | int i, j; |
1133 | pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); | 1184 | u32 ioc_feature_control; |
1134 | pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); | 1185 | struct pci_dev *pdev = NULL; |
1135 | pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); | 1186 | |
1136 | pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); | 1187 | /* RD890 BIOSes may not have completely reconfigured the iommu */ |
1137 | } | 1188 | if (!is_rd890_iommu(iommu->dev)) |
1189 | return; | ||
1190 | |||
1191 | /* | ||
1192 | * First, we need to ensure that the iommu is enabled. This is | ||
1193 | * controlled by a register in the northbridge | ||
1194 | */ | ||
1195 | pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); | ||
1196 | |||
1197 | if (!pdev) | ||
1198 | return; | ||
1199 | |||
1200 | /* Select Northbridge indirect register 0x75 and enable writing */ | ||
1201 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); | ||
1202 | pci_read_config_dword(pdev, 0x64, &ioc_feature_control); | ||
1203 | |||
1204 | /* Enable the iommu */ | ||
1205 | if (!(ioc_feature_control & 0x1)) | ||
1206 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); | ||
1207 | |||
1208 | pci_dev_put(pdev); | ||
1209 | |||
1210 | /* Restore the iommu BAR */ | ||
1211 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | ||
1212 | iommu->stored_addr_lo); | ||
1213 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, | ||
1214 | iommu->stored_addr_hi); | ||
1215 | |||
1216 | /* Restore the l1 indirect regs for each of the 6 l1s */ | ||
1217 | for (i = 0; i < 6; i++) | ||
1218 | for (j = 0; j < 0x12; j++) | ||
1219 | iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); | ||
1220 | |||
1221 | /* Restore the l2 indirect regs */ | ||
1222 | for (i = 0; i < 0x83; i++) | ||
1223 | iommu_write_l2(iommu, i, iommu->stored_l2[i]); | ||
1224 | |||
1225 | /* Lock PCI setup registers */ | ||
1226 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | ||
1227 | iommu->stored_addr_lo | 1); | ||
1138 | } | 1228 | } |
1139 | 1229 | ||
1140 | /* | 1230 | /* |
@@ -1147,7 +1237,6 @@ static void enable_iommus(void) | |||
1147 | 1237 | ||
1148 | for_each_iommu(iommu) { | 1238 | for_each_iommu(iommu) { |
1149 | iommu_disable(iommu); | 1239 | iommu_disable(iommu); |
1150 | iommu_apply_quirks(iommu); | ||
1151 | iommu_init_flags(iommu); | 1240 | iommu_init_flags(iommu); |
1152 | iommu_set_device_table(iommu); | 1241 | iommu_set_device_table(iommu); |
1153 | iommu_enable_command_buffer(iommu); | 1242 | iommu_enable_command_buffer(iommu); |
@@ -1173,6 +1262,11 @@ static void disable_iommus(void) | |||
1173 | 1262 | ||
1174 | static int amd_iommu_resume(struct sys_device *dev) | 1263 | static int amd_iommu_resume(struct sys_device *dev) |
1175 | { | 1264 | { |
1265 | struct amd_iommu *iommu; | ||
1266 | |||
1267 | for_each_iommu(iommu) | ||
1268 | iommu_apply_resume_quirks(iommu); | ||
1269 | |||
1176 | /* re-load the hardware */ | 1270 | /* re-load the hardware */ |
1177 | enable_iommus(); | 1271 | enable_iommus(); |
1178 | 1272 | ||
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/amd_nb.c index 0f7bc20cfcde..8f6463d8ed0d 100644 --- a/arch/x86/kernel/k8.c +++ b/arch/x86/kernel/amd_nb.c | |||
@@ -8,21 +8,19 @@ | |||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
11 | #include <asm/k8.h> | 11 | #include <asm/amd_nb.h> |
12 | |||
13 | int num_k8_northbridges; | ||
14 | EXPORT_SYMBOL(num_k8_northbridges); | ||
15 | 12 | ||
16 | static u32 *flush_words; | 13 | static u32 *flush_words; |
17 | 14 | ||
18 | struct pci_device_id k8_nb_ids[] = { | 15 | struct pci_device_id k8_nb_ids[] = { |
19 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | 16 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, |
20 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 17 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
18 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, | ||
21 | {} | 19 | {} |
22 | }; | 20 | }; |
23 | EXPORT_SYMBOL(k8_nb_ids); | 21 | EXPORT_SYMBOL(k8_nb_ids); |
24 | 22 | ||
25 | struct pci_dev **k8_northbridges; | 23 | struct k8_northbridge_info k8_northbridges; |
26 | EXPORT_SYMBOL(k8_northbridges); | 24 | EXPORT_SYMBOL(k8_northbridges); |
27 | 25 | ||
28 | static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) | 26 | static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) |
@@ -40,36 +38,45 @@ int cache_k8_northbridges(void) | |||
40 | int i; | 38 | int i; |
41 | struct pci_dev *dev; | 39 | struct pci_dev *dev; |
42 | 40 | ||
43 | if (num_k8_northbridges) | 41 | if (k8_northbridges.num) |
44 | return 0; | 42 | return 0; |
45 | 43 | ||
46 | dev = NULL; | 44 | dev = NULL; |
47 | while ((dev = next_k8_northbridge(dev)) != NULL) | 45 | while ((dev = next_k8_northbridge(dev)) != NULL) |
48 | num_k8_northbridges++; | 46 | k8_northbridges.num++; |
47 | |||
48 | /* some CPU families (e.g. family 0x11) do not support GART */ | ||
49 | if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || | ||
50 | boot_cpu_data.x86 == 0x15) | ||
51 | k8_northbridges.gart_supported = 1; | ||
49 | 52 | ||
50 | k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *), | 53 | k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * |
51 | GFP_KERNEL); | 54 | sizeof(void *), GFP_KERNEL); |
52 | if (!k8_northbridges) | 55 | if (!k8_northbridges.nb_misc) |
53 | return -ENOMEM; | 56 | return -ENOMEM; |
54 | 57 | ||
55 | if (!num_k8_northbridges) { | 58 | if (!k8_northbridges.num) { |
56 | k8_northbridges[0] = NULL; | 59 | k8_northbridges.nb_misc[0] = NULL; |
57 | return 0; | 60 | return 0; |
58 | } | 61 | } |
59 | 62 | ||
60 | flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL); | 63 | if (k8_northbridges.gart_supported) { |
61 | if (!flush_words) { | 64 | flush_words = kmalloc(k8_northbridges.num * sizeof(u32), |
62 | kfree(k8_northbridges); | 65 | GFP_KERNEL); |
63 | return -ENOMEM; | 66 | if (!flush_words) { |
67 | kfree(k8_northbridges.nb_misc); | ||
68 | return -ENOMEM; | ||
69 | } | ||
64 | } | 70 | } |
65 | 71 | ||
66 | dev = NULL; | 72 | dev = NULL; |
67 | i = 0; | 73 | i = 0; |
68 | while ((dev = next_k8_northbridge(dev)) != NULL) { | 74 | while ((dev = next_k8_northbridge(dev)) != NULL) { |
69 | k8_northbridges[i] = dev; | 75 | k8_northbridges.nb_misc[i] = dev; |
70 | pci_read_config_dword(dev, 0x9c, &flush_words[i++]); | 76 | if (k8_northbridges.gart_supported) |
77 | pci_read_config_dword(dev, 0x9c, &flush_words[i++]); | ||
71 | } | 78 | } |
72 | k8_northbridges[i] = NULL; | 79 | k8_northbridges.nb_misc[i] = NULL; |
73 | return 0; | 80 | return 0; |
74 | } | 81 | } |
75 | EXPORT_SYMBOL_GPL(cache_k8_northbridges); | 82 | EXPORT_SYMBOL_GPL(cache_k8_northbridges); |
@@ -93,22 +100,25 @@ void k8_flush_garts(void) | |||
93 | unsigned long flags; | 100 | unsigned long flags; |
94 | static DEFINE_SPINLOCK(gart_lock); | 101 | static DEFINE_SPINLOCK(gart_lock); |
95 | 102 | ||
103 | if (!k8_northbridges.gart_supported) | ||
104 | return; | ||
105 | |||
96 | /* Avoid races between AGP and IOMMU. In theory it's not needed | 106 | /* Avoid races between AGP and IOMMU. In theory it's not needed |
97 | but I'm not sure if the hardware won't lose flush requests | 107 | but I'm not sure if the hardware won't lose flush requests |
98 | when another is pending. This whole thing is so expensive anyways | 108 | when another is pending. This whole thing is so expensive anyways |
99 | that it doesn't matter to serialize more. -AK */ | 109 | that it doesn't matter to serialize more. -AK */ |
100 | spin_lock_irqsave(&gart_lock, flags); | 110 | spin_lock_irqsave(&gart_lock, flags); |
101 | flushed = 0; | 111 | flushed = 0; |
102 | for (i = 0; i < num_k8_northbridges; i++) { | 112 | for (i = 0; i < k8_northbridges.num; i++) { |
103 | pci_write_config_dword(k8_northbridges[i], 0x9c, | 113 | pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, |
104 | flush_words[i]|1); | 114 | flush_words[i]|1); |
105 | flushed++; | 115 | flushed++; |
106 | } | 116 | } |
107 | for (i = 0; i < num_k8_northbridges; i++) { | 117 | for (i = 0; i < k8_northbridges.num; i++) { |
108 | u32 w; | 118 | u32 w; |
109 | /* Make sure the hardware actually executed the flush*/ | 119 | /* Make sure the hardware actually executed the flush*/ |
110 | for (;;) { | 120 | for (;;) { |
111 | pci_read_config_dword(k8_northbridges[i], | 121 | pci_read_config_dword(k8_northbridges.nb_misc[i], |
112 | 0x9c, &w); | 122 | 0x9c, &w); |
113 | if (!(w & 1)) | 123 | if (!(w & 1)) |
114 | break; | 124 | break; |
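With the bare pointer array replaced by struct k8_northbridge_info, every consumer gains a GART-capability check before touching the flush words. A sketch of the new iteration shape, mirroring k8_flush_garts() above; the function name is illustrative:

	/* Illustrative walk over the cached northbridge MISC devices. */
	static void example_walk_northbridges(void)
	{
		int i;

		if (cache_k8_northbridges() < 0 || !k8_northbridges.gart_supported)
			return;

		for (i = 0; i < k8_northbridges.num; i++) {
			u32 flush_word;

			pci_read_config_dword(k8_northbridges.nb_misc[i],
					      0x9c, &flush_word);
		}
	}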
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 8dd77800ff5d..92543c73cf8e 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs) | |||
231 | apbt_start_counter(phy_cs_timer_id); | 231 | apbt_start_counter(phy_cs_timer_id); |
232 | } | 232 | } |
233 | 233 | ||
234 | /* Setup IRQ routing via IOAPIC */ | ||
235 | #ifdef CONFIG_SMP | ||
236 | static void apbt_setup_irq(struct apbt_dev *adev) | ||
237 | { | ||
238 | struct irq_chip *chip; | ||
239 | struct irq_desc *desc; | ||
240 | |||
241 | /* timer0 irq has been setup early */ | ||
242 | if (adev->irq == 0) | ||
243 | return; | ||
244 | desc = irq_to_desc(adev->irq); | ||
245 | chip = get_irq_chip(adev->irq); | ||
246 | disable_irq(adev->irq); | ||
247 | desc->status |= IRQ_MOVE_PCNTXT; | ||
248 | irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); | ||
250 | /* APB timer irqs are set up as mp_irqs, timer is edge triggered */ | ||
250 | set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge"); | ||
251 | enable_irq(adev->irq); | ||
252 | if (system_state == SYSTEM_BOOTING) | ||
253 | if (request_irq(adev->irq, apbt_interrupt_handler, | ||
254 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | ||
255 | adev->name, adev)) { | ||
256 | printk(KERN_ERR "Failed request IRQ for APBT%d\n", | ||
257 | adev->num); | ||
258 | } | ||
259 | } | ||
260 | #endif | ||
261 | |||
262 | static void apbt_enable_int(int n) | 234 | static void apbt_enable_int(int n) |
263 | { | 235 | { |
264 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | 236 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); |
@@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void) | |||
334 | } | 306 | } |
335 | 307 | ||
336 | #ifdef CONFIG_SMP | 308 | #ifdef CONFIG_SMP |
309 | |||
310 | static void apbt_setup_irq(struct apbt_dev *adev) | ||
311 | { | ||
312 | /* timer0 irq has been setup early */ | ||
313 | if (adev->irq == 0) | ||
314 | return; | ||
315 | |||
316 | if (system_state == SYSTEM_BOOTING) { | ||
317 | irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); | ||
318 | /* APB timer irqs are set up as mp_irqs, timer is edge type */ | ||
319 | __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge"); | ||
320 | if (request_irq(adev->irq, apbt_interrupt_handler, | ||
321 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | ||
322 | adev->name, adev)) { | ||
323 | printk(KERN_ERR "Failed request IRQ for APBT%d\n", | ||
324 | adev->num); | ||
325 | } | ||
326 | } else | ||
327 | enable_irq(adev->irq); | ||
328 | } | ||
329 | |||
337 | /* Should be called with per cpu */ | 330 | /* Should be called with per cpu */ |
338 | void apbt_setup_secondary_clock(void) | 331 | void apbt_setup_secondary_clock(void) |
339 | { | 332 | { |
@@ -343,7 +336,7 @@ void apbt_setup_secondary_clock(void) | |||
343 | 336 | ||
344 | /* Don't register boot CPU clockevent */ | 337 | /* Don't register boot CPU clockevent */ |
345 | cpu = smp_processor_id(); | 338 | cpu = smp_processor_id(); |
346 | if (cpu == boot_cpu_id) | 339 | if (!cpu) |
347 | return; | 340 | return; |
348 | /* | 341 | /* |
349 | * We need to calculate the scaled math multiplication factor for | 342 | * We need to calculate the scaled math multiplication factor for |
@@ -389,16 +382,17 @@ static int apbt_cpuhp_notify(struct notifier_block *n, | |||
389 | 382 | ||
390 | switch (action & 0xf) { | 383 | switch (action & 0xf) { |
391 | case CPU_DEAD: | 384 | case CPU_DEAD: |
385 | disable_irq(adev->irq); | ||
392 | apbt_disable_int(cpu); | 386 | apbt_disable_int(cpu); |
393 | if (system_state == SYSTEM_RUNNING) | 387 | if (system_state == SYSTEM_RUNNING) { |
394 | pr_debug("skipping APBT CPU %lu offline\n", cpu); | 388 | pr_debug("skipping APBT CPU %lu offline\n", cpu); |
395 | else if (adev) { | 389 | } else if (adev) { |
396 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); | 390 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); |
397 | free_irq(adev->irq, adev); | 391 | free_irq(adev->irq, adev); |
398 | } | 392 | } |
399 | break; | 393 | break; |
400 | default: | 394 | default: |
401 | pr_debug(KERN_INFO "APBT notified %lu, no action\n", action); | 395 | pr_debug("APBT notified %lu, no action\n", action); |
402 | } | 396 | } |
403 | return NOTIFY_OK; | 397 | return NOTIFY_OK; |
404 | } | 398 | } |
@@ -552,7 +546,7 @@ bad_count: | |||
552 | pr_debug("APB CS going back %lx:%lx:%lx ", | 546 | pr_debug("APB CS going back %lx:%lx:%lx ", |
553 | t2, last_read, t2 - last_read); | 547 | t2, last_read, t2 - last_read); |
554 | bad_count_x3: | 548 | bad_count_x3: |
555 | pr_debug(KERN_INFO "tripple check enforced\n"); | 549 | pr_debug("triple check enforced\n"); |
556 | t0 = apbt_readl(phy_cs_timer_id, | 550 | t0 = apbt_readl(phy_cs_timer_id, |
557 | APBTMR_N_CURRENT_VALUE); | 551 | APBTMR_N_CURRENT_VALUE); |
558 | udelay(1); | 552 | udelay(1); |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index a2e0caf26e17..377f5db3b8b4 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <asm/gart.h> | 27 | #include <asm/gart.h> |
28 | #include <asm/pci-direct.h> | 28 | #include <asm/pci-direct.h> |
29 | #include <asm/dma.h> | 29 | #include <asm/dma.h> |
30 | #include <asm/k8.h> | 30 | #include <asm/amd_nb.h> |
31 | #include <asm/x86_init.h> | 31 | #include <asm/x86_init.h> |
32 | 32 | ||
33 | int gart_iommu_aperture; | 33 | int gart_iommu_aperture; |
@@ -307,7 +307,7 @@ void __init early_gart_iommu_check(void) | |||
307 | continue; | 307 | continue; |
308 | 308 | ||
309 | ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); | 309 | ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); |
310 | aper_enabled = ctl & AMD64_GARTEN; | 310 | aper_enabled = ctl & GARTEN; |
311 | aper_order = (ctl >> 1) & 7; | 311 | aper_order = (ctl >> 1) & 7; |
312 | aper_size = (32 * 1024 * 1024) << aper_order; | 312 | aper_size = (32 * 1024 * 1024) << aper_order; |
313 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; | 313 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; |
@@ -362,7 +362,7 @@ void __init early_gart_iommu_check(void) | |||
362 | continue; | 362 | continue; |
363 | 363 | ||
364 | ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); | 364 | ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); |
365 | ctl &= ~AMD64_GARTEN; | 365 | ctl &= ~GARTEN; |
366 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); | 366 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); |
367 | } | 367 | } |
368 | } | 368 | } |
@@ -505,8 +505,13 @@ out: | |||
505 | 505 | ||
506 | /* Fix up the north bridges */ | 506 | /* Fix up the north bridges */ |
507 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { | 507 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
508 | int bus; | 508 | int bus, dev_base, dev_limit; |
509 | int dev_base, dev_limit; | 509 | |
510 | /* | ||
511 | * Don't enable translation yet but enable GART IO and CPU | ||
512 | * accesses and set DISTLBWALKPRB since GART table memory is UC. | ||
513 | */ | ||
514 | u32 ctl = DISTLBWALKPRB | aper_order << 1; | ||
510 | 515 | ||
511 | bus = bus_dev_ranges[i].bus; | 516 | bus = bus_dev_ranges[i].bus; |
512 | dev_base = bus_dev_ranges[i].dev_base; | 517 | dev_base = bus_dev_ranges[i].dev_base; |
@@ -515,10 +520,7 @@ out: | |||
515 | if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) | 520 | if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) |
516 | continue; | 521 | continue; |
517 | 522 | ||
518 | /* Don't enable translation yet. That is done later. | 523 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); |
519 | Assume this BIOS didn't initialise the GART so | ||
520 | just overwrite all previous bits */ | ||
521 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1); | ||
522 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); | 524 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); |
523 | } | 525 | } |
524 | } | 526 | } |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e3b534cda49a..850657d1b0ed 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <asm/mce.h> | 52 | #include <asm/mce.h> |
53 | #include <asm/kvm_para.h> | 53 | #include <asm/kvm_para.h> |
54 | #include <asm/tsc.h> | 54 | #include <asm/tsc.h> |
55 | #include <asm/atomic.h> | ||
55 | 56 | ||
56 | unsigned int num_processors; | 57 | unsigned int num_processors; |
57 | 58 | ||
@@ -370,38 +371,87 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) | |||
370 | } | 371 | } |
371 | 372 | ||
372 | /* | 373 | /* |
373 | * Setup extended LVT, AMD specific (K8, family 10h) | 374 | * Setup extended LVT, AMD specific |
374 | * | 375 | * |
375 | * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and | 376 | * Software should use the LVT offsets the BIOS provides. The offsets |
376 | * MCE interrupts are supported. Thus MCE offset must be set to 0. | 377 | * are determined by the subsystems using them, such as those for MCE |
378 | * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts | ||
379 | * are supported. Beginning with family 10h at least 4 offsets are | ||
380 | * available. | ||
377 | * | 381 | * |
378 | * If mask=1, the LVT entry does not generate interrupts while mask=0 | 382 | * Since the offsets must be consistent for all cores, we keep track |
379 | * enables the vector. See also the BKDGs. | 383 | * of the LVT offsets in software and reserve the offset for the same |
384 | * vector so it can also be used on other cores. An offset is freed by | ||
385 | * setting the entry to APIC_EILVT_MASKED. | ||
386 | * | ||
387 | * If the BIOS is right, there should be no conflicts. Otherwise a | ||
388 | * "[Firmware Bug]: ..." error message is generated. However, if | ||
389 | * software does not properly determine the offsets, it is not | ||
390 | * necessarily a BIOS bug. | ||
380 | */ | 391 | */ |
381 | 392 | ||
382 | #define APIC_EILVT_LVTOFF_MCE 0 | 393 | static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; |
383 | #define APIC_EILVT_LVTOFF_IBS 1 | ||
384 | 394 | ||
385 | static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) | 395 | static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) |
386 | { | 396 | { |
387 | unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); | 397 | return (old & APIC_EILVT_MASKED) |
388 | unsigned int v = (mask << 16) | (msg_type << 8) | vector; | 398 | || (new == APIC_EILVT_MASKED) |
389 | 399 | || ((new & ~APIC_EILVT_MASKED) == old); | |
390 | apic_write(reg, v); | ||
391 | } | 400 | } |
392 | 401 | ||
393 | u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) | 402 | static unsigned int reserve_eilvt_offset(int offset, unsigned int new) |
394 | { | 403 | { |
395 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); | 404 | unsigned int rsvd; /* 0: uninitialized */ |
396 | return APIC_EILVT_LVTOFF_MCE; | 405 | |
406 | if (offset >= APIC_EILVT_NR_MAX) | ||
407 | return ~0; | ||
408 | |||
409 | rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED; | ||
410 | do { | ||
411 | if (rsvd && | ||
412 | !eilvt_entry_is_changeable(rsvd, new)) | ||
413 | /* may not change if vectors are different */ | ||
414 | return rsvd; | ||
415 | rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); | ||
416 | } while (rsvd != new); | ||
417 | |||
418 | return new; | ||
397 | } | 419 | } |
398 | 420 | ||
399 | u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) | 421 | /* |
422 | * If mask=1, the LVT entry does not generate interrupts while mask=0 | ||
423 | * enables the vector. See also the BKDGs. | ||
424 | */ | ||
425 | |||
426 | int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) | ||
400 | { | 427 | { |
401 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); | 428 | unsigned long reg = APIC_EILVTn(offset); |
402 | return APIC_EILVT_LVTOFF_IBS; | 429 | unsigned int new, old, reserved; |
430 | |||
431 | new = (mask << 16) | (msg_type << 8) | vector; | ||
432 | old = apic_read(reg); | ||
433 | reserved = reserve_eilvt_offset(offset, new); | ||
434 | |||
435 | if (reserved != new) { | ||
436 | pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but " | ||
437 | "vector 0x%x was already reserved by another core, " | ||
438 | "APIC%lX=0x%x\n", | ||
439 | smp_processor_id(), new, reserved, reg, old); | ||
440 | return -EINVAL; | ||
441 | } | ||
442 | |||
443 | if (!eilvt_entry_is_changeable(old, new)) { | ||
444 | pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but " | ||
445 | "register already in use, APIC%lX=0x%x\n", | ||
446 | smp_processor_id(), new, reg, old); | ||
447 | return -EBUSY; | ||
448 | } | ||
449 | |||
450 | apic_write(reg, new); | ||
451 | |||
452 | return 0; | ||
403 | } | 453 | } |
404 | EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); | 454 | EXPORT_SYMBOL_GPL(setup_APIC_eilvt); |
405 | 455 | ||
406 | /* | 456 | /* |
407 | * Program the next event, relative to now | 457 | * Program the next event, relative to now |
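reserve_eilvt_offset() above claims an LVT offset with atomic_cmpxchg() so that all cores end up agreeing on which vector owns each offset: the first caller installs its value, and later callers succeed only if they present a compatible one. Here is a userspace sketch of the same claim loop, with C11 atomics standing in for the kernel's atomic_cmpxchg() and placeholder values for the masked bit and table size.

/*
 * Userspace sketch of the reserve_eilvt_offset() claim loop.  The
 * MASKED bit and table size are stand-ins, not the kernel values.
 */
#include <stdatomic.h>
#include <stdio.h>

#define EILVT_MASKED  (1u << 16)
#define EILVT_NR_MAX  4

static atomic_uint eilvt_offsets[EILVT_NR_MAX]; /* 0: uninitialized */

static int entry_is_changeable(unsigned int old, unsigned int new)
{
	return (old & EILVT_MASKED) || new == EILVT_MASKED ||
	       (new & ~EILVT_MASKED) == old;
}

/* returns the value now owning the slot; success iff it equals new */
static unsigned int reserve_offset(unsigned int offset, unsigned int new)
{
	unsigned int rsvd = atomic_load(&eilvt_offsets[offset]) & ~EILVT_MASKED;

	do {
		if (rsvd && !entry_is_changeable(rsvd, new))
			return rsvd;    /* another core owns a different vector */
	} while (!atomic_compare_exchange_strong(&eilvt_offsets[offset],
						 &rsvd, new));
	return new;
}

int main(void)
{
	printf("first:  0x%x\n", reserve_offset(1, 0xab)); /* claims -> 0xab */
	printf("same:   0x%x\n", reserve_offset(1, 0xab)); /* idempotent */
	printf("other:  0x%x\n", reserve_offset(1, 0xcd)); /* loses -> 0xab */
	return 0;
}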
@@ -1665,10 +1715,7 @@ int __init APIC_init_uniprocessor(void) | |||
1665 | } | 1715 | } |
1666 | #endif | 1716 | #endif |
1667 | 1717 | ||
1668 | #ifndef CONFIG_SMP | ||
1669 | enable_IR_x2apic(); | ||
1670 | default_setup_apic_routing(); | 1718 | default_setup_apic_routing(); |
1671 | #endif | ||
1672 | 1719 | ||
1673 | verify_local_APIC(); | 1720 | verify_local_APIC(); |
1674 | connect_bsp_APIC(); | 1721 | connect_bsp_APIC(); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 5c5b8f3dddb5..8ae808d110f4 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -131,13 +131,9 @@ struct irq_pin_list { | |||
131 | struct irq_pin_list *next; | 131 | struct irq_pin_list *next; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static struct irq_pin_list *get_one_free_irq_2_pin(int node) | 134 | static struct irq_pin_list *alloc_irq_pin_list(int node) |
135 | { | 135 | { |
136 | struct irq_pin_list *pin; | 136 | return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); |
137 | |||
138 | pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); | ||
139 | |||
140 | return pin; | ||
141 | } | 137 | } |
142 | 138 | ||
143 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 139 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
@@ -150,10 +146,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS]; | |||
150 | int __init arch_early_irq_init(void) | 146 | int __init arch_early_irq_init(void) |
151 | { | 147 | { |
152 | struct irq_cfg *cfg; | 148 | struct irq_cfg *cfg; |
153 | struct irq_desc *desc; | 149 | int count, node, i; |
154 | int count; | ||
155 | int node; | ||
156 | int i; | ||
157 | 150 | ||
158 | if (!legacy_pic->nr_legacy_irqs) { | 151 | if (!legacy_pic->nr_legacy_irqs) { |
159 | nr_irqs_gsi = 0; | 152 | nr_irqs_gsi = 0; |
@@ -162,13 +155,15 @@ int __init arch_early_irq_init(void) | |||
162 | 155 | ||
163 | cfg = irq_cfgx; | 156 | cfg = irq_cfgx; |
164 | count = ARRAY_SIZE(irq_cfgx); | 157 | count = ARRAY_SIZE(irq_cfgx); |
165 | node= cpu_to_node(boot_cpu_id); | 158 | node = cpu_to_node(0); |
159 | |||
160 | /* Make sure the legacy interrupts are marked in the bitmap */ | ||
161 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); | ||
166 | 162 | ||
167 | for (i = 0; i < count; i++) { | 163 | for (i = 0; i < count; i++) { |
168 | desc = irq_to_desc(i); | 164 | set_irq_chip_data(i, &cfg[i]); |
169 | desc->chip_data = &cfg[i]; | 165 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); |
170 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); | 166 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); |
171 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); | ||
172 | /* | 167 | /* |
173 | * For legacy IRQ's, start with assigning irq0 to irq15 to | 168 | * For legacy IRQ's, start with assigning irq0 to irq15 to |
174 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. | 169 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. |
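The init loop now attaches each irq_cfg to the generic descriptor with set_irq_chip_data(), so the rest of the file can recover it via get_irq_chip_data(irq) instead of reaching into struct irq_desc. A minimal sketch of that set/get registry follows; the fixed-size table is a simplification of the real genirq bookkeeping.

/*
 * Sketch of the set_irq_chip_data()/get_irq_chip_data() pattern the
 * hunk switches to: a per-irq opaque pointer that spares callers any
 * knowledge of the descriptor layout.  Simplified, not the real API.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_IRQS 16

struct irq_cfg {
	unsigned int vector;
};

static void *chip_data[NR_IRQS];

static int set_irq_chip_data(unsigned int irq, void *data)
{
	if (irq >= NR_IRQS)
		return -1;
	chip_data[irq] = data;
	return 0;
}

static void *get_irq_chip_data(unsigned int irq)
{
	return irq < NR_IRQS ? chip_data[irq] : NULL;
}

int main(void)
{
	static struct irq_cfg cfgs[NR_IRQS];

	for (unsigned int i = 0; i < NR_IRQS; i++)
		set_irq_chip_data(i, &cfgs[i]);

	struct irq_cfg *cfg = get_irq_chip_data(3);
	printf("irq 3 cfg at %p, vector %u\n", (void *)cfg, cfg->vector);
	return 0;
}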
@@ -183,170 +178,88 @@ int __init arch_early_irq_init(void) | |||
183 | } | 178 | } |
184 | 179 | ||
185 | #ifdef CONFIG_SPARSE_IRQ | 180 | #ifdef CONFIG_SPARSE_IRQ |
186 | struct irq_cfg *irq_cfg(unsigned int irq) | 181 | static struct irq_cfg *irq_cfg(unsigned int irq) |
187 | { | 182 | { |
188 | struct irq_cfg *cfg = NULL; | 183 | return get_irq_chip_data(irq); |
189 | struct irq_desc *desc; | ||
190 | |||
191 | desc = irq_to_desc(irq); | ||
192 | if (desc) | ||
193 | cfg = desc->chip_data; | ||
194 | |||
195 | return cfg; | ||
196 | } | 184 | } |
197 | 185 | ||
198 | static struct irq_cfg *get_one_free_irq_cfg(int node) | 186 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
199 | { | 187 | { |
200 | struct irq_cfg *cfg; | 188 | struct irq_cfg *cfg; |
201 | 189 | ||
202 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 190 | cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); |
203 | if (cfg) { | 191 | if (!cfg) |
204 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { | 192 | return NULL; |
205 | kfree(cfg); | 193 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) |
206 | cfg = NULL; | 194 | goto out_cfg; |
207 | } else if (!zalloc_cpumask_var_node(&cfg->old_domain, | 195 | if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) |
208 | GFP_ATOMIC, node)) { | 196 | goto out_domain; |
209 | free_cpumask_var(cfg->domain); | ||
210 | kfree(cfg); | ||
211 | cfg = NULL; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | return cfg; | 197 | return cfg; |
198 | out_domain: | ||
199 | free_cpumask_var(cfg->domain); | ||
200 | out_cfg: | ||
201 | kfree(cfg); | ||
202 | return NULL; | ||
216 | } | 203 | } |
217 | 204 | ||
218 | int arch_init_chip_data(struct irq_desc *desc, int node) | 205 | static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) |
219 | { | ||
220 | struct irq_cfg *cfg; | ||
221 | |||
222 | cfg = desc->chip_data; | ||
223 | if (!cfg) { | ||
224 | desc->chip_data = get_one_free_irq_cfg(node); | ||
225 | if (!desc->chip_data) { | ||
226 | printk(KERN_ERR "can not alloc irq_cfg\n"); | ||
227 | BUG_ON(1); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | /* for move_irq_desc */ | ||
235 | static void | ||
236 | init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) | ||
237 | { | 206 | { |
238 | struct irq_pin_list *old_entry, *head, *tail, *entry; | 207 | if (!cfg) |
239 | |||
240 | cfg->irq_2_pin = NULL; | ||
241 | old_entry = old_cfg->irq_2_pin; | ||
242 | if (!old_entry) | ||
243 | return; | ||
244 | |||
245 | entry = get_one_free_irq_2_pin(node); | ||
246 | if (!entry) | ||
247 | return; | 208 | return; |
209 | set_irq_chip_data(at, NULL); | ||
210 | free_cpumask_var(cfg->domain); | ||
211 | free_cpumask_var(cfg->old_domain); | ||
212 | kfree(cfg); | ||
213 | } | ||
248 | 214 | ||
249 | entry->apic = old_entry->apic; | 215 | #else |
250 | entry->pin = old_entry->pin; | ||
251 | head = entry; | ||
252 | tail = entry; | ||
253 | old_entry = old_entry->next; | ||
254 | while (old_entry) { | ||
255 | entry = get_one_free_irq_2_pin(node); | ||
256 | if (!entry) { | ||
257 | entry = head; | ||
258 | while (entry) { | ||
259 | head = entry->next; | ||
260 | kfree(entry); | ||
261 | entry = head; | ||
262 | } | ||
263 | /* still use the old one */ | ||
264 | return; | ||
265 | } | ||
266 | entry->apic = old_entry->apic; | ||
267 | entry->pin = old_entry->pin; | ||
268 | tail->next = entry; | ||
269 | tail = entry; | ||
270 | old_entry = old_entry->next; | ||
271 | } | ||
272 | 216 | ||
273 | tail->next = NULL; | 217 | struct irq_cfg *irq_cfg(unsigned int irq) |
274 | cfg->irq_2_pin = head; | 218 | { |
219 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | ||
275 | } | 220 | } |
276 | 221 | ||
277 | static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) | 222 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
278 | { | 223 | { |
279 | struct irq_pin_list *entry, *next; | 224 | return irq_cfgx + irq; |
280 | 225 | } | |
281 | if (old_cfg->irq_2_pin == cfg->irq_2_pin) | ||
282 | return; | ||
283 | 226 | ||
284 | entry = old_cfg->irq_2_pin; | 227 | static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } |
285 | 228 | ||
286 | while (entry) { | 229 | #endif |
287 | next = entry->next; | ||
288 | kfree(entry); | ||
289 | entry = next; | ||
290 | } | ||
291 | old_cfg->irq_2_pin = NULL; | ||
292 | } | ||
293 | 230 | ||
294 | void arch_init_copy_chip_data(struct irq_desc *old_desc, | 231 | static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) |
295 | struct irq_desc *desc, int node) | ||
296 | { | 232 | { |
233 | int res = irq_alloc_desc_at(at, node); | ||
297 | struct irq_cfg *cfg; | 234 | struct irq_cfg *cfg; |
298 | struct irq_cfg *old_cfg; | ||
299 | |||
300 | cfg = get_one_free_irq_cfg(node); | ||
301 | 235 | ||
302 | if (!cfg) | 236 | if (res < 0) { |
303 | return; | 237 | if (res != -EEXIST) |
304 | 238 | return NULL; | |
305 | desc->chip_data = cfg; | 239 | cfg = get_irq_chip_data(at); |
306 | 240 | if (cfg) | |
307 | old_cfg = old_desc->chip_data; | 241 | return cfg; |
308 | 242 | } | |
309 | cfg->vector = old_cfg->vector; | ||
310 | cfg->move_in_progress = old_cfg->move_in_progress; | ||
311 | cpumask_copy(cfg->domain, old_cfg->domain); | ||
312 | cpumask_copy(cfg->old_domain, old_cfg->old_domain); | ||
313 | |||
314 | init_copy_irq_2_pin(old_cfg, cfg, node); | ||
315 | } | ||
316 | 243 | ||
317 | static void free_irq_cfg(struct irq_cfg *cfg) | 244 | cfg = alloc_irq_cfg(at, node); |
318 | { | 245 | if (cfg) |
319 | free_cpumask_var(cfg->domain); | 246 | set_irq_chip_data(at, cfg); |
320 | free_cpumask_var(cfg->old_domain); | 247 | else |
321 | kfree(cfg); | 248 | irq_free_desc(at); |
249 | return cfg; | ||
322 | } | 250 | } |
323 | 251 | ||
324 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | 252 | static int alloc_irq_from(unsigned int from, int node) |
325 | { | 253 | { |
326 | struct irq_cfg *old_cfg, *cfg; | 254 | return irq_alloc_desc_from(from, node); |
327 | |||
328 | old_cfg = old_desc->chip_data; | ||
329 | cfg = desc->chip_data; | ||
330 | |||
331 | if (old_cfg == cfg) | ||
332 | return; | ||
333 | |||
334 | if (old_cfg) { | ||
335 | free_irq_2_pin(old_cfg, cfg); | ||
336 | free_irq_cfg(old_cfg); | ||
337 | old_desc->chip_data = NULL; | ||
338 | } | ||
339 | } | 255 | } |
340 | /* end for move_irq_desc */ | ||
341 | 256 | ||
342 | #else | 257 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) |
343 | struct irq_cfg *irq_cfg(unsigned int irq) | ||
344 | { | 258 | { |
345 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | 259 | free_irq_cfg(at, cfg); |
260 | irq_free_desc(at); | ||
346 | } | 261 | } |
347 | 262 | ||
348 | #endif | ||
349 | |||
350 | struct io_apic { | 263 | struct io_apic { |
351 | unsigned int index; | 264 | unsigned int index; |
352 | unsigned int unused[3]; | 265 | unsigned int unused[3]; |
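The new alloc_irq_cfg() replaces the nested free-and-NULL cleanup with the kernel's goto-unwind idiom: each allocation that succeeds gains a label to fall through on a later failure, keeping cleanup in one place and in exact reverse order. A self-contained sketch of the idiom, with libc allocations standing in for kzalloc_node() and the cpumask helpers:

/*
 * Goto-unwind error handling as adopted by alloc_irq_cfg() above.
 * Types and allocators are simplified stand-ins.
 */
#include <stdlib.h>
#include <stdio.h>

struct cfg {
	void *domain;
	void *old_domain;
};

static struct cfg *alloc_cfg(size_t mask_bytes)
{
	struct cfg *cfg = calloc(1, sizeof(*cfg));

	if (!cfg)
		return NULL;
	cfg->domain = calloc(1, mask_bytes);
	if (!cfg->domain)
		goto out_cfg;
	cfg->old_domain = calloc(1, mask_bytes);
	if (!cfg->old_domain)
		goto out_domain;
	return cfg;

out_domain:
	free(cfg->domain);
out_cfg:
	free(cfg);
	return NULL;
}

int main(void)
{
	struct cfg *cfg = alloc_cfg(128);

	printf("alloc %s\n", cfg ? "succeeded" : "failed");
	if (cfg) {
		free(cfg->old_domain);
		free(cfg->domain);
		free(cfg);
	}
	return 0;
}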
@@ -451,7 +364,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | |||
451 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | 364 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); |
452 | } | 365 | } |
453 | 366 | ||
454 | void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | 367 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) |
455 | { | 368 | { |
456 | unsigned long flags; | 369 | unsigned long flags; |
457 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 370 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
@@ -481,7 +394,7 @@ static void ioapic_mask_entry(int apic, int pin) | |||
481 | * fast in the common case, and fast for shared ISA-space IRQs. | 394 | * fast in the common case, and fast for shared ISA-space IRQs. |
482 | */ | 395 | */ |
483 | static int | 396 | static int |
484 | add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | 397 | __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
485 | { | 398 | { |
486 | struct irq_pin_list **last, *entry; | 399 | struct irq_pin_list **last, *entry; |
487 | 400 | ||
@@ -493,7 +406,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | |||
493 | last = &entry->next; | 406 | last = &entry->next; |
494 | } | 407 | } |
495 | 408 | ||
496 | entry = get_one_free_irq_2_pin(node); | 409 | entry = alloc_irq_pin_list(node); |
497 | if (!entry) { | 410 | if (!entry) { |
498 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", | 411 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", |
499 | node, apic, pin); | 412 | node, apic, pin); |
@@ -508,7 +421,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | |||
508 | 421 | ||
509 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 422 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
510 | { | 423 | { |
511 | if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) | 424 | if (__add_pin_to_irq_node(cfg, node, apic, pin)) |
512 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); | 425 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); |
513 | } | 426 | } |
514 | 427 | ||
@@ -571,11 +484,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) | |||
571 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | 484 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); |
572 | } | 485 | } |
573 | 486 | ||
574 | static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | ||
575 | { | ||
576 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); | ||
577 | } | ||
578 | |||
579 | static void io_apic_sync(struct irq_pin_list *entry) | 487 | static void io_apic_sync(struct irq_pin_list *entry) |
580 | { | 488 | { |
581 | /* | 489 | /* |
@@ -587,44 +495,37 @@ static void io_apic_sync(struct irq_pin_list *entry) | |||
587 | readl(&io_apic->data); | 495 | readl(&io_apic->data); |
588 | } | 496 | } |
589 | 497 | ||
590 | static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | 498 | static void mask_ioapic(struct irq_cfg *cfg) |
591 | { | 499 | { |
500 | unsigned long flags; | ||
501 | |||
502 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
592 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 503 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
504 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
593 | } | 505 | } |
594 | 506 | ||
595 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 507 | static void mask_ioapic_irq(struct irq_data *data) |
596 | { | 508 | { |
597 | struct irq_cfg *cfg = desc->chip_data; | 509 | mask_ioapic(data->chip_data); |
598 | unsigned long flags; | 510 | } |
599 | |||
600 | BUG_ON(!cfg); | ||
601 | 511 | ||
602 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 512 | static void __unmask_ioapic(struct irq_cfg *cfg) |
603 | __mask_IO_APIC_irq(cfg); | 513 | { |
604 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 514 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); |
605 | } | 515 | } |
606 | 516 | ||
607 | static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) | 517 | static void unmask_ioapic(struct irq_cfg *cfg) |
608 | { | 518 | { |
609 | struct irq_cfg *cfg = desc->chip_data; | ||
610 | unsigned long flags; | 519 | unsigned long flags; |
611 | 520 | ||
612 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 521 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
613 | __unmask_IO_APIC_irq(cfg); | 522 | __unmask_ioapic(cfg); |
614 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 523 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
615 | } | 524 | } |
616 | 525 | ||
617 | static void mask_IO_APIC_irq(unsigned int irq) | 526 | static void unmask_ioapic_irq(struct irq_data *data) |
618 | { | 527 | { |
619 | struct irq_desc *desc = irq_to_desc(irq); | 528 | unmask_ioapic(data->chip_data); |
620 | |||
621 | mask_IO_APIC_irq_desc(desc); | ||
622 | } | ||
623 | static void unmask_IO_APIC_irq(unsigned int irq) | ||
624 | { | ||
625 | struct irq_desc *desc = irq_to_desc(irq); | ||
626 | |||
627 | unmask_IO_APIC_irq_desc(desc); | ||
628 | } | 529 | } |
629 | 530 | ||
630 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | 531 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) |
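The mask/unmask rework above settles on the usual kernel locking split: __unmask_ioapic() assumes ioapic_lock is already held, while unmask_ioapic() acquires it, so callers that take the lock themselves (startup_ioapic_irq() later in the file) can use the bare helper. A sketch of that convention, with a pthread mutex standing in for raw_spin_lock_irqsave():

/*
 * The __foo()/foo() lock-split convention: __foo() requires the lock
 * to be held, foo() acquires it.  Simplified userspace rendering.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ioapic_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int redir_masked = 1;

/* caller must hold ioapic_lock */
static void __unmask(void)
{
	redir_masked = 0;
}

static void unmask(void)
{
	pthread_mutex_lock(&ioapic_lock);
	__unmask();
	pthread_mutex_unlock(&ioapic_lock);
}

/* a path that already holds the lock calls the bare helper */
static int startup(void)
{
	int was_masked;

	pthread_mutex_lock(&ioapic_lock);
	was_masked = redir_masked;
	__unmask();
	pthread_mutex_unlock(&ioapic_lock);
	return was_masked;
}

int main(void)
{
	printf("was_masked=%d\n", startup());
	unmask();                 /* idempotent, takes the lock itself */
	printf("masked=%u\n", redir_masked);
	return 0;
}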
@@ -694,14 +595,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) | |||
694 | struct IO_APIC_route_entry **ioapic_entries; | 595 | struct IO_APIC_route_entry **ioapic_entries; |
695 | 596 | ||
696 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, | 597 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, |
697 | GFP_ATOMIC); | 598 | GFP_KERNEL); |
698 | if (!ioapic_entries) | 599 | if (!ioapic_entries) |
699 | return 0; | 600 | return 0; |
700 | 601 | ||
701 | for (apic = 0; apic < nr_ioapics; apic++) { | 602 | for (apic = 0; apic < nr_ioapics; apic++) { |
702 | ioapic_entries[apic] = | 603 | ioapic_entries[apic] = |
703 | kzalloc(sizeof(struct IO_APIC_route_entry) * | 604 | kzalloc(sizeof(struct IO_APIC_route_entry) * |
704 | nr_ioapic_registers[apic], GFP_ATOMIC); | 605 | nr_ioapic_registers[apic], GFP_KERNEL); |
705 | if (!ioapic_entries[apic]) | 606 | if (!ioapic_entries[apic]) |
706 | goto nomem; | 607 | goto nomem; |
707 | } | 608 | } |
@@ -1259,7 +1160,6 @@ void __setup_vector_irq(int cpu) | |||
1259 | /* Initialize vector_irq on a new cpu */ | 1160 | /* Initialize vector_irq on a new cpu */ |
1260 | int irq, vector; | 1161 | int irq, vector; |
1261 | struct irq_cfg *cfg; | 1162 | struct irq_cfg *cfg; |
1262 | struct irq_desc *desc; | ||
1263 | 1163 | ||
1264 | /* | 1164 | /* |
1265 | * vector_lock will make sure that we don't run into irq vector | 1165 | * vector_lock will make sure that we don't run into irq vector |
@@ -1268,9 +1168,10 @@ void __setup_vector_irq(int cpu) | |||
1268 | */ | 1168 | */ |
1269 | raw_spin_lock(&vector_lock); | 1169 | raw_spin_lock(&vector_lock); |
1270 | /* Mark the inuse vectors */ | 1170 | /* Mark the inuse vectors */ |
1271 | for_each_irq_desc(irq, desc) { | 1171 | for_each_active_irq(irq) { |
1272 | cfg = desc->chip_data; | 1172 | cfg = get_irq_chip_data(irq); |
1273 | 1173 | if (!cfg) | |
1174 | continue; | ||
1274 | /* | 1175 | /* |
1275 | * If it is a legacy IRQ handled by the legacy PIC, this cpu | 1176 | * If it is a legacy IRQ handled by the legacy PIC, this cpu |
1276 | * will be part of the irq_cfg's domain. | 1177 | * will be part of the irq_cfg's domain. |
@@ -1327,17 +1228,17 @@ static inline int IO_APIC_irq_trigger(int irq) | |||
1327 | } | 1228 | } |
1328 | #endif | 1229 | #endif |
1329 | 1230 | ||
1330 | static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) | 1231 | static void ioapic_register_intr(unsigned int irq, unsigned long trigger) |
1331 | { | 1232 | { |
1332 | 1233 | ||
1333 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1234 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
1334 | trigger == IOAPIC_LEVEL) | 1235 | trigger == IOAPIC_LEVEL) |
1335 | desc->status |= IRQ_LEVEL; | 1236 | irq_set_status_flags(irq, IRQ_LEVEL); |
1336 | else | 1237 | else |
1337 | desc->status &= ~IRQ_LEVEL; | 1238 | irq_clear_status_flags(irq, IRQ_LEVEL); |
1338 | 1239 | ||
1339 | if (irq_remapped(irq)) { | 1240 | if (irq_remapped(get_irq_chip_data(irq))) { |
1340 | desc->status |= IRQ_MOVE_PCNTXT; | 1241 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
1341 | if (trigger) | 1242 | if (trigger) |
1342 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1243 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, |
1343 | handle_fasteoi_irq, | 1244 | handle_fasteoi_irq, |
@@ -1358,10 +1259,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t | |||
1358 | handle_edge_irq, "edge"); | 1259 | handle_edge_irq, "edge"); |
1359 | } | 1260 | } |
1360 | 1261 | ||
1361 | int setup_ioapic_entry(int apic_id, int irq, | 1262 | static int setup_ioapic_entry(int apic_id, int irq, |
1362 | struct IO_APIC_route_entry *entry, | 1263 | struct IO_APIC_route_entry *entry, |
1363 | unsigned int destination, int trigger, | 1264 | unsigned int destination, int trigger, |
1364 | int polarity, int vector, int pin) | 1265 | int polarity, int vector, int pin) |
1365 | { | 1266 | { |
1366 | /* | 1267 | /* |
1367 | * add it to the IO-APIC irq-routing table: | 1268 | * add it to the IO-APIC irq-routing table: |
@@ -1382,21 +1283,7 @@ int setup_ioapic_entry(int apic_id, int irq, | |||
1382 | if (index < 0) | 1283 | if (index < 0) |
1383 | panic("Failed to allocate IRTE for ioapic %d\n", apic_id); | 1284 | panic("Failed to allocate IRTE for ioapic %d\n", apic_id); |
1384 | 1285 | ||
1385 | memset(&irte, 0, sizeof(irte)); | 1286 | prepare_irte(&irte, vector, destination); |
1386 | |||
1387 | irte.present = 1; | ||
1388 | irte.dst_mode = apic->irq_dest_mode; | ||
1389 | /* | ||
1390 | * Trigger mode in the IRTE will always be edge, and the | ||
1391 | * actual level or edge trigger will be setup in the IO-APIC | ||
1392 | * RTE. This will help simplify level triggered irq migration. | ||
1393 | * For more details, see the comments above explaining IO-APIC | ||
1394 | * irq migration in the presence of interrupt-remapping. | ||
1395 | */ | ||
1396 | irte.trigger_mode = 0; | ||
1397 | irte.dlvry_mode = apic->irq_delivery_mode; | ||
1398 | irte.vector = vector; | ||
1399 | irte.dest_id = IRTE_DEST(destination); | ||
1400 | 1287 | ||
1401 | /* Set source-id of interrupt request */ | 1288 | /* Set source-id of interrupt request */ |
1402 | set_ioapic_sid(&irte, apic_id); | 1289 | set_ioapic_sid(&irte, apic_id); |
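The deleted lines spell out exactly which IRTE fields the new prepare_irte() helper must fill: present, destination mode, trigger mode forced to edge, delivery mode, vector, and destination id. Below is a reconstruction of such a helper from those assignments; the bitfield widths and the dest-id encoding are illustrative stand-ins, not the real VT-d layout.

/*
 * Hypothetical reconstruction of prepare_irte(), derived from the
 * field assignments the hunk deletes.  Field widths are guesses.
 */
#include <stdio.h>
#include <string.h>

struct irte {
	unsigned int present:1;
	unsigned int dst_mode:1;
	unsigned int trigger_mode:1;   /* always edge in the IRTE */
	unsigned int dlvry_mode:3;
	unsigned int vector:8;
	unsigned int dest_id;
};

static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = 1;        /* stand-in for apic->irq_dest_mode */
	/*
	 * Trigger mode in the IRTE is always edge; the real level/edge
	 * setting lives in the IO-APIC RTE, which simplifies migration
	 * of level-triggered irqs under interrupt remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = 0;      /* stand-in for apic->irq_delivery_mode */
	irte->vector = vector;
	irte->dest_id = dest;      /* the kernel wraps this in IRTE_DEST() */
}

int main(void)
{
	struct irte irte;

	prepare_irte(&irte, 0x31, 4);
	printf("present=%u vector=0x%x dest=%u\n",
	       irte.present, irte.vector, irte.dest_id);
	return 0;
}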
@@ -1431,18 +1318,14 @@ int setup_ioapic_entry(int apic_id, int irq, | |||
1431 | return 0; | 1318 | return 0; |
1432 | } | 1319 | } |
1433 | 1320 | ||
1434 | static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, | 1321 | static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, |
1435 | int trigger, int polarity) | 1322 | struct irq_cfg *cfg, int trigger, int polarity) |
1436 | { | 1323 | { |
1437 | struct irq_cfg *cfg; | ||
1438 | struct IO_APIC_route_entry entry; | 1324 | struct IO_APIC_route_entry entry; |
1439 | unsigned int dest; | 1325 | unsigned int dest; |
1440 | 1326 | ||
1441 | if (!IO_APIC_IRQ(irq)) | 1327 | if (!IO_APIC_IRQ(irq)) |
1442 | return; | 1328 | return; |
1443 | |||
1444 | cfg = desc->chip_data; | ||
1445 | |||
1446 | /* | 1329 | /* |
1447 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy | 1330 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy |
1448 | * controllers like 8259. Now that IO-APIC can handle this irq, update | 1331 | * controllers like 8259. Now that IO-APIC can handle this irq, update |
@@ -1471,9 +1354,9 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
1471 | return; | 1354 | return; |
1472 | } | 1355 | } |
1473 | 1356 | ||
1474 | ioapic_register_intr(irq, desc, trigger); | 1357 | ioapic_register_intr(irq, trigger); |
1475 | if (irq < legacy_pic->nr_legacy_irqs) | 1358 | if (irq < legacy_pic->nr_legacy_irqs) |
1476 | legacy_pic->chip->mask(irq); | 1359 | legacy_pic->mask(irq); |
1477 | 1360 | ||
1478 | ioapic_write_entry(apic_id, pin, entry); | 1361 | ioapic_write_entry(apic_id, pin, entry); |
1479 | } | 1362 | } |
@@ -1484,11 +1367,9 @@ static struct { | |||
1484 | 1367 | ||
1485 | static void __init setup_IO_APIC_irqs(void) | 1368 | static void __init setup_IO_APIC_irqs(void) |
1486 | { | 1369 | { |
1487 | int apic_id, pin, idx, irq; | 1370 | int apic_id, pin, idx, irq, notcon = 0; |
1488 | int notcon = 0; | 1371 | int node = cpu_to_node(0); |
1489 | struct irq_desc *desc; | ||
1490 | struct irq_cfg *cfg; | 1372 | struct irq_cfg *cfg; |
1491 | int node = cpu_to_node(boot_cpu_id); | ||
1492 | 1373 | ||
1493 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1374 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
1494 | 1375 | ||
@@ -1525,19 +1406,17 @@ static void __init setup_IO_APIC_irqs(void) | |||
1525 | apic->multi_timer_check(apic_id, irq)) | 1406 | apic->multi_timer_check(apic_id, irq)) |
1526 | continue; | 1407 | continue; |
1527 | 1408 | ||
1528 | desc = irq_to_desc_alloc_node(irq, node); | 1409 | cfg = alloc_irq_and_cfg_at(irq, node); |
1529 | if (!desc) { | 1410 | if (!cfg) |
1530 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1531 | continue; | 1411 | continue; |
1532 | } | 1412 | |
1533 | cfg = desc->chip_data; | ||
1534 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1413 | add_pin_to_irq_node(cfg, node, apic_id, pin); |
1535 | /* | 1414 | /* |
1536 | * don't mark it in pin_programmed, so later acpi could | 1415 | * don't mark it in pin_programmed, so later acpi could |
1537 | * set it correctly when irq < 16 | 1416 | * set it correctly when irq < 16 |
1538 | */ | 1417 | */ |
1539 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1418 | setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), |
1540 | irq_trigger(idx), irq_polarity(idx)); | 1419 | irq_polarity(idx)); |
1541 | } | 1420 | } |
1542 | 1421 | ||
1543 | if (notcon) | 1422 | if (notcon) |
@@ -1552,9 +1431,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
1552 | */ | 1431 | */ |
1553 | void setup_IO_APIC_irq_extra(u32 gsi) | 1432 | void setup_IO_APIC_irq_extra(u32 gsi) |
1554 | { | 1433 | { |
1555 | int apic_id = 0, pin, idx, irq; | 1434 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); |
1556 | int node = cpu_to_node(boot_cpu_id); | ||
1557 | struct irq_desc *desc; | ||
1558 | struct irq_cfg *cfg; | 1435 | struct irq_cfg *cfg; |
1559 | 1436 | ||
1560 | /* | 1437 | /* |
@@ -1570,18 +1447,15 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
1570 | return; | 1447 | return; |
1571 | 1448 | ||
1572 | irq = pin_2_irq(idx, apic_id, pin); | 1449 | irq = pin_2_irq(idx, apic_id, pin); |
1573 | #ifdef CONFIG_SPARSE_IRQ | 1450 | |
1574 | desc = irq_to_desc(irq); | 1451 | /* Only handle the non-legacy irqs on secondary ioapics */ |
1575 | if (desc) | 1452 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) |
1576 | return; | 1453 | return; |
1577 | #endif | 1454 | |
1578 | desc = irq_to_desc_alloc_node(irq, node); | 1455 | cfg = alloc_irq_and_cfg_at(irq, node); |
1579 | if (!desc) { | 1456 | if (!cfg) |
1580 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1581 | return; | 1457 | return; |
1582 | } | ||
1583 | 1458 | ||
1584 | cfg = desc->chip_data; | ||
1585 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1459 | add_pin_to_irq_node(cfg, node, apic_id, pin); |
1586 | 1460 | ||
1587 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | 1461 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { |
@@ -1591,7 +1465,7 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
1591 | } | 1465 | } |
1592 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | 1466 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); |
1593 | 1467 | ||
1594 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1468 | setup_ioapic_irq(apic_id, pin, irq, cfg, |
1595 | irq_trigger(idx), irq_polarity(idx)); | 1469 | irq_trigger(idx), irq_polarity(idx)); |
1596 | } | 1470 | } |
1597 | 1471 | ||
@@ -1642,7 +1516,6 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1642 | union IO_APIC_reg_03 reg_03; | 1516 | union IO_APIC_reg_03 reg_03; |
1643 | unsigned long flags; | 1517 | unsigned long flags; |
1644 | struct irq_cfg *cfg; | 1518 | struct irq_cfg *cfg; |
1645 | struct irq_desc *desc; | ||
1646 | unsigned int irq; | 1519 | unsigned int irq; |
1647 | 1520 | ||
1648 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); | 1521 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); |
@@ -1729,10 +1602,10 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1729 | } | 1602 | } |
1730 | } | 1603 | } |
1731 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); | 1604 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); |
1732 | for_each_irq_desc(irq, desc) { | 1605 | for_each_active_irq(irq) { |
1733 | struct irq_pin_list *entry; | 1606 | struct irq_pin_list *entry; |
1734 | 1607 | ||
1735 | cfg = desc->chip_data; | 1608 | cfg = get_irq_chip_data(irq); |
1736 | if (!cfg) | 1609 | if (!cfg) |
1737 | continue; | 1610 | continue; |
1738 | entry = cfg->irq_2_pin; | 1611 | entry = cfg->irq_2_pin; |
@@ -2239,29 +2112,26 @@ static int __init timer_irq_works(void) | |||
2239 | * an edge even if it isn't on the 8259A... | 2112 | * an edge even if it isn't on the 8259A... |
2240 | */ | 2113 | */ |
2241 | 2114 | ||
2242 | static unsigned int startup_ioapic_irq(unsigned int irq) | 2115 | static unsigned int startup_ioapic_irq(struct irq_data *data) |
2243 | { | 2116 | { |
2244 | int was_pending = 0; | 2117 | int was_pending = 0, irq = data->irq; |
2245 | unsigned long flags; | 2118 | unsigned long flags; |
2246 | struct irq_cfg *cfg; | ||
2247 | 2119 | ||
2248 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2120 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2249 | if (irq < legacy_pic->nr_legacy_irqs) { | 2121 | if (irq < legacy_pic->nr_legacy_irqs) { |
2250 | legacy_pic->chip->mask(irq); | 2122 | legacy_pic->mask(irq); |
2251 | if (legacy_pic->irq_pending(irq)) | 2123 | if (legacy_pic->irq_pending(irq)) |
2252 | was_pending = 1; | 2124 | was_pending = 1; |
2253 | } | 2125 | } |
2254 | cfg = irq_cfg(irq); | 2126 | __unmask_ioapic(data->chip_data); |
2255 | __unmask_IO_APIC_irq(cfg); | ||
2256 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2127 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2257 | 2128 | ||
2258 | return was_pending; | 2129 | return was_pending; |
2259 | } | 2130 | } |
2260 | 2131 | ||
2261 | static int ioapic_retrigger_irq(unsigned int irq) | 2132 | static int ioapic_retrigger_irq(struct irq_data *data) |
2262 | { | 2133 | { |
2263 | 2134 | struct irq_cfg *cfg = data->chip_data; | |
2264 | struct irq_cfg *cfg = irq_cfg(irq); | ||
2265 | unsigned long flags; | 2135 | unsigned long flags; |
2266 | 2136 | ||
2267 | raw_spin_lock_irqsave(&vector_lock, flags); | 2137 | raw_spin_lock_irqsave(&vector_lock, flags); |
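This hunk shows the change running through the whole file: chip callbacks now receive a struct irq_data that already carries the irq number and the chip_data pointer, so handlers like startup_ioapic_irq() and ioapic_retrigger_irq() drop their irq_cfg() lookups. A sketch of the signature change with simplified types:

/*
 * Old style passed a bare irq number and each handler did its own
 * lookup; new style delivers everything in one struct irq_data.
 * Types here are simplified stand-ins for the genirq ones.
 */
#include <stdio.h>

struct irq_data {
	unsigned int irq;
	void *chip_data;        /* struct irq_cfg * in the real code */
};

struct irq_cfg {
	unsigned char vector;
};

static int retrigger(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;  /* no lookup needed */

	printf("retrigger irq %u via vector 0x%x\n", data->irq, cfg->vector);
	return 1;
}

int main(void)
{
	struct irq_cfg cfg = { .vector = 0x41 };
	struct irq_data data = { .irq = 9, .chip_data = &cfg };

	return retrigger(&data) ? 0 : 1;
}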
@@ -2312,7 +2182,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2312 | * With interrupt-remapping, destination information comes | 2182 | * With interrupt-remapping, destination information comes |
2313 | * from interrupt-remapping table entry. | 2183 | * from interrupt-remapping table entry. |
2314 | */ | 2184 | */ |
2315 | if (!irq_remapped(irq)) | 2185 | if (!irq_remapped(cfg)) |
2316 | io_apic_write(apic, 0x11 + pin*2, dest); | 2186 | io_apic_write(apic, 0x11 + pin*2, dest); |
2317 | reg = io_apic_read(apic, 0x10 + pin*2); | 2187 | reg = io_apic_read(apic, 0x10 + pin*2); |
2318 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2188 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
@@ -2322,65 +2192,46 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2322 | } | 2192 | } |
2323 | 2193 | ||
2324 | /* | 2194 | /* |
2325 | * Either sets desc->affinity to a valid value, and returns | 2195 | * Either sets data->affinity to a valid value, and returns |
2326 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and | 2196 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and |
2327 | * leaves desc->affinity untouched. | 2197 | * leaves data->affinity untouched. |
2328 | */ | 2198 | */ |
2329 | unsigned int | 2199 | int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2330 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, | 2200 | unsigned int *dest_id) |
2331 | unsigned int *dest_id) | ||
2332 | { | 2201 | { |
2333 | struct irq_cfg *cfg; | 2202 | struct irq_cfg *cfg = data->chip_data; |
2334 | unsigned int irq; | ||
2335 | 2203 | ||
2336 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2204 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2337 | return -1; | 2205 | return -1; |
2338 | 2206 | ||
2339 | irq = desc->irq; | 2207 | if (assign_irq_vector(data->irq, data->chip_data, mask)) |
2340 | cfg = desc->chip_data; | ||
2341 | if (assign_irq_vector(irq, cfg, mask)) | ||
2342 | return -1; | 2208 | return -1; |
2343 | 2209 | ||
2344 | cpumask_copy(desc->affinity, mask); | 2210 | cpumask_copy(data->affinity, mask); |
2345 | 2211 | ||
2346 | *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); | 2212 | *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); |
2347 | return 0; | 2213 | return 0; |
2348 | } | 2214 | } |
2349 | 2215 | ||
2350 | static int | 2216 | static int |
2351 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2217 | ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2218 | bool force) | ||
2352 | { | 2219 | { |
2353 | struct irq_cfg *cfg; | 2220 | unsigned int dest, irq = data->irq; |
2354 | unsigned long flags; | 2221 | unsigned long flags; |
2355 | unsigned int dest; | 2222 | int ret; |
2356 | unsigned int irq; | ||
2357 | int ret = -1; | ||
2358 | |||
2359 | irq = desc->irq; | ||
2360 | cfg = desc->chip_data; | ||
2361 | 2223 | ||
2362 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2224 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2363 | ret = set_desc_affinity(desc, mask, &dest); | 2225 | ret = __ioapic_set_affinity(data, mask, &dest); |
2364 | if (!ret) { | 2226 | if (!ret) { |
2365 | /* Only the high 8 bits are valid. */ | 2227 | /* Only the high 8 bits are valid. */ |
2366 | dest = SET_APIC_LOGICAL_ID(dest); | 2228 | dest = SET_APIC_LOGICAL_ID(dest); |
2367 | __target_IO_APIC_irq(irq, dest, cfg); | 2229 | __target_IO_APIC_irq(irq, dest, data->chip_data); |
2368 | } | 2230 | } |
2369 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2231 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2370 | |||
2371 | return ret; | 2232 | return ret; |
2372 | } | 2233 | } |
2373 | 2234 | ||
2374 | static int | ||
2375 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
2376 | { | ||
2377 | struct irq_desc *desc; | ||
2378 | |||
2379 | desc = irq_to_desc(irq); | ||
2380 | |||
2381 | return set_ioapic_affinity_irq_desc(desc, mask); | ||
2382 | } | ||
2383 | |||
2384 | #ifdef CONFIG_INTR_REMAP | 2235 | #ifdef CONFIG_INTR_REMAP |
2385 | 2236 | ||
2386 | /* | 2237 | /* |
@@ -2395,24 +2246,21 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | |||
2395 | * the interrupt-remapping table entry. | 2246 | * the interrupt-remapping table entry. |
2396 | */ | 2247 | */ |
2397 | static int | 2248 | static int |
2398 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2249 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2250 | bool force) | ||
2399 | { | 2251 | { |
2400 | struct irq_cfg *cfg; | 2252 | struct irq_cfg *cfg = data->chip_data; |
2253 | unsigned int dest, irq = data->irq; | ||
2401 | struct irte irte; | 2254 | struct irte irte; |
2402 | unsigned int dest; | ||
2403 | unsigned int irq; | ||
2404 | int ret = -1; | ||
2405 | 2255 | ||
2406 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2256 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2407 | return ret; | 2257 | return -EINVAL; |
2408 | 2258 | ||
2409 | irq = desc->irq; | ||
2410 | if (get_irte(irq, &irte)) | 2259 | if (get_irte(irq, &irte)) |
2411 | return ret; | 2260 | return -EBUSY; |
2412 | 2261 | ||
2413 | cfg = desc->chip_data; | ||
2414 | if (assign_irq_vector(irq, cfg, mask)) | 2262 | if (assign_irq_vector(irq, cfg, mask)) |
2415 | return ret; | 2263 | return -EBUSY; |
2416 | 2264 | ||
2417 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); | 2265 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); |
2418 | 2266 | ||
@@ -2427,29 +2275,14 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | |||
2427 | if (cfg->move_in_progress) | 2275 | if (cfg->move_in_progress) |
2428 | send_cleanup_vector(cfg); | 2276 | send_cleanup_vector(cfg); |
2429 | 2277 | ||
2430 | cpumask_copy(desc->affinity, mask); | 2278 | cpumask_copy(data->affinity, mask); |
2431 | |||
2432 | return 0; | 2279 | return 0; |
2433 | } | 2280 | } |
2434 | 2281 | ||
2435 | /* | ||
2436 | * Migrates the IRQ destination in the process context. | ||
2437 | */ | ||
2438 | static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | ||
2439 | const struct cpumask *mask) | ||
2440 | { | ||
2441 | return migrate_ioapic_irq_desc(desc, mask); | ||
2442 | } | ||
2443 | static int set_ir_ioapic_affinity_irq(unsigned int irq, | ||
2444 | const struct cpumask *mask) | ||
2445 | { | ||
2446 | struct irq_desc *desc = irq_to_desc(irq); | ||
2447 | |||
2448 | return set_ir_ioapic_affinity_irq_desc(desc, mask); | ||
2449 | } | ||
2450 | #else | 2282 | #else |
2451 | static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | 2283 | static inline int |
2452 | const struct cpumask *mask) | 2284 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2285 | bool force) | ||
2453 | { | 2286 | { |
2454 | return 0; | 2287 | return 0; |
2455 | } | 2288 | } |
@@ -2511,10 +2344,8 @@ unlock: | |||
2511 | irq_exit(); | 2344 | irq_exit(); |
2512 | } | 2345 | } |
2513 | 2346 | ||
2514 | static void __irq_complete_move(struct irq_desc **descp, unsigned vector) | 2347 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) |
2515 | { | 2348 | { |
2516 | struct irq_desc *desc = *descp; | ||
2517 | struct irq_cfg *cfg = desc->chip_data; | ||
2518 | unsigned me; | 2349 | unsigned me; |
2519 | 2350 | ||
2520 | if (likely(!cfg->move_in_progress)) | 2351 | if (likely(!cfg->move_in_progress)) |
@@ -2526,31 +2357,28 @@ static void __irq_complete_move(struct irq_desc **descp, unsigned vector) | |||
2526 | send_cleanup_vector(cfg); | 2357 | send_cleanup_vector(cfg); |
2527 | } | 2358 | } |
2528 | 2359 | ||
2529 | static void irq_complete_move(struct irq_desc **descp) | 2360 | static void irq_complete_move(struct irq_cfg *cfg) |
2530 | { | 2361 | { |
2531 | __irq_complete_move(descp, ~get_irq_regs()->orig_ax); | 2362 | __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); |
2532 | } | 2363 | } |
2533 | 2364 | ||
2534 | void irq_force_complete_move(int irq) | 2365 | void irq_force_complete_move(int irq) |
2535 | { | 2366 | { |
2536 | struct irq_desc *desc = irq_to_desc(irq); | 2367 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
2537 | struct irq_cfg *cfg = desc->chip_data; | ||
2538 | 2368 | ||
2539 | if (!cfg) | 2369 | if (!cfg) |
2540 | return; | 2370 | return; |
2541 | 2371 | ||
2542 | __irq_complete_move(&desc, cfg->vector); | 2372 | __irq_complete_move(cfg, cfg->vector); |
2543 | } | 2373 | } |
2544 | #else | 2374 | #else |
2545 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2375 | static inline void irq_complete_move(struct irq_cfg *cfg) { } |
2546 | #endif | 2376 | #endif |
2547 | 2377 | ||
2548 | static void ack_apic_edge(unsigned int irq) | 2378 | static void ack_apic_edge(struct irq_data *data) |
2549 | { | 2379 | { |
2550 | struct irq_desc *desc = irq_to_desc(irq); | 2380 | irq_complete_move(data->chip_data); |
2551 | 2381 | move_native_irq(data->irq); | |
2552 | irq_complete_move(&desc); | ||
2553 | move_native_irq(irq); | ||
2554 | ack_APIC_irq(); | 2382 | ack_APIC_irq(); |
2555 | } | 2383 | } |
2556 | 2384 | ||
@@ -2572,10 +2400,12 @@ atomic_t irq_mis_count; | |||
2572 | * Otherwise, we simulate the EOI message manually by changing the trigger | 2400 | * Otherwise, we simulate the EOI message manually by changing the trigger |
2573 | * mode to edge and then back to level, with RTE being masked during this. | 2401 | * mode to edge and then back to level, with RTE being masked during this. |
2574 | */ | 2402 | */ |
2575 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2403 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
2576 | { | 2404 | { |
2577 | struct irq_pin_list *entry; | 2405 | struct irq_pin_list *entry; |
2406 | unsigned long flags; | ||
2578 | 2407 | ||
2408 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2579 | for_each_irq_pin(entry, cfg->irq_2_pin) { | 2409 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
2580 | if (mp_ioapics[entry->apic].apicver >= 0x20) { | 2410 | if (mp_ioapics[entry->apic].apicver >= 0x20) { |
2581 | /* | 2411 | /* |
@@ -2584,7 +2414,7 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | |||
2584 | * intr-remapping table entry. Hence for the io-apic | 2414 | * intr-remapping table entry. Hence for the io-apic |
2585 | * EOI we use the pin number. | 2415 | * EOI we use the pin number. |
2586 | */ | 2416 | */ |
2587 | if (irq_remapped(irq)) | 2417 | if (irq_remapped(cfg)) |
2588 | io_apic_eoi(entry->apic, entry->pin); | 2418 | io_apic_eoi(entry->apic, entry->pin); |
2589 | else | 2419 | else |
2590 | io_apic_eoi(entry->apic, cfg->vector); | 2420 | io_apic_eoi(entry->apic, cfg->vector); |
@@ -2593,36 +2423,22 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | |||
2593 | __unmask_and_level_IO_APIC_irq(entry); | 2423 | __unmask_and_level_IO_APIC_irq(entry); |
2594 | } | 2424 | } |
2595 | } | 2425 | } |
2596 | } | ||
2597 | |||
2598 | static void eoi_ioapic_irq(struct irq_desc *desc) | ||
2599 | { | ||
2600 | struct irq_cfg *cfg; | ||
2601 | unsigned long flags; | ||
2602 | unsigned int irq; | ||
2603 | |||
2604 | irq = desc->irq; | ||
2605 | cfg = desc->chip_data; | ||
2606 | |||
2607 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2608 | __eoi_ioapic_irq(irq, cfg); | ||
2609 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2426 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2610 | } | 2427 | } |
2611 | 2428 | ||
2612 | static void ack_apic_level(unsigned int irq) | 2429 | static void ack_apic_level(struct irq_data *data) |
2613 | { | 2430 | { |
2431 | struct irq_cfg *cfg = data->chip_data; | ||
2432 | int i, do_unmask_irq = 0, irq = data->irq; | ||
2614 | struct irq_desc *desc = irq_to_desc(irq); | 2433 | struct irq_desc *desc = irq_to_desc(irq); |
2615 | unsigned long v; | 2434 | unsigned long v; |
2616 | int i; | ||
2617 | struct irq_cfg *cfg; | ||
2618 | int do_unmask_irq = 0; | ||
2619 | 2435 | ||
2620 | irq_complete_move(&desc); | 2436 | irq_complete_move(cfg); |
2621 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2437 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
2622 | /* If we are moving the irq we need to mask it */ | 2438 | /* If we are moving the irq we need to mask it */ |
2623 | if (unlikely(desc->status & IRQ_MOVE_PENDING)) { | 2439 | if (unlikely(desc->status & IRQ_MOVE_PENDING)) { |
2624 | do_unmask_irq = 1; | 2440 | do_unmask_irq = 1; |
2625 | mask_IO_APIC_irq_desc(desc); | 2441 | mask_ioapic(cfg); |
2626 | } | 2442 | } |
2627 | #endif | 2443 | #endif |
2628 | 2444 | ||
@@ -2658,7 +2474,6 @@ static void ack_apic_level(unsigned int irq) | |||
2658 | * we use the above logic (mask+edge followed by unmask+level) from | 2474 | * we use the above logic (mask+edge followed by unmask+level) from |
2659 | * Manfred Spraul to clear the remote IRR. | 2475 | * Manfred Spraul to clear the remote IRR. |
2660 | */ | 2476 | */ |
2661 | cfg = desc->chip_data; | ||
2662 | i = cfg->vector; | 2477 | i = cfg->vector; |
2663 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | 2478 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); |
2664 | 2479 | ||
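The surrounding comments describe the Manfred Spraul workaround: when the local APIC took a level interrupt as edge, the IO-APIC's remote IRR can stay set, and flipping the masked RTE to edge and back to level clears it. The following illustrates that write sequence against a fake RTE word; the bit positions (mask 16, trigger 15, remote IRR 14) follow the RTE layout, but the hardware side effect is simulated explicitly here.

/*
 * Mask+edge followed by unmask+level, applied to a fake RTE.  Real
 * hardware clears Remote IRR as a side effect of the edge flip; the
 * simulation makes that side effect explicit.
 */
#include <stdio.h>

#define RTE_MASKED   (1u << 16)
#define RTE_LEVEL    (1u << 15)
#define RTE_RIRR     (1u << 14)   /* Remote IRR, read-only on hardware */

static unsigned int rte = RTE_LEVEL | RTE_RIRR;   /* stuck IRR */

static void write_rte(unsigned int set, unsigned int clear)
{
	rte = (rte | set) & ~clear;
	if (!(rte & RTE_LEVEL))
		rte &= ~RTE_RIRR;   /* edge mode drops Remote IRR */
}

int main(void)
{
	write_rte(RTE_MASKED, RTE_LEVEL);  /* mask + edge */
	write_rte(RTE_LEVEL, RTE_MASKED);  /* unmask + level */
	printf("rte=0x%x, irr %s\n", rte,
	       rte & RTE_RIRR ? "still stuck" : "cleared");
	return 0;
}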
@@ -2678,7 +2493,7 @@ static void ack_apic_level(unsigned int irq) | |||
2678 | if (!(v & (1 << (i & 0x1f)))) { | 2493 | if (!(v & (1 << (i & 0x1f)))) { |
2679 | atomic_inc(&irq_mis_count); | 2494 | atomic_inc(&irq_mis_count); |
2680 | 2495 | ||
2681 | eoi_ioapic_irq(desc); | 2496 | eoi_ioapic_irq(irq, cfg); |
2682 | } | 2497 | } |
2683 | 2498 | ||
2684 | /* Now we can move and re-enable the irq */ | 2499 | /* Now we can move and re-enable the irq */ |
@@ -2709,61 +2524,57 @@ static void ack_apic_level(unsigned int irq) | |||
2709 | * accurate and is causing problems then it is a hardware bug | 2524 | * accurate and is causing problems then it is a hardware bug |
2710 | * and you can go talk to the chipset vendor about it. | 2525 | * and you can go talk to the chipset vendor about it. |
2711 | */ | 2526 | */ |
2712 | cfg = desc->chip_data; | ||
2713 | if (!io_apic_level_ack_pending(cfg)) | 2527 | if (!io_apic_level_ack_pending(cfg)) |
2714 | move_masked_irq(irq); | 2528 | move_masked_irq(irq); |
2715 | unmask_IO_APIC_irq_desc(desc); | 2529 | unmask_ioapic(cfg); |
2716 | } | 2530 | } |
2717 | } | 2531 | } |
2718 | 2532 | ||
2719 | #ifdef CONFIG_INTR_REMAP | 2533 | #ifdef CONFIG_INTR_REMAP |
2720 | static void ir_ack_apic_edge(unsigned int irq) | 2534 | static void ir_ack_apic_edge(struct irq_data *data) |
2721 | { | 2535 | { |
2722 | ack_APIC_irq(); | 2536 | ack_APIC_irq(); |
2723 | } | 2537 | } |
2724 | 2538 | ||
2725 | static void ir_ack_apic_level(unsigned int irq) | 2539 | static void ir_ack_apic_level(struct irq_data *data) |
2726 | { | 2540 | { |
2727 | struct irq_desc *desc = irq_to_desc(irq); | ||
2728 | |||
2729 | ack_APIC_irq(); | 2541 | ack_APIC_irq(); |
2730 | eoi_ioapic_irq(desc); | 2542 | eoi_ioapic_irq(data->irq, data->chip_data); |
2731 | } | 2543 | } |
2732 | #endif /* CONFIG_INTR_REMAP */ | 2544 | #endif /* CONFIG_INTR_REMAP */ |
2733 | 2545 | ||
2734 | static struct irq_chip ioapic_chip __read_mostly = { | 2546 | static struct irq_chip ioapic_chip __read_mostly = { |
2735 | .name = "IO-APIC", | 2547 | .name = "IO-APIC", |
2736 | .startup = startup_ioapic_irq, | 2548 | .irq_startup = startup_ioapic_irq, |
2737 | .mask = mask_IO_APIC_irq, | 2549 | .irq_mask = mask_ioapic_irq, |
2738 | .unmask = unmask_IO_APIC_irq, | 2550 | .irq_unmask = unmask_ioapic_irq, |
2739 | .ack = ack_apic_edge, | 2551 | .irq_ack = ack_apic_edge, |
2740 | .eoi = ack_apic_level, | 2552 | .irq_eoi = ack_apic_level, |
2741 | #ifdef CONFIG_SMP | 2553 | #ifdef CONFIG_SMP |
2742 | .set_affinity = set_ioapic_affinity_irq, | 2554 | .irq_set_affinity = ioapic_set_affinity, |
2743 | #endif | 2555 | #endif |
2744 | .retrigger = ioapic_retrigger_irq, | 2556 | .irq_retrigger = ioapic_retrigger_irq, |
2745 | }; | 2557 | }; |
2746 | 2558 | ||
2747 | static struct irq_chip ir_ioapic_chip __read_mostly = { | 2559 | static struct irq_chip ir_ioapic_chip __read_mostly = { |
2748 | .name = "IR-IO-APIC", | 2560 | .name = "IR-IO-APIC", |
2749 | .startup = startup_ioapic_irq, | 2561 | .irq_startup = startup_ioapic_irq, |
2750 | .mask = mask_IO_APIC_irq, | 2562 | .irq_mask = mask_ioapic_irq, |
2751 | .unmask = unmask_IO_APIC_irq, | 2563 | .irq_unmask = unmask_ioapic_irq, |
2752 | #ifdef CONFIG_INTR_REMAP | 2564 | #ifdef CONFIG_INTR_REMAP |
2753 | .ack = ir_ack_apic_edge, | 2565 | .irq_ack = ir_ack_apic_edge, |
2754 | .eoi = ir_ack_apic_level, | 2566 | .irq_eoi = ir_ack_apic_level, |
2755 | #ifdef CONFIG_SMP | 2567 | #ifdef CONFIG_SMP |
2756 | .set_affinity = set_ir_ioapic_affinity_irq, | 2568 | .irq_set_affinity = ir_ioapic_set_affinity, |
2757 | #endif | 2569 | #endif |
2758 | #endif | 2570 | #endif |
2759 | .retrigger = ioapic_retrigger_irq, | 2571 | .irq_retrigger = ioapic_retrigger_irq, |
2760 | }; | 2572 | }; |
2761 | 2573 | ||
2762 | static inline void init_IO_APIC_traps(void) | 2574 | static inline void init_IO_APIC_traps(void) |
2763 | { | 2575 | { |
2764 | int irq; | ||
2765 | struct irq_desc *desc; | ||
2766 | struct irq_cfg *cfg; | 2576 | struct irq_cfg *cfg; |
2577 | unsigned int irq; | ||
2767 | 2578 | ||
2768 | /* | 2579 | /* |
2769 | * NOTE! The local APIC isn't very good at handling | 2580 | * NOTE! The local APIC isn't very good at handling |
@@ -2776,8 +2587,8 @@ static inline void init_IO_APIC_traps(void) | |||
2776 | * Also, we've got to be careful not to trash gate | 2587 | * Also, we've got to be careful not to trash gate |
2777 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2588 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
2778 | */ | 2589 | */ |
2779 | for_each_irq_desc(irq, desc) { | 2590 | for_each_active_irq(irq) { |
2780 | cfg = desc->chip_data; | 2591 | cfg = get_irq_chip_data(irq); |
2781 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | 2592 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { |
2782 | /* | 2593 | /* |
2783 | * Hmm.. We don't have an entry for this, | 2594 | * Hmm.. We don't have an entry for this, |
@@ -2788,7 +2599,7 @@ static inline void init_IO_APIC_traps(void) | |||
2788 | legacy_pic->make_irq(irq); | 2599 | legacy_pic->make_irq(irq); |
2789 | else | 2600 | else |
2790 | /* Strange. Oh, well.. */ | 2601 | /* Strange. Oh, well.. */ |
2791 | desc->chip = &no_irq_chip; | 2602 | set_irq_chip(irq, &no_irq_chip); |
2792 | } | 2603 | } |
2793 | } | 2604 | } |
2794 | } | 2605 | } |
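The irq_chip tables above show the other half of the conversion: every method gains an irq_ prefix and takes struct irq_data. Here is a compact sketch of such a table with simplified types; as in the kernel structs, hooks left out of the designated initializer stay NULL.

/*
 * Sketch of an irq_chip-style ops table after the rename.  Types are
 * simplified stand-ins; unlisted callbacks default to NULL.
 */
#include <stdio.h>

struct irq_data {
	unsigned int irq;
	void *chip_data;
};

struct irq_chip {
	const char *name;
	void (*irq_mask)(struct irq_data *data);
	void (*irq_unmask)(struct irq_data *data);
	void (*irq_ack)(struct irq_data *data);
};

static void my_mask(struct irq_data *data)   { printf("mask %u\n", data->irq); }
static void my_unmask(struct irq_data *data) { printf("unmask %u\n", data->irq); }

static const struct irq_chip demo_chip = {
	.name       = "demo",
	.irq_mask   = my_mask,
	.irq_unmask = my_unmask,
	/* .irq_ack left NULL: not every chip implements every hook */
};

int main(void)
{
	struct irq_data d = { .irq = 5, .chip_data = NULL };

	demo_chip.irq_mask(&d);
	demo_chip.irq_unmask(&d);
	return 0;
}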
@@ -2797,7 +2608,7 @@ static inline void init_IO_APIC_traps(void) | |||
2797 | * The local APIC irq-chip implementation: | 2608 | * The local APIC irq-chip implementation: |
2798 | */ | 2609 | */ |
2799 | 2610 | ||
2800 | static void mask_lapic_irq(unsigned int irq) | 2611 | static void mask_lapic_irq(struct irq_data *data) |
2801 | { | 2612 | { |
2802 | unsigned long v; | 2613 | unsigned long v; |
2803 | 2614 | ||
@@ -2805,7 +2616,7 @@ static void mask_lapic_irq(unsigned int irq) | |||
2805 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | 2616 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); |
2806 | } | 2617 | } |
2807 | 2618 | ||
2808 | static void unmask_lapic_irq(unsigned int irq) | 2619 | static void unmask_lapic_irq(struct irq_data *data) |
2809 | { | 2620 | { |
2810 | unsigned long v; | 2621 | unsigned long v; |
2811 | 2622 | ||
@@ -2813,21 +2624,21 @@ static void unmask_lapic_irq(unsigned int irq) | |||
2813 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); | 2624 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); |
2814 | } | 2625 | } |
2815 | 2626 | ||
2816 | static void ack_lapic_irq(unsigned int irq) | 2627 | static void ack_lapic_irq(struct irq_data *data) |
2817 | { | 2628 | { |
2818 | ack_APIC_irq(); | 2629 | ack_APIC_irq(); |
2819 | } | 2630 | } |
2820 | 2631 | ||
2821 | static struct irq_chip lapic_chip __read_mostly = { | 2632 | static struct irq_chip lapic_chip __read_mostly = { |
2822 | .name = "local-APIC", | 2633 | .name = "local-APIC", |
2823 | .mask = mask_lapic_irq, | 2634 | .irq_mask = mask_lapic_irq, |
2824 | .unmask = unmask_lapic_irq, | 2635 | .irq_unmask = unmask_lapic_irq, |
2825 | .ack = ack_lapic_irq, | 2636 | .irq_ack = ack_lapic_irq, |
2826 | }; | 2637 | }; |
2827 | 2638 | ||
2828 | static void lapic_register_intr(int irq, struct irq_desc *desc) | 2639 | static void lapic_register_intr(int irq) |
2829 | { | 2640 | { |
2830 | desc->status &= ~IRQ_LEVEL; | 2641 | irq_clear_status_flags(irq, IRQ_LEVEL); |
2831 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2642 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
2832 | "edge"); | 2643 | "edge"); |
2833 | } | 2644 | } |
@@ -2930,9 +2741,8 @@ int timer_through_8259 __initdata; | |||
2930 | */ | 2741 | */ |
2931 | static inline void __init check_timer(void) | 2742 | static inline void __init check_timer(void) |
2932 | { | 2743 | { |
2933 | struct irq_desc *desc = irq_to_desc(0); | 2744 | struct irq_cfg *cfg = get_irq_chip_data(0); |
2934 | struct irq_cfg *cfg = desc->chip_data; | 2745 | int node = cpu_to_node(0); |
2935 | int node = cpu_to_node(boot_cpu_id); | ||
2936 | int apic1, pin1, apic2, pin2; | 2746 | int apic1, pin1, apic2, pin2; |
2937 | unsigned long flags; | 2747 | unsigned long flags; |
2938 | int no_pin1 = 0; | 2748 | int no_pin1 = 0; |
@@ -2942,7 +2752,7 @@ static inline void __init check_timer(void) | |||
2942 | /* | 2752 | /* |
2943 | * get/set the timer IRQ vector: | 2753 | * get/set the timer IRQ vector: |
2944 | */ | 2754 | */ |
2945 | legacy_pic->chip->mask(0); | 2755 | legacy_pic->mask(0); |
2946 | assign_irq_vector(0, cfg, apic->target_cpus()); | 2756 | assign_irq_vector(0, cfg, apic->target_cpus()); |
2947 | 2757 | ||
2948 | /* | 2758 | /* |
@@ -3001,7 +2811,7 @@ static inline void __init check_timer(void) | |||
3001 | add_pin_to_irq_node(cfg, node, apic1, pin1); | 2811 | add_pin_to_irq_node(cfg, node, apic1, pin1); |
3002 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); | 2812 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); |
3003 | } else { | 2813 | } else { |
3004 | /* for edge trigger, setup_IO_APIC_irq already | 2814 | /* for edge trigger, setup_ioapic_irq already |
3005 | * leaves it unmasked. | 2815 | * leaves it unmasked. |
3006 | * so only need to unmask if it is level-trigger | 2816 | * so only need to unmask if it is level-trigger |
3007 | * do we really have level trigger timer? | 2817 | * do we really have level trigger timer? |
@@ -3009,12 +2819,12 @@ static inline void __init check_timer(void) | |||
3009 | int idx; | 2819 | int idx; |
3010 | idx = find_irq_entry(apic1, pin1, mp_INT); | 2820 | idx = find_irq_entry(apic1, pin1, mp_INT); |
3011 | if (idx != -1 && irq_trigger(idx)) | 2821 | if (idx != -1 && irq_trigger(idx)) |
3012 | unmask_IO_APIC_irq_desc(desc); | 2822 | unmask_ioapic(cfg); |
3013 | } | 2823 | } |
3014 | if (timer_irq_works()) { | 2824 | if (timer_irq_works()) { |
3015 | if (nmi_watchdog == NMI_IO_APIC) { | 2825 | if (nmi_watchdog == NMI_IO_APIC) { |
3016 | setup_nmi(); | 2826 | setup_nmi(); |
3017 | legacy_pic->chip->unmask(0); | 2827 | legacy_pic->unmask(0); |
3018 | } | 2828 | } |
3019 | if (disable_timer_pin_1 > 0) | 2829 | if (disable_timer_pin_1 > 0) |
3020 | clear_IO_APIC_pin(0, pin1); | 2830 | clear_IO_APIC_pin(0, pin1); |
@@ -3037,14 +2847,14 @@ static inline void __init check_timer(void) | |||
3037 | */ | 2847 | */ |
3038 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); | 2848 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); |
3039 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); | 2849 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); |
3040 | legacy_pic->chip->unmask(0); | 2850 | legacy_pic->unmask(0); |
3041 | if (timer_irq_works()) { | 2851 | if (timer_irq_works()) { |
3042 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); | 2852 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); |
3043 | timer_through_8259 = 1; | 2853 | timer_through_8259 = 1; |
3044 | if (nmi_watchdog == NMI_IO_APIC) { | 2854 | if (nmi_watchdog == NMI_IO_APIC) { |
3045 | legacy_pic->chip->mask(0); | 2855 | legacy_pic->mask(0); |
3046 | setup_nmi(); | 2856 | setup_nmi(); |
3047 | legacy_pic->chip->unmask(0); | 2857 | legacy_pic->unmask(0); |
3048 | } | 2858 | } |
3049 | goto out; | 2859 | goto out; |
3050 | } | 2860 | } |
@@ -3052,7 +2862,7 @@ static inline void __init check_timer(void) | |||
3052 | * Cleanup, just in case ... | 2862 | * Cleanup, just in case ... |
3053 | */ | 2863 | */ |
3054 | local_irq_disable(); | 2864 | local_irq_disable(); |
3055 | legacy_pic->chip->mask(0); | 2865 | legacy_pic->mask(0); |
3056 | clear_IO_APIC_pin(apic2, pin2); | 2866 | clear_IO_APIC_pin(apic2, pin2); |
3057 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); | 2867 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); |
3058 | } | 2868 | } |
@@ -3069,16 +2879,16 @@ static inline void __init check_timer(void) | |||
3069 | apic_printk(APIC_QUIET, KERN_INFO | 2879 | apic_printk(APIC_QUIET, KERN_INFO |
3070 | "...trying to set up timer as Virtual Wire IRQ...\n"); | 2880 | "...trying to set up timer as Virtual Wire IRQ...\n"); |
3071 | 2881 | ||
3072 | lapic_register_intr(0, desc); | 2882 | lapic_register_intr(0); |
3073 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ | 2883 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ |
3074 | legacy_pic->chip->unmask(0); | 2884 | legacy_pic->unmask(0); |
3075 | 2885 | ||
3076 | if (timer_irq_works()) { | 2886 | if (timer_irq_works()) { |
3077 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); | 2887 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); |
3078 | goto out; | 2888 | goto out; |
3079 | } | 2889 | } |
3080 | local_irq_disable(); | 2890 | local_irq_disable(); |
3081 | legacy_pic->chip->mask(0); | 2891 | legacy_pic->mask(0); |
3082 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); | 2892 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); |
3083 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); | 2893 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); |
3084 | 2894 | ||
@@ -3244,49 +3054,42 @@ device_initcall(ioapic_init_sysfs); | |||
3244 | /* | 3054 | /* |
3245 | * Dynamic irq allocation and deallocation | 3055 | * Dynamic irq allocation and deallocation |
3246 | */ | 3056 | */ |
3247 | unsigned int create_irq_nr(unsigned int irq_want, int node) | 3057 | unsigned int create_irq_nr(unsigned int from, int node) |
3248 | { | 3058 | { |
3249 | /* Allocate an unused irq */ | 3059 | struct irq_cfg *cfg; |
3250 | unsigned int irq; | ||
3251 | unsigned int new; | ||
3252 | unsigned long flags; | 3060 | unsigned long flags; |
3253 | struct irq_cfg *cfg_new = NULL; | 3061 | unsigned int ret = 0; |
3254 | struct irq_desc *desc_new = NULL; | 3062 | int irq; |
3255 | |||
3256 | irq = 0; | ||
3257 | if (irq_want < nr_irqs_gsi) | ||
3258 | irq_want = nr_irqs_gsi; | ||
3259 | |||
3260 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
3261 | for (new = irq_want; new < nr_irqs; new++) { | ||
3262 | desc_new = irq_to_desc_alloc_node(new, node); | ||
3263 | if (!desc_new) { | ||
3264 | printk(KERN_INFO "can not get irq_desc for %d\n", new); | ||
3265 | continue; | ||
3266 | } | ||
3267 | cfg_new = desc_new->chip_data; | ||
3268 | |||
3269 | if (cfg_new->vector != 0) | ||
3270 | continue; | ||
3271 | 3063 | ||
3272 | desc_new = move_irq_desc(desc_new, node); | 3064 | if (from < nr_irqs_gsi) |
3273 | cfg_new = desc_new->chip_data; | 3065 | from = nr_irqs_gsi; |
3274 | 3066 | ||
3275 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | 3067 | irq = alloc_irq_from(from, node); |
3276 | irq = new; | 3068 | if (irq < 0) |
3277 | break; | 3069 | return 0; |
3070 | cfg = alloc_irq_cfg(irq, node); | ||
3071 | if (!cfg) { | ||
3072 | free_irq_at(irq, NULL); | ||
3073 | return 0; | ||
3278 | } | 3074 | } |
3279 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
3280 | 3075 | ||
3281 | if (irq > 0) | 3076 | raw_spin_lock_irqsave(&vector_lock, flags); |
3282 | dynamic_irq_init_keep_chip_data(irq); | 3077 | if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) |
3078 | ret = irq; | ||
3079 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
3283 | 3080 | ||
3284 | return irq; | 3081 | if (ret) { |
3082 | set_irq_chip_data(irq, cfg); | ||
3083 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | ||
3084 | } else { | ||
3085 | free_irq_at(irq, cfg); | ||
3086 | } | ||
3087 | return ret; | ||
3285 | } | 3088 | } |
3286 | 3089 | ||
3287 | int create_irq(void) | 3090 | int create_irq(void) |
3288 | { | 3091 | { |
3289 | int node = cpu_to_node(boot_cpu_id); | 3092 | int node = cpu_to_node(0); |
3290 | unsigned int irq_want; | 3093 | unsigned int irq_want; |
3291 | int irq; | 3094 | int irq; |
3292 | 3095 | ||
@@ -3301,14 +3104,17 @@ int create_irq(void) | |||
3301 | 3104 | ||
3302 | void destroy_irq(unsigned int irq) | 3105 | void destroy_irq(unsigned int irq) |
3303 | { | 3106 | { |
3107 | struct irq_cfg *cfg = get_irq_chip_data(irq); | ||
3304 | unsigned long flags; | 3108 | unsigned long flags; |
3305 | 3109 | ||
3306 | dynamic_irq_cleanup_keep_chip_data(irq); | 3110 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
3307 | 3111 | ||
3308 | free_irte(irq); | 3112 | if (intr_remapping_enabled) |
3113 | free_irte(irq); | ||
3309 | raw_spin_lock_irqsave(&vector_lock, flags); | 3114 | raw_spin_lock_irqsave(&vector_lock, flags); |
3310 | __clear_irq_vector(irq, get_irq_chip_data(irq)); | 3115 | __clear_irq_vector(irq, cfg); |
3311 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 3116 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3117 | free_irq_at(irq, cfg); | ||
3312 | } | 3118 | } |
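A hedged usage sketch of the create_irq()/destroy_irq() pair as rewritten above; my_handler and the surrounding flow are hypothetical, but the calls match this hunk (create_irq() reports failure with a negative return, and destroy_irq() now unwinds chip data, vector and descriptor in one place):

static irqreturn_t my_handler(int irq, void *dev)	/* hypothetical */
{
	return IRQ_HANDLED;
}

static int sketch_dynamic_irq(void)
{
	int irq = create_irq();

	if (irq < 0)
		return irq;
	if (request_irq(irq, my_handler, 0, "sketch", NULL)) {
		destroy_irq(irq);
		return -EBUSY;
	}
	/* ... use the interrupt ... */
	free_irq(irq, NULL);
	destroy_irq(irq);
	return 0;
}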
3313 | 3119 | ||
3314 | /* | 3120 | /* |
@@ -3332,7 +3138,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3332 | 3138 | ||
3333 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); | 3139 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); |
3334 | 3140 | ||
3335 | if (irq_remapped(irq)) { | 3141 | if (irq_remapped(get_irq_chip_data(irq))) { |
3336 | struct irte irte; | 3142 | struct irte irte; |
3337 | int ir_index; | 3143 | int ir_index; |
3338 | u16 sub_handle; | 3144 | u16 sub_handle; |
@@ -3340,14 +3146,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3340 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); | 3146 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); |
3341 | BUG_ON(ir_index == -1); | 3147 | BUG_ON(ir_index == -1); |
3342 | 3148 | ||
3343 | memset (&irte, 0, sizeof(irte)); | 3149 | prepare_irte(&irte, cfg->vector, dest); |
3344 | |||
3345 | irte.present = 1; | ||
3346 | irte.dst_mode = apic->irq_dest_mode; | ||
3347 | irte.trigger_mode = 0; /* edge */ | ||
3348 | irte.dlvry_mode = apic->irq_delivery_mode; | ||
3349 | irte.vector = cfg->vector; | ||
3350 | irte.dest_id = IRTE_DEST(dest); | ||
3351 | 3150 | ||
3352 | /* Set source-id of interrupt request */ | 3151 | /* Set source-id of interrupt request */ |
3353 | if (pdev) | 3152 | if (pdev) |
@@ -3392,26 +3191,24 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3392 | } | 3191 | } |
3393 | 3192 | ||
3394 | #ifdef CONFIG_SMP | 3193 | #ifdef CONFIG_SMP |
3395 | static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3194 | static int |
3195 | msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
3396 | { | 3196 | { |
3397 | struct irq_desc *desc = irq_to_desc(irq); | 3197 | struct irq_cfg *cfg = data->chip_data; |
3398 | struct irq_cfg *cfg; | ||
3399 | struct msi_msg msg; | 3198 | struct msi_msg msg; |
3400 | unsigned int dest; | 3199 | unsigned int dest; |
3401 | 3200 | ||
3402 | if (set_desc_affinity(desc, mask, &dest)) | 3201 | if (__ioapic_set_affinity(data, mask, &dest)) |
3403 | return -1; | 3202 | return -1; |
3404 | 3203 | ||
3405 | cfg = desc->chip_data; | 3204 | __get_cached_msi_msg(data->msi_desc, &msg); |
3406 | |||
3407 | get_cached_msi_msg_desc(desc, &msg); | ||
3408 | 3205 | ||
3409 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3206 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
3410 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3207 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
3411 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3208 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
3412 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3209 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3413 | 3210 | ||
3414 | write_msi_msg_desc(desc, &msg); | 3211 | __write_msi_msg(data->msi_desc, &msg); |
3415 | 3212 | ||
3416 | return 0; | 3213 | return 0; |
3417 | } | 3214 | } |
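Distilled, the retarget pattern above touches only the vector and destination-ID fields of the cached MSI message; a sketch using just the helpers visible in this hunk:

static int sketch_msi_retarget(struct irq_data *data,
			       unsigned int dest, u8 vector)
{
	struct msi_msg msg;

	__get_cached_msi_msg(data->msi_desc, &msg);	/* no irq_desc lookup */
	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
	__write_msi_msg(data->msi_desc, &msg);
	return 0;
}

The dmar and ht variants further down follow the same shape, differing only in how the message is read back and written out.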
@@ -3421,17 +3218,17 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3421 | * done in the process context using interrupt-remapping hardware. | 3218 | * done in the process context using interrupt-remapping hardware. |
3422 | */ | 3219 | */ |
3423 | static int | 3220 | static int |
3424 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3221 | ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, |
3222 | bool force) | ||
3425 | { | 3223 | { |
3426 | struct irq_desc *desc = irq_to_desc(irq); | 3224 | struct irq_cfg *cfg = data->chip_data; |
3427 | struct irq_cfg *cfg = desc->chip_data; | 3225 | unsigned int dest, irq = data->irq; |
3428 | unsigned int dest; | ||
3429 | struct irte irte; | 3226 | struct irte irte; |
3430 | 3227 | ||
3431 | if (get_irte(irq, &irte)) | 3228 | if (get_irte(irq, &irte)) |
3432 | return -1; | 3229 | return -1; |
3433 | 3230 | ||
3434 | if (set_desc_affinity(desc, mask, &dest)) | 3231 | if (__ioapic_set_affinity(data, mask, &dest)) |
3435 | return -1; | 3232 | return -1; |
3436 | 3233 | ||
3437 | irte.vector = cfg->vector; | 3234 | irte.vector = cfg->vector; |
@@ -3461,27 +3258,27 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3461 | * which implement the MSI or MSI-X Capability Structure. | 3258 | * which implement the MSI or MSI-X Capability Structure. |
3462 | */ | 3259 | */ |
3463 | static struct irq_chip msi_chip = { | 3260 | static struct irq_chip msi_chip = { |
3464 | .name = "PCI-MSI", | 3261 | .name = "PCI-MSI", |
3465 | .unmask = unmask_msi_irq, | 3262 | .irq_unmask = unmask_msi_irq, |
3466 | .mask = mask_msi_irq, | 3263 | .irq_mask = mask_msi_irq, |
3467 | .ack = ack_apic_edge, | 3264 | .irq_ack = ack_apic_edge, |
3468 | #ifdef CONFIG_SMP | 3265 | #ifdef CONFIG_SMP |
3469 | .set_affinity = set_msi_irq_affinity, | 3266 | .irq_set_affinity = msi_set_affinity, |
3470 | #endif | 3267 | #endif |
3471 | .retrigger = ioapic_retrigger_irq, | 3268 | .irq_retrigger = ioapic_retrigger_irq, |
3472 | }; | 3269 | }; |
3473 | 3270 | ||
3474 | static struct irq_chip msi_ir_chip = { | 3271 | static struct irq_chip msi_ir_chip = { |
3475 | .name = "IR-PCI-MSI", | 3272 | .name = "IR-PCI-MSI", |
3476 | .unmask = unmask_msi_irq, | 3273 | .irq_unmask = unmask_msi_irq, |
3477 | .mask = mask_msi_irq, | 3274 | .irq_mask = mask_msi_irq, |
3478 | #ifdef CONFIG_INTR_REMAP | 3275 | #ifdef CONFIG_INTR_REMAP |
3479 | .ack = ir_ack_apic_edge, | 3276 | .irq_ack = ir_ack_apic_edge, |
3480 | #ifdef CONFIG_SMP | 3277 | #ifdef CONFIG_SMP |
3481 | .set_affinity = ir_set_msi_irq_affinity, | 3278 | .irq_set_affinity = ir_msi_set_affinity, |
3482 | #endif | 3279 | #endif |
3483 | #endif | 3280 | #endif |
3484 | .retrigger = ioapic_retrigger_irq, | 3281 | .irq_retrigger = ioapic_retrigger_irq, |
3485 | }; | 3282 | }; |
3486 | 3283 | ||
3487 | /* | 3284 | /* |
@@ -3513,8 +3310,8 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
3513 | 3310 | ||
3514 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3311 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) |
3515 | { | 3312 | { |
3516 | int ret; | ||
3517 | struct msi_msg msg; | 3313 | struct msi_msg msg; |
3314 | int ret; | ||
3518 | 3315 | ||
3519 | ret = msi_compose_msg(dev, irq, &msg, -1); | 3316 | ret = msi_compose_msg(dev, irq, &msg, -1); |
3520 | if (ret < 0) | 3317 | if (ret < 0) |
@@ -3523,12 +3320,8 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3523 | set_irq_msi(irq, msidesc); | 3320 | set_irq_msi(irq, msidesc); |
3524 | write_msi_msg(irq, &msg); | 3321 | write_msi_msg(irq, &msg); |
3525 | 3322 | ||
3526 | if (irq_remapped(irq)) { | 3323 | if (irq_remapped(get_irq_chip_data(irq))) { |
3527 | struct irq_desc *desc = irq_to_desc(irq); | 3324 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3528 | /* | ||
3529 | * irq migration in process context | ||
3530 | */ | ||
3531 | desc->status |= IRQ_MOVE_PCNTXT; | ||
3532 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | 3325 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); |
3533 | } else | 3326 | } else |
3534 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3327 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); |
@@ -3540,13 +3333,10 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3540 | 3333 | ||
3541 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 3334 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
3542 | { | 3335 | { |
3543 | unsigned int irq; | 3336 | int node, ret, sub_handle, index = 0; |
3544 | int ret, sub_handle; | 3337 | unsigned int irq, irq_want; |
3545 | struct msi_desc *msidesc; | 3338 | struct msi_desc *msidesc; |
3546 | unsigned int irq_want; | ||
3547 | struct intel_iommu *iommu = NULL; | 3339 | struct intel_iommu *iommu = NULL; |
3548 | int index = 0; | ||
3549 | int node; | ||
3550 | 3340 | ||
3551 | /* x86 doesn't support multiple MSI yet */ | 3341 | /* x86 doesn't support multiple MSI yet */ |
3552 | if (type == PCI_CAP_ID_MSI && nvec > 1) | 3342 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
@@ -3606,18 +3396,17 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3606 | 3396 | ||
3607 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) | 3397 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) |
3608 | #ifdef CONFIG_SMP | 3398 | #ifdef CONFIG_SMP |
3609 | static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3399 | static int |
3400 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
3401 | bool force) | ||
3610 | { | 3402 | { |
3611 | struct irq_desc *desc = irq_to_desc(irq); | 3403 | struct irq_cfg *cfg = data->chip_data; |
3612 | struct irq_cfg *cfg; | 3404 | unsigned int dest, irq = data->irq; |
3613 | struct msi_msg msg; | 3405 | struct msi_msg msg; |
3614 | unsigned int dest; | ||
3615 | 3406 | ||
3616 | if (set_desc_affinity(desc, mask, &dest)) | 3407 | if (__ioapic_set_affinity(data, mask, &dest)) |
3617 | return -1; | 3408 | return -1; |
3618 | 3409 | ||
3619 | cfg = desc->chip_data; | ||
3620 | |||
3621 | dmar_msi_read(irq, &msg); | 3410 | dmar_msi_read(irq, &msg); |
3622 | 3411 | ||
3623 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3412 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
@@ -3633,14 +3422,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3633 | #endif /* CONFIG_SMP */ | 3422 | #endif /* CONFIG_SMP */ |
3634 | 3423 | ||
3635 | static struct irq_chip dmar_msi_type = { | 3424 | static struct irq_chip dmar_msi_type = { |
3636 | .name = "DMAR_MSI", | 3425 | .name = "DMAR_MSI", |
3637 | .unmask = dmar_msi_unmask, | 3426 | .irq_unmask = dmar_msi_unmask, |
3638 | .mask = dmar_msi_mask, | 3427 | .irq_mask = dmar_msi_mask, |
3639 | .ack = ack_apic_edge, | 3428 | .irq_ack = ack_apic_edge, |
3640 | #ifdef CONFIG_SMP | 3429 | #ifdef CONFIG_SMP |
3641 | .set_affinity = dmar_msi_set_affinity, | 3430 | .irq_set_affinity = dmar_msi_set_affinity, |
3642 | #endif | 3431 | #endif |
3643 | .retrigger = ioapic_retrigger_irq, | 3432 | .irq_retrigger = ioapic_retrigger_irq, |
3644 | }; | 3433 | }; |
3645 | 3434 | ||
3646 | int arch_setup_dmar_msi(unsigned int irq) | 3435 | int arch_setup_dmar_msi(unsigned int irq) |
@@ -3661,26 +3450,24 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3661 | #ifdef CONFIG_HPET_TIMER | 3450 | #ifdef CONFIG_HPET_TIMER |
3662 | 3451 | ||
3663 | #ifdef CONFIG_SMP | 3452 | #ifdef CONFIG_SMP |
3664 | static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3453 | static int hpet_msi_set_affinity(struct irq_data *data, |
3454 | const struct cpumask *mask, bool force) | ||
3665 | { | 3455 | { |
3666 | struct irq_desc *desc = irq_to_desc(irq); | 3456 | struct irq_cfg *cfg = data->chip_data; |
3667 | struct irq_cfg *cfg; | ||
3668 | struct msi_msg msg; | 3457 | struct msi_msg msg; |
3669 | unsigned int dest; | 3458 | unsigned int dest; |
3670 | 3459 | ||
3671 | if (set_desc_affinity(desc, mask, &dest)) | 3460 | if (__ioapic_set_affinity(data, mask, &dest)) |
3672 | return -1; | 3461 | return -1; |
3673 | 3462 | ||
3674 | cfg = desc->chip_data; | 3463 | hpet_msi_read(data->handler_data, &msg); |
3675 | |||
3676 | hpet_msi_read(irq, &msg); | ||
3677 | 3464 | ||
3678 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3465 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
3679 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3466 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
3680 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3467 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
3681 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3468 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3682 | 3469 | ||
3683 | hpet_msi_write(irq, &msg); | 3470 | hpet_msi_write(data->handler_data, &msg); |
3684 | 3471 | ||
3685 | return 0; | 3472 | return 0; |
3686 | } | 3473 | } |
@@ -3688,34 +3475,33 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3688 | #endif /* CONFIG_SMP */ | 3475 | #endif /* CONFIG_SMP */ |
3689 | 3476 | ||
3690 | static struct irq_chip ir_hpet_msi_type = { | 3477 | static struct irq_chip ir_hpet_msi_type = { |
3691 | .name = "IR-HPET_MSI", | 3478 | .name = "IR-HPET_MSI", |
3692 | .unmask = hpet_msi_unmask, | 3479 | .irq_unmask = hpet_msi_unmask, |
3693 | .mask = hpet_msi_mask, | 3480 | .irq_mask = hpet_msi_mask, |
3694 | #ifdef CONFIG_INTR_REMAP | 3481 | #ifdef CONFIG_INTR_REMAP |
3695 | .ack = ir_ack_apic_edge, | 3482 | .irq_ack = ir_ack_apic_edge, |
3696 | #ifdef CONFIG_SMP | 3483 | #ifdef CONFIG_SMP |
3697 | .set_affinity = ir_set_msi_irq_affinity, | 3484 | .irq_set_affinity = ir_msi_set_affinity, |
3698 | #endif | 3485 | #endif |
3699 | #endif | 3486 | #endif |
3700 | .retrigger = ioapic_retrigger_irq, | 3487 | .irq_retrigger = ioapic_retrigger_irq, |
3701 | }; | 3488 | }; |
3702 | 3489 | ||
3703 | static struct irq_chip hpet_msi_type = { | 3490 | static struct irq_chip hpet_msi_type = { |
3704 | .name = "HPET_MSI", | 3491 | .name = "HPET_MSI", |
3705 | .unmask = hpet_msi_unmask, | 3492 | .irq_unmask = hpet_msi_unmask, |
3706 | .mask = hpet_msi_mask, | 3493 | .irq_mask = hpet_msi_mask, |
3707 | .ack = ack_apic_edge, | 3494 | .irq_ack = ack_apic_edge, |
3708 | #ifdef CONFIG_SMP | 3495 | #ifdef CONFIG_SMP |
3709 | .set_affinity = hpet_msi_set_affinity, | 3496 | .irq_set_affinity = hpet_msi_set_affinity, |
3710 | #endif | 3497 | #endif |
3711 | .retrigger = ioapic_retrigger_irq, | 3498 | .irq_retrigger = ioapic_retrigger_irq, |
3712 | }; | 3499 | }; |
3713 | 3500 | ||
3714 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3501 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
3715 | { | 3502 | { |
3716 | int ret; | ||
3717 | struct msi_msg msg; | 3503 | struct msi_msg msg; |
3718 | struct irq_desc *desc = irq_to_desc(irq); | 3504 | int ret; |
3719 | 3505 | ||
3720 | if (intr_remapping_enabled) { | 3506 | if (intr_remapping_enabled) { |
3721 | struct intel_iommu *iommu = map_hpet_to_ir(id); | 3507 | struct intel_iommu *iommu = map_hpet_to_ir(id); |
@@ -3733,9 +3519,9 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
3733 | if (ret < 0) | 3519 | if (ret < 0) |
3734 | return ret; | 3520 | return ret; |
3735 | 3521 | ||
3736 | hpet_msi_write(irq, &msg); | 3522 | hpet_msi_write(get_irq_data(irq), &msg); |
3737 | desc->status |= IRQ_MOVE_PCNTXT; | 3523 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3738 | if (irq_remapped(irq)) | 3524 | if (irq_remapped(get_irq_chip_data(irq))) |
3739 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, | 3525 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, |
3740 | handle_edge_irq, "edge"); | 3526 | handle_edge_irq, "edge"); |
3741 | else | 3527 | else |
@@ -3768,33 +3554,30 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3768 | write_ht_irq_msg(irq, &msg); | 3554 | write_ht_irq_msg(irq, &msg); |
3769 | } | 3555 | } |
3770 | 3556 | ||
3771 | static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3557 | static int |
3558 | ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
3772 | { | 3559 | { |
3773 | struct irq_desc *desc = irq_to_desc(irq); | 3560 | struct irq_cfg *cfg = data->chip_data; |
3774 | struct irq_cfg *cfg; | ||
3775 | unsigned int dest; | 3561 | unsigned int dest; |
3776 | 3562 | ||
3777 | if (set_desc_affinity(desc, mask, &dest)) | 3563 | if (__ioapic_set_affinity(data, mask, &dest)) |
3778 | return -1; | 3564 | return -1; |
3779 | 3565 | ||
3780 | cfg = desc->chip_data; | 3566 | target_ht_irq(data->irq, dest, cfg->vector); |
3781 | |||
3782 | target_ht_irq(irq, dest, cfg->vector); | ||
3783 | |||
3784 | return 0; | 3567 | return 0; |
3785 | } | 3568 | } |
3786 | 3569 | ||
3787 | #endif | 3570 | #endif |
3788 | 3571 | ||
3789 | static struct irq_chip ht_irq_chip = { | 3572 | static struct irq_chip ht_irq_chip = { |
3790 | .name = "PCI-HT", | 3573 | .name = "PCI-HT", |
3791 | .mask = mask_ht_irq, | 3574 | .irq_mask = mask_ht_irq, |
3792 | .unmask = unmask_ht_irq, | 3575 | .irq_unmask = unmask_ht_irq, |
3793 | .ack = ack_apic_edge, | 3576 | .irq_ack = ack_apic_edge, |
3794 | #ifdef CONFIG_SMP | 3577 | #ifdef CONFIG_SMP |
3795 | .set_affinity = set_ht_irq_affinity, | 3578 | .irq_set_affinity = ht_set_affinity, |
3796 | #endif | 3579 | #endif |
3797 | .retrigger = ioapic_retrigger_irq, | 3580 | .irq_retrigger = ioapic_retrigger_irq, |
3798 | }; | 3581 | }; |
3799 | 3582 | ||
3800 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | 3583 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) |
@@ -3885,14 +3668,13 @@ int __init arch_probe_nr_irqs(void) | |||
3885 | if (nr < nr_irqs) | 3668 | if (nr < nr_irqs) |
3886 | nr_irqs = nr; | 3669 | nr_irqs = nr; |
3887 | 3670 | ||
3888 | return 0; | 3671 | return NR_IRQS_LEGACY; |
3889 | } | 3672 | } |
3890 | #endif | 3673 | #endif |
3891 | 3674 | ||
3892 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3675 | static int __io_apic_set_pci_routing(struct device *dev, int irq, |
3893 | struct io_apic_irq_attr *irq_attr) | 3676 | struct io_apic_irq_attr *irq_attr) |
3894 | { | 3677 | { |
3895 | struct irq_desc *desc; | ||
3896 | struct irq_cfg *cfg; | 3678 | struct irq_cfg *cfg; |
3897 | int node; | 3679 | int node; |
3898 | int ioapic, pin; | 3680 | int ioapic, pin; |
@@ -3908,13 +3690,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3908 | if (dev) | 3690 | if (dev) |
3909 | node = dev_to_node(dev); | 3691 | node = dev_to_node(dev); |
3910 | else | 3692 | else |
3911 | node = cpu_to_node(boot_cpu_id); | 3693 | node = cpu_to_node(0); |
3912 | 3694 | ||
3913 | desc = irq_to_desc_alloc_node(irq, node); | 3695 | cfg = alloc_irq_and_cfg_at(irq, node); |
3914 | if (!desc) { | 3696 | if (!cfg) |
3915 | printk(KERN_INFO "can not get irq_desc %d\n", irq); | ||
3916 | return 0; | 3697 | return 0; |
3917 | } | ||
3918 | 3698 | ||
3919 | pin = irq_attr->ioapic_pin; | 3699 | pin = irq_attr->ioapic_pin; |
3920 | trigger = irq_attr->trigger; | 3700 | trigger = irq_attr->trigger; |
@@ -3924,15 +3704,14 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3924 | * IRQs < 16 are already in the irq_2_pin[] map | 3704 | * IRQs < 16 are already in the irq_2_pin[] map |
3925 | */ | 3705 | */ |
3926 | if (irq >= legacy_pic->nr_legacy_irqs) { | 3706 | if (irq >= legacy_pic->nr_legacy_irqs) { |
3927 | cfg = desc->chip_data; | 3707 | if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { |
3928 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { | ||
3929 | printk(KERN_INFO "can not add pin %d for irq %d\n", | 3708 | printk(KERN_INFO "can not add pin %d for irq %d\n", |
3930 | pin, irq); | 3709 | pin, irq); |
3931 | return 0; | 3710 | return 0; |
3932 | } | 3711 | } |
3933 | } | 3712 | } |
3934 | 3713 | ||
3935 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | 3714 | setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); |
3936 | 3715 | ||
3937 | return 0; | 3716 | return 0; |
3938 | } | 3717 | } |
@@ -4125,14 +3904,14 @@ void __init setup_ioapic_dest(void) | |||
4125 | */ | 3904 | */ |
4126 | if (desc->status & | 3905 | if (desc->status & |
4127 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3906 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
4128 | mask = desc->affinity; | 3907 | mask = desc->irq_data.affinity; |
4129 | else | 3908 | else |
4130 | mask = apic->target_cpus(); | 3909 | mask = apic->target_cpus(); |
4131 | 3910 | ||
4132 | if (intr_remapping_enabled) | 3911 | if (intr_remapping_enabled) |
4133 | set_ir_ioapic_affinity_irq_desc(desc, mask); | 3912 | ir_ioapic_set_affinity(&desc->irq_data, mask, false); |
4134 | else | 3913 | else |
4135 | set_ioapic_affinity_irq_desc(desc, mask); | 3914 | ioapic_set_affinity(&desc->irq_data, mask, false); |
4136 | } | 3915 | } |
4137 | 3916 | ||
4138 | } | 3917 | } |
@@ -4316,19 +4095,18 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
4316 | void __init pre_init_apic_IRQ0(void) | 4095 | void __init pre_init_apic_IRQ0(void) |
4317 | { | 4096 | { |
4318 | struct irq_cfg *cfg; | 4097 | struct irq_cfg *cfg; |
4319 | struct irq_desc *desc; | ||
4320 | 4098 | ||
4321 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | 4099 | printk(KERN_INFO "Early APIC setup for system timer0\n"); |
4322 | #ifndef CONFIG_SMP | 4100 | #ifndef CONFIG_SMP |
4323 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 4101 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
4324 | #endif | 4102 | #endif |
4325 | desc = irq_to_desc_alloc_node(0, 0); | 4103 | /* Make sure the irq descriptor is set up */ |
4104 | cfg = alloc_irq_and_cfg_at(0, 0); | ||
4326 | 4105 | ||
4327 | setup_local_APIC(); | 4106 | setup_local_APIC(); |
4328 | 4107 | ||
4329 | cfg = irq_cfg(0); | ||
4330 | add_pin_to_irq_node(cfg, 0, 0, 0); | 4108 | add_pin_to_irq_node(cfg, 0, 0, 0); |
4331 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 4109 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); |
4332 | 4110 | ||
4333 | setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); | 4111 | setup_ioapic_irq(0, 0, 0, cfg, 0, 0); |
4334 | } | 4112 | } |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index a43f71cb30f8..c90041ccb742 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void) | |||
178 | error: | 178 | error: |
179 | if (nmi_watchdog == NMI_IO_APIC) { | 179 | if (nmi_watchdog == NMI_IO_APIC) { |
180 | if (!timer_through_8259) | 180 | if (!timer_through_8259) |
181 | legacy_pic->chip->mask(0); | 181 | legacy_pic->mask(0); |
182 | on_each_cpu(__acpi_nmi_disable, NULL, 1); | 182 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
183 | } | 183 | } |
184 | 184 | ||
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 83e9be4778e2..f9e4e6a54073 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -54,6 +54,9 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) | |||
54 | */ | 54 | */ |
55 | void __init default_setup_apic_routing(void) | 55 | void __init default_setup_apic_routing(void) |
56 | { | 56 | { |
57 | |||
58 | enable_IR_x2apic(); | ||
59 | |||
57 | #ifdef CONFIG_X86_X2APIC | 60 | #ifdef CONFIG_X86_X2APIC |
58 | if (x2apic_mode | 61 | if (x2apic_mode |
59 | #ifdef CONFIG_X86_UV | 62 | #ifdef CONFIG_X86_UV |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ba5f62f45f01..9e093f8fe78c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -148,7 +148,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) | |||
148 | { | 148 | { |
149 | #ifdef CONFIG_SMP | 149 | #ifdef CONFIG_SMP |
150 | /* calling is from identify_secondary_cpu() ? */ | 150 | /* calling is from identify_secondary_cpu() ? */ |
151 | if (c->cpu_index == boot_cpu_id) | 151 | if (!c->cpu_index) |
152 | return; | 152 | return; |
153 | 153 | ||
154 | /* | 154 | /* |
@@ -253,37 +253,51 @@ static int __cpuinit nearby_node(int apicid) | |||
253 | #endif | 253 | #endif |
254 | 254 | ||
255 | /* | 255 | /* |
256 | * Fixup core topology information for AMD multi-node processors. | 256 | * Fixup core topology information for |
257 | * Assumption: Number of cores in each internal node is the same. | 257 | * (1) AMD multi-node processors |
258 | * Assumption: Number of cores in each internal node is the same. | ||
259 | * (2) AMD processors supporting compute units | ||
258 | */ | 260 | */ |
259 | #ifdef CONFIG_X86_HT | 261 | #ifdef CONFIG_X86_HT |
260 | static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c) | 262 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) |
261 | { | 263 | { |
262 | unsigned long long value; | 264 | u32 nodes; |
263 | u32 nodes, cores_per_node; | 265 | u8 node_id; |
264 | int cpu = smp_processor_id(); | 266 | int cpu = smp_processor_id(); |
265 | 267 | ||
266 | if (!cpu_has(c, X86_FEATURE_NODEID_MSR)) | 268 | /* get information required for multi-node processors */ |
267 | return; | 269 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { |
270 | u32 eax, ebx, ecx, edx; | ||
268 | 271 | ||
269 | /* fixup topology information only once for a core */ | 272 | cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); |
270 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) | 273 | nodes = ((ecx >> 8) & 7) + 1; |
271 | return; | 274 | node_id = ecx & 7; |
272 | 275 | ||
273 | rdmsrl(MSR_FAM10H_NODE_ID, value); | 276 | /* get compute unit information */ |
277 | smp_num_siblings = ((ebx >> 8) & 3) + 1; | ||
278 | c->compute_unit_id = ebx & 0xff; | ||
279 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { | ||
280 | u64 value; | ||
274 | 281 | ||
275 | nodes = ((value >> 3) & 7) + 1; | 282 | rdmsrl(MSR_FAM10H_NODE_ID, value); |
276 | if (nodes == 1) | 283 | nodes = ((value >> 3) & 7) + 1; |
284 | node_id = value & 7; | ||
285 | } else | ||
277 | return; | 286 | return; |
278 | 287 | ||
279 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | 288 | /* fixup multi-node processor information */ |
280 | cores_per_node = c->x86_max_cores / nodes; | 289 | if (nodes > 1) { |
290 | u32 cores_per_node; | ||
291 | |||
292 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | ||
293 | cores_per_node = c->x86_max_cores / nodes; | ||
281 | 294 | ||
282 | /* store NodeID, use llc_shared_map to store sibling info */ | 295 | /* store NodeID, use llc_shared_map to store sibling info */ |
283 | per_cpu(cpu_llc_id, cpu) = value & 7; | 296 | per_cpu(cpu_llc_id, cpu) = node_id; |
284 | 297 | ||
285 | /* fixup core id to be in range from 0 to (cores_per_node - 1) */ | 298 | /* core id to be in range from 0 to (cores_per_node - 1) */ |
286 | c->cpu_core_id = c->cpu_core_id % cores_per_node; | 299 | c->cpu_core_id = c->cpu_core_id % cores_per_node; |
300 | } | ||
287 | } | 301 | } |
288 | #endif | 302 | #endif |
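For concreteness, a worked example of the fold amd_get_topology() performs, using a hypothetical two-node, twelve-core package (field layout as in the hunk: ECX[10:8]+1 nodes, ECX[2:0] node id, EBX[9:8]+1 siblings per compute unit, EBX[7:0] compute unit id):

/* hypothetical CPUID 0x8000001e results on one core */
nodes          = ((ecx >> 8) & 7) + 1;		/* 2 */
node_id        = ecx & 7;			/* 0 or 1 */
cores_per_node = c->x86_max_cores / nodes;	/* 12 / 2 = 6 */
/* cpu_core_id 0..11 folds into 0..5 within each node: */
c->cpu_core_id = c->cpu_core_id % cores_per_node;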
289 | 303 | ||
@@ -304,9 +318,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
304 | c->phys_proc_id = c->initial_apicid >> bits; | 318 | c->phys_proc_id = c->initial_apicid >> bits; |
305 | /* use socket ID also for last level cache */ | 319 | /* use socket ID also for last level cache */ |
306 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | 320 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; |
307 | /* fixup topology information on multi-node processors */ | 321 | amd_get_topology(c); |
308 | if ((c->x86 == 0x10) && (c->x86_model == 9)) | ||
309 | amd_fixup_dcm(c); | ||
310 | #endif | 322 | #endif |
311 | } | 323 | } |
312 | 324 | ||
@@ -412,6 +424,23 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
412 | set_cpu_cap(c, X86_FEATURE_EXTD_APICID); | 424 | set_cpu_cap(c, X86_FEATURE_EXTD_APICID); |
413 | } | 425 | } |
414 | #endif | 426 | #endif |
427 | |||
428 | /* We need to do the following only once */ | ||
429 | if (c != &boot_cpu_data) | ||
430 | return; | ||
431 | |||
432 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { | ||
433 | |||
434 | if (c->x86 > 0x10 || | ||
435 | (c->x86 == 0x10 && c->x86_model >= 0x2)) { | ||
436 | u64 val; | ||
437 | |||
438 | rdmsrl(MSR_K7_HWCR, val); | ||
439 | if (!(val & BIT(24))) | ||
440 | printk(KERN_WARNING FW_BUG "TSC doesn't count " | ||
441 | "with P0 frequency!\n"); | ||
442 | } | ||
443 | } | ||
415 | } | 444 | } |
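The new boot-CPU-only block reads MSR_K7_HWCR (0xc0010015) and warns when bit 24 is clear; on these families that bit is supposed to pin the TSC to the P0 frequency. A hypothetical userspace cross-check through the msr driver (device path and output wording are illustrative):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t hwcr;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* needs the msr module */

	if (fd < 0 || pread(fd, &hwcr, sizeof(hwcr), 0xc0010015) != sizeof(hwcr))
		return 1;
	printf("HWCR bit 24: %s\n",
	       (hwcr >> 24) & 1 ? "set" : "clear - TSC may not count at P0");
	return 0;
}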
416 | 445 | ||
417 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 446 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
@@ -523,7 +552,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
523 | #endif | 552 | #endif |
524 | 553 | ||
525 | if (c->extended_cpuid_level >= 0x80000006) { | 554 | if (c->extended_cpuid_level >= 0x80000006) { |
526 | if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000)) | 555 | if (cpuid_edx(0x80000006) & 0xf000) |
527 | num_cache_leaves = 4; | 556 | num_cache_leaves = 4; |
528 | else | 557 | else |
529 | num_cache_leaves = 3; | 558 | num_cache_leaves = 3; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f2f9ac7da25c..4b68bda30938 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
665 | this_cpu->c_early_init(c); | 665 | this_cpu->c_early_init(c); |
666 | 666 | ||
667 | #ifdef CONFIG_SMP | 667 | #ifdef CONFIG_SMP |
668 | c->cpu_index = boot_cpu_id; | 668 | c->cpu_index = 0; |
669 | #endif | 669 | #endif |
670 | filter_cpuid_features(c, false); | 670 | filter_cpuid_features(c, false); |
671 | } | 671 | } |
@@ -704,16 +704,21 @@ void __init early_cpu_init(void) | |||
704 | } | 704 | } |
705 | 705 | ||
706 | /* | 706 | /* |
707 | * The NOPL instruction is supposed to exist on all CPUs with | 707 | * The NOPL instruction is supposed to exist on all CPUs of family >= 6; |
708 | * family >= 6; unfortunately, that's not true in practice because | 708 | * unfortunately, that's not true in practice because of early VIA |
709 | * of early VIA chips and (more importantly) broken virtualizers that | 709 | * chips and (more importantly) broken virtualizers that are not easy |
710 | * are not easy to detect. In the latter case it doesn't even *fail* | 710 | * to detect. In the latter case it doesn't even *fail* reliably, so |
711 | * reliably, so probing for it doesn't even work. Disable it completely | 711 | * probing for it doesn't even work. Disable it completely on 32-bit |
712 | * unless we can find a reliable way to detect all the broken cases. | 712 | * unless we can find a reliable way to detect all the broken cases. |
713 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). | ||
713 | */ | 714 | */ |
714 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | 715 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) |
715 | { | 716 | { |
717 | #ifdef CONFIG_X86_32 | ||
716 | clear_cpu_cap(c, X86_FEATURE_NOPL); | 718 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
719 | #else | ||
720 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
721 | #endif | ||
717 | } | 722 | } |
718 | 723 | ||
719 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 724 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
@@ -1264,13 +1269,6 @@ void __cpuinit cpu_init(void) | |||
1264 | clear_all_debug_regs(); | 1269 | clear_all_debug_regs(); |
1265 | dbg_restore_debug_regs(); | 1270 | dbg_restore_debug_regs(); |
1266 | 1271 | ||
1267 | /* | ||
1268 | * Force FPU initialization: | ||
1269 | */ | ||
1270 | current_thread_info()->status = 0; | ||
1271 | clear_used_math(); | ||
1272 | mxcsr_feature_mask_init(); | ||
1273 | |||
1274 | fpu_init(); | 1272 | fpu_init(); |
1275 | xsave_init(); | 1273 | xsave_init(); |
1276 | } | 1274 | } |
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index f668bb1f7d43..e765633f210e 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -32,6 +32,7 @@ struct cpu_dev { | |||
32 | extern const struct cpu_dev *const __x86_cpu_dev_start[], | 32 | extern const struct cpu_dev *const __x86_cpu_dev_start[], |
33 | *const __x86_cpu_dev_end[]; | 33 | *const __x86_cpu_dev_end[]; |
34 | 34 | ||
35 | extern void get_cpu_cap(struct cpuinfo_x86 *c); | ||
35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); | 36 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
36 | extern void get_cpu_cap(struct cpuinfo_x86 *c); | 37 | extern void get_cpu_cap(struct cpuinfo_x86 *c); |
37 | 38 | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b4389441efbb..695f17731e23 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -170,7 +170,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | |||
170 | { | 170 | { |
171 | #ifdef CONFIG_SMP | 171 | #ifdef CONFIG_SMP |
172 | /* calling is from identify_secondary_cpu() ? */ | 172 | /* calling is from identify_secondary_cpu() ? */ |
173 | if (c->cpu_index == boot_cpu_id) | 173 | if (!c->cpu_index) |
174 | return; | 174 | return; |
175 | 175 | ||
176 | /* | 176 | /* |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 898c2f4eab88..12cd823c8d03 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <asm/k8.h> | 20 | #include <asm/amd_nb.h> |
21 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
22 | 22 | ||
23 | #define LVL_1_INST 1 | 23 | #define LVL_1_INST 1 |
@@ -306,7 +306,7 @@ struct _cache_attr { | |||
306 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); | 306 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); |
307 | }; | 307 | }; |
308 | 308 | ||
309 | #ifdef CONFIG_CPU_SUP_AMD | 309 | #ifdef CONFIG_AMD_NB |
310 | 310 | ||
311 | /* | 311 | /* |
312 | * L3 cache descriptors | 312 | * L3 cache descriptors |
@@ -369,7 +369,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, | |||
369 | return; | 369 | return; |
370 | 370 | ||
371 | /* not in virtualized environments */ | 371 | /* not in virtualized environments */ |
372 | if (num_k8_northbridges == 0) | 372 | if (k8_northbridges.num == 0) |
373 | return; | 373 | return; |
374 | 374 | ||
375 | /* | 375 | /* |
@@ -377,7 +377,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, | |||
377 | * never freed but this is done only on shutdown so it doesn't matter. | 377 | * never freed but this is done only on shutdown so it doesn't matter. |
378 | */ | 378 | */ |
379 | if (!l3_caches) { | 379 | if (!l3_caches) { |
380 | int size = num_k8_northbridges * sizeof(struct amd_l3_cache *); | 380 | int size = k8_northbridges.num * sizeof(struct amd_l3_cache *); |
381 | 381 | ||
382 | l3_caches = kzalloc(size, GFP_ATOMIC); | 382 | l3_caches = kzalloc(size, GFP_ATOMIC); |
383 | if (!l3_caches) | 383 | if (!l3_caches) |
@@ -556,12 +556,12 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | |||
556 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | 556 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
557 | show_cache_disable_1, store_cache_disable_1); | 557 | show_cache_disable_1, store_cache_disable_1); |
558 | 558 | ||
559 | #else /* CONFIG_CPU_SUP_AMD */ | 559 | #else /* CONFIG_AMD_NB */ |
560 | static void __cpuinit | 560 | static void __cpuinit |
561 | amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index) | 561 | amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index) |
562 | { | 562 | { |
563 | }; | 563 | }; |
564 | #endif /* CONFIG_CPU_SUP_AMD */ | 564 | #endif /* CONFIG_AMD_NB */ |
565 | 565 | ||
566 | static int | 566 | static int |
567 | __cpuinit cpuid4_cache_lookup_regs(int index, | 567 | __cpuinit cpuid4_cache_lookup_regs(int index, |
@@ -1000,7 +1000,7 @@ static struct attribute *default_attrs[] = { | |||
1000 | 1000 | ||
1001 | static struct attribute *default_l3_attrs[] = { | 1001 | static struct attribute *default_l3_attrs[] = { |
1002 | DEFAULT_SYSFS_CACHE_ATTRS, | 1002 | DEFAULT_SYSFS_CACHE_ATTRS, |
1003 | #ifdef CONFIG_CPU_SUP_AMD | 1003 | #ifdef CONFIG_AMD_NB |
1004 | &cache_disable_0.attr, | 1004 | &cache_disable_0.attr, |
1005 | &cache_disable_1.attr, | 1005 | &cache_disable_1.attr, |
1006 | #endif | 1006 | #endif |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5e975298fa81..80c482382d5c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
131 | u32 low = 0, high = 0, address = 0; | 131 | u32 low = 0, high = 0, address = 0; |
132 | unsigned int bank, block; | 132 | unsigned int bank, block; |
133 | struct thresh_restart tr; | 133 | struct thresh_restart tr; |
134 | u8 lvt_off; | 134 | int lvt_off = -1; |
135 | u8 offset; | ||
135 | 136 | ||
136 | for (bank = 0; bank < NR_BANKS; ++bank) { | 137 | for (bank = 0; bank < NR_BANKS; ++bank) { |
137 | for (block = 0; block < NR_BLOCKS; ++block) { | 138 | for (block = 0; block < NR_BLOCKS; ++block) { |
@@ -141,6 +142,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
141 | address = (low & MASK_BLKPTR_LO) >> 21; | 142 | address = (low & MASK_BLKPTR_LO) >> 21; |
142 | if (!address) | 143 | if (!address) |
143 | break; | 144 | break; |
145 | |||
144 | address += MCG_XBLK_ADDR; | 146 | address += MCG_XBLK_ADDR; |
145 | } else | 147 | } else |
146 | ++address; | 148 | ++address; |
@@ -148,12 +150,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
148 | if (rdmsr_safe(address, &low, &high)) | 150 | if (rdmsr_safe(address, &low, &high)) |
149 | break; | 151 | break; |
150 | 152 | ||
151 | if (!(high & MASK_VALID_HI)) { | 153 | if (!(high & MASK_VALID_HI)) |
152 | if (block) | 154 | continue; |
153 | continue; | ||
154 | else | ||
155 | break; | ||
156 | } | ||
157 | 155 | ||
158 | if (!(high & MASK_CNTP_HI) || | 156 | if (!(high & MASK_CNTP_HI) || |
159 | (high & MASK_LOCKED_HI)) | 157 | (high & MASK_LOCKED_HI)) |
@@ -165,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
165 | if (shared_bank[bank] && c->cpu_core_id) | 163 | if (shared_bank[bank] && c->cpu_core_id) |
166 | break; | 164 | break; |
167 | #endif | 165 | #endif |
168 | lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, | 166 | offset = (high & MASK_LVTOFF_HI) >> 20; |
169 | APIC_EILVT_MSG_FIX, 0); | 167 | if (lvt_off < 0) { |
168 | if (setup_APIC_eilvt(offset, | ||
169 | THRESHOLD_APIC_VECTOR, | ||
170 | APIC_EILVT_MSG_FIX, 0)) { | ||
171 | pr_err(FW_BUG "cpu %d, failed to " | ||
172 | "setup threshold interrupt " | ||
173 | "for bank %d, block %d " | ||
174 | "(MSR%08X=0x%x%08x)", | ||
175 | smp_processor_id(), bank, block, | ||
176 | address, high, low); | ||
177 | continue; | ||
178 | } | ||
179 | lvt_off = offset; | ||
180 | } else if (lvt_off != offset) { | ||
181 | pr_err(FW_BUG "cpu %d, invalid threshold " | ||
182 | "interrupt offset %d for bank %d," | ||
183 | "block %d (MSR%08X=0x%x%08x)", | ||
184 | smp_processor_id(), lvt_off, bank, | ||
185 | block, address, high, low); | ||
186 | continue; | ||
187 | } | ||
170 | 188 | ||
171 | high &= ~MASK_LVTOFF_HI; | 189 | high &= ~MASK_LVTOFF_HI; |
172 | high |= lvt_off << 20; | 190 | high |= lvt_off << 20; |
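Distilled shape of the new per-block logic above (error strings trimmed): the extended-LVT offset is now read back from the threshold MSR rather than assigned by the kernel, programmed once, then merely checked for consistency on the remaining banks and blocks:

offset = (high & MASK_LVTOFF_HI) >> 20;	/* offset chosen by firmware */
if (lvt_off < 0) {
	if (setup_APIC_eilvt(offset, THRESHOLD_APIC_VECTOR,
			     APIC_EILVT_MSG_FIX, 0))
		continue;		/* bad offset: skip this block */
	lvt_off = offset;		/* first good offset wins */
} else if (lvt_off != offset) {
	continue;			/* firmware inconsistent: skip */
}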
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index d9368eeda309..4b683267eca5 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -216,7 +216,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, | |||
216 | err = sysfs_add_file_to_group(&sys_dev->kobj, | 216 | err = sysfs_add_file_to_group(&sys_dev->kobj, |
217 | &attr_core_power_limit_count.attr, | 217 | &attr_core_power_limit_count.attr, |
218 | thermal_attr_group.name); | 218 | thermal_attr_group.name); |
219 | if (cpu_has(c, X86_FEATURE_PTS)) | 219 | if (cpu_has(c, X86_FEATURE_PTS)) { |
220 | err = sysfs_add_file_to_group(&sys_dev->kobj, | 220 | err = sysfs_add_file_to_group(&sys_dev->kobj, |
221 | &attr_package_throttle_count.attr, | 221 | &attr_package_throttle_count.attr, |
222 | thermal_attr_group.name); | 222 | thermal_attr_group.name); |
@@ -224,6 +224,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, | |||
224 | err = sysfs_add_file_to_group(&sys_dev->kobj, | 224 | err = sysfs_add_file_to_group(&sys_dev->kobj, |
225 | &attr_package_power_limit_count.attr, | 225 | &attr_package_power_limit_count.attr, |
226 | thermal_attr_group.name); | 226 | thermal_attr_group.name); |
227 | } | ||
227 | 228 | ||
228 | return err; | 229 | return err; |
229 | } | 230 | } |
@@ -349,7 +350,7 @@ static void intel_thermal_interrupt(void) | |||
349 | 350 | ||
350 | static void unexpected_thermal_interrupt(void) | 351 | static void unexpected_thermal_interrupt(void) |
351 | { | 352 | { |
352 | printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n", | 353 | printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", |
353 | smp_processor_id()); | 354 | smp_processor_id()); |
354 | add_taint(TAINT_MACHINE_CHECK); | 355 | add_taint(TAINT_MACHINE_CHECK); |
355 | } | 356 | } |
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index c5f59d071425..ac140c7be396 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void) | |||
827 | 827 | ||
828 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | 828 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) |
829 | return 0; | 829 | return 0; |
830 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | 830 | if (boot_cpu_data.x86 < 0xf) |
831 | return 0; | 831 | return 0; |
832 | /* In case some hypervisor doesn't pass SYSCFG through: */ | 832 | /* In case some hypervisor doesn't pass SYSCFG through: */ |
833 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | 833 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 7d28d7d03885..9f27228ceffd 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -64,18 +64,59 @@ static inline void k8_check_syscfg_dram_mod_en(void) | |||
64 | } | 64 | } |
65 | } | 65 | } |
66 | 66 | ||
67 | /* Get the size of contiguous MTRR range */ | ||
68 | static u64 get_mtrr_size(u64 mask) | ||
69 | { | ||
70 | u64 size; | ||
71 | |||
72 | mask >>= PAGE_SHIFT; | ||
73 | mask |= size_or_mask; | ||
74 | size = -mask; | ||
75 | size <<= PAGE_SHIFT; | ||
76 | return size; | ||
77 | } | ||
78 | |||
67 | /* | 79 | /* |
68 | * Returns the effective MTRR type for the region | 80 | * Check and return the effective type for MTRR-MTRR type overlap. |
69 | * Error returns: | 81 | * Returns 1 if the effective type is UNCACHEABLE, else returns 0 |
70 | * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR | ||
71 | * - 0xFF - when MTRR is not enabled | ||
72 | */ | 82 | */ |
73 | u8 mtrr_type_lookup(u64 start, u64 end) | 83 | static int check_type_overlap(u8 *prev, u8 *curr) |
84 | { | ||
85 | if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) { | ||
86 | *prev = MTRR_TYPE_UNCACHABLE; | ||
87 | *curr = MTRR_TYPE_UNCACHABLE; | ||
88 | return 1; | ||
89 | } | ||
90 | |||
91 | if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) || | ||
92 | (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) { | ||
93 | *prev = MTRR_TYPE_WRTHROUGH; | ||
94 | *curr = MTRR_TYPE_WRTHROUGH; | ||
95 | } | ||
96 | |||
97 | if (*prev != *curr) { | ||
98 | *prev = MTRR_TYPE_UNCACHABLE; | ||
99 | *curr = MTRR_TYPE_UNCACHABLE; | ||
100 | return 1; | ||
101 | } | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Error/Semi-error returns: | ||
108 | * 0xFF - when MTRR is not enabled | ||
109 | * *repeat == 1 implies that [start:end] spanned across an MTRR range and the | ||
110 | * type returned corresponds only to [start:*partial_end]. | ||
111 | * The caller has to look up [*partial_end:end] again. | ||
112 | */ | ||
113 | static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat) | ||
74 | { | 114 | { |
75 | int i; | 115 | int i; |
76 | u64 base, mask; | 116 | u64 base, mask; |
77 | u8 prev_match, curr_match; | 117 | u8 prev_match, curr_match; |
78 | 118 | ||
119 | *repeat = 0; | ||
79 | if (!mtrr_state_set) | 120 | if (!mtrr_state_set) |
80 | return 0xFF; | 121 | return 0xFF; |
81 | 122 | ||
@@ -126,8 +167,34 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
126 | 167 | ||
127 | start_state = ((start & mask) == (base & mask)); | 168 | start_state = ((start & mask) == (base & mask)); |
128 | end_state = ((end & mask) == (base & mask)); | 169 | end_state = ((end & mask) == (base & mask)); |
129 | if (start_state != end_state) | 170 | |
130 | return 0xFE; | 171 | if (start_state != end_state) { |
172 | /* | ||
173 | * We have start:end spanning across an MTRR. | ||
174 | * We split the region into | ||
175 | * either | ||
176 | * (start:mtrr_end) (mtrr_end:end) | ||
177 | * or | ||
178 | * (start:mtrr_start) (mtrr_start:end) | ||
179 | * depending on kind of overlap. | ||
180 | * Return the type for first region and a pointer to | ||
181 | * the start of second region so that caller will | ||
182 | * lookup again on the second region. | ||
183 | * Note: This way we handle multiple overlaps as well. | ||
184 | */ | ||
185 | if (start_state) | ||
186 | *partial_end = base + get_mtrr_size(mask); | ||
187 | else | ||
188 | *partial_end = base; | ||
189 | |||
190 | if (unlikely(*partial_end <= start)) { | ||
191 | WARN_ON(1); | ||
192 | *partial_end = start + PAGE_SIZE; | ||
193 | } | ||
194 | |||
195 | end = *partial_end - 1; /* end is inclusive */ | ||
196 | *repeat = 1; | ||
197 | } | ||
131 | 198 | ||
132 | if ((start & mask) != (base & mask)) | 199 | if ((start & mask) != (base & mask)) |
133 | continue; | 200 | continue; |
@@ -138,21 +205,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
138 | continue; | 205 | continue; |
139 | } | 206 | } |
140 | 207 | ||
141 | if (prev_match == MTRR_TYPE_UNCACHABLE || | 208 | if (check_type_overlap(&prev_match, &curr_match)) |
142 | curr_match == MTRR_TYPE_UNCACHABLE) { | 209 | return curr_match; |
143 | return MTRR_TYPE_UNCACHABLE; | ||
144 | } | ||
145 | |||
146 | if ((prev_match == MTRR_TYPE_WRBACK && | ||
147 | curr_match == MTRR_TYPE_WRTHROUGH) || | ||
148 | (prev_match == MTRR_TYPE_WRTHROUGH && | ||
149 | curr_match == MTRR_TYPE_WRBACK)) { | ||
150 | prev_match = MTRR_TYPE_WRTHROUGH; | ||
151 | curr_match = MTRR_TYPE_WRTHROUGH; | ||
152 | } | ||
153 | |||
154 | if (prev_match != curr_match) | ||
155 | return MTRR_TYPE_UNCACHABLE; | ||
156 | } | 210 | } |
157 | 211 | ||
158 | if (mtrr_tom2) { | 212 | if (mtrr_tom2) { |
@@ -166,6 +220,36 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
166 | return mtrr_state.def_type; | 220 | return mtrr_state.def_type; |
167 | } | 221 | } |
168 | 222 | ||
223 | /* | ||
224 | * Returns the effective MTRR type for the region | ||
225 | * Error return: | ||
226 | * 0xFF - when MTRR is not enabled | ||
227 | */ | ||
228 | u8 mtrr_type_lookup(u64 start, u64 end) | ||
229 | { | ||
230 | u8 type, prev_type; | ||
231 | int repeat; | ||
232 | u64 partial_end; | ||
233 | |||
234 | type = __mtrr_type_lookup(start, end, &partial_end, &repeat); | ||
235 | |||
236 | /* | ||
237 | * Common path is with repeat = 0. | ||
238 | * However, we can have cases where [start:end] spans across some | ||
239 | * MTRR range. Do repeated lookups for that case here. | ||
240 | */ | ||
241 | while (repeat) { | ||
242 | prev_type = type; | ||
243 | start = partial_end; | ||
244 | type = __mtrr_type_lookup(start, end, &partial_end, &repeat); | ||
245 | |||
246 | if (check_type_overlap(&prev_type, &type)) | ||
247 | return type; | ||
248 | } | ||
249 | |||
250 | return type; | ||
251 | } | ||
252 | |||
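A hypothetical walk-through of the repeat path, assuming a WRBACK variable MTRR over [0, 256 MB) and an UNCACHABLE one over [256 MB, 260 MB):

/*
 * mtrr_type_lookup(255 MB, 258 MB):
 *   1st __mtrr_type_lookup(): the range straddles the WB MTRR, so it
 *	returns WRBACK for [255 MB, 256 MB) and sets *partial_end to
 *	256 MB, *repeat to 1;
 *   2nd __mtrr_type_lookup(): returns UNCACHABLE for [256 MB, 258 MB);
 *   check_type_overlap(WRBACK, UNCACHABLE) collapses the pair, so the
 *	caller sees MTRR_TYPE_UNCACHABLE for the whole range.
 */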
169 | /* Get the MSR pair relating to a var range */ | 253 | /* Get the MSR pair relating to a var range */ |
170 | static void | 254 | static void |
171 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 255 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e2513f26ba8b..fe73c1844a9a 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1196,25 +1196,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1196 | return handled; | 1196 | return handled; |
1197 | } | 1197 | } |
1198 | 1198 | ||
1199 | void smp_perf_pending_interrupt(struct pt_regs *regs) | ||
1200 | { | ||
1201 | irq_enter(); | ||
1202 | ack_APIC_irq(); | ||
1203 | inc_irq_stat(apic_pending_irqs); | ||
1204 | perf_event_do_pending(); | ||
1205 | irq_exit(); | ||
1206 | } | ||
1207 | |||
1208 | void set_perf_event_pending(void) | ||
1209 | { | ||
1210 | #ifdef CONFIG_X86_LOCAL_APIC | ||
1211 | if (!x86_pmu.apic || !x86_pmu_initialized()) | ||
1212 | return; | ||
1213 | |||
1214 | apic->send_IPI_self(LOCAL_PENDING_VECTOR); | ||
1215 | #endif | ||
1216 | } | ||
1217 | |||
1218 | void perf_events_lapic_init(void) | 1199 | void perf_events_lapic_init(void) |
1219 | { | 1200 | { |
1220 | if (!x86_pmu.apic || !x86_pmu_initialized()) | 1201 | if (!x86_pmu.apic || !x86_pmu_initialized()) |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index c2897b7b4a3b..46d58448c3af 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
52 | [ C(DTLB) ] = { | 52 | [ C(DTLB) ] = { |
53 | [ C(OP_READ) ] = { | 53 | [ C(OP_READ) ] = { |
54 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | 54 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ |
55 | [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */ | 55 | [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */ |
56 | }, | 56 | }, |
57 | [ C(OP_WRITE) ] = { | 57 | [ C(OP_WRITE) ] = { |
58 | [ C(RESULT_ACCESS) ] = 0, | 58 | [ C(RESULT_ACCESS) ] = 0, |
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
66 | [ C(ITLB) ] = { | 66 | [ C(ITLB) ] = { |
67 | [ C(OP_READ) ] = { | 67 | [ C(OP_READ) ] = { |
68 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ | 68 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ |
69 | [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ | 69 | [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */ |
70 | }, | 70 | }, |
71 | [ C(OP_WRITE) ] = { | 71 | [ C(OP_WRITE) ] = { |
72 | [ C(RESULT_ACCESS) ] = -1, | 72 | [ C(RESULT_ACCESS) ] = -1, |
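The two new miss codes put an explicit unit mask on top of the old event selects. Assuming the usual AMD PerfEvtSel split used by these tables (bits 7:0 event select, bits 15:8 unit mask; the hunk itself does not spell this out), they decode as:

/* 0x0746 = event 0x46, umask 0x07: all L1_DTLB_AND_L2_DLTB_MISS subevents */
/* 0x0385 = event 0x85, umask 0x03: all L1_ITLB_AND_L2_ITLB_MISS subevents */
#define EVT_SELECT(cfg)	((cfg) & 0xff)
#define EVT_UMASK(cfg)	(((cfg) >> 8) & 0xff)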
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index fb329e9f8494..d9f4ff8fcd69 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -700,11 +700,10 @@ static void probe_nmi_watchdog(void) | |||
700 | { | 700 | { |
701 | switch (boot_cpu_data.x86_vendor) { | 701 | switch (boot_cpu_data.x86_vendor) { |
702 | case X86_VENDOR_AMD: | 702 | case X86_VENDOR_AMD: |
703 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | 703 | if (boot_cpu_data.x86 == 6 || |
704 | boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17) | 704 | (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15)) |
705 | return; | 705 | wd_ops = &k7_wd_ops; |
706 | wd_ops = &k7_wd_ops; | 706 | return; |
707 | break; | ||
708 | case X86_VENDOR_INTEL: | 707 | case X86_VENDOR_INTEL: |
709 | /* Work around where perfctr1 doesn't have a working enable | 708 | /* Work around where perfctr1 doesn't have a working enable |
710 | * bit as described in the following errata: | 709 | * bit as described in the following errata: |
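The rewritten AMD check turns a blacklist-style comparison (which silently rejected newer families) into an explicit whitelist: K7 (family 6) plus everything from K8 (0xf) through family 0x15. As a standalone predicate, the new condition reads roughly:

/* Sketch: families whose perfctrs are compatible with k7_wd_ops. */
static bool amd_has_k7_perfctrs(const struct cpuinfo_x86 *c)
{
	return c->x86 == 6 || (c->x86 >= 0xf && c->x86 <= 0x15);
}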
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index d49079515122..c7f64e6f537a 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -44,6 +44,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
44 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, | 44 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, |
45 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, | 45 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, |
46 | { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 }, | 46 | { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 }, |
47 | { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 }, | ||
48 | { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 }, | ||
49 | { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 }, | ||
50 | { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 }, | ||
51 | { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 }, | ||
52 | { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 }, | ||
47 | { 0, 0, 0, 0, 0 } | 53 | { 0, 0, 0, 0, 0 } |
48 | }; | 54 | }; |
49 | 55 | ||
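Each table row means: if the given bit of the named register in that CPUID leaf is set, mark the corresponding synthetic feature flag. The SVM bits added here can be probed the same way by hand; a minimal sketch, using the leaf and bit position from the PAUSEFILTER row above (helper name hypothetical):

#include <asm/processor.h>

static bool svm_has_pause_filter(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (cpuid_eax(0x80000000) < 0x8000000a)
		return false;			/* leaf not implemented */
	cpuid(0x8000000a, &eax, &ebx, &ecx, &edx);
	return edx & (1 << 10);			/* X86_FEATURE_PAUSEFILTER bit */
}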
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 045b36cada65..994828899e09 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c | |||
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
34 | if (!csize) | 34 | if (!csize) |
35 | return 0; | 35 | return 0; |
36 | 36 | ||
37 | vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); | 37 | vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); |
38 | if (!vaddr) | 38 | if (!vaddr) |
39 | return -ENOMEM; | 39 | return -ENOMEM; |
40 | 40 | ||
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
46 | } else | 46 | } else |
47 | memcpy(buf, vaddr + offset, csize); | 47 | memcpy(buf, vaddr + offset, csize); |
48 | 48 | ||
49 | set_iounmap_nonlazy(); | ||
49 | iounmap(vaddr); | 50 | iounmap(vaddr); |
50 | return csize; | 51 | return csize; |
51 | } | 52 | } |
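Switching to ioremap_cache() lets the (typically large) oldmem reads go through a cacheable mapping instead of an uncached one, and set_iounmap_nonlazy() presumably forces the deferred unmap and TLB flush to happen at the next iounmap() rather than being batched lazily, since these mappings are created and torn down once per page. A sketch of a caller, roughly as the /proc/vmcore read path would drive it (buffer handling simplified):

/* Copy one page of the crashed kernel's memory into a kernel buffer. */
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
ssize_t n;

if (buf) {
	n = copy_oldmem_page(pfn, buf, PAGE_SIZE, 0 /* offset */,
			     0 /* kernel, not user, buffer */);
	if (n < 0)
		pr_err("oldmem read of pfn %lx failed\n", pfn);
}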
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index ebdb85cf2686..76b8cd953dee 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -97,7 +97,6 @@ static void __init nvidia_bugs(int num, int slot, int func) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) | 99 | #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) |
100 | #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) | ||
101 | static u32 __init ati_ixp4x0_rev(int num, int slot, int func) | 100 | static u32 __init ati_ixp4x0_rev(int num, int slot, int func) |
102 | { | 101 | { |
103 | u32 d; | 102 | u32 d; |
@@ -115,7 +114,6 @@ static u32 __init ati_ixp4x0_rev(int num, int slot, int func) | |||
115 | d &= 0xff; | 114 | d &= 0xff; |
116 | return d; | 115 | return d; |
117 | } | 116 | } |
118 | #endif | ||
119 | 117 | ||
120 | static void __init ati_bugs(int num, int slot, int func) | 118 | static void __init ati_bugs(int num, int slot, int func) |
121 | { | 119 | { |
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index fa99bae75ace..4572f25f9325 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <xen/hvc-console.h> | 14 | #include <xen/hvc-console.h> |
15 | #include <asm/pci-direct.h> | 15 | #include <asm/pci-direct.h> |
16 | #include <asm/fixmap.h> | 16 | #include <asm/fixmap.h> |
17 | #include <asm/mrst.h> | ||
17 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
18 | #include <linux/usb/ehci_def.h> | 19 | #include <linux/usb/ehci_def.h> |
19 | 20 | ||
@@ -239,6 +240,18 @@ static int __init setup_early_printk(char *buf) | |||
239 | if (!strncmp(buf, "xen", 3)) | 240 | if (!strncmp(buf, "xen", 3)) |
240 | early_console_register(&xenboot_console, keep); | 241 | early_console_register(&xenboot_console, keep); |
241 | #endif | 242 | #endif |
243 | #ifdef CONFIG_X86_MRST_EARLY_PRINTK | ||
244 | if (!strncmp(buf, "mrst", 4)) { | ||
245 | mrst_early_console_init(); | ||
246 | early_console_register(&early_mrst_console, keep); | ||
247 | } | ||
248 | |||
249 | if (!strncmp(buf, "hsu", 3)) { | ||
250 | hsu_early_console_init(); | ||
251 | early_console_register(&early_hsu_console, keep); | ||
252 | } | ||
253 | |||
254 | #endif | ||
242 | buf++; | 255 | buf++; |
243 | } | 256 | } |
244 | return 0; | 257 | return 0; |
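With both consoles wired into setup_early_printk(), they are selected from the kernel command line like the other early consoles. Based on the string matches above, something like the following should pick the SPI-UART path on Moorestown (the exact option spelling beyond the matched prefixes is an assumption):

	earlyprintk=mrst,keep	(or earlyprintk=hsu,keep for the Medfield HSU)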
diff --git a/arch/x86/kernel/early_printk_mrst.c b/arch/x86/kernel/early_printk_mrst.c new file mode 100644 index 000000000000..65df603622b2 --- /dev/null +++ b/arch/x86/kernel/early_printk_mrst.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * early_printk_mrst.c - early consoles for Intel MID platforms | ||
3 | * | ||
4 | * Copyright (c) 2008-2010, Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * This file implements two early consoles named mrst and hsu. | ||
14 | * mrst is based on the MAX3110 spi-uart device and exists on both | ||
15 | * the Moorestown and Medfield platforms, while hsu is based on a High | ||
16 | * Speed UART device which exists only on the Medfield platform. | ||
17 | */ | ||
18 | |||
19 | #include <linux/serial_reg.h> | ||
20 | #include <linux/serial_mfd.h> | ||
21 | #include <linux/kmsg_dump.h> | ||
22 | #include <linux/console.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/io.h> | ||
27 | |||
28 | #include <asm/fixmap.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/mrst.h> | ||
31 | |||
32 | #define MRST_SPI_TIMEOUT 0x200000 | ||
33 | #define MRST_REGBASE_SPI0 0xff128000 | ||
34 | #define MRST_REGBASE_SPI1 0xff128400 | ||
35 | #define MRST_CLK_SPI0_REG 0xff11d86c | ||
36 | |||
37 | /* Bit fields in CTRLR0 */ | ||
38 | #define SPI_DFS_OFFSET 0 | ||
39 | |||
40 | #define SPI_FRF_OFFSET 4 | ||
41 | #define SPI_FRF_SPI 0x0 | ||
42 | #define SPI_FRF_SSP 0x1 | ||
43 | #define SPI_FRF_MICROWIRE 0x2 | ||
44 | #define SPI_FRF_RESV 0x3 | ||
45 | |||
46 | #define SPI_MODE_OFFSET 6 | ||
47 | #define SPI_SCPH_OFFSET 6 | ||
48 | #define SPI_SCOL_OFFSET 7 | ||
49 | #define SPI_TMOD_OFFSET 8 | ||
50 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | ||
51 | #define SPI_TMOD_TO 0x1 /* xmit only */ | ||
52 | #define SPI_TMOD_RO 0x2 /* recv only */ | ||
53 | #define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ | ||
54 | |||
55 | #define SPI_SLVOE_OFFSET 10 | ||
56 | #define SPI_SRL_OFFSET 11 | ||
57 | #define SPI_CFS_OFFSET 12 | ||
58 | |||
59 | /* Bit fields in SR, 7 bits */ | ||
60 | #define SR_MASK 0x7f /* cover 7 bits */ | ||
61 | #define SR_BUSY (1 << 0) | ||
62 | #define SR_TF_NOT_FULL (1 << 1) | ||
63 | #define SR_TF_EMPT (1 << 2) | ||
64 | #define SR_RF_NOT_EMPT (1 << 3) | ||
65 | #define SR_RF_FULL (1 << 4) | ||
66 | #define SR_TX_ERR (1 << 5) | ||
67 | #define SR_DCOL (1 << 6) | ||
68 | |||
69 | struct dw_spi_reg { | ||
70 | u32 ctrl0; | ||
71 | u32 ctrl1; | ||
72 | u32 ssienr; | ||
73 | u32 mwcr; | ||
74 | u32 ser; | ||
75 | u32 baudr; | ||
76 | u32 txfltr; | ||
77 | u32 rxfltr; | ||
78 | u32 txflr; | ||
79 | u32 rxflr; | ||
80 | u32 sr; | ||
81 | u32 imr; | ||
82 | u32 isr; | ||
83 | u32 risr; | ||
84 | u32 txoicr; | ||
85 | u32 rxoicr; | ||
86 | u32 rxuicr; | ||
87 | u32 msticr; | ||
88 | u32 icr; | ||
89 | u32 dmacr; | ||
90 | u32 dmatdlr; | ||
91 | u32 dmardlr; | ||
92 | u32 idr; | ||
93 | u32 version; | ||
94 | |||
95 | /* Currently operates as 32 bits, though only the low 16 bits matter */ | ||
96 | u32 dr; | ||
97 | } __packed; | ||
98 | |||
99 | #define dw_readl(dw, name) __raw_readl(&(dw)->name) | ||
100 | #define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name) | ||
101 | |||
102 | /* Default to the SPI0 registers for mrst; if we detect Penwell, use SPI1 */ | ||
103 | static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0; | ||
104 | |||
105 | static u32 *pclk_spi0; | ||
106 | /* Always contains an accessible address; starts out as 0 */ | ||
107 | static struct dw_spi_reg *pspi; | ||
108 | |||
109 | static struct kmsg_dumper dw_dumper; | ||
110 | static int dumper_registered; | ||
111 | |||
112 | static void dw_kmsg_dump(struct kmsg_dumper *dumper, | ||
113 | enum kmsg_dump_reason reason, | ||
114 | const char *s1, unsigned long l1, | ||
115 | const char *s2, unsigned long l2) | ||
116 | { | ||
117 | int i; | ||
118 | |||
119 | /* By the time this runs, the HW state is unknown, so re-init it */ | ||
120 | mrst_early_console_init(); | ||
121 | |||
122 | for (i = 0; i < l1; i++) | ||
123 | early_mrst_console.write(&early_mrst_console, s1 + i, 1); | ||
124 | for (i = 0; i < l2; i++) | ||
125 | early_mrst_console.write(&early_mrst_console, s2 + i, 1); | ||
126 | } | ||
127 | |||
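dw_kmsg_dump() is the panic-path safety net: when the kernel dumps its log (oops, panic, emergency restart), the hardware may be in an arbitrary state, so the callback re-runs the init and replays both log chunks byte by byte. Registering a dumper follows the same shape in general; a minimal sketch against the kmsg_dump API used in this file:

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
		    const char *s1, unsigned long l1,
		    const char *s2, unsigned long l2)
{
	/* s1/s2 are the two halves of the circular log buffer. */
}

static struct kmsg_dumper my_dumper = { .dump = my_dump };

/* Once, at init time: */
kmsg_dump_register(&my_dumper);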
128 | /* Set the baud rate to 115200, 8n1, IRQs disabled */ | ||
129 | static void max3110_write_config(void) | ||
130 | { | ||
131 | u16 config; | ||
132 | |||
133 | config = 0xc001; | ||
134 | dw_writel(pspi, dr, config); | ||
135 | } | ||
136 | |||
137 | /* Translate the char to an eligible word and send it to the max3110 */ | ||
138 | static void max3110_write_data(char c) | ||
139 | { | ||
140 | u16 data; | ||
141 | |||
142 | data = 0x8000 | c; | ||
143 | dw_writel(pspi, dr, data); | ||
144 | } | ||
145 | |||
146 | void mrst_early_console_init(void) | ||
147 | { | ||
148 | u32 ctrlr0 = 0; | ||
149 | u32 spi0_cdiv; | ||
150 | u32 freq; /* Frequency info only needs to be read once */ | ||
151 | |||
152 | /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */ | ||
153 | pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, | ||
154 | MRST_CLK_SPI0_REG); | ||
155 | spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9; | ||
156 | freq = 100000000 / (spi0_cdiv + 1); | ||
157 | |||
158 | if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL) | ||
159 | mrst_spi_paddr = MRST_REGBASE_SPI1; | ||
160 | |||
161 | pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, | ||
162 | mrst_spi_paddr); | ||
163 | |||
164 | /* Disable SPI controller */ | ||
165 | dw_writel(pspi, ssienr, 0); | ||
166 | |||
167 | /* Set control param, 8 bits, transmit only mode */ | ||
168 | ctrlr0 = dw_readl(pspi, ctrl0); | ||
169 | |||
170 | ctrlr0 &= 0xfcc0; | ||
171 | ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET) | ||
172 | | (SPI_TMOD_TO << SPI_TMOD_OFFSET); | ||
173 | dw_writel(pspi, ctrl0, ctrlr0); | ||
174 | |||
175 | /* | ||
176 | * Change the spi0 clk to comply with 115200 bps; use 100000 to | ||
177 | * calculate the clk divisor so the clock runs a little slower | ||
178 | * than the real baud rate. | ||
179 | */ | ||
180 | dw_writel(pspi, baudr, freq/100000); | ||
181 | |||
182 | /* Disable all INT for early phase */ | ||
183 | dw_writel(pspi, imr, 0x0); | ||
184 | |||
185 | /* Set the cs to spi-uart */ | ||
186 | dw_writel(pspi, ser, 0x2); | ||
187 | |||
188 | /* Enable the HW, the last step for HW init */ | ||
189 | dw_writel(pspi, ssienr, 0x1); | ||
190 | |||
191 | /* Set the default configuration */ | ||
192 | max3110_write_config(); | ||
193 | |||
194 | /* Register the kmsg dumper */ | ||
195 | if (!dumper_registered) { | ||
196 | dw_dumper.dump = dw_kmsg_dump; | ||
197 | kmsg_dump_register(&dw_dumper); | ||
198 | dumper_registered = 1; | ||
199 | } | ||
200 | } | ||
201 | |||
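The divisor arithmetic in the function above is worth making concrete: the SPI block's 100 MHz input clock is pre-divided by (spi0_cdiv + 1), and the DW controller divides that again by the value written to baudr, so the serial clock lands just under 115200. For example, assuming spi0_cdiv reads back as 3:

u32 spi0_cdiv = 3;				/* example value from the clk register */
u32 freq  = 100000000 / (spi0_cdiv + 1);	/* 25 MHz into the SPI block */
u32 baudr = freq / 100000;			/* 250 */
u32 sclk  = freq / baudr;			/* 100 kHz, a little under 115200 */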
202 | /* Slave select should be handled in the read/write functions */ | ||
203 | static void early_mrst_spi_putc(char c) | ||
204 | { | ||
205 | unsigned int timeout; | ||
206 | u32 sr; | ||
207 | |||
208 | timeout = MRST_SPI_TIMEOUT; | ||
209 | /* Early putc needs to make sure the TX FIFO is not full */ | ||
210 | while (--timeout) { | ||
211 | sr = dw_readl(pspi, sr); | ||
212 | if (!(sr & SR_TF_NOT_FULL)) | ||
213 | cpu_relax(); | ||
214 | else | ||
215 | break; | ||
216 | } | ||
217 | |||
218 | if (!timeout) | ||
219 | pr_warning("MRST earlycon: timed out\n"); | ||
220 | else | ||
221 | max3110_write_data(c); | ||
222 | } | ||
223 | |||
224 | /* Early SPI only uses polling mode */ | ||
225 | static void early_mrst_spi_write(struct console *con, const char *str, unsigned n) | ||
226 | { | ||
227 | int i; | ||
228 | |||
229 | for (i = 0; i < n && *str; i++) { | ||
230 | if (*str == '\n') | ||
231 | early_mrst_spi_putc('\r'); | ||
232 | early_mrst_spi_putc(*str); | ||
233 | str++; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | struct console early_mrst_console = { | ||
238 | .name = "earlymrst", | ||
239 | .write = early_mrst_spi_write, | ||
240 | .flags = CON_PRINTBUFFER, | ||
241 | .index = -1, | ||
242 | }; | ||
243 | |||
244 | /* | ||
245 | * The following is the early console based on the Medfield HSU | ||
246 | * (High Speed UART) device. | ||
247 | */ | ||
248 | #define HSU_PORT2_PADDR 0xffa28180 | ||
249 | |||
250 | static void __iomem *phsu; | ||
251 | |||
252 | void hsu_early_console_init(void) | ||
253 | { | ||
254 | u8 lcr; | ||
255 | |||
256 | phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, | ||
257 | HSU_PORT2_PADDR); | ||
258 | |||
259 | /* Disable FIFO */ | ||
260 | writeb(0x0, phsu + UART_FCR); | ||
261 | |||
262 | /* Set to default 115200 bps, 8n1 */ | ||
263 | lcr = readb(phsu + UART_LCR); | ||
264 | writeb((0x80 | lcr), phsu + UART_LCR); | ||
265 | writeb(0x18, phsu + UART_DLL); | ||
266 | writeb(lcr, phsu + UART_LCR); | ||
267 | writel(0x3600, phsu + UART_MUL*4); | ||
268 | |||
269 | writeb(0x8, phsu + UART_MCR); | ||
270 | writeb(0x7, phsu + UART_FCR); | ||
271 | writeb(0x3, phsu + UART_LCR); | ||
272 | |||
273 | /* Clear IRQ status */ | ||
274 | readb(phsu + UART_LSR); | ||
275 | readb(phsu + UART_RX); | ||
276 | readb(phsu + UART_IIR); | ||
277 | readb(phsu + UART_MSR); | ||
278 | |||
279 | /* Enable FIFO */ | ||
280 | writeb(0x7, phsu + UART_FCR); | ||
281 | } | ||
282 | |||
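hsu_early_console_init() is the classic 8250 divisor-latch dance with one vendor twist: setting LCR bit 7 (DLAB) exposes the divisor latch, DLL is written, DLAB is cleared again, and LCR = 0x3 selects 8n1; the extra write at UART_MUL*4 programs a Medfield-specific clock multiplier on top. The generic part, sketched with the port base and 16-bit divisor as assumed inputs:

void __iomem *port;	/* assumed: HSU register base, like phsu above */
u16 divisor;		/* assumed: precomputed 16-bit baud divisor */
u8 lcr = readb(port + UART_LCR);

writeb(lcr | UART_LCR_DLAB, port + UART_LCR);	/* expose DLL/DLM */
writeb(divisor & 0xff, port + UART_DLL);	/* divisor low byte */
writeb(divisor >> 8,   port + UART_DLM);	/* divisor high byte */
writeb(lcr, port + UART_LCR);			/* hide the latch again */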
283 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
284 | |||
285 | static void early_hsu_putc(char ch) | ||
286 | { | ||
287 | unsigned int timeout = 10000; /* 10ms */ | ||
288 | u8 status; | ||
289 | |||
290 | while (--timeout) { | ||
291 | status = readb(phsu + UART_LSR); | ||
292 | if (status & BOTH_EMPTY) | ||
293 | break; | ||
294 | udelay(1); | ||
295 | } | ||
296 | |||
297 | /* Only write the char when there was no timeout */ | ||
298 | if (timeout) | ||
299 | writeb(ch, phsu + UART_TX); | ||
300 | } | ||
301 | |||
302 | static void early_hsu_write(struct console *con, const char *str, unsigned n) | ||
303 | { | ||
304 | int i; | ||
305 | |||
306 | for (i = 0; i < n && *str; i++) { | ||
307 | if (*str == '\n') | ||
308 | early_hsu_putc('\r'); | ||
309 | early_hsu_putc(*str); | ||
310 | str++; | ||
311 | } | ||
312 | } | ||
313 | |||
314 | struct console early_hsu_console = { | ||
315 | .name = "earlyhsu", | ||
316 | .write = early_hsu_write, | ||
317 | .flags = CON_PRINTBUFFER, | ||
318 | .index = -1, | ||
319 | }; | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 227d00920d2f..9fb188d7bc76 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -115,8 +115,7 @@ | |||
115 | 115 | ||
116 | /* unfortunately push/pop can't be no-op */ | 116 | /* unfortunately push/pop can't be no-op */ |
117 | .macro PUSH_GS | 117 | .macro PUSH_GS |
118 | pushl $0 | 118 | pushl_cfi $0 |
119 | CFI_ADJUST_CFA_OFFSET 4 | ||
120 | .endm | 119 | .endm |
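The bulk of this entry_32.S diff is mechanical: every pushl/popl followed by a hand-written CFI_ADJUST_CFA_OFFSET collapses into a single pushl_cfi/popl_cfi. Those wrappers come from asm/dwarf2.h in this same patch series and presumably expand to nothing more than:

	.macro pushl_cfi reg
	pushl \reg
	CFI_ADJUST_CFA_OFFSET 4
	.endm

	.macro popl_cfi reg
	popl \reg
	CFI_ADJUST_CFA_OFFSET -4
	.endm

pushfl_cfi/popfl_cfi follow the same pattern for the flags register, and entry_64.S below uses the analogous pushq_cfi/popq_cfi with 8-byte adjustments.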
121 | .macro POP_GS pop=0 | 120 | .macro POP_GS pop=0 |
122 | addl $(4 + \pop), %esp | 121 | addl $(4 + \pop), %esp |
@@ -140,14 +139,12 @@ | |||
140 | #else /* CONFIG_X86_32_LAZY_GS */ | 139 | #else /* CONFIG_X86_32_LAZY_GS */ |
141 | 140 | ||
142 | .macro PUSH_GS | 141 | .macro PUSH_GS |
143 | pushl %gs | 142 | pushl_cfi %gs |
144 | CFI_ADJUST_CFA_OFFSET 4 | ||
145 | /*CFI_REL_OFFSET gs, 0*/ | 143 | /*CFI_REL_OFFSET gs, 0*/ |
146 | .endm | 144 | .endm |
147 | 145 | ||
148 | .macro POP_GS pop=0 | 146 | .macro POP_GS pop=0 |
149 | 98: popl %gs | 147 | 98: popl_cfi %gs |
150 | CFI_ADJUST_CFA_OFFSET -4 | ||
151 | /*CFI_RESTORE gs*/ | 148 | /*CFI_RESTORE gs*/ |
152 | .if \pop <> 0 | 149 | .if \pop <> 0 |
153 | add $\pop, %esp | 150 | add $\pop, %esp |
@@ -195,35 +192,25 @@ | |||
195 | .macro SAVE_ALL | 192 | .macro SAVE_ALL |
196 | cld | 193 | cld |
197 | PUSH_GS | 194 | PUSH_GS |
198 | pushl %fs | 195 | pushl_cfi %fs |
199 | CFI_ADJUST_CFA_OFFSET 4 | ||
200 | /*CFI_REL_OFFSET fs, 0;*/ | 196 | /*CFI_REL_OFFSET fs, 0;*/ |
201 | pushl %es | 197 | pushl_cfi %es |
202 | CFI_ADJUST_CFA_OFFSET 4 | ||
203 | /*CFI_REL_OFFSET es, 0;*/ | 198 | /*CFI_REL_OFFSET es, 0;*/ |
204 | pushl %ds | 199 | pushl_cfi %ds |
205 | CFI_ADJUST_CFA_OFFSET 4 | ||
206 | /*CFI_REL_OFFSET ds, 0;*/ | 200 | /*CFI_REL_OFFSET ds, 0;*/ |
207 | pushl %eax | 201 | pushl_cfi %eax |
208 | CFI_ADJUST_CFA_OFFSET 4 | ||
209 | CFI_REL_OFFSET eax, 0 | 202 | CFI_REL_OFFSET eax, 0 |
210 | pushl %ebp | 203 | pushl_cfi %ebp |
211 | CFI_ADJUST_CFA_OFFSET 4 | ||
212 | CFI_REL_OFFSET ebp, 0 | 204 | CFI_REL_OFFSET ebp, 0 |
213 | pushl %edi | 205 | pushl_cfi %edi |
214 | CFI_ADJUST_CFA_OFFSET 4 | ||
215 | CFI_REL_OFFSET edi, 0 | 206 | CFI_REL_OFFSET edi, 0 |
216 | pushl %esi | 207 | pushl_cfi %esi |
217 | CFI_ADJUST_CFA_OFFSET 4 | ||
218 | CFI_REL_OFFSET esi, 0 | 208 | CFI_REL_OFFSET esi, 0 |
219 | pushl %edx | 209 | pushl_cfi %edx |
220 | CFI_ADJUST_CFA_OFFSET 4 | ||
221 | CFI_REL_OFFSET edx, 0 | 210 | CFI_REL_OFFSET edx, 0 |
222 | pushl %ecx | 211 | pushl_cfi %ecx |
223 | CFI_ADJUST_CFA_OFFSET 4 | ||
224 | CFI_REL_OFFSET ecx, 0 | 212 | CFI_REL_OFFSET ecx, 0 |
225 | pushl %ebx | 213 | pushl_cfi %ebx |
226 | CFI_ADJUST_CFA_OFFSET 4 | ||
227 | CFI_REL_OFFSET ebx, 0 | 214 | CFI_REL_OFFSET ebx, 0 |
228 | movl $(__USER_DS), %edx | 215 | movl $(__USER_DS), %edx |
229 | movl %edx, %ds | 216 | movl %edx, %ds |
@@ -234,39 +221,29 @@ | |||
234 | .endm | 221 | .endm |
235 | 222 | ||
236 | .macro RESTORE_INT_REGS | 223 | .macro RESTORE_INT_REGS |
237 | popl %ebx | 224 | popl_cfi %ebx |
238 | CFI_ADJUST_CFA_OFFSET -4 | ||
239 | CFI_RESTORE ebx | 225 | CFI_RESTORE ebx |
240 | popl %ecx | 226 | popl_cfi %ecx |
241 | CFI_ADJUST_CFA_OFFSET -4 | ||
242 | CFI_RESTORE ecx | 227 | CFI_RESTORE ecx |
243 | popl %edx | 228 | popl_cfi %edx |
244 | CFI_ADJUST_CFA_OFFSET -4 | ||
245 | CFI_RESTORE edx | 229 | CFI_RESTORE edx |
246 | popl %esi | 230 | popl_cfi %esi |
247 | CFI_ADJUST_CFA_OFFSET -4 | ||
248 | CFI_RESTORE esi | 231 | CFI_RESTORE esi |
249 | popl %edi | 232 | popl_cfi %edi |
250 | CFI_ADJUST_CFA_OFFSET -4 | ||
251 | CFI_RESTORE edi | 233 | CFI_RESTORE edi |
252 | popl %ebp | 234 | popl_cfi %ebp |
253 | CFI_ADJUST_CFA_OFFSET -4 | ||
254 | CFI_RESTORE ebp | 235 | CFI_RESTORE ebp |
255 | popl %eax | 236 | popl_cfi %eax |
256 | CFI_ADJUST_CFA_OFFSET -4 | ||
257 | CFI_RESTORE eax | 237 | CFI_RESTORE eax |
258 | .endm | 238 | .endm |
259 | 239 | ||
260 | .macro RESTORE_REGS pop=0 | 240 | .macro RESTORE_REGS pop=0 |
261 | RESTORE_INT_REGS | 241 | RESTORE_INT_REGS |
262 | 1: popl %ds | 242 | 1: popl_cfi %ds |
263 | CFI_ADJUST_CFA_OFFSET -4 | ||
264 | /*CFI_RESTORE ds;*/ | 243 | /*CFI_RESTORE ds;*/ |
265 | 2: popl %es | 244 | 2: popl_cfi %es |
266 | CFI_ADJUST_CFA_OFFSET -4 | ||
267 | /*CFI_RESTORE es;*/ | 245 | /*CFI_RESTORE es;*/ |
268 | 3: popl %fs | 246 | 3: popl_cfi %fs |
269 | CFI_ADJUST_CFA_OFFSET -4 | ||
270 | /*CFI_RESTORE fs;*/ | 247 | /*CFI_RESTORE fs;*/ |
271 | POP_GS \pop | 248 | POP_GS \pop |
272 | .pushsection .fixup, "ax" | 249 | .pushsection .fixup, "ax" |
@@ -320,16 +297,12 @@ | |||
320 | 297 | ||
321 | ENTRY(ret_from_fork) | 298 | ENTRY(ret_from_fork) |
322 | CFI_STARTPROC | 299 | CFI_STARTPROC |
323 | pushl %eax | 300 | pushl_cfi %eax |
324 | CFI_ADJUST_CFA_OFFSET 4 | ||
325 | call schedule_tail | 301 | call schedule_tail |
326 | GET_THREAD_INFO(%ebp) | 302 | GET_THREAD_INFO(%ebp) |
327 | popl %eax | 303 | popl_cfi %eax |
328 | CFI_ADJUST_CFA_OFFSET -4 | 304 | pushl_cfi $0x0202 # Reset kernel eflags |
329 | pushl $0x0202 # Reset kernel eflags | 305 | popfl_cfi |
330 | CFI_ADJUST_CFA_OFFSET 4 | ||
331 | popfl | ||
332 | CFI_ADJUST_CFA_OFFSET -4 | ||
333 | jmp syscall_exit | 306 | jmp syscall_exit |
334 | CFI_ENDPROC | 307 | CFI_ENDPROC |
335 | END(ret_from_fork) | 308 | END(ret_from_fork) |
@@ -409,29 +382,23 @@ sysenter_past_esp: | |||
409 | * enough kernel state to call TRACE_IRQS_OFF can be called - but | 382 | * enough kernel state to call TRACE_IRQS_OFF can be called - but |
410 | * we immediately enable interrupts at that point anyway. | 383 | * we immediately enable interrupts at that point anyway. |
411 | */ | 384 | */ |
412 | pushl $(__USER_DS) | 385 | pushl_cfi $(__USER_DS) |
413 | CFI_ADJUST_CFA_OFFSET 4 | ||
414 | /*CFI_REL_OFFSET ss, 0*/ | 386 | /*CFI_REL_OFFSET ss, 0*/ |
415 | pushl %ebp | 387 | pushl_cfi %ebp |
416 | CFI_ADJUST_CFA_OFFSET 4 | ||
417 | CFI_REL_OFFSET esp, 0 | 388 | CFI_REL_OFFSET esp, 0 |
418 | pushfl | 389 | pushfl_cfi |
419 | orl $X86_EFLAGS_IF, (%esp) | 390 | orl $X86_EFLAGS_IF, (%esp) |
420 | CFI_ADJUST_CFA_OFFSET 4 | 391 | pushl_cfi $(__USER_CS) |
421 | pushl $(__USER_CS) | ||
422 | CFI_ADJUST_CFA_OFFSET 4 | ||
423 | /*CFI_REL_OFFSET cs, 0*/ | 392 | /*CFI_REL_OFFSET cs, 0*/ |
424 | /* | 393 | /* |
425 | * Push current_thread_info()->sysenter_return to the stack. | 394 | * Push current_thread_info()->sysenter_return to the stack. |
426 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words | 395 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words |
427 | * pushed above; +8 corresponds to copy_thread's esp0 setting. | 396 | * pushed above; +8 corresponds to copy_thread's esp0 setting. |
428 | */ | 397 | */ |
429 | pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) | 398 | pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) |
430 | CFI_ADJUST_CFA_OFFSET 4 | ||
431 | CFI_REL_OFFSET eip, 0 | 399 | CFI_REL_OFFSET eip, 0 |
432 | 400 | ||
433 | pushl %eax | 401 | pushl_cfi %eax |
434 | CFI_ADJUST_CFA_OFFSET 4 | ||
435 | SAVE_ALL | 402 | SAVE_ALL |
436 | ENABLE_INTERRUPTS(CLBR_NONE) | 403 | ENABLE_INTERRUPTS(CLBR_NONE) |
437 | 404 | ||
@@ -486,8 +453,7 @@ sysenter_audit: | |||
486 | movl %eax,%edx /* 2nd arg: syscall number */ | 453 | movl %eax,%edx /* 2nd arg: syscall number */ |
487 | movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ | 454 | movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ |
488 | call audit_syscall_entry | 455 | call audit_syscall_entry |
489 | pushl %ebx | 456 | pushl_cfi %ebx |
490 | CFI_ADJUST_CFA_OFFSET 4 | ||
491 | movl PT_EAX(%esp),%eax /* reload syscall number */ | 457 | movl PT_EAX(%esp),%eax /* reload syscall number */ |
492 | jmp sysenter_do_call | 458 | jmp sysenter_do_call |
493 | 459 | ||
@@ -529,8 +495,7 @@ ENDPROC(ia32_sysenter_target) | |||
529 | # system call handler stub | 495 | # system call handler stub |
530 | ENTRY(system_call) | 496 | ENTRY(system_call) |
531 | RING0_INT_FRAME # can't unwind into user space anyway | 497 | RING0_INT_FRAME # can't unwind into user space anyway |
532 | pushl %eax # save orig_eax | 498 | pushl_cfi %eax # save orig_eax |
533 | CFI_ADJUST_CFA_OFFSET 4 | ||
534 | SAVE_ALL | 499 | SAVE_ALL |
535 | GET_THREAD_INFO(%ebp) | 500 | GET_THREAD_INFO(%ebp) |
536 | # system call tracing in operation / emulation | 501 | # system call tracing in operation / emulation |
@@ -566,7 +531,6 @@ restore_all_notrace: | |||
566 | je ldt_ss # returning to user-space with LDT SS | 531 | je ldt_ss # returning to user-space with LDT SS |
567 | restore_nocheck: | 532 | restore_nocheck: |
568 | RESTORE_REGS 4 # skip orig_eax/error_code | 533 | RESTORE_REGS 4 # skip orig_eax/error_code |
569 | CFI_ADJUST_CFA_OFFSET -4 | ||
570 | irq_return: | 534 | irq_return: |
571 | INTERRUPT_RETURN | 535 | INTERRUPT_RETURN |
572 | .section .fixup,"ax" | 536 | .section .fixup,"ax" |
@@ -619,10 +583,8 @@ ldt_ss: | |||
619 | shr $16, %edx | 583 | shr $16, %edx |
620 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ | 584 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ |
621 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ | 585 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ |
622 | pushl $__ESPFIX_SS | 586 | pushl_cfi $__ESPFIX_SS |
623 | CFI_ADJUST_CFA_OFFSET 4 | 587 | pushl_cfi %eax /* new kernel esp */ |
624 | push %eax /* new kernel esp */ | ||
625 | CFI_ADJUST_CFA_OFFSET 4 | ||
626 | /* Disable interrupts, but do not irqtrace this section: we | 588 | /* Disable interrupts, but do not irqtrace this section: we |
627 | * will soon execute iret and the tracer was already set to | 589 | * will soon execute iret and the tracer was already set to |
628 | * the irqstate after the iret */ | 590 | * the irqstate after the iret */ |
@@ -666,11 +628,9 @@ work_notifysig: # deal with pending signals and | |||
666 | 628 | ||
667 | ALIGN | 629 | ALIGN |
668 | work_notifysig_v86: | 630 | work_notifysig_v86: |
669 | pushl %ecx # save ti_flags for do_notify_resume | 631 | pushl_cfi %ecx # save ti_flags for do_notify_resume |
670 | CFI_ADJUST_CFA_OFFSET 4 | ||
671 | call save_v86_state # %eax contains pt_regs pointer | 632 | call save_v86_state # %eax contains pt_regs pointer |
672 | popl %ecx | 633 | popl_cfi %ecx |
673 | CFI_ADJUST_CFA_OFFSET -4 | ||
674 | movl %eax, %esp | 634 | movl %eax, %esp |
675 | #else | 635 | #else |
676 | movl %esp, %eax | 636 | movl %esp, %eax |
@@ -750,14 +710,18 @@ ptregs_##name: \ | |||
750 | #define PTREGSCALL3(name) \ | 710 | #define PTREGSCALL3(name) \ |
751 | ALIGN; \ | 711 | ALIGN; \ |
752 | ptregs_##name: \ | 712 | ptregs_##name: \ |
713 | CFI_STARTPROC; \ | ||
753 | leal 4(%esp),%eax; \ | 714 | leal 4(%esp),%eax; \ |
754 | pushl %eax; \ | 715 | pushl_cfi %eax; \ |
755 | movl PT_EDX(%eax),%ecx; \ | 716 | movl PT_EDX(%eax),%ecx; \ |
756 | movl PT_ECX(%eax),%edx; \ | 717 | movl PT_ECX(%eax),%edx; \ |
757 | movl PT_EBX(%eax),%eax; \ | 718 | movl PT_EBX(%eax),%eax; \ |
758 | call sys_##name; \ | 719 | call sys_##name; \ |
759 | addl $4,%esp; \ | 720 | addl $4,%esp; \ |
760 | ret | 721 | CFI_ADJUST_CFA_OFFSET -4; \ |
722 | ret; \ | ||
723 | CFI_ENDPROC; \ | ||
724 | ENDPROC(ptregs_##name) | ||
761 | 725 | ||
762 | PTREGSCALL1(iopl) | 726 | PTREGSCALL1(iopl) |
763 | PTREGSCALL0(fork) | 727 | PTREGSCALL0(fork) |
@@ -772,15 +736,19 @@ PTREGSCALL1(vm86old) | |||
772 | /* Clone is an oddball. The 4th arg is in %edi */ | 736 | /* Clone is an oddball. The 4th arg is in %edi */ |
773 | ALIGN; | 737 | ALIGN; |
774 | ptregs_clone: | 738 | ptregs_clone: |
739 | CFI_STARTPROC | ||
775 | leal 4(%esp),%eax | 740 | leal 4(%esp),%eax |
776 | pushl %eax | 741 | pushl_cfi %eax |
777 | pushl PT_EDI(%eax) | 742 | pushl_cfi PT_EDI(%eax) |
778 | movl PT_EDX(%eax),%ecx | 743 | movl PT_EDX(%eax),%ecx |
779 | movl PT_ECX(%eax),%edx | 744 | movl PT_ECX(%eax),%edx |
780 | movl PT_EBX(%eax),%eax | 745 | movl PT_EBX(%eax),%eax |
781 | call sys_clone | 746 | call sys_clone |
782 | addl $8,%esp | 747 | addl $8,%esp |
748 | CFI_ADJUST_CFA_OFFSET -8 | ||
783 | ret | 749 | ret |
750 | CFI_ENDPROC | ||
751 | ENDPROC(ptregs_clone) | ||
784 | 752 | ||
785 | .macro FIXUP_ESPFIX_STACK | 753 | .macro FIXUP_ESPFIX_STACK |
786 | /* | 754 | /* |
@@ -795,10 +763,8 @@ ptregs_clone: | |||
795 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ | 763 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ |
796 | shl $16, %eax | 764 | shl $16, %eax |
797 | addl %esp, %eax /* the adjusted stack pointer */ | 765 | addl %esp, %eax /* the adjusted stack pointer */ |
798 | pushl $__KERNEL_DS | 766 | pushl_cfi $__KERNEL_DS |
799 | CFI_ADJUST_CFA_OFFSET 4 | 767 | pushl_cfi %eax |
800 | pushl %eax | ||
801 | CFI_ADJUST_CFA_OFFSET 4 | ||
802 | lss (%esp), %esp /* switch to the normal stack segment */ | 768 | lss (%esp), %esp /* switch to the normal stack segment */ |
803 | CFI_ADJUST_CFA_OFFSET -8 | 769 | CFI_ADJUST_CFA_OFFSET -8 |
804 | .endm | 770 | .endm |
@@ -835,8 +801,7 @@ vector=FIRST_EXTERNAL_VECTOR | |||
835 | .if vector <> FIRST_EXTERNAL_VECTOR | 801 | .if vector <> FIRST_EXTERNAL_VECTOR |
836 | CFI_ADJUST_CFA_OFFSET -4 | 802 | CFI_ADJUST_CFA_OFFSET -4 |
837 | .endif | 803 | .endif |
838 | 1: pushl $(~vector+0x80) /* Note: always in signed byte range */ | 804 | 1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ |
839 | CFI_ADJUST_CFA_OFFSET 4 | ||
840 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 | 805 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 |
841 | jmp 2f | 806 | jmp 2f |
842 | .endif | 807 | .endif |
@@ -876,8 +841,7 @@ ENDPROC(common_interrupt) | |||
876 | #define BUILD_INTERRUPT3(name, nr, fn) \ | 841 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
877 | ENTRY(name) \ | 842 | ENTRY(name) \ |
878 | RING0_INT_FRAME; \ | 843 | RING0_INT_FRAME; \ |
879 | pushl $~(nr); \ | 844 | pushl_cfi $~(nr); \ |
880 | CFI_ADJUST_CFA_OFFSET 4; \ | ||
881 | SAVE_ALL; \ | 845 | SAVE_ALL; \ |
882 | TRACE_IRQS_OFF \ | 846 | TRACE_IRQS_OFF \ |
883 | movl %esp,%eax; \ | 847 | movl %esp,%eax; \ |
@@ -893,21 +857,18 @@ ENDPROC(name) | |||
893 | 857 | ||
894 | ENTRY(coprocessor_error) | 858 | ENTRY(coprocessor_error) |
895 | RING0_INT_FRAME | 859 | RING0_INT_FRAME |
896 | pushl $0 | 860 | pushl_cfi $0 |
897 | CFI_ADJUST_CFA_OFFSET 4 | 861 | pushl_cfi $do_coprocessor_error |
898 | pushl $do_coprocessor_error | ||
899 | CFI_ADJUST_CFA_OFFSET 4 | ||
900 | jmp error_code | 862 | jmp error_code |
901 | CFI_ENDPROC | 863 | CFI_ENDPROC |
902 | END(coprocessor_error) | 864 | END(coprocessor_error) |
903 | 865 | ||
904 | ENTRY(simd_coprocessor_error) | 866 | ENTRY(simd_coprocessor_error) |
905 | RING0_INT_FRAME | 867 | RING0_INT_FRAME |
906 | pushl $0 | 868 | pushl_cfi $0 |
907 | CFI_ADJUST_CFA_OFFSET 4 | ||
908 | #ifdef CONFIG_X86_INVD_BUG | 869 | #ifdef CONFIG_X86_INVD_BUG |
909 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ | 870 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ |
910 | 661: pushl $do_general_protection | 871 | 661: pushl_cfi $do_general_protection |
911 | 662: | 872 | 662: |
912 | .section .altinstructions,"a" | 873 | .section .altinstructions,"a" |
913 | .balign 4 | 874 | .balign 4 |
@@ -922,19 +883,16 @@ ENTRY(simd_coprocessor_error) | |||
922 | 664: | 883 | 664: |
923 | .previous | 884 | .previous |
924 | #else | 885 | #else |
925 | pushl $do_simd_coprocessor_error | 886 | pushl_cfi $do_simd_coprocessor_error |
926 | #endif | 887 | #endif |
927 | CFI_ADJUST_CFA_OFFSET 4 | ||
928 | jmp error_code | 888 | jmp error_code |
929 | CFI_ENDPROC | 889 | CFI_ENDPROC |
930 | END(simd_coprocessor_error) | 890 | END(simd_coprocessor_error) |
931 | 891 | ||
932 | ENTRY(device_not_available) | 892 | ENTRY(device_not_available) |
933 | RING0_INT_FRAME | 893 | RING0_INT_FRAME |
934 | pushl $-1 # mark this as an int | 894 | pushl_cfi $-1 # mark this as an int |
935 | CFI_ADJUST_CFA_OFFSET 4 | 895 | pushl_cfi $do_device_not_available |
936 | pushl $do_device_not_available | ||
937 | CFI_ADJUST_CFA_OFFSET 4 | ||
938 | jmp error_code | 896 | jmp error_code |
939 | CFI_ENDPROC | 897 | CFI_ENDPROC |
940 | END(device_not_available) | 898 | END(device_not_available) |
@@ -956,82 +914,68 @@ END(native_irq_enable_sysexit) | |||
956 | 914 | ||
957 | ENTRY(overflow) | 915 | ENTRY(overflow) |
958 | RING0_INT_FRAME | 916 | RING0_INT_FRAME |
959 | pushl $0 | 917 | pushl_cfi $0 |
960 | CFI_ADJUST_CFA_OFFSET 4 | 918 | pushl_cfi $do_overflow |
961 | pushl $do_overflow | ||
962 | CFI_ADJUST_CFA_OFFSET 4 | ||
963 | jmp error_code | 919 | jmp error_code |
964 | CFI_ENDPROC | 920 | CFI_ENDPROC |
965 | END(overflow) | 921 | END(overflow) |
966 | 922 | ||
967 | ENTRY(bounds) | 923 | ENTRY(bounds) |
968 | RING0_INT_FRAME | 924 | RING0_INT_FRAME |
969 | pushl $0 | 925 | pushl_cfi $0 |
970 | CFI_ADJUST_CFA_OFFSET 4 | 926 | pushl_cfi $do_bounds |
971 | pushl $do_bounds | ||
972 | CFI_ADJUST_CFA_OFFSET 4 | ||
973 | jmp error_code | 927 | jmp error_code |
974 | CFI_ENDPROC | 928 | CFI_ENDPROC |
975 | END(bounds) | 929 | END(bounds) |
976 | 930 | ||
977 | ENTRY(invalid_op) | 931 | ENTRY(invalid_op) |
978 | RING0_INT_FRAME | 932 | RING0_INT_FRAME |
979 | pushl $0 | 933 | pushl_cfi $0 |
980 | CFI_ADJUST_CFA_OFFSET 4 | 934 | pushl_cfi $do_invalid_op |
981 | pushl $do_invalid_op | ||
982 | CFI_ADJUST_CFA_OFFSET 4 | ||
983 | jmp error_code | 935 | jmp error_code |
984 | CFI_ENDPROC | 936 | CFI_ENDPROC |
985 | END(invalid_op) | 937 | END(invalid_op) |
986 | 938 | ||
987 | ENTRY(coprocessor_segment_overrun) | 939 | ENTRY(coprocessor_segment_overrun) |
988 | RING0_INT_FRAME | 940 | RING0_INT_FRAME |
989 | pushl $0 | 941 | pushl_cfi $0 |
990 | CFI_ADJUST_CFA_OFFSET 4 | 942 | pushl_cfi $do_coprocessor_segment_overrun |
991 | pushl $do_coprocessor_segment_overrun | ||
992 | CFI_ADJUST_CFA_OFFSET 4 | ||
993 | jmp error_code | 943 | jmp error_code |
994 | CFI_ENDPROC | 944 | CFI_ENDPROC |
995 | END(coprocessor_segment_overrun) | 945 | END(coprocessor_segment_overrun) |
996 | 946 | ||
997 | ENTRY(invalid_TSS) | 947 | ENTRY(invalid_TSS) |
998 | RING0_EC_FRAME | 948 | RING0_EC_FRAME |
999 | pushl $do_invalid_TSS | 949 | pushl_cfi $do_invalid_TSS |
1000 | CFI_ADJUST_CFA_OFFSET 4 | ||
1001 | jmp error_code | 950 | jmp error_code |
1002 | CFI_ENDPROC | 951 | CFI_ENDPROC |
1003 | END(invalid_TSS) | 952 | END(invalid_TSS) |
1004 | 953 | ||
1005 | ENTRY(segment_not_present) | 954 | ENTRY(segment_not_present) |
1006 | RING0_EC_FRAME | 955 | RING0_EC_FRAME |
1007 | pushl $do_segment_not_present | 956 | pushl_cfi $do_segment_not_present |
1008 | CFI_ADJUST_CFA_OFFSET 4 | ||
1009 | jmp error_code | 957 | jmp error_code |
1010 | CFI_ENDPROC | 958 | CFI_ENDPROC |
1011 | END(segment_not_present) | 959 | END(segment_not_present) |
1012 | 960 | ||
1013 | ENTRY(stack_segment) | 961 | ENTRY(stack_segment) |
1014 | RING0_EC_FRAME | 962 | RING0_EC_FRAME |
1015 | pushl $do_stack_segment | 963 | pushl_cfi $do_stack_segment |
1016 | CFI_ADJUST_CFA_OFFSET 4 | ||
1017 | jmp error_code | 964 | jmp error_code |
1018 | CFI_ENDPROC | 965 | CFI_ENDPROC |
1019 | END(stack_segment) | 966 | END(stack_segment) |
1020 | 967 | ||
1021 | ENTRY(alignment_check) | 968 | ENTRY(alignment_check) |
1022 | RING0_EC_FRAME | 969 | RING0_EC_FRAME |
1023 | pushl $do_alignment_check | 970 | pushl_cfi $do_alignment_check |
1024 | CFI_ADJUST_CFA_OFFSET 4 | ||
1025 | jmp error_code | 971 | jmp error_code |
1026 | CFI_ENDPROC | 972 | CFI_ENDPROC |
1027 | END(alignment_check) | 973 | END(alignment_check) |
1028 | 974 | ||
1029 | ENTRY(divide_error) | 975 | ENTRY(divide_error) |
1030 | RING0_INT_FRAME | 976 | RING0_INT_FRAME |
1031 | pushl $0 # no error code | 977 | pushl_cfi $0 # no error code |
1032 | CFI_ADJUST_CFA_OFFSET 4 | 978 | pushl_cfi $do_divide_error |
1033 | pushl $do_divide_error | ||
1034 | CFI_ADJUST_CFA_OFFSET 4 | ||
1035 | jmp error_code | 979 | jmp error_code |
1036 | CFI_ENDPROC | 980 | CFI_ENDPROC |
1037 | END(divide_error) | 981 | END(divide_error) |
@@ -1039,10 +983,8 @@ END(divide_error) | |||
1039 | #ifdef CONFIG_X86_MCE | 983 | #ifdef CONFIG_X86_MCE |
1040 | ENTRY(machine_check) | 984 | ENTRY(machine_check) |
1041 | RING0_INT_FRAME | 985 | RING0_INT_FRAME |
1042 | pushl $0 | 986 | pushl_cfi $0 |
1043 | CFI_ADJUST_CFA_OFFSET 4 | 987 | pushl_cfi machine_check_vector |
1044 | pushl machine_check_vector | ||
1045 | CFI_ADJUST_CFA_OFFSET 4 | ||
1046 | jmp error_code | 988 | jmp error_code |
1047 | CFI_ENDPROC | 989 | CFI_ENDPROC |
1048 | END(machine_check) | 990 | END(machine_check) |
@@ -1050,10 +992,8 @@ END(machine_check) | |||
1050 | 992 | ||
1051 | ENTRY(spurious_interrupt_bug) | 993 | ENTRY(spurious_interrupt_bug) |
1052 | RING0_INT_FRAME | 994 | RING0_INT_FRAME |
1053 | pushl $0 | 995 | pushl_cfi $0 |
1054 | CFI_ADJUST_CFA_OFFSET 4 | 996 | pushl_cfi $do_spurious_interrupt_bug |
1055 | pushl $do_spurious_interrupt_bug | ||
1056 | CFI_ADJUST_CFA_OFFSET 4 | ||
1057 | jmp error_code | 997 | jmp error_code |
1058 | CFI_ENDPROC | 998 | CFI_ENDPROC |
1059 | END(spurious_interrupt_bug) | 999 | END(spurious_interrupt_bug) |
@@ -1084,8 +1024,7 @@ ENTRY(xen_sysenter_target) | |||
1084 | 1024 | ||
1085 | ENTRY(xen_hypervisor_callback) | 1025 | ENTRY(xen_hypervisor_callback) |
1086 | CFI_STARTPROC | 1026 | CFI_STARTPROC |
1087 | pushl $0 | 1027 | pushl_cfi $0 |
1088 | CFI_ADJUST_CFA_OFFSET 4 | ||
1089 | SAVE_ALL | 1028 | SAVE_ALL |
1090 | TRACE_IRQS_OFF | 1029 | TRACE_IRQS_OFF |
1091 | 1030 | ||
@@ -1121,23 +1060,20 @@ ENDPROC(xen_hypervisor_callback) | |||
1121 | # We distinguish between categories by maintaining a status value in EAX. | 1060 | # We distinguish between categories by maintaining a status value in EAX. |
1122 | ENTRY(xen_failsafe_callback) | 1061 | ENTRY(xen_failsafe_callback) |
1123 | CFI_STARTPROC | 1062 | CFI_STARTPROC |
1124 | pushl %eax | 1063 | pushl_cfi %eax |
1125 | CFI_ADJUST_CFA_OFFSET 4 | ||
1126 | movl $1,%eax | 1064 | movl $1,%eax |
1127 | 1: mov 4(%esp),%ds | 1065 | 1: mov 4(%esp),%ds |
1128 | 2: mov 8(%esp),%es | 1066 | 2: mov 8(%esp),%es |
1129 | 3: mov 12(%esp),%fs | 1067 | 3: mov 12(%esp),%fs |
1130 | 4: mov 16(%esp),%gs | 1068 | 4: mov 16(%esp),%gs |
1131 | testl %eax,%eax | 1069 | testl %eax,%eax |
1132 | popl %eax | 1070 | popl_cfi %eax |
1133 | CFI_ADJUST_CFA_OFFSET -4 | ||
1134 | lea 16(%esp),%esp | 1071 | lea 16(%esp),%esp |
1135 | CFI_ADJUST_CFA_OFFSET -16 | 1072 | CFI_ADJUST_CFA_OFFSET -16 |
1136 | jz 5f | 1073 | jz 5f |
1137 | addl $16,%esp | 1074 | addl $16,%esp |
1138 | jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) | 1075 | jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) |
1139 | 5: pushl $0 # EAX == 0 => Category 1 (Bad segment) | 1076 | 5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) |
1140 | CFI_ADJUST_CFA_OFFSET 4 | ||
1141 | SAVE_ALL | 1077 | SAVE_ALL |
1142 | jmp ret_from_exception | 1078 | jmp ret_from_exception |
1143 | CFI_ENDPROC | 1079 | CFI_ENDPROC |
@@ -1287,40 +1223,29 @@ syscall_table_size=(.-sys_call_table) | |||
1287 | 1223 | ||
1288 | ENTRY(page_fault) | 1224 | ENTRY(page_fault) |
1289 | RING0_EC_FRAME | 1225 | RING0_EC_FRAME |
1290 | pushl $do_page_fault | 1226 | pushl_cfi $do_page_fault |
1291 | CFI_ADJUST_CFA_OFFSET 4 | ||
1292 | ALIGN | 1227 | ALIGN |
1293 | error_code: | 1228 | error_code: |
1294 | /* the function address is in %gs's slot on the stack */ | 1229 | /* the function address is in %gs's slot on the stack */ |
1295 | pushl %fs | 1230 | pushl_cfi %fs |
1296 | CFI_ADJUST_CFA_OFFSET 4 | ||
1297 | /*CFI_REL_OFFSET fs, 0*/ | 1231 | /*CFI_REL_OFFSET fs, 0*/ |
1298 | pushl %es | 1232 | pushl_cfi %es |
1299 | CFI_ADJUST_CFA_OFFSET 4 | ||
1300 | /*CFI_REL_OFFSET es, 0*/ | 1233 | /*CFI_REL_OFFSET es, 0*/ |
1301 | pushl %ds | 1234 | pushl_cfi %ds |
1302 | CFI_ADJUST_CFA_OFFSET 4 | ||
1303 | /*CFI_REL_OFFSET ds, 0*/ | 1235 | /*CFI_REL_OFFSET ds, 0*/ |
1304 | pushl %eax | 1236 | pushl_cfi %eax |
1305 | CFI_ADJUST_CFA_OFFSET 4 | ||
1306 | CFI_REL_OFFSET eax, 0 | 1237 | CFI_REL_OFFSET eax, 0 |
1307 | pushl %ebp | 1238 | pushl_cfi %ebp |
1308 | CFI_ADJUST_CFA_OFFSET 4 | ||
1309 | CFI_REL_OFFSET ebp, 0 | 1239 | CFI_REL_OFFSET ebp, 0 |
1310 | pushl %edi | 1240 | pushl_cfi %edi |
1311 | CFI_ADJUST_CFA_OFFSET 4 | ||
1312 | CFI_REL_OFFSET edi, 0 | 1241 | CFI_REL_OFFSET edi, 0 |
1313 | pushl %esi | 1242 | pushl_cfi %esi |
1314 | CFI_ADJUST_CFA_OFFSET 4 | ||
1315 | CFI_REL_OFFSET esi, 0 | 1243 | CFI_REL_OFFSET esi, 0 |
1316 | pushl %edx | 1244 | pushl_cfi %edx |
1317 | CFI_ADJUST_CFA_OFFSET 4 | ||
1318 | CFI_REL_OFFSET edx, 0 | 1245 | CFI_REL_OFFSET edx, 0 |
1319 | pushl %ecx | 1246 | pushl_cfi %ecx |
1320 | CFI_ADJUST_CFA_OFFSET 4 | ||
1321 | CFI_REL_OFFSET ecx, 0 | 1247 | CFI_REL_OFFSET ecx, 0 |
1322 | pushl %ebx | 1248 | pushl_cfi %ebx |
1323 | CFI_ADJUST_CFA_OFFSET 4 | ||
1324 | CFI_REL_OFFSET ebx, 0 | 1249 | CFI_REL_OFFSET ebx, 0 |
1325 | cld | 1250 | cld |
1326 | movl $(__KERNEL_PERCPU), %ecx | 1251 | movl $(__KERNEL_PERCPU), %ecx |
@@ -1362,12 +1287,9 @@ END(page_fault) | |||
1362 | movl TSS_sysenter_sp0 + \offset(%esp), %esp | 1287 | movl TSS_sysenter_sp0 + \offset(%esp), %esp |
1363 | CFI_DEF_CFA esp, 0 | 1288 | CFI_DEF_CFA esp, 0 |
1364 | CFI_UNDEFINED eip | 1289 | CFI_UNDEFINED eip |
1365 | pushfl | 1290 | pushfl_cfi |
1366 | CFI_ADJUST_CFA_OFFSET 4 | 1291 | pushl_cfi $__KERNEL_CS |
1367 | pushl $__KERNEL_CS | 1292 | pushl_cfi $sysenter_past_esp |
1368 | CFI_ADJUST_CFA_OFFSET 4 | ||
1369 | pushl $sysenter_past_esp | ||
1370 | CFI_ADJUST_CFA_OFFSET 4 | ||
1371 | CFI_REL_OFFSET eip, 0 | 1293 | CFI_REL_OFFSET eip, 0 |
1372 | .endm | 1294 | .endm |
1373 | 1295 | ||
@@ -1377,8 +1299,7 @@ ENTRY(debug) | |||
1377 | jne debug_stack_correct | 1299 | jne debug_stack_correct |
1378 | FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn | 1300 | FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn |
1379 | debug_stack_correct: | 1301 | debug_stack_correct: |
1380 | pushl $-1 # mark this as an int | 1302 | pushl_cfi $-1 # mark this as an int |
1381 | CFI_ADJUST_CFA_OFFSET 4 | ||
1382 | SAVE_ALL | 1303 | SAVE_ALL |
1383 | TRACE_IRQS_OFF | 1304 | TRACE_IRQS_OFF |
1384 | xorl %edx,%edx # error code 0 | 1305 | xorl %edx,%edx # error code 0 |
@@ -1398,32 +1319,27 @@ END(debug) | |||
1398 | */ | 1319 | */ |
1399 | ENTRY(nmi) | 1320 | ENTRY(nmi) |
1400 | RING0_INT_FRAME | 1321 | RING0_INT_FRAME |
1401 | pushl %eax | 1322 | pushl_cfi %eax |
1402 | CFI_ADJUST_CFA_OFFSET 4 | ||
1403 | movl %ss, %eax | 1323 | movl %ss, %eax |
1404 | cmpw $__ESPFIX_SS, %ax | 1324 | cmpw $__ESPFIX_SS, %ax |
1405 | popl %eax | 1325 | popl_cfi %eax |
1406 | CFI_ADJUST_CFA_OFFSET -4 | ||
1407 | je nmi_espfix_stack | 1326 | je nmi_espfix_stack |
1408 | cmpl $ia32_sysenter_target,(%esp) | 1327 | cmpl $ia32_sysenter_target,(%esp) |
1409 | je nmi_stack_fixup | 1328 | je nmi_stack_fixup |
1410 | pushl %eax | 1329 | pushl_cfi %eax |
1411 | CFI_ADJUST_CFA_OFFSET 4 | ||
1412 | movl %esp,%eax | 1330 | movl %esp,%eax |
1413 | /* Do not access memory above the end of our stack page, | 1331 | /* Do not access memory above the end of our stack page, |
1414 | * it might not exist. | 1332 | * it might not exist. |
1415 | */ | 1333 | */ |
1416 | andl $(THREAD_SIZE-1),%eax | 1334 | andl $(THREAD_SIZE-1),%eax |
1417 | cmpl $(THREAD_SIZE-20),%eax | 1335 | cmpl $(THREAD_SIZE-20),%eax |
1418 | popl %eax | 1336 | popl_cfi %eax |
1419 | CFI_ADJUST_CFA_OFFSET -4 | ||
1420 | jae nmi_stack_correct | 1337 | jae nmi_stack_correct |
1421 | cmpl $ia32_sysenter_target,12(%esp) | 1338 | cmpl $ia32_sysenter_target,12(%esp) |
1422 | je nmi_debug_stack_check | 1339 | je nmi_debug_stack_check |
1423 | nmi_stack_correct: | 1340 | nmi_stack_correct: |
1424 | /* We have a RING0_INT_FRAME here */ | 1341 | /* We have a RING0_INT_FRAME here */ |
1425 | pushl %eax | 1342 | pushl_cfi %eax |
1426 | CFI_ADJUST_CFA_OFFSET 4 | ||
1427 | SAVE_ALL | 1343 | SAVE_ALL |
1428 | xorl %edx,%edx # zero error code | 1344 | xorl %edx,%edx # zero error code |
1429 | movl %esp,%eax # pt_regs pointer | 1345 | movl %esp,%eax # pt_regs pointer |
@@ -1452,18 +1368,14 @@ nmi_espfix_stack: | |||
1452 | * | 1368 | * |
1453 | * create the pointer to lss back | 1369 | * create the pointer to lss back |
1454 | */ | 1370 | */ |
1455 | pushl %ss | 1371 | pushl_cfi %ss |
1456 | CFI_ADJUST_CFA_OFFSET 4 | 1372 | pushl_cfi %esp |
1457 | pushl %esp | ||
1458 | CFI_ADJUST_CFA_OFFSET 4 | ||
1459 | addl $4, (%esp) | 1373 | addl $4, (%esp) |
1460 | /* copy the iret frame of 12 bytes */ | 1374 | /* copy the iret frame of 12 bytes */ |
1461 | .rept 3 | 1375 | .rept 3 |
1462 | pushl 16(%esp) | 1376 | pushl_cfi 16(%esp) |
1463 | CFI_ADJUST_CFA_OFFSET 4 | ||
1464 | .endr | 1377 | .endr |
1465 | pushl %eax | 1378 | pushl_cfi %eax |
1466 | CFI_ADJUST_CFA_OFFSET 4 | ||
1467 | SAVE_ALL | 1379 | SAVE_ALL |
1468 | FIXUP_ESPFIX_STACK # %eax == %esp | 1380 | FIXUP_ESPFIX_STACK # %eax == %esp |
1469 | xorl %edx,%edx # zero error code | 1381 | xorl %edx,%edx # zero error code |
@@ -1477,8 +1389,7 @@ END(nmi) | |||
1477 | 1389 | ||
1478 | ENTRY(int3) | 1390 | ENTRY(int3) |
1479 | RING0_INT_FRAME | 1391 | RING0_INT_FRAME |
1480 | pushl $-1 # mark this as an int | 1392 | pushl_cfi $-1 # mark this as an int |
1481 | CFI_ADJUST_CFA_OFFSET 4 | ||
1482 | SAVE_ALL | 1393 | SAVE_ALL |
1483 | TRACE_IRQS_OFF | 1394 | TRACE_IRQS_OFF |
1484 | xorl %edx,%edx # zero error code | 1395 | xorl %edx,%edx # zero error code |
@@ -1490,8 +1401,7 @@ END(int3) | |||
1490 | 1401 | ||
1491 | ENTRY(general_protection) | 1402 | ENTRY(general_protection) |
1492 | RING0_EC_FRAME | 1403 | RING0_EC_FRAME |
1493 | pushl $do_general_protection | 1404 | pushl_cfi $do_general_protection |
1494 | CFI_ADJUST_CFA_OFFSET 4 | ||
1495 | jmp error_code | 1405 | jmp error_code |
1496 | CFI_ENDPROC | 1406 | CFI_ENDPROC |
1497 | END(general_protection) | 1407 | END(general_protection) |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 17be5ec7cbba..a7ae7fd1010f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -213,23 +213,17 @@ ENDPROC(native_usergs_sysret64) | |||
213 | .macro FAKE_STACK_FRAME child_rip | 213 | .macro FAKE_STACK_FRAME child_rip |
214 | /* push in order ss, rsp, eflags, cs, rip */ | 214 | /* push in order ss, rsp, eflags, cs, rip */ |
215 | xorl %eax, %eax | 215 | xorl %eax, %eax |
216 | pushq $__KERNEL_DS /* ss */ | 216 | pushq_cfi $__KERNEL_DS /* ss */ |
217 | CFI_ADJUST_CFA_OFFSET 8 | ||
218 | /*CFI_REL_OFFSET ss,0*/ | 217 | /*CFI_REL_OFFSET ss,0*/ |
219 | pushq %rax /* rsp */ | 218 | pushq_cfi %rax /* rsp */ |
220 | CFI_ADJUST_CFA_OFFSET 8 | ||
221 | CFI_REL_OFFSET rsp,0 | 219 | CFI_REL_OFFSET rsp,0 |
222 | pushq $X86_EFLAGS_IF /* eflags - interrupts on */ | 220 | pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */ |
223 | CFI_ADJUST_CFA_OFFSET 8 | ||
224 | /*CFI_REL_OFFSET rflags,0*/ | 221 | /*CFI_REL_OFFSET rflags,0*/ |
225 | pushq $__KERNEL_CS /* cs */ | 222 | pushq_cfi $__KERNEL_CS /* cs */ |
226 | CFI_ADJUST_CFA_OFFSET 8 | ||
227 | /*CFI_REL_OFFSET cs,0*/ | 223 | /*CFI_REL_OFFSET cs,0*/ |
228 | pushq \child_rip /* rip */ | 224 | pushq_cfi \child_rip /* rip */ |
229 | CFI_ADJUST_CFA_OFFSET 8 | ||
230 | CFI_REL_OFFSET rip,0 | 225 | CFI_REL_OFFSET rip,0 |
231 | pushq %rax /* orig rax */ | 226 | pushq_cfi %rax /* orig rax */ |
232 | CFI_ADJUST_CFA_OFFSET 8 | ||
233 | .endm | 227 | .endm |
234 | 228 | ||
235 | .macro UNFAKE_STACK_FRAME | 229 | .macro UNFAKE_STACK_FRAME |
@@ -398,10 +392,8 @@ ENTRY(ret_from_fork) | |||
398 | 392 | ||
399 | LOCK ; btr $TIF_FORK,TI_flags(%r8) | 393 | LOCK ; btr $TIF_FORK,TI_flags(%r8) |
400 | 394 | ||
401 | push kernel_eflags(%rip) | 395 | pushq_cfi kernel_eflags(%rip) |
402 | CFI_ADJUST_CFA_OFFSET 8 | 396 | popfq_cfi # reset kernel eflags |
403 | popf # reset kernel eflags | ||
404 | CFI_ADJUST_CFA_OFFSET -8 | ||
405 | 397 | ||
406 | call schedule_tail # rdi: 'prev' task parameter | 398 | call schedule_tail # rdi: 'prev' task parameter |
407 | 399 | ||
@@ -521,11 +513,9 @@ sysret_careful: | |||
521 | jnc sysret_signal | 513 | jnc sysret_signal |
522 | TRACE_IRQS_ON | 514 | TRACE_IRQS_ON |
523 | ENABLE_INTERRUPTS(CLBR_NONE) | 515 | ENABLE_INTERRUPTS(CLBR_NONE) |
524 | pushq %rdi | 516 | pushq_cfi %rdi |
525 | CFI_ADJUST_CFA_OFFSET 8 | ||
526 | call schedule | 517 | call schedule |
527 | popq %rdi | 518 | popq_cfi %rdi |
528 | CFI_ADJUST_CFA_OFFSET -8 | ||
529 | jmp sysret_check | 519 | jmp sysret_check |
530 | 520 | ||
531 | /* Handle a signal */ | 521 | /* Handle a signal */ |
@@ -634,11 +624,9 @@ int_careful: | |||
634 | jnc int_very_careful | 624 | jnc int_very_careful |
635 | TRACE_IRQS_ON | 625 | TRACE_IRQS_ON |
636 | ENABLE_INTERRUPTS(CLBR_NONE) | 626 | ENABLE_INTERRUPTS(CLBR_NONE) |
637 | pushq %rdi | 627 | pushq_cfi %rdi |
638 | CFI_ADJUST_CFA_OFFSET 8 | ||
639 | call schedule | 628 | call schedule |
640 | popq %rdi | 629 | popq_cfi %rdi |
641 | CFI_ADJUST_CFA_OFFSET -8 | ||
642 | DISABLE_INTERRUPTS(CLBR_NONE) | 630 | DISABLE_INTERRUPTS(CLBR_NONE) |
643 | TRACE_IRQS_OFF | 631 | TRACE_IRQS_OFF |
644 | jmp int_with_check | 632 | jmp int_with_check |
@@ -652,12 +640,10 @@ int_check_syscall_exit_work: | |||
652 | /* Check for syscall exit trace */ | 640 | /* Check for syscall exit trace */ |
653 | testl $_TIF_WORK_SYSCALL_EXIT,%edx | 641 | testl $_TIF_WORK_SYSCALL_EXIT,%edx |
654 | jz int_signal | 642 | jz int_signal |
655 | pushq %rdi | 643 | pushq_cfi %rdi |
656 | CFI_ADJUST_CFA_OFFSET 8 | ||
657 | leaq 8(%rsp),%rdi # &ptregs -> arg1 | 644 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
658 | call syscall_trace_leave | 645 | call syscall_trace_leave |
659 | popq %rdi | 646 | popq_cfi %rdi |
660 | CFI_ADJUST_CFA_OFFSET -8 | ||
661 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi | 647 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
662 | jmp int_restore_rest | 648 | jmp int_restore_rest |
663 | 649 | ||
@@ -714,9 +700,8 @@ END(ptregscall_common) | |||
714 | 700 | ||
715 | ENTRY(stub_execve) | 701 | ENTRY(stub_execve) |
716 | CFI_STARTPROC | 702 | CFI_STARTPROC |
717 | popq %r11 | 703 | addq $8, %rsp |
718 | CFI_ADJUST_CFA_OFFSET -8 | 704 | PARTIAL_FRAME 0 |
719 | CFI_REGISTER rip, r11 | ||
720 | SAVE_REST | 705 | SAVE_REST |
721 | FIXUP_TOP_OF_STACK %r11 | 706 | FIXUP_TOP_OF_STACK %r11 |
722 | movq %rsp, %rcx | 707 | movq %rsp, %rcx |
@@ -735,7 +720,7 @@ END(stub_execve) | |||
735 | ENTRY(stub_rt_sigreturn) | 720 | ENTRY(stub_rt_sigreturn) |
736 | CFI_STARTPROC | 721 | CFI_STARTPROC |
737 | addq $8, %rsp | 722 | addq $8, %rsp |
738 | CFI_ADJUST_CFA_OFFSET -8 | 723 | PARTIAL_FRAME 0 |
739 | SAVE_REST | 724 | SAVE_REST |
740 | movq %rsp,%rdi | 725 | movq %rsp,%rdi |
741 | FIXUP_TOP_OF_STACK %r11 | 726 | FIXUP_TOP_OF_STACK %r11 |
@@ -766,8 +751,7 @@ vector=FIRST_EXTERNAL_VECTOR | |||
766 | .if vector <> FIRST_EXTERNAL_VECTOR | 751 | .if vector <> FIRST_EXTERNAL_VECTOR |
767 | CFI_ADJUST_CFA_OFFSET -8 | 752 | CFI_ADJUST_CFA_OFFSET -8 |
768 | .endif | 753 | .endif |
769 | 1: pushq $(~vector+0x80) /* Note: always in signed byte range */ | 754 | 1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ |
770 | CFI_ADJUST_CFA_OFFSET 8 | ||
771 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 | 755 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 |
772 | jmp 2f | 756 | jmp 2f |
773 | .endif | 757 | .endif |
@@ -796,8 +780,8 @@ END(interrupt) | |||
796 | 780 | ||
797 | /* 0(%rsp): ~(interrupt number) */ | 781 | /* 0(%rsp): ~(interrupt number) */ |
798 | .macro interrupt func | 782 | .macro interrupt func |
799 | subq $10*8, %rsp | 783 | subq $ORIG_RAX-ARGOFFSET+8, %rsp |
800 | CFI_ADJUST_CFA_OFFSET 10*8 | 784 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8 |
801 | call save_args | 785 | call save_args |
802 | PARTIAL_FRAME 0 | 786 | PARTIAL_FRAME 0 |
803 | call \func | 787 | call \func |
@@ -822,6 +806,7 @@ ret_from_intr: | |||
822 | TRACE_IRQS_OFF | 806 | TRACE_IRQS_OFF |
823 | decl PER_CPU_VAR(irq_count) | 807 | decl PER_CPU_VAR(irq_count) |
824 | leaveq | 808 | leaveq |
809 | CFI_RESTORE rbp | ||
825 | CFI_DEF_CFA_REGISTER rsp | 810 | CFI_DEF_CFA_REGISTER rsp |
826 | CFI_ADJUST_CFA_OFFSET -8 | 811 | CFI_ADJUST_CFA_OFFSET -8 |
827 | exit_intr: | 812 | exit_intr: |
@@ -903,11 +888,9 @@ retint_careful: | |||
903 | jnc retint_signal | 888 | jnc retint_signal |
904 | TRACE_IRQS_ON | 889 | TRACE_IRQS_ON |
905 | ENABLE_INTERRUPTS(CLBR_NONE) | 890 | ENABLE_INTERRUPTS(CLBR_NONE) |
906 | pushq %rdi | 891 | pushq_cfi %rdi |
907 | CFI_ADJUST_CFA_OFFSET 8 | ||
908 | call schedule | 892 | call schedule |
909 | popq %rdi | 893 | popq_cfi %rdi |
910 | CFI_ADJUST_CFA_OFFSET -8 | ||
911 | GET_THREAD_INFO(%rcx) | 894 | GET_THREAD_INFO(%rcx) |
912 | DISABLE_INTERRUPTS(CLBR_NONE) | 895 | DISABLE_INTERRUPTS(CLBR_NONE) |
913 | TRACE_IRQS_OFF | 896 | TRACE_IRQS_OFF |
@@ -956,8 +939,7 @@ END(common_interrupt) | |||
956 | .macro apicinterrupt num sym do_sym | 939 | .macro apicinterrupt num sym do_sym |
957 | ENTRY(\sym) | 940 | ENTRY(\sym) |
958 | INTR_FRAME | 941 | INTR_FRAME |
959 | pushq $~(\num) | 942 | pushq_cfi $~(\num) |
960 | CFI_ADJUST_CFA_OFFSET 8 | ||
961 | interrupt \do_sym | 943 | interrupt \do_sym |
962 | jmp ret_from_intr | 944 | jmp ret_from_intr |
963 | CFI_ENDPROC | 945 | CFI_ENDPROC |
@@ -1023,9 +1005,9 @@ apicinterrupt ERROR_APIC_VECTOR \ | |||
1023 | apicinterrupt SPURIOUS_APIC_VECTOR \ | 1005 | apicinterrupt SPURIOUS_APIC_VECTOR \ |
1024 | spurious_interrupt smp_spurious_interrupt | 1006 | spurious_interrupt smp_spurious_interrupt |
1025 | 1007 | ||
1026 | #ifdef CONFIG_PERF_EVENTS | 1008 | #ifdef CONFIG_IRQ_WORK |
1027 | apicinterrupt LOCAL_PENDING_VECTOR \ | 1009 | apicinterrupt IRQ_WORK_VECTOR \ |
1028 | perf_pending_interrupt smp_perf_pending_interrupt | 1010 | irq_work_interrupt smp_irq_work_interrupt |
1029 | #endif | 1011 | #endif |
1030 | 1012 | ||
1031 | /* | 1013 | /* |
@@ -1036,8 +1018,8 @@ ENTRY(\sym) | |||
1036 | INTR_FRAME | 1018 | INTR_FRAME |
1037 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1019 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1038 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1020 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1039 | subq $15*8,%rsp | 1021 | subq $ORIG_RAX-R15, %rsp |
1040 | CFI_ADJUST_CFA_OFFSET 15*8 | 1022 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1041 | call error_entry | 1023 | call error_entry |
1042 | DEFAULT_FRAME 0 | 1024 | DEFAULT_FRAME 0 |
1043 | movq %rsp,%rdi /* pt_regs pointer */ | 1025 | movq %rsp,%rdi /* pt_regs pointer */ |
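The $15*8 magic number becomes self-documenting here: in the 64-bit pt_regs save layout (per asm/ptrace-abi.h), r15 sits at offset 0 and orig_rax at offset 120, so ORIG_RAX-R15 is exactly the 15*8 bytes needed for the fifteen general-purpose register slots:

R15      =   0		/* first saved GP slot */
RDI      = 112		/* fifteenth and last GP slot, bytes 112..119 */
ORIG_RAX = 120		/* so ORIG_RAX - R15 == 15*8 == 120 */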
@@ -1052,9 +1034,9 @@ END(\sym) | |||
1052 | ENTRY(\sym) | 1034 | ENTRY(\sym) |
1053 | INTR_FRAME | 1035 | INTR_FRAME |
1054 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1036 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1055 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | 1037 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1056 | CFI_ADJUST_CFA_OFFSET 8 | 1038 | subq $ORIG_RAX-R15, %rsp |
1057 | subq $15*8, %rsp | 1039 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1058 | call save_paranoid | 1040 | call save_paranoid |
1059 | TRACE_IRQS_OFF | 1041 | TRACE_IRQS_OFF |
1060 | movq %rsp,%rdi /* pt_regs pointer */ | 1042 | movq %rsp,%rdi /* pt_regs pointer */ |
@@ -1070,9 +1052,9 @@ END(\sym) | |||
1070 | ENTRY(\sym) | 1052 | ENTRY(\sym) |
1071 | INTR_FRAME | 1053 | INTR_FRAME |
1072 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1054 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1073 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | 1055 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1074 | CFI_ADJUST_CFA_OFFSET 8 | 1056 | subq $ORIG_RAX-R15, %rsp |
1075 | subq $15*8, %rsp | 1057 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1076 | call save_paranoid | 1058 | call save_paranoid |
1077 | TRACE_IRQS_OFF | 1059 | TRACE_IRQS_OFF |
1078 | movq %rsp,%rdi /* pt_regs pointer */ | 1060 | movq %rsp,%rdi /* pt_regs pointer */ |
@@ -1089,8 +1071,8 @@ END(\sym) | |||
1089 | ENTRY(\sym) | 1071 | ENTRY(\sym) |
1090 | XCPT_FRAME | 1072 | XCPT_FRAME |
1091 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1073 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1092 | subq $15*8,%rsp | 1074 | subq $ORIG_RAX-R15, %rsp |
1093 | CFI_ADJUST_CFA_OFFSET 15*8 | 1075 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1094 | call error_entry | 1076 | call error_entry |
1095 | DEFAULT_FRAME 0 | 1077 | DEFAULT_FRAME 0 |
1096 | movq %rsp,%rdi /* pt_regs pointer */ | 1078 | movq %rsp,%rdi /* pt_regs pointer */ |
@@ -1107,8 +1089,8 @@ END(\sym) | |||
1107 | ENTRY(\sym) | 1089 | ENTRY(\sym) |
1108 | XCPT_FRAME | 1090 | XCPT_FRAME |
1109 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1091 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1110 | subq $15*8,%rsp | 1092 | subq $ORIG_RAX-R15, %rsp |
1111 | CFI_ADJUST_CFA_OFFSET 15*8 | 1093 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1112 | call save_paranoid | 1094 | call save_paranoid |
1113 | DEFAULT_FRAME 0 | 1095 | DEFAULT_FRAME 0 |
1114 | TRACE_IRQS_OFF | 1096 | TRACE_IRQS_OFF |
@@ -1139,16 +1121,14 @@ zeroentry simd_coprocessor_error do_simd_coprocessor_error | |||
1139 | /* edi: new selector */ | 1121 | /* edi: new selector */ |
1140 | ENTRY(native_load_gs_index) | 1122 | ENTRY(native_load_gs_index) |
1141 | CFI_STARTPROC | 1123 | CFI_STARTPROC |
1142 | pushf | 1124 | pushfq_cfi |
1143 | CFI_ADJUST_CFA_OFFSET 8 | ||
1144 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) | 1125 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
1145 | SWAPGS | 1126 | SWAPGS |
1146 | gs_change: | 1127 | gs_change: |
1147 | movl %edi,%gs | 1128 | movl %edi,%gs |
1148 | 2: mfence /* workaround */ | 1129 | 2: mfence /* workaround */ |
1149 | SWAPGS | 1130 | SWAPGS |
1150 | popf | 1131 | popfq_cfi |
1151 | CFI_ADJUST_CFA_OFFSET -8 | ||
1152 | ret | 1132 | ret |
1153 | CFI_ENDPROC | 1133 | CFI_ENDPROC |
1154 | END(native_load_gs_index) | 1134 | END(native_load_gs_index) |
@@ -1215,8 +1195,7 @@ END(kernel_execve) | |||
1215 | /* Call softirq on interrupt stack. Interrupts are off. */ | 1195 | /* Call softirq on interrupt stack. Interrupts are off. */ |
1216 | ENTRY(call_softirq) | 1196 | ENTRY(call_softirq) |
1217 | CFI_STARTPROC | 1197 | CFI_STARTPROC |
1218 | push %rbp | 1198 | pushq_cfi %rbp |
1219 | CFI_ADJUST_CFA_OFFSET 8 | ||
1220 | CFI_REL_OFFSET rbp,0 | 1199 | CFI_REL_OFFSET rbp,0 |
1221 | mov %rsp,%rbp | 1200 | mov %rsp,%rbp |
1222 | CFI_DEF_CFA_REGISTER rbp | 1201 | CFI_DEF_CFA_REGISTER rbp |
@@ -1225,6 +1204,7 @@ ENTRY(call_softirq) | |||
1225 | push %rbp # backlink for old unwinder | 1204 | push %rbp # backlink for old unwinder |
1226 | call __do_softirq | 1205 | call __do_softirq |
1227 | leaveq | 1206 | leaveq |
1207 | CFI_RESTORE rbp | ||
1228 | CFI_DEF_CFA_REGISTER rsp | 1208 | CFI_DEF_CFA_REGISTER rsp |
1229 | CFI_ADJUST_CFA_OFFSET -8 | 1209 | CFI_ADJUST_CFA_OFFSET -8 |
1230 | decl PER_CPU_VAR(irq_count) | 1210 | decl PER_CPU_VAR(irq_count) |
@@ -1368,7 +1348,7 @@ paranoidzeroentry machine_check *machine_check_vector(%rip) | |||
1368 | 1348 | ||
1369 | /* ebx: no swapgs flag */ | 1349 | /* ebx: no swapgs flag */ |
1370 | ENTRY(paranoid_exit) | 1350 | ENTRY(paranoid_exit) |
1371 | INTR_FRAME | 1351 | DEFAULT_FRAME |
1372 | DISABLE_INTERRUPTS(CLBR_NONE) | 1352 | DISABLE_INTERRUPTS(CLBR_NONE) |
1373 | TRACE_IRQS_OFF | 1353 | TRACE_IRQS_OFF |
1374 | testl %ebx,%ebx /* swapgs needed? */ | 1354 | testl %ebx,%ebx /* swapgs needed? */ |
@@ -1445,7 +1425,6 @@ error_swapgs: | |||
1445 | error_sti: | 1425 | error_sti: |
1446 | TRACE_IRQS_OFF | 1426 | TRACE_IRQS_OFF |
1447 | ret | 1427 | ret |
1448 | CFI_ENDPROC | ||
1449 | 1428 | ||
1450 | /* | 1429 | /* |
1451 | * There are two places in the kernel that can potentially fault with | 1430 | * There are two places in the kernel that can potentially fault with |
@@ -1470,6 +1449,7 @@ bstep_iret: | |||
1470 | /* Fix truncated RIP */ | 1449 | /* Fix truncated RIP */ |
1471 | movq %rcx,RIP+8(%rsp) | 1450 | movq %rcx,RIP+8(%rsp) |
1472 | jmp error_swapgs | 1451 | jmp error_swapgs |
1452 | CFI_ENDPROC | ||
1473 | END(error_entry) | 1453 | END(error_entry) |
1474 | 1454 | ||
1475 | 1455 | ||
@@ -1498,8 +1478,8 @@ ENTRY(nmi) | |||
1498 | INTR_FRAME | 1478 | INTR_FRAME |
1499 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1479 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1500 | pushq_cfi $-1 | 1480 | pushq_cfi $-1 |
1501 | subq $15*8, %rsp | 1481 | subq $ORIG_RAX-R15, %rsp |
1502 | CFI_ADJUST_CFA_OFFSET 15*8 | 1482 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1503 | call save_paranoid | 1483 | call save_paranoid |
1504 | DEFAULT_FRAME 0 | 1484 | DEFAULT_FRAME 0 |
1505 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ | 1485 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
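Note: the hunks above replace the hard-coded 15*8 stack adjustment with ORIG_RAX-R15, the difference between two pt_regs field offsets. A minimal sketch of why the two operands are equal, with offset values assumed from the x86-64 pt_regs save layout (r15 is the lowest-saved slot, orig_ax follows the fifteenth general-purpose register):

	/* Sketch only: offsets assumed from the x86-64 pt_regs layout,
	 * not copied from asm-offsets output. */
	enum pt_regs_off {
		R15      = 0 * 8,	/* first of 15 GP save slots */
		/* ... r14 through rdi fill the next 14 slots ... */
		ORIG_RAX = 15 * 8,	/* slot after the GP registers */
	};

	/* ORIG_RAX - R15 == 120 == 15*8, so
	 *	subq $ORIG_RAX-R15, %rsp
	 * allocates exactly the register save area while staying
	 * correct if the layout ever changes. */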
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7494999141b3..efaf906daf93 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta, | |||
440 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); | 440 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); |
441 | static struct hpet_dev *hpet_devs; | 441 | static struct hpet_dev *hpet_devs; |
442 | 442 | ||
443 | void hpet_msi_unmask(unsigned int irq) | 443 | void hpet_msi_unmask(struct irq_data *data) |
444 | { | 444 | { |
445 | struct hpet_dev *hdev = get_irq_data(irq); | 445 | struct hpet_dev *hdev = data->handler_data; |
446 | unsigned int cfg; | 446 | unsigned int cfg; |
447 | 447 | ||
448 | /* unmask it */ | 448 | /* unmask it */ |
@@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq) | |||
451 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | 451 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); |
452 | } | 452 | } |
453 | 453 | ||
454 | void hpet_msi_mask(unsigned int irq) | 454 | void hpet_msi_mask(struct irq_data *data) |
455 | { | 455 | { |
456 | struct hpet_dev *hdev = data->handler_data; | ||
456 | unsigned int cfg; | 457 | unsigned int cfg; |
457 | struct hpet_dev *hdev = get_irq_data(irq); | ||
458 | 458 | ||
459 | /* mask it */ | 459 | /* mask it */ |
460 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | 460 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); |
@@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq) | |||
462 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | 462 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); |
463 | } | 463 | } |
464 | 464 | ||
465 | void hpet_msi_write(unsigned int irq, struct msi_msg *msg) | 465 | void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) |
466 | { | 466 | { |
467 | struct hpet_dev *hdev = get_irq_data(irq); | ||
468 | |||
469 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); | 467 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); |
470 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); | 468 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); |
471 | } | 469 | } |
472 | 470 | ||
473 | void hpet_msi_read(unsigned int irq, struct msi_msg *msg) | 471 | void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) |
474 | { | 472 | { |
475 | struct hpet_dev *hdev = get_irq_data(irq); | ||
476 | |||
477 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); | 473 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); |
478 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); | 474 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); |
479 | msg->address_hi = 0; | 475 | msg->address_hi = 0; |
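Note: the hpet_msi_* conversions above follow the new irq_chip calling convention, where callbacks receive a struct irq_data instead of a bare irq number and pull their per-timer context out of it, saving a get_irq_data() lookup. A minimal sketch of the pairing, assuming the set_irq_data() helper of this era is what populates the pointer later read back as data->handler_data:

	/* Sketch, not the driver's actual setup path. */
	static void hpet_msi_setup_sketch(unsigned int irq, struct hpet_dev *hdev)
	{
		set_irq_data(irq, hdev);	/* stored as irq_data.handler_data */
	}

	static void hpet_mask_sketch(struct irq_data *data)
	{
		struct hpet_dev *hdev = data->handler_data;

		/* ... clear the enable bit in HPET_Tn_CFG(hdev->num) ... */
	}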
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index a46cb3522c0c..58bb239a2fd7 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -68,19 +68,22 @@ static void __cpuinit init_thread_xstate(void) | |||
68 | */ | 68 | */ |
69 | 69 | ||
70 | if (!HAVE_HWFP) { | 70 | if (!HAVE_HWFP) { |
71 | /* | ||
72 | * Disable xsave as we do not support it if i387 | ||
73 | * emulation is enabled. | ||
74 | */ | ||
75 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); | ||
76 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); | ||
71 | xstate_size = sizeof(struct i387_soft_struct); | 77 | xstate_size = sizeof(struct i387_soft_struct); |
72 | return; | 78 | return; |
73 | } | 79 | } |
74 | 80 | ||
75 | if (cpu_has_fxsr) | 81 | if (cpu_has_fxsr) |
76 | xstate_size = sizeof(struct i387_fxsave_struct); | 82 | xstate_size = sizeof(struct i387_fxsave_struct); |
77 | #ifdef CONFIG_X86_32 | ||
78 | else | 83 | else |
79 | xstate_size = sizeof(struct i387_fsave_struct); | 84 | xstate_size = sizeof(struct i387_fsave_struct); |
80 | #endif | ||
81 | } | 85 | } |
82 | 86 | ||
83 | #ifdef CONFIG_X86_64 | ||
84 | /* | 87 | /* |
85 | * Called at bootup to set up the initial FPU state that is later cloned | 88 | * Called at bootup to set up the initial FPU state that is later cloned |
86 | * into all processes. | 89 | * into all processes. |
@@ -88,12 +91,21 @@ static void __cpuinit init_thread_xstate(void) | |||
88 | 91 | ||
89 | void __cpuinit fpu_init(void) | 92 | void __cpuinit fpu_init(void) |
90 | { | 93 | { |
91 | unsigned long oldcr0 = read_cr0(); | 94 | unsigned long cr0; |
92 | 95 | unsigned long cr4_mask = 0; | |
93 | set_in_cr4(X86_CR4_OSFXSR); | ||
94 | set_in_cr4(X86_CR4_OSXMMEXCPT); | ||
95 | 96 | ||
96 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ | 97 | if (cpu_has_fxsr) |
98 | cr4_mask |= X86_CR4_OSFXSR; | ||
99 | if (cpu_has_xmm) | ||
100 | cr4_mask |= X86_CR4_OSXMMEXCPT; | ||
101 | if (cr4_mask) | ||
102 | set_in_cr4(cr4_mask); | ||
103 | |||
104 | cr0 = read_cr0(); | ||
105 | cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */ | ||
106 | if (!HAVE_HWFP) | ||
107 | cr0 |= X86_CR0_EM; | ||
108 | write_cr0(cr0); | ||
97 | 109 | ||
98 | if (!smp_processor_id()) | 110 | if (!smp_processor_id()) |
99 | init_thread_xstate(); | 111 | init_thread_xstate(); |
@@ -104,24 +116,12 @@ void __cpuinit fpu_init(void) | |||
104 | clear_used_math(); | 116 | clear_used_math(); |
105 | } | 117 | } |
106 | 118 | ||
107 | #else /* CONFIG_X86_64 */ | ||
108 | |||
109 | void __cpuinit fpu_init(void) | ||
110 | { | ||
111 | if (!smp_processor_id()) | ||
112 | init_thread_xstate(); | ||
113 | } | ||
114 | |||
115 | #endif /* CONFIG_X86_32 */ | ||
116 | |||
117 | void fpu_finit(struct fpu *fpu) | 119 | void fpu_finit(struct fpu *fpu) |
118 | { | 120 | { |
119 | #ifdef CONFIG_X86_32 | ||
120 | if (!HAVE_HWFP) { | 121 | if (!HAVE_HWFP) { |
121 | finit_soft_fpu(&fpu->state->soft); | 122 | finit_soft_fpu(&fpu->state->soft); |
122 | return; | 123 | return; |
123 | } | 124 | } |
124 | #endif | ||
125 | 125 | ||
126 | if (cpu_has_fxsr) { | 126 | if (cpu_has_fxsr) { |
127 | struct i387_fxsave_struct *fx = &fpu->state->fxsave; | 127 | struct i387_fxsave_struct *fx = &fpu->state->fxsave; |
@@ -386,19 +386,17 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) | |||
386 | #ifdef CONFIG_X86_64 | 386 | #ifdef CONFIG_X86_64 |
387 | env->fip = fxsave->rip; | 387 | env->fip = fxsave->rip; |
388 | env->foo = fxsave->rdp; | 388 | env->foo = fxsave->rdp; |
389 | /* | ||
390 | * should actually be ds/cs at fpu exception time, but | ||
391 | * that information is not available in 64bit mode. | ||
392 | */ | ||
393 | env->fcs = task_pt_regs(tsk)->cs; | ||
389 | if (tsk == current) { | 394 | if (tsk == current) { |
390 | /* | 395 | savesegment(ds, env->fos); |
391 | * should actually be ds/cs at fpu exception time, but | ||
392 | * that information is not available in 64bit mode. | ||
393 | */ | ||
394 | asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos)); | ||
395 | asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs)); | ||
396 | } else { | 396 | } else { |
397 | struct pt_regs *regs = task_pt_regs(tsk); | 397 | env->fos = tsk->thread.ds; |
398 | |||
399 | env->fos = 0xffff0000 | tsk->thread.ds; | ||
400 | env->fcs = regs->cs; | ||
401 | } | 398 | } |
399 | env->fos |= 0xffff0000; | ||
402 | #else | 400 | #else |
403 | env->fip = fxsave->fip; | 401 | env->fip = fxsave->fip; |
404 | env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16); | 402 | env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16); |
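Note: the rewritten convert_from_fxsr() also swaps the open-coded segment-reading asm for savesegment() and hoists the 0xffff0000 padding of env->fos out of both branches. savesegment() is assumed to expand to roughly the following (its real definition lives in the x86 system headers of this era):

	/* Approximate expansion, for illustration only. */
	#define savesegment(seg, value) \
		asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")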
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index cafa7c80ac95..20757cb2efa3 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -29,24 +29,10 @@ | |||
29 | * plus some generic x86 specific things if generic specifics makes | 29 | * plus some generic x86 specific things if generic specifics makes |
30 | * any sense at all. | 30 | * any sense at all. |
31 | */ | 31 | */ |
32 | static void init_8259A(int auto_eoi); | ||
32 | 33 | ||
33 | static int i8259A_auto_eoi; | 34 | static int i8259A_auto_eoi; |
34 | DEFINE_RAW_SPINLOCK(i8259A_lock); | 35 | DEFINE_RAW_SPINLOCK(i8259A_lock); |
35 | static void mask_and_ack_8259A(unsigned int); | ||
36 | static void mask_8259A(void); | ||
37 | static void unmask_8259A(void); | ||
38 | static void disable_8259A_irq(unsigned int irq); | ||
39 | static void enable_8259A_irq(unsigned int irq); | ||
40 | static void init_8259A(int auto_eoi); | ||
41 | static int i8259A_irq_pending(unsigned int irq); | ||
42 | |||
43 | struct irq_chip i8259A_chip = { | ||
44 | .name = "XT-PIC", | ||
45 | .mask = disable_8259A_irq, | ||
46 | .disable = disable_8259A_irq, | ||
47 | .unmask = enable_8259A_irq, | ||
48 | .mask_ack = mask_and_ack_8259A, | ||
49 | }; | ||
50 | 36 | ||
51 | /* | 37 | /* |
52 | * 8259A PIC functions to handle ISA devices: | 38 | * 8259A PIC functions to handle ISA devices: |
@@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff; | |||
68 | */ | 54 | */ |
69 | unsigned long io_apic_irqs; | 55 | unsigned long io_apic_irqs; |
70 | 56 | ||
71 | static void disable_8259A_irq(unsigned int irq) | 57 | static void mask_8259A_irq(unsigned int irq) |
72 | { | 58 | { |
73 | unsigned int mask = 1 << irq; | 59 | unsigned int mask = 1 << irq; |
74 | unsigned long flags; | 60 | unsigned long flags; |
@@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq) | |||
82 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 68 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
83 | } | 69 | } |
84 | 70 | ||
85 | static void enable_8259A_irq(unsigned int irq) | 71 | static void disable_8259A_irq(struct irq_data *data) |
72 | { | ||
73 | mask_8259A_irq(data->irq); | ||
74 | } | ||
75 | |||
76 | static void unmask_8259A_irq(unsigned int irq) | ||
86 | { | 77 | { |
87 | unsigned int mask = ~(1 << irq); | 78 | unsigned int mask = ~(1 << irq); |
88 | unsigned long flags; | 79 | unsigned long flags; |
@@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq) | |||
96 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 87 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
97 | } | 88 | } |
98 | 89 | ||
90 | static void enable_8259A_irq(struct irq_data *data) | ||
91 | { | ||
92 | unmask_8259A_irq(data->irq); | ||
93 | } | ||
94 | |||
99 | static int i8259A_irq_pending(unsigned int irq) | 95 | static int i8259A_irq_pending(unsigned int irq) |
100 | { | 96 | { |
101 | unsigned int mask = 1<<irq; | 97 | unsigned int mask = 1<<irq; |
@@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq) | |||
117 | disable_irq_nosync(irq); | 113 | disable_irq_nosync(irq); |
118 | io_apic_irqs &= ~(1<<irq); | 114 | io_apic_irqs &= ~(1<<irq); |
119 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, | 115 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
120 | "XT"); | 116 | i8259A_chip.name); |
121 | enable_irq(irq); | 117 | enable_irq(irq); |
122 | } | 118 | } |
123 | 119 | ||
@@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq) | |||
150 | * first, _then_ send the EOI, and the order of EOI | 146 | * first, _then_ send the EOI, and the order of EOI |
151 | * to the two 8259s is important! | 147 | * to the two 8259s is important! |
152 | */ | 148 | */ |
153 | static void mask_and_ack_8259A(unsigned int irq) | 149 | static void mask_and_ack_8259A(struct irq_data *data) |
154 | { | 150 | { |
151 | unsigned int irq = data->irq; | ||
155 | unsigned int irqmask = 1 << irq; | 152 | unsigned int irqmask = 1 << irq; |
156 | unsigned long flags; | 153 | unsigned long flags; |
157 | 154 | ||
@@ -223,6 +220,14 @@ spurious_8259A_irq: | |||
223 | } | 220 | } |
224 | } | 221 | } |
225 | 222 | ||
223 | struct irq_chip i8259A_chip = { | ||
224 | .name = "XT-PIC", | ||
225 | .irq_mask = disable_8259A_irq, | ||
226 | .irq_disable = disable_8259A_irq, | ||
227 | .irq_unmask = enable_8259A_irq, | ||
228 | .irq_mask_ack = mask_and_ack_8259A, | ||
229 | }; | ||
230 | |||
226 | static char irq_trigger[2]; | 231 | static char irq_trigger[2]; |
227 | /** | 232 | /** |
228 | * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ | 233 | * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ |
@@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi) | |||
342 | * In AEOI mode we just have to mask the interrupt | 347 | * In AEOI mode we just have to mask the interrupt |
343 | * when acking. | 348 | * when acking. |
344 | */ | 349 | */ |
345 | i8259A_chip.mask_ack = disable_8259A_irq; | 350 | i8259A_chip.irq_mask_ack = disable_8259A_irq; |
346 | else | 351 | else |
347 | i8259A_chip.mask_ack = mask_and_ack_8259A; | 352 | i8259A_chip.irq_mask_ack = mask_and_ack_8259A; |
348 | 353 | ||
349 | udelay(100); /* wait for 8259A to initialize */ | 354 | udelay(100); /* wait for 8259A to initialize */ |
350 | 355 | ||
@@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi) | |||
363 | static void legacy_pic_noop(void) { }; | 368 | static void legacy_pic_noop(void) { }; |
364 | static void legacy_pic_uint_noop(unsigned int unused) { }; | 369 | static void legacy_pic_uint_noop(unsigned int unused) { }; |
365 | static void legacy_pic_int_noop(int unused) { }; | 370 | static void legacy_pic_int_noop(int unused) { }; |
366 | |||
367 | static struct irq_chip dummy_pic_chip = { | ||
368 | .name = "dummy pic", | ||
369 | .mask = legacy_pic_uint_noop, | ||
370 | .unmask = legacy_pic_uint_noop, | ||
371 | .disable = legacy_pic_uint_noop, | ||
372 | .mask_ack = legacy_pic_uint_noop, | ||
373 | }; | ||
374 | static int legacy_pic_irq_pending_noop(unsigned int irq) | 371 | static int legacy_pic_irq_pending_noop(unsigned int irq) |
375 | { | 372 | { |
376 | return 0; | 373 | return 0; |
@@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) | |||
378 | 375 | ||
379 | struct legacy_pic null_legacy_pic = { | 376 | struct legacy_pic null_legacy_pic = { |
380 | .nr_legacy_irqs = 0, | 377 | .nr_legacy_irqs = 0, |
381 | .chip = &dummy_pic_chip, | 378 | .chip = &dummy_irq_chip, |
379 | .mask = legacy_pic_uint_noop, | ||
380 | .unmask = legacy_pic_uint_noop, | ||
382 | .mask_all = legacy_pic_noop, | 381 | .mask_all = legacy_pic_noop, |
383 | .restore_mask = legacy_pic_noop, | 382 | .restore_mask = legacy_pic_noop, |
384 | .init = legacy_pic_int_noop, | 383 | .init = legacy_pic_int_noop, |
@@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = { | |||
389 | struct legacy_pic default_legacy_pic = { | 388 | struct legacy_pic default_legacy_pic = { |
390 | .nr_legacy_irqs = NR_IRQS_LEGACY, | 389 | .nr_legacy_irqs = NR_IRQS_LEGACY, |
391 | .chip = &i8259A_chip, | 390 | .chip = &i8259A_chip, |
392 | .mask_all = mask_8259A, | 391 | .mask = mask_8259A_irq, |
392 | .unmask = unmask_8259A_irq, | ||
393 | .mask_all = mask_8259A, | ||
393 | .restore_mask = unmask_8259A, | 394 | .restore_mask = unmask_8259A, |
394 | .init = init_8259A, | 395 | .init = init_8259A, |
395 | .irq_pending = i8259A_irq_pending, | 396 | .irq_pending = i8259A_irq_pending, |
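Note: the i8259 conversion shows the common migration pattern of this series: keep the irq-number-based workers (mask_8259A_irq()/unmask_8259A_irq()) for callers such as the legacy_pic methods, and add thin struct irq_data wrappers for the irq_chip slots. A minimal sketch of that pattern with hypothetical names:

	/* Hypothetical illustration of the wrapper pattern used above. */
	static void example_mask_hw(unsigned int irq)
	{
		/* touch the controller registers for this irq */
	}

	static void example_irq_mask(struct irq_data *data)
	{
		example_mask_hw(data->irq);	/* unwrap the irq number */
	}

	static struct irq_chip example_chip = {
		.name		= "example",
		.irq_mask	= example_irq_mask,
	};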
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 91fd0c70a18a..83ec0175f986 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -67,10 +67,10 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
67 | for_each_online_cpu(j) | 67 | for_each_online_cpu(j) |
68 | seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); | 68 | seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); |
69 | seq_printf(p, " Performance monitoring interrupts\n"); | 69 | seq_printf(p, " Performance monitoring interrupts\n"); |
70 | seq_printf(p, "%*s: ", prec, "PND"); | 70 | seq_printf(p, "%*s: ", prec, "IWI"); |
71 | for_each_online_cpu(j) | 71 | for_each_online_cpu(j) |
72 | seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); | 72 | seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); |
73 | seq_printf(p, " Performance pending work\n"); | 73 | seq_printf(p, " IRQ work interrupts\n"); |
74 | #endif | 74 | #endif |
75 | if (x86_platform_ipi_callback) { | 75 | if (x86_platform_ipi_callback) { |
76 | seq_printf(p, "%*s: ", prec, "PLT"); | 76 | seq_printf(p, "%*s: ", prec, "PLT"); |
@@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
159 | seq_printf(p, "%*d: ", prec, i); | 159 | seq_printf(p, "%*d: ", prec, i); |
160 | for_each_online_cpu(j) | 160 | for_each_online_cpu(j) |
161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
162 | seq_printf(p, " %8s", desc->chip->name); | 162 | seq_printf(p, " %8s", desc->irq_data.chip->name); |
163 | seq_printf(p, "-%-8s", desc->name); | 163 | seq_printf(p, "-%-8s", desc->name); |
164 | 164 | ||
165 | if (action) { | 165 | if (action) { |
@@ -185,7 +185,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
185 | sum += irq_stats(cpu)->apic_timer_irqs; | 185 | sum += irq_stats(cpu)->apic_timer_irqs; |
186 | sum += irq_stats(cpu)->irq_spurious_count; | 186 | sum += irq_stats(cpu)->irq_spurious_count; |
187 | sum += irq_stats(cpu)->apic_perf_irqs; | 187 | sum += irq_stats(cpu)->apic_perf_irqs; |
188 | sum += irq_stats(cpu)->apic_pending_irqs; | 188 | sum += irq_stats(cpu)->apic_irq_work_irqs; |
189 | #endif | 189 | #endif |
190 | if (x86_platform_ipi_callback) | 190 | if (x86_platform_ipi_callback) |
191 | sum += irq_stats(cpu)->x86_platform_ipis; | 191 | sum += irq_stats(cpu)->x86_platform_ipis; |
@@ -282,6 +282,7 @@ void fixup_irqs(void) | |||
282 | unsigned int irq, vector; | 282 | unsigned int irq, vector; |
283 | static int warned; | 283 | static int warned; |
284 | struct irq_desc *desc; | 284 | struct irq_desc *desc; |
285 | struct irq_data *data; | ||
285 | 286 | ||
286 | for_each_irq_desc(irq, desc) { | 287 | for_each_irq_desc(irq, desc) { |
287 | int break_affinity = 0; | 288 | int break_affinity = 0; |
@@ -296,7 +297,8 @@ void fixup_irqs(void) | |||
296 | /* interrupts are disabled at this point */ | 297 | /* interrupts are disabled at this point */ |
297 | raw_spin_lock(&desc->lock); | 298 | raw_spin_lock(&desc->lock); |
298 | 299 | ||
299 | affinity = desc->affinity; | 300 | data = &desc->irq_data; |
301 | affinity = data->affinity; | ||
300 | if (!irq_has_action(irq) || | 302 | if (!irq_has_action(irq) || |
301 | cpumask_equal(affinity, cpu_online_mask)) { | 303 | cpumask_equal(affinity, cpu_online_mask)) { |
302 | raw_spin_unlock(&desc->lock); | 304 | raw_spin_unlock(&desc->lock); |
@@ -315,16 +317,16 @@ void fixup_irqs(void) | |||
315 | affinity = cpu_all_mask; | 317 | affinity = cpu_all_mask; |
316 | } | 318 | } |
317 | 319 | ||
318 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) | 320 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) |
319 | desc->chip->mask(irq); | 321 | data->chip->irq_mask(data); |
320 | 322 | ||
321 | if (desc->chip->set_affinity) | 323 | if (data->chip->irq_set_affinity) |
322 | desc->chip->set_affinity(irq, affinity); | 324 | data->chip->irq_set_affinity(data, affinity, true); |
323 | else if (!(warned++)) | 325 | else if (!(warned++)) |
324 | set_affinity = 0; | 326 | set_affinity = 0; |
325 | 327 | ||
326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) | 328 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) |
327 | desc->chip->unmask(irq); | 329 | data->chip->irq_unmask(data); |
328 | 330 | ||
329 | raw_spin_unlock(&desc->lock); | 331 | raw_spin_unlock(&desc->lock); |
330 | 332 | ||
@@ -355,10 +357,10 @@ void fixup_irqs(void) | |||
355 | if (irr & (1 << (vector % 32))) { | 357 | if (irr & (1 << (vector % 32))) { |
356 | irq = __get_cpu_var(vector_irq)[vector]; | 358 | irq = __get_cpu_var(vector_irq)[vector]; |
357 | 359 | ||
358 | desc = irq_to_desc(irq); | 360 | data = irq_get_irq_data(irq); |
359 | raw_spin_lock(&desc->lock); | 361 | raw_spin_lock(&desc->lock); |
360 | if (desc->chip->retrigger) | 362 | if (data->chip->irq_retrigger) |
361 | desc->chip->retrigger(irq); | 363 | data->chip->irq_retrigger(data); |
362 | raw_spin_unlock(&desc->lock); | 364 | raw_spin_unlock(&desc->lock); |
363 | } | 365 | } |
364 | } | 366 | } |
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c new file mode 100644 index 000000000000..ca8f703a1e70 --- /dev/null +++ b/arch/x86/kernel/irq_work.c | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * x86 specific code for irq_work | ||
3 | * | ||
4 | * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/irq_work.h> | ||
9 | #include <linux/hardirq.h> | ||
10 | #include <asm/apic.h> | ||
11 | |||
12 | void smp_irq_work_interrupt(struct pt_regs *regs) | ||
13 | { | ||
14 | irq_enter(); | ||
15 | ack_APIC_irq(); | ||
16 | inc_irq_stat(apic_irq_work_irqs); | ||
17 | irq_work_run(); | ||
18 | irq_exit(); | ||
19 | } | ||
20 | |||
21 | void arch_irq_work_raise(void) | ||
22 | { | ||
23 | #ifdef CONFIG_X86_LOCAL_APIC | ||
24 | if (!cpu_has_apic) | ||
25 | return; | ||
26 | |||
27 | apic->send_IPI_self(IRQ_WORK_VECTOR); | ||
28 | apic_wait_icr_idle(); | ||
29 | #endif | ||
30 | } | ||
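Note: smp_irq_work_interrupt() above is the x86 receive side of the new generic irq_work facility, and arch_irq_work_raise() is the send side, a self-IPI on IRQ_WORK_VECTOR. A minimal usage sketch, assuming the init_irq_work()/irq_work_queue() API exported by <linux/irq_work.h> in this era:

	#include <linux/irq_work.h>

	static void my_callback(struct irq_work *work)
	{
		/* runs in hard interrupt context shortly after queueing */
	}

	static struct irq_work my_work;

	static void kick_example(void)
	{
		init_irq_work(&my_work, my_callback);
		irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */
	}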
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 990ae7cfc578..c752e973958d 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
100 | 100 | ||
101 | void __init init_ISA_irqs(void) | 101 | void __init init_ISA_irqs(void) |
102 | { | 102 | { |
103 | struct irq_chip *chip = legacy_pic->chip; | ||
104 | const char *name = chip->name; | ||
103 | int i; | 105 | int i; |
104 | 106 | ||
105 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | 107 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
@@ -107,19 +109,8 @@ void __init init_ISA_irqs(void) | |||
107 | #endif | 109 | #endif |
108 | legacy_pic->init(0); | 110 | legacy_pic->init(0); |
109 | 111 | ||
110 | /* | 112 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
111 | * 16 old-style INTA-cycle interrupts: | 113 | set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); |
112 | */ | ||
113 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { | ||
114 | struct irq_desc *desc = irq_to_desc(i); | ||
115 | |||
116 | desc->status = IRQ_DISABLED; | ||
117 | desc->action = NULL; | ||
118 | desc->depth = 1; | ||
119 | |||
120 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
121 | handle_level_irq, "XT"); | ||
122 | } | ||
123 | } | 114 | } |
124 | 115 | ||
125 | void __init init_IRQ(void) | 116 | void __init init_IRQ(void) |
@@ -224,9 +215,9 @@ static void __init apic_intr_init(void) | |||
224 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | 215 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
225 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | 216 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); |
226 | 217 | ||
227 | /* Performance monitoring interrupts: */ | 218 | /* IRQ work interrupts: */ |
228 | # ifdef CONFIG_PERF_EVENTS | 219 | # ifdef CONFIG_IRQ_WORK |
229 | alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); | 220 | alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt); |
230 | # endif | 221 | # endif |
231 | 222 | ||
232 | #endif | 223 | #endif |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 035c8c529181..b3ea9db39db6 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -36,7 +36,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd, | |||
36 | if (!page) | 36 | if (!page) |
37 | goto out; | 37 | goto out; |
38 | pud = (pud_t *)page_address(page); | 38 | pud = (pud_t *)page_address(page); |
39 | memset(pud, 0, PAGE_SIZE); | 39 | clear_page(pud); |
40 | set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); | 40 | set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); |
41 | } | 41 | } |
42 | pud = pud_offset(pgd, addr); | 42 | pud = pud_offset(pgd, addr); |
@@ -45,7 +45,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd, | |||
45 | if (!page) | 45 | if (!page) |
46 | goto out; | 46 | goto out; |
47 | pmd = (pmd_t *)page_address(page); | 47 | pmd = (pmd_t *)page_address(page); |
48 | memset(pmd, 0, PAGE_SIZE); | 48 | clear_page(pmd); |
49 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); | 49 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); |
50 | } | 50 | } |
51 | pmd = pmd_offset(pud, addr); | 51 | pmd = pmd_offset(pud, addr); |
diff --git a/arch/x86/kernel/olpc-xo1.c b/arch/x86/kernel/olpc-xo1.c new file mode 100644 index 000000000000..f5442c03abc3 --- /dev/null +++ b/arch/x86/kernel/olpc-xo1.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Support for features of the OLPC XO-1 laptop | ||
3 | * | ||
4 | * Copyright (C) 2010 One Laptop per Child | ||
5 | * Copyright (C) 2006 Red Hat, Inc. | ||
6 | * Copyright (C) 2006 Advanced Micro Devices, Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/pci_ids.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/pm.h> | ||
19 | |||
20 | #include <asm/io.h> | ||
21 | #include <asm/olpc.h> | ||
22 | |||
23 | #define DRV_NAME "olpc-xo1" | ||
24 | |||
25 | #define PMS_BAR 4 | ||
26 | #define ACPI_BAR 5 | ||
27 | |||
28 | /* PMC registers (PMS block) */ | ||
29 | #define PM_SCLK 0x10 | ||
30 | #define PM_IN_SLPCTL 0x20 | ||
31 | #define PM_WKXD 0x34 | ||
32 | #define PM_WKD 0x30 | ||
33 | #define PM_SSC 0x54 | ||
34 | |||
35 | /* PM registers (ACPI block) */ | ||
36 | #define PM1_CNT 0x08 | ||
37 | #define PM_GPE0_STS 0x18 | ||
38 | |||
39 | static unsigned long acpi_base; | ||
40 | static unsigned long pms_base; | ||
41 | |||
42 | static void xo1_power_off(void) | ||
43 | { | ||
44 | printk(KERN_INFO "OLPC XO-1 power off sequence...\n"); | ||
45 | |||
46 | /* Enable all of these controls with 0 delay */ | ||
47 | outl(0x40000000, pms_base + PM_SCLK); | ||
48 | outl(0x40000000, pms_base + PM_IN_SLPCTL); | ||
49 | outl(0x40000000, pms_base + PM_WKXD); | ||
50 | outl(0x40000000, pms_base + PM_WKD); | ||
51 | |||
52 | /* Clear status bits (possibly unnecessary) */ | ||
53 | outl(0x0002ffff, pms_base + PM_SSC); | ||
54 | outl(0xffffffff, acpi_base + PM_GPE0_STS); | ||
55 | |||
56 | /* Write SLP_EN bit to start the machinery */ | ||
57 | outl(0x00002000, acpi_base + PM1_CNT); | ||
58 | } | ||
59 | |||
60 | /* Read the base addresses from the PCI BAR info */ | ||
61 | static int __devinit setup_bases(struct pci_dev *pdev) | ||
62 | { | ||
63 | int r; | ||
64 | |||
65 | r = pci_enable_device_io(pdev); | ||
66 | if (r) { | ||
67 | dev_err(&pdev->dev, "can't enable device IO\n"); | ||
68 | return r; | ||
69 | } | ||
70 | |||
71 | r = pci_request_region(pdev, ACPI_BAR, DRV_NAME); | ||
72 | if (r) { | ||
73 | dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", ACPI_BAR); | ||
74 | return r; | ||
75 | } | ||
76 | |||
77 | r = pci_request_region(pdev, PMS_BAR, DRV_NAME); | ||
78 | if (r) { | ||
79 | dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", PMS_BAR); | ||
80 | pci_release_region(pdev, ACPI_BAR); | ||
81 | return r; | ||
82 | } | ||
83 | |||
84 | acpi_base = pci_resource_start(pdev, ACPI_BAR); | ||
85 | pms_base = pci_resource_start(pdev, PMS_BAR); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static int __devinit olpc_xo1_probe(struct platform_device *pdev) | ||
91 | { | ||
92 | struct pci_dev *pcidev; | ||
93 | int r; | ||
94 | |||
95 | pcidev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, | ||
96 | NULL); | ||
97 | if (!pcidev) | ||
98 | return -ENODEV; | ||
99 | |||
100 | r = setup_bases(pcidev); | ||
101 | if (r) | ||
102 | return r; | ||
103 | |||
104 | pm_power_off = xo1_power_off; | ||
105 | |||
106 | printk(KERN_INFO "OLPC XO-1 support registered\n"); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int __devexit olpc_xo1_remove(struct platform_device *pdev) | ||
111 | { | ||
112 | pm_power_off = NULL; | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static struct platform_driver olpc_xo1_driver = { | ||
117 | .driver = { | ||
118 | .name = DRV_NAME, | ||
119 | .owner = THIS_MODULE, | ||
120 | }, | ||
121 | .probe = olpc_xo1_probe, | ||
122 | .remove = __devexit_p(olpc_xo1_remove), | ||
123 | }; | ||
124 | |||
125 | static int __init olpc_xo1_init(void) | ||
126 | { | ||
127 | return platform_driver_register(&olpc_xo1_driver); | ||
128 | } | ||
129 | |||
130 | static void __exit olpc_xo1_exit(void) | ||
131 | { | ||
132 | platform_driver_unregister(&olpc_xo1_driver); | ||
133 | } | ||
134 | |||
135 | MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>"); | ||
136 | MODULE_LICENSE("GPL"); | ||
137 | MODULE_ALIAS("platform:olpc-xo1"); | ||
138 | |||
139 | module_init(olpc_xo1_init); | ||
140 | module_exit(olpc_xo1_exit); | ||
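Note: xo1_power_off() ends with a write of 0x00002000 to PM1_CNT, described above only as "start the machinery". Reading the constant against the ACPI register layout (an inference from the code, not stated in the patch): SLP_TYP occupies bits 10-12 and SLP_EN is bit 13, so the write sets SLP_EN with SLP_TYP = 0, which this Geode platform evidently treats as power-off:

	/* Assumed PM1_CNT bit layout, per ACPI. */
	#define PM1_CNT_SLP_TYP(x)	((x) << 10)	/* sleep type, bits 10-12 */
	#define PM1_CNT_SLP_EN		(1 << 13)	/* commit the sleep type */

	/* 0x00002000 == PM1_CNT_SLP_TYP(0) | PM1_CNT_SLP_EN */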
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 0e0cdde519be..edaf3fe8dc5e 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/platform_device.h> | ||
20 | 21 | ||
21 | #include <asm/geode.h> | 22 | #include <asm/geode.h> |
22 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
@@ -114,6 +115,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | |||
114 | unsigned long flags; | 115 | unsigned long flags; |
115 | int ret = -EIO; | 116 | int ret = -EIO; |
116 | int i; | 117 | int i; |
118 | int restarts = 0; | ||
117 | 119 | ||
118 | spin_lock_irqsave(&ec_lock, flags); | 120 | spin_lock_irqsave(&ec_lock, flags); |
119 | 121 | ||
@@ -169,7 +171,9 @@ restart: | |||
169 | if (wait_on_obf(0x6c, 1)) { | 171 | if (wait_on_obf(0x6c, 1)) { |
170 | printk(KERN_ERR "olpc-ec: timeout waiting for" | 172 | printk(KERN_ERR "olpc-ec: timeout waiting for" |
171 | " EC to provide data!\n"); | 173 | " EC to provide data!\n"); |
172 | goto restart; | 174 | if (restarts++ < 10) |
175 | goto restart; | ||
176 | goto err; | ||
173 | } | 177 | } |
174 | outbuf[i] = inb(0x68); | 178 | outbuf[i] = inb(0x68); |
175 | pr_devel("olpc-ec: received 0x%x\n", outbuf[i]); | 179 | pr_devel("olpc-ec: received 0x%x\n", outbuf[i]); |
@@ -183,8 +187,21 @@ err: | |||
183 | } | 187 | } |
184 | EXPORT_SYMBOL_GPL(olpc_ec_cmd); | 188 | EXPORT_SYMBOL_GPL(olpc_ec_cmd); |
185 | 189 | ||
186 | #ifdef CONFIG_OLPC_OPENFIRMWARE | 190 | static bool __init check_ofw_architecture(void) |
187 | static void __init platform_detect(void) | 191 | { |
192 | size_t propsize; | ||
193 | char olpc_arch[5]; | ||
194 | const void *args[] = { NULL, "architecture", olpc_arch, (void *)5 }; | ||
195 | void *res[] = { &propsize }; | ||
196 | |||
197 | if (olpc_ofw("getprop", args, res)) { | ||
198 | printk(KERN_ERR "ofw: getprop call failed!\n"); | ||
199 | return false; | ||
200 | } | ||
201 | return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; | ||
202 | } | ||
203 | |||
204 | static u32 __init get_board_revision(void) | ||
188 | { | 205 | { |
189 | size_t propsize; | 206 | size_t propsize; |
190 | __be32 rev; | 207 | __be32 rev; |
@@ -193,45 +210,43 @@ static void __init platform_detect(void) | |||
193 | 210 | ||
194 | if (olpc_ofw("getprop", args, res) || propsize != 4) { | 211 | if (olpc_ofw("getprop", args, res) || propsize != 4) { |
195 | printk(KERN_ERR "ofw: getprop call failed!\n"); | 212 | printk(KERN_ERR "ofw: getprop call failed!\n"); |
196 | rev = cpu_to_be32(0); | 213 | return cpu_to_be32(0); |
197 | } | 214 | } |
198 | olpc_platform_info.boardrev = be32_to_cpu(rev); | 215 | return be32_to_cpu(rev); |
199 | } | 216 | } |
200 | #else | 217 | |
201 | static void __init platform_detect(void) | 218 | static bool __init platform_detect(void) |
202 | { | 219 | { |
203 | /* stopgap until OFW support is added to the kernel */ | 220 | if (!check_ofw_architecture()) |
204 | olpc_platform_info.boardrev = olpc_board(0xc2); | 221 | return false; |
222 | olpc_platform_info.flags |= OLPC_F_PRESENT; | ||
223 | olpc_platform_info.boardrev = get_board_revision(); | ||
224 | return true; | ||
205 | } | 225 | } |
206 | #endif | ||
207 | 226 | ||
208 | static int __init olpc_init(void) | 227 | static int __init add_xo1_platform_devices(void) |
209 | { | 228 | { |
210 | unsigned char *romsig; | 229 | struct platform_device *pdev; |
211 | 230 | ||
212 | /* The ioremap check is dangerous; limit what we run it on */ | 231 | pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0); |
213 | if (!is_geode() || cs5535_has_vsa2()) | 232 | if (IS_ERR(pdev)) |
214 | return 0; | 233 | return PTR_ERR(pdev); |
215 | 234 | ||
216 | spin_lock_init(&ec_lock); | 235 | pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0); |
236 | if (IS_ERR(pdev)) | ||
237 | return PTR_ERR(pdev); | ||
217 | 238 | ||
218 | romsig = ioremap(0xffffffc0, 16); | 239 | return 0; |
219 | if (!romsig) | 240 | } |
220 | return 0; | ||
221 | 241 | ||
222 | if (strncmp(romsig, "CL1 Q", 7)) | 242 | static int __init olpc_init(void) |
223 | goto unmap; | 243 | { |
224 | if (strncmp(romsig+6, romsig+13, 3)) { | 244 | int r = 0; |
225 | printk(KERN_INFO "OLPC BIOS signature looks invalid. " | ||
226 | "Assuming not OLPC\n"); | ||
227 | goto unmap; | ||
228 | } | ||
229 | 245 | ||
230 | printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig); | 246 | if (!olpc_ofw_present() || !platform_detect()) |
231 | olpc_platform_info.flags |= OLPC_F_PRESENT; | 247 | return 0; |
232 | 248 | ||
233 | /* get the platform revision */ | 249 | spin_lock_init(&ec_lock); |
234 | platform_detect(); | ||
235 | 250 | ||
236 | /* assume B1 and above models always have a DCON */ | 251 | /* assume B1 and above models always have a DCON */ |
237 | if (olpc_board_at_least(olpc_board(0xb1))) | 252 | if (olpc_board_at_least(olpc_board(0xb1))) |
@@ -242,8 +257,10 @@ static int __init olpc_init(void) | |||
242 | (unsigned char *) &olpc_platform_info.ecver, 1); | 257 | (unsigned char *) &olpc_platform_info.ecver, 1); |
243 | 258 | ||
244 | #ifdef CONFIG_PCI_OLPC | 259 | #ifdef CONFIG_PCI_OLPC |
245 | /* If the VSA exists let it emulate PCI, if not emulate in kernel */ | 260 | /* If the VSA exists let it emulate PCI, if not emulate in kernel. |
246 | if (!cs5535_has_vsa2()) | 261 | * XO-1 only. */ |
262 | if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) && | ||
263 | !cs5535_has_vsa2()) | ||
247 | x86_init.pci.arch_init = pci_olpc_init; | 264 | x86_init.pci.arch_init = pci_olpc_init; |
248 | #endif | 265 | #endif |
249 | 266 | ||
@@ -252,8 +269,12 @@ static int __init olpc_init(void) | |||
252 | olpc_platform_info.boardrev >> 4, | 269 | olpc_platform_info.boardrev >> 4, |
253 | olpc_platform_info.ecver); | 270 | olpc_platform_info.ecver); |
254 | 271 | ||
255 | unmap: | 272 | if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */ |
256 | iounmap(romsig); | 273 | r = add_xo1_platform_devices(); |
274 | if (r) | ||
275 | return r; | ||
276 | } | ||
277 | |||
257 | return 0; | 278 | return 0; |
258 | } | 279 | } |
259 | 280 | ||
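Note: check_ofw_architecture() above calls the Open Firmware client interface through olpc_ofw(), which is assumed to be a convenience macro around the __olpc_ofw() entry point (visible in the olpc_ofw.c hunk below), filling in the argument and result counts from the array sizes:

	/* Assumed shape of the wrapper, from asm/olpc_ofw.h of this era. */
	#define olpc_ofw(name, args, res) \
		__olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res)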
diff --git a/arch/x86/kernel/olpc_ofw.c b/arch/x86/kernel/olpc_ofw.c index 3218aa71ab5e..787320464379 100644 --- a/arch/x86/kernel/olpc_ofw.c +++ b/arch/x86/kernel/olpc_ofw.c | |||
@@ -74,6 +74,12 @@ int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res, | |||
74 | } | 74 | } |
75 | EXPORT_SYMBOL_GPL(__olpc_ofw); | 75 | EXPORT_SYMBOL_GPL(__olpc_ofw); |
76 | 76 | ||
77 | bool olpc_ofw_present(void) | ||
78 | { | ||
79 | return olpc_ofw_cif != NULL; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(olpc_ofw_present); | ||
82 | |||
77 | /* OFW cif _should_ be above this address */ | 83 | /* OFW cif _should_ be above this address */ |
78 | #define OFW_MIN 0xff000000 | 84 | #define OFW_MIN 0xff000000 |
79 | 85 | ||
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1db183ed7c01..c5b250011fd4 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -413,7 +413,6 @@ struct pv_mmu_ops pv_mmu_ops = { | |||
413 | 413 | ||
414 | .alloc_pte = paravirt_nop, | 414 | .alloc_pte = paravirt_nop, |
415 | .alloc_pmd = paravirt_nop, | 415 | .alloc_pmd = paravirt_nop, |
416 | .alloc_pmd_clone = paravirt_nop, | ||
417 | .alloc_pud = paravirt_nop, | 416 | .alloc_pud = paravirt_nop, |
418 | .release_pte = paravirt_nop, | 417 | .release_pte = paravirt_nop, |
419 | .release_pmd = paravirt_nop, | 418 | .release_pmd = paravirt_nop, |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 0f7f130caa67..c562207b1b3d 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/swiotlb.h> | 40 | #include <asm/swiotlb.h> |
41 | #include <asm/dma.h> | 41 | #include <asm/dma.h> |
42 | #include <asm/k8.h> | 42 | #include <asm/amd_nb.h> |
43 | #include <asm/x86_init.h> | 43 | #include <asm/x86_init.h> |
44 | 44 | ||
45 | static unsigned long iommu_bus_base; /* GART remapping area (physical) */ | 45 | static unsigned long iommu_bus_base; /* GART remapping area (physical) */ |
@@ -560,8 +560,11 @@ static void enable_gart_translations(void) | |||
560 | { | 560 | { |
561 | int i; | 561 | int i; |
562 | 562 | ||
563 | for (i = 0; i < num_k8_northbridges; i++) { | 563 | if (!k8_northbridges.gart_supported) |
564 | struct pci_dev *dev = k8_northbridges[i]; | 564 | return; |
565 | |||
566 | for (i = 0; i < k8_northbridges.num; i++) { | ||
567 | struct pci_dev *dev = k8_northbridges.nb_misc[i]; | ||
565 | 568 | ||
566 | enable_gart_translation(dev, __pa(agp_gatt_table)); | 569 | enable_gart_translation(dev, __pa(agp_gatt_table)); |
567 | } | 570 | } |
@@ -592,16 +595,19 @@ static void gart_fixup_northbridges(struct sys_device *dev) | |||
592 | if (!fix_up_north_bridges) | 595 | if (!fix_up_north_bridges) |
593 | return; | 596 | return; |
594 | 597 | ||
598 | if (!k8_northbridges.gart_supported) | ||
599 | return; | ||
600 | |||
595 | pr_info("PCI-DMA: Restoring GART aperture settings\n"); | 601 | pr_info("PCI-DMA: Restoring GART aperture settings\n"); |
596 | 602 | ||
597 | for (i = 0; i < num_k8_northbridges; i++) { | 603 | for (i = 0; i < k8_northbridges.num; i++) { |
598 | struct pci_dev *dev = k8_northbridges[i]; | 604 | struct pci_dev *dev = k8_northbridges.nb_misc[i]; |
599 | 605 | ||
600 | /* | 606 | /* |
601 | * Don't enable translations just yet. That is the next | 607 | * Don't enable translations just yet. That is the next |
602 | * step. Restore the pre-suspend aperture settings. | 608 | * step. Restore the pre-suspend aperture settings. |
603 | */ | 609 | */ |
604 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1); | 610 | gart_set_size_and_enable(dev, aperture_order); |
605 | pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25); | 611 | pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25); |
606 | } | 612 | } |
607 | } | 613 | } |
@@ -649,8 +655,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
649 | 655 | ||
650 | aper_size = aper_base = info->aper_size = 0; | 656 | aper_size = aper_base = info->aper_size = 0; |
651 | dev = NULL; | 657 | dev = NULL; |
652 | for (i = 0; i < num_k8_northbridges; i++) { | 658 | for (i = 0; i < k8_northbridges.num; i++) { |
653 | dev = k8_northbridges[i]; | 659 | dev = k8_northbridges.nb_misc[i]; |
654 | new_aper_base = read_aperture(dev, &new_aper_size); | 660 | new_aper_base = read_aperture(dev, &new_aper_size); |
655 | if (!new_aper_base) | 661 | if (!new_aper_base) |
656 | goto nommu; | 662 | goto nommu; |
@@ -718,10 +724,13 @@ static void gart_iommu_shutdown(void) | |||
718 | if (!no_agp) | 724 | if (!no_agp) |
719 | return; | 725 | return; |
720 | 726 | ||
721 | for (i = 0; i < num_k8_northbridges; i++) { | 727 | if (!k8_northbridges.gart_supported) |
728 | return; | ||
729 | |||
730 | for (i = 0; i < k8_northbridges.num; i++) { | ||
722 | u32 ctl; | 731 | u32 ctl; |
723 | 732 | ||
724 | dev = k8_northbridges[i]; | 733 | dev = k8_northbridges.nb_misc[i]; |
725 | pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); | 734 | pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); |
726 | 735 | ||
727 | ctl &= ~GARTEN; | 736 | ctl &= ~GARTEN; |
@@ -739,7 +748,7 @@ int __init gart_iommu_init(void) | |||
739 | unsigned long scratch; | 748 | unsigned long scratch; |
740 | long i; | 749 | long i; |
741 | 750 | ||
742 | if (num_k8_northbridges == 0) | 751 | if (!k8_northbridges.gart_supported) |
743 | return 0; | 752 | return 0; |
744 | 753 | ||
745 | #ifndef CONFIG_AGP_AMD64 | 754 | #ifndef CONFIG_AGP_AMD64 |
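Note: the GART code above moves from the old num_k8_northbridges/k8_northbridges[] pair to a single descriptor with an explicit gart_supported flag, so every loop gains an early bail-out on GART-less systems. The descriptor is assumed to look roughly like this (cf. the asm/amd_nb.h rename visible in the pci-gart_64.c and setup.c hunks):

	/* Assumed shape of the reworked interface. */
	struct k8_northbridge_info {
		u16 num;			/* northbridges discovered */
		u8 gart_supported;		/* GART usable on this system */
		struct pci_dev **nb_misc;	/* misc-function device per NB */
	};
	extern struct k8_northbridge_info k8_northbridges;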
diff --git a/arch/x86/kernel/pmtimer_64.c b/arch/x86/kernel/pmtimer_64.c deleted file mode 100644 index b112406f1996..000000000000 --- a/arch/x86/kernel/pmtimer_64.c +++ /dev/null | |||
@@ -1,69 +0,0 @@ | |||
1 | /* Ported over from i386 by AK, original copyright was: | ||
2 | * | ||
3 | * (C) Dominik Brodowski <linux@brodo.de> 2003 | ||
4 | * | ||
5 | * Driver to use the Power Management Timer (PMTMR) available in some | ||
6 | * southbridges as primary timing source for the Linux kernel. | ||
7 | * | ||
8 | * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c, | ||
9 | * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4. | ||
10 | * | ||
11 | * This file is licensed under the GPL v2. | ||
12 | * | ||
13 | * Dropped all the hardware bug workarounds for now. Hopefully they | ||
14 | * are not needed on 64bit chipsets. | ||
15 | */ | ||
16 | |||
17 | #include <linux/jiffies.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/time.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/acpi_pmtmr.h> | ||
23 | |||
24 | #include <asm/io.h> | ||
25 | #include <asm/proto.h> | ||
26 | #include <asm/msr.h> | ||
27 | #include <asm/vsyscall.h> | ||
28 | |||
29 | static inline u32 cyc2us(u32 cycles) | ||
30 | { | ||
31 | /* The Power Management Timer ticks at 3.579545 ticks per microsecond. | ||
32 | * 1 / PM_TIMER_FREQUENCY == 0.27936511 =~ 286/1024 [error: 0.024%] | ||
33 | * | ||
34 | * Even with HZ = 100, delta is at maximum 35796 ticks, so it can | ||
35 | * easily be multiplied with 286 (=0x11E) without having to fear | ||
36 | * u32 overflows. | ||
37 | */ | ||
38 | cycles *= 286; | ||
39 | return (cycles >> 10); | ||
40 | } | ||
41 | |||
42 | static unsigned pmtimer_wait_tick(void) | ||
43 | { | ||
44 | u32 a, b; | ||
45 | for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK; | ||
46 | a == b; | ||
47 | b = inl(pmtmr_ioport) & ACPI_PM_MASK) | ||
48 | cpu_relax(); | ||
49 | return b; | ||
50 | } | ||
51 | |||
52 | /* note: wait time is rounded up to one tick */ | ||
53 | void pmtimer_wait(unsigned us) | ||
54 | { | ||
55 | u32 a, b; | ||
56 | a = pmtimer_wait_tick(); | ||
57 | do { | ||
58 | b = inl(pmtmr_ioport); | ||
59 | cpu_relax(); | ||
60 | } while (cyc2us(b - a) < us); | ||
61 | } | ||
62 | |||
63 | static int __init nopmtimer_setup(char *s) | ||
64 | { | ||
65 | pmtmr_ioport = 0; | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | __setup("nopmtimer", nopmtimer_setup); | ||
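Note: the deleted cyc2us() leaned on the approximation 1/3.579545 =~ 286/1024 so the conversion could stay a multiply and a shift. A quick stand-alone check of the comment's claimed 0.024% error (hypothetical user-space snippet, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		double exact  = 1.0 / 3.579545;	/* microseconds per PM timer tick */
		double approx = 286.0 / 1024.0;	/* the multiply-and-shift constant */

		printf("exact=%.8f approx=%.8f err=%+.4f%%\n",
		       exact, approx, 100.0 * (approx - exact) / exact);
		return 0;			/* prints err ~= -0.0244% */
	}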
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3d9ea531ddd1..b3d7a3a04f38 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -424,7 +424,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
424 | load_TLS(next, cpu); | 424 | load_TLS(next, cpu); |
425 | 425 | ||
426 | /* Must be after DS reload */ | 426 | /* Must be after DS reload */ |
427 | unlazy_fpu(prev_p); | 427 | __unlazy_fpu(prev_p); |
428 | 428 | ||
429 | /* Make sure cpu is ready for new context */ | 429 | /* Make sure cpu is ready for new context */ |
430 | if (preload_fpu) | 430 | if (preload_fpu) |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e3af342fe83a..7a4cf14223ba 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -84,7 +84,7 @@ static int __init reboot_setup(char *str) | |||
84 | } | 84 | } |
85 | /* we will leave sorting out the final value | 85 | /* we will leave sorting out the final value |
86 | when we are ready to reboot, since we might not | 86 | when we are ready to reboot, since we might not |
87 | have set up boot_cpu_id or smp_num_cpu */ | 87 | have detected BSP APIC ID or smp_num_cpu */ |
88 | break; | 88 | break; |
89 | #endif /* CONFIG_SMP */ | 89 | #endif /* CONFIG_SMP */ |
90 | 90 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 00e167870f71..a59f6a6df5e2 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -83,7 +83,6 @@ | |||
83 | #include <asm/dmi.h> | 83 | #include <asm/dmi.h> |
84 | #include <asm/io_apic.h> | 84 | #include <asm/io_apic.h> |
85 | #include <asm/ist.h> | 85 | #include <asm/ist.h> |
86 | #include <asm/vmi.h> | ||
87 | #include <asm/setup_arch.h> | 86 | #include <asm/setup_arch.h> |
88 | #include <asm/bios_ebda.h> | 87 | #include <asm/bios_ebda.h> |
89 | #include <asm/cacheflush.h> | 88 | #include <asm/cacheflush.h> |
@@ -107,7 +106,7 @@ | |||
107 | #include <asm/percpu.h> | 106 | #include <asm/percpu.h> |
108 | #include <asm/topology.h> | 107 | #include <asm/topology.h> |
109 | #include <asm/apicdef.h> | 108 | #include <asm/apicdef.h> |
110 | #include <asm/k8.h> | 109 | #include <asm/amd_nb.h> |
111 | #ifdef CONFIG_X86_64 | 110 | #ifdef CONFIG_X86_64 |
112 | #include <asm/numa_64.h> | 111 | #include <asm/numa_64.h> |
113 | #endif | 112 | #endif |
@@ -126,7 +125,6 @@ unsigned long max_pfn_mapped; | |||
126 | RESERVE_BRK(dmi_alloc, 65536); | 125 | RESERVE_BRK(dmi_alloc, 65536); |
127 | #endif | 126 | #endif |
128 | 127 | ||
129 | unsigned int boot_cpu_id __read_mostly; | ||
130 | 128 | ||
131 | static __initdata unsigned long _brk_start = (unsigned long)__brk_base; | 129 | static __initdata unsigned long _brk_start = (unsigned long)__brk_base; |
132 | unsigned long _brk_end = (unsigned long)__brk_base; | 130 | unsigned long _brk_end = (unsigned long)__brk_base; |
@@ -619,79 +617,7 @@ static __init void reserve_ibft_region(void) | |||
619 | reserve_early_overlap_ok(addr, addr + size, "ibft"); | 617 | reserve_early_overlap_ok(addr, addr + size, "ibft"); |
620 | } | 618 | } |
621 | 619 | ||
622 | #ifdef CONFIG_X86_RESERVE_LOW_64K | 620 | static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; |
623 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) | ||
624 | { | ||
625 | printk(KERN_NOTICE | ||
626 | "%s detected: BIOS may corrupt low RAM, working around it.\n", | ||
627 | d->ident); | ||
628 | |||
629 | e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED); | ||
630 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | ||
631 | |||
632 | return 0; | ||
633 | } | ||
634 | #endif | ||
635 | |||
636 | /* List of systems that have known low memory corruption BIOS problems */ | ||
637 | static struct dmi_system_id __initdata bad_bios_dmi_table[] = { | ||
638 | #ifdef CONFIG_X86_RESERVE_LOW_64K | ||
639 | { | ||
640 | .callback = dmi_low_memory_corruption, | ||
641 | .ident = "AMI BIOS", | ||
642 | .matches = { | ||
643 | DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), | ||
644 | }, | ||
645 | }, | ||
646 | { | ||
647 | .callback = dmi_low_memory_corruption, | ||
648 | .ident = "Phoenix BIOS", | ||
649 | .matches = { | ||
650 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"), | ||
651 | }, | ||
652 | }, | ||
653 | { | ||
654 | .callback = dmi_low_memory_corruption, | ||
655 | .ident = "Phoenix/MSC BIOS", | ||
656 | .matches = { | ||
657 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"), | ||
658 | }, | ||
659 | }, | ||
660 | /* | ||
661 | * AMI BIOS with low memory corruption was found on Intel DG45ID and | ||
662 | * DG45FC boards. | ||
663 | * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will | ||
664 | * match only DMI_BOARD_NAME and see if there is more bad products | ||
665 | * with this vendor. | ||
666 | */ | ||
667 | { | ||
668 | .callback = dmi_low_memory_corruption, | ||
669 | .ident = "AMI BIOS", | ||
670 | .matches = { | ||
671 | DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), | ||
672 | }, | ||
673 | }, | ||
674 | { | ||
675 | .callback = dmi_low_memory_corruption, | ||
676 | .ident = "AMI BIOS", | ||
677 | .matches = { | ||
678 | DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), | ||
679 | }, | ||
680 | }, | ||
681 | /* | ||
682 | * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so | ||
683 | * match on the product name. | ||
684 | */ | ||
685 | { | ||
686 | .callback = dmi_low_memory_corruption, | ||
687 | .ident = "Phoenix BIOS", | ||
688 | .matches = { | ||
689 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), | ||
690 | }, | ||
691 | }, | ||
692 | #endif | ||
693 | {} | ||
694 | }; | ||
695 | 621 | ||
696 | static void __init trim_bios_range(void) | 622 | static void __init trim_bios_range(void) |
697 | { | 623 | { |
@@ -699,8 +625,14 @@ static void __init trim_bios_range(void) | |||
699 | * A special case is the first 4Kb of memory; | 625 | * A special case is the first 4Kb of memory; |
700 | * This is a BIOS owned area, not kernel ram, but generally | 626 | * This is a BIOS owned area, not kernel ram, but generally |
701 | * not listed as such in the E820 table. | 627 | * not listed as such in the E820 table. |
628 | * | ||
629 | * This typically reserves additional memory (64KiB by default) | ||
630 | * since some BIOSes are known to corrupt low memory. See the | ||
631 | * Kconfig help text for X86_RESERVE_LOW. | ||
702 | */ | 632 | */ |
703 | e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED); | 633 | e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE), |
634 | E820_RAM, E820_RESERVED); | ||
635 | |||
704 | /* | 636 | /* |
705 | * special case: Some BIOSen report the PC BIOS | 637 | * special case: Some BIOSen report the PC BIOS |
706 | * area (640->1Mb) as ram even though it is not. | 638 | * area (640->1Mb) as ram even though it is not. |
@@ -710,6 +642,28 @@ static void __init trim_bios_range(void) | |||
710 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 642 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
711 | } | 643 | } |
712 | 644 | ||
645 | static int __init parse_reservelow(char *p) | ||
646 | { | ||
647 | unsigned long long size; | ||
648 | |||
649 | if (!p) | ||
650 | return -EINVAL; | ||
651 | |||
652 | size = memparse(p, &p); | ||
653 | |||
654 | if (size < 4096) | ||
655 | size = 4096; | ||
656 | |||
657 | if (size > 640*1024) | ||
658 | size = 640*1024; | ||
659 | |||
660 | reserve_low = size; | ||
661 | |||
662 | return 0; | ||
663 | } | ||
664 | |||
665 | early_param("reservelow", parse_reservelow); | ||
666 | |||
713 | /* | 667 | /* |
714 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 668 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
715 | * passed the efi memmap, systab, etc., so we should use these data structures | 669 | * passed the efi memmap, systab, etc., so we should use these data structures |
@@ -736,10 +690,10 @@ void __init setup_arch(char **cmdline_p) | |||
736 | printk(KERN_INFO "Command line: %s\n", boot_command_line); | 690 | printk(KERN_INFO "Command line: %s\n", boot_command_line); |
737 | #endif | 691 | #endif |
738 | 692 | ||
739 | /* VMI may relocate the fixmap; do this before touching ioremap area */ | 693 | /* |
740 | vmi_init(); | 694 | * If we have OLPC OFW, we might end up relocating the fixmap due to |
741 | 695 | * reserve_top(), so do this before touching the ioremap area. | |
742 | /* OFW also may relocate the fixmap */ | 696 | */ |
743 | olpc_ofw_detect(); | 697 | olpc_ofw_detect(); |
744 | 698 | ||
745 | early_trap_init(); | 699 | early_trap_init(); |
@@ -840,9 +794,6 @@ void __init setup_arch(char **cmdline_p) | |||
840 | 794 | ||
841 | x86_report_nx(); | 795 | x86_report_nx(); |
842 | 796 | ||
843 | /* Must be before kernel pagetables are setup */ | ||
844 | vmi_activate(); | ||
845 | |||
846 | /* after early param, so could get panic from serial */ | 797 | /* after early param, so could get panic from serial */ |
847 | reserve_early_setup_data(); | 798 | reserve_early_setup_data(); |
848 | 799 | ||
@@ -865,8 +816,6 @@ void __init setup_arch(char **cmdline_p) | |||
865 | 816 | ||
866 | dmi_scan_machine(); | 817 | dmi_scan_machine(); |
867 | 818 | ||
868 | dmi_check_system(bad_bios_dmi_table); | ||
869 | |||
870 | /* | 819 | /* |
871 | * VMware detection requires dmi to be available, so this | 820 | * VMware detection requires dmi to be available, so this |
872 | * needs to be done after dmi_scan_machine, for the BP. | 821 | * needs to be done after dmi_scan_machine, for the BP. |
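Note: the DMI blacklist of low-memory-corrupting BIOSes is gone; in its place trim_bios_range() now unconditionally reserves the first 64 KiB (CONFIG_X86_RESERVE_LOW), tunable with the new reservelow= early parameter. memparse() accepts the usual k/m/g suffixes and parse_reservelow() clamps the result to 4 KiB..640 KiB; a sketch of what it computes for a few hypothetical command lines:

	/* What parse_reservelow() derives from these boot parameters: */
	memparse("64k", NULL);		/* reservelow=64k  -> 65536 bytes */
	memparse("4096", NULL);		/* reservelow=4096 -> 4096 bytes  */
	/* values above 640k are clamped to 640*1024 */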
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a60df9ae6454..2335c15c93a4 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -253,7 +253,7 @@ void __init setup_per_cpu_areas(void) | |||
253 | * Up to this point, the boot CPU has been using .init.data | 253 | * Up to this point, the boot CPU has been using .init.data |
254 | * area. Reload any changed state for the boot CPU. | 254 | * area. Reload any changed state for the boot CPU. |
255 | */ | 255 | */ |
256 | if (cpu == boot_cpu_id) | 256 | if (!cpu) |
257 | switch_to_new_gdt(cpu); | 257 | switch_to_new_gdt(cpu); |
258 | } | 258 | } |
259 | 259 | ||
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c index cb22acf3ed09..dd4c281ffe57 100644 --- a/arch/x86/kernel/sfi.c +++ b/arch/x86/kernel/sfi.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #ifdef CONFIG_X86_LOCAL_APIC | 34 | #ifdef CONFIG_X86_LOCAL_APIC |
35 | static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 35 | static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
36 | 36 | ||
37 | void __init mp_sfi_register_lapic_address(unsigned long address) | 37 | static void __init mp_sfi_register_lapic_address(unsigned long address) |
38 | { | 38 | { |
39 | mp_lapic_addr = address; | 39 | mp_lapic_addr = address; |
40 | 40 | ||
@@ -46,7 +46,7 @@ void __init mp_sfi_register_lapic_address(unsigned long address) | |||
46 | } | 46 | } |
47 | 47 | ||
48 | /* All CPUs enumerated by SFI must be present and enabled */ | 48 | /* All CPUs enumerated by SFI must be present and enabled */ |
49 | void __cpuinit mp_sfi_register_lapic(u8 id) | 49 | static void __cpuinit mp_sfi_register_lapic(u8 id) |
50 | { | 50 | { |
51 | if (MAX_APICS - id <= 0) { | 51 | if (MAX_APICS - id <= 0) { |
52 | pr_warning("Processor #%d invalid (max %d)\n", | 52 | pr_warning("Processor #%d invalid (max %d)\n", |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b3bfc4dd708..dfb50890b5b7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #include <asm/pgtable.h> | 62 | #include <asm/pgtable.h> |
63 | #include <asm/tlbflush.h> | 63 | #include <asm/tlbflush.h> |
64 | #include <asm/mtrr.h> | 64 | #include <asm/mtrr.h> |
65 | #include <asm/vmi.h> | 65 | #include <asm/mwait.h> |
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | #include <asm/setup.h> | 67 | #include <asm/setup.h> |
68 | #include <asm/uv/uv.h> | 68 | #include <asm/uv/uv.h> |
@@ -311,7 +311,6 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
311 | __flush_tlb_all(); | 311 | __flush_tlb_all(); |
312 | #endif | 312 | #endif |
313 | 313 | ||
314 | vmi_bringup(); | ||
315 | cpu_init(); | 314 | cpu_init(); |
316 | preempt_disable(); | 315 | preempt_disable(); |
317 | smp_callin(); | 316 | smp_callin(); |
@@ -324,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
324 | check_tsc_sync_target(); | 323 | check_tsc_sync_target(); |
325 | 324 | ||
326 | if (nmi_watchdog == NMI_IO_APIC) { | 325 | if (nmi_watchdog == NMI_IO_APIC) { |
327 | legacy_pic->chip->mask(0); | 326 | legacy_pic->mask(0); |
328 | enable_NMI_through_LVT0(); | 327 | enable_NMI_through_LVT0(); |
329 | legacy_pic->chip->unmask(0); | 328 | legacy_pic->unmask(0); |
330 | } | 329 | } |
331 | 330 | ||
332 | /* This must be done before setting cpu_online_mask */ | 331 | /* This must be done before setting cpu_online_mask */ |
@@ -397,6 +396,19 @@ void __cpuinit smp_store_cpu_info(int id) | |||
397 | identify_secondary_cpu(c); | 396 | identify_secondary_cpu(c); |
398 | } | 397 | } |
399 | 398 | ||
399 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | ||
400 | { | ||
401 | struct cpuinfo_x86 *c1 = &cpu_data(cpu1); | ||
402 | struct cpuinfo_x86 *c2 = &cpu_data(cpu2); | ||
403 | |||
404 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | ||
405 | cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); | ||
406 | cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); | ||
407 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | ||
408 | cpumask_set_cpu(cpu1, c2->llc_shared_map); | ||
409 | cpumask_set_cpu(cpu2, c1->llc_shared_map); | ||
410 | } | ||
411 | |||
400 | 412 | ||
401 | void __cpuinit set_cpu_sibling_map(int cpu) | 413 | void __cpuinit set_cpu_sibling_map(int cpu) |
402 | { | 414 | { |
@@ -409,14 +421,13 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
409 | for_each_cpu(i, cpu_sibling_setup_mask) { | 421 | for_each_cpu(i, cpu_sibling_setup_mask) { |
410 | struct cpuinfo_x86 *o = &cpu_data(i); | 422 | struct cpuinfo_x86 *o = &cpu_data(i); |
411 | 423 | ||
412 | if (c->phys_proc_id == o->phys_proc_id && | 424 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { |
413 | c->cpu_core_id == o->cpu_core_id) { | 425 | if (c->phys_proc_id == o->phys_proc_id && |
414 | cpumask_set_cpu(i, cpu_sibling_mask(cpu)); | 426 | c->compute_unit_id == o->compute_unit_id) |
415 | cpumask_set_cpu(cpu, cpu_sibling_mask(i)); | 427 | link_thread_siblings(cpu, i); |
416 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 428 | } else if (c->phys_proc_id == o->phys_proc_id && |
417 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 429 | c->cpu_core_id == o->cpu_core_id) { |
418 | cpumask_set_cpu(i, c->llc_shared_map); | 430 | link_thread_siblings(cpu, i); |
419 | cpumask_set_cpu(cpu, o->llc_shared_map); | ||
420 | } | 431 | } |
421 | } | 432 | } |
422 | } else { | 433 | } else { |
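The sibling-map rework folds the six repeated cpumask_set_cpu() calls into link_thread_siblings() and adds a second matching rule: on CPUs advertising X86_FEATURE_TOPOEXT (AMD's extended topology), threads that share a compute unit are linked as siblings even though their cpu_core_id values differ. A hedged sketch of the resulting predicate, using only the cpuinfo_x86 fields visible in the hunk:

	/* Sketch: when do two CPUs count as thread siblings after this change? */
	static bool cpus_are_thread_siblings(struct cpuinfo_x86 *a, struct cpuinfo_x86 *b)
	{
		if (a->phys_proc_id != b->phys_proc_id)
			return false;			/* different packages */
		if (cpu_has(a, X86_FEATURE_TOPOEXT))
			return a->compute_unit_id == b->compute_unit_id;
		return a->cpu_core_id == b->cpu_core_id;
	}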
@@ -1109,8 +1120,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1109 | } | 1120 | } |
1110 | set_cpu_sibling_map(0); | 1121 | set_cpu_sibling_map(0); |
1111 | 1122 | ||
1112 | enable_IR_x2apic(); | ||
1113 | default_setup_apic_routing(); | ||
1114 | 1123 | ||
1115 | if (smp_sanity_check(max_cpus) < 0) { | 1124 | if (smp_sanity_check(max_cpus) < 0) { |
1116 | printk(KERN_INFO "SMP disabled\n"); | 1125 | printk(KERN_INFO "SMP disabled\n"); |
@@ -1118,6 +1127,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1118 | goto out; | 1127 | goto out; |
1119 | } | 1128 | } |
1120 | 1129 | ||
1130 | default_setup_apic_routing(); | ||
1131 | |||
1121 | preempt_disable(); | 1132 | preempt_disable(); |
1122 | if (read_apic_id() != boot_cpu_physical_apicid) { | 1133 | if (read_apic_id() != boot_cpu_physical_apicid) { |
1123 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | 1134 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
@@ -1383,11 +1394,88 @@ void play_dead_common(void) | |||
1383 | local_irq_disable(); | 1394 | local_irq_disable(); |
1384 | } | 1395 | } |
1385 | 1396 | ||
1397 | /* | ||
1398 | * We need to flush the caches before going to sleep, lest we have | ||
1399 | * dirty data in our caches when we come back up. | ||
1400 | */ | ||
1401 | static inline void mwait_play_dead(void) | ||
1402 | { | ||
1403 | unsigned int eax, ebx, ecx, edx; | ||
1404 | unsigned int highest_cstate = 0; | ||
1405 | unsigned int highest_subcstate = 0; | ||
1406 | int i; | ||
1407 | void *mwait_ptr; | ||
1408 | |||
1409 | if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT)) | ||
1410 | return; | ||
1411 | if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH)) | ||
1412 | return; | ||
1413 | if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | ||
1414 | return; | ||
1415 | |||
1416 | eax = CPUID_MWAIT_LEAF; | ||
1417 | ecx = 0; | ||
1418 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
1419 | |||
1420 | /* | ||
1421 | * eax will be 0 if EDX enumeration is not valid. | ||
1422 | * Initialized below to cstate, sub_cstate value when EDX is valid. | ||
1423 | */ | ||
1424 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { | ||
1425 | eax = 0; | ||
1426 | } else { | ||
1427 | edx >>= MWAIT_SUBSTATE_SIZE; | ||
1428 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { | ||
1429 | if (edx & MWAIT_SUBSTATE_MASK) { | ||
1430 | highest_cstate = i; | ||
1431 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; | ||
1432 | } | ||
1433 | } | ||
1434 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | | ||
1435 | (highest_subcstate - 1); | ||
1436 | } | ||
1437 | |||
1438 | /* | ||
1439 | * This should be a memory location in a cache line which is | ||
1440 | * unlikely to be touched by other processors. The actual | ||
1441 | * content is immaterial as it is not actually modified in any way. | ||
1442 | */ | ||
1443 | mwait_ptr = &current_thread_info()->flags; | ||
1444 | |||
1445 | wbinvd(); | ||
1446 | |||
1447 | while (1) { | ||
1448 | /* | ||
1449 | * The CLFLUSH is a workaround for erratum AAI65 for | ||
1450 | * the Xeon 7400 series. It's not clear it is actually | ||
1451 | * needed, but it should be harmless in either case. | ||
1452 | * The WBINVD is insufficient due to the spurious-wakeup | ||
1453 | * case where we return around the loop. | ||
1454 | */ | ||
1455 | clflush(mwait_ptr); | ||
1456 | __monitor(mwait_ptr, 0, 0); | ||
1457 | mb(); | ||
1458 | __mwait(eax, 0); | ||
1459 | } | ||
1460 | } | ||
1461 | |||
1462 | static inline void hlt_play_dead(void) | ||
1463 | { | ||
1464 | if (current_cpu_data.x86 >= 4) | ||
1465 | wbinvd(); | ||
1466 | |||
1467 | while (1) { | ||
1468 | native_halt(); | ||
1469 | } | ||
1470 | } | ||
1471 | |||
1386 | void native_play_dead(void) | 1472 | void native_play_dead(void) |
1387 | { | 1473 | { |
1388 | play_dead_common(); | 1474 | play_dead_common(); |
1389 | tboot_shutdown(TB_SHUTDOWN_WFS); | 1475 | tboot_shutdown(TB_SHUTDOWN_WFS); |
1390 | wbinvd_halt(); | 1476 | |
1477 | mwait_play_dead(); /* Only returns on failure */ | ||
1478 | hlt_play_dead(); | ||
1391 | } | 1479 | } |
1392 | 1480 | ||
1393 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 1481 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
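mwait_play_dead() walks CPUID leaf 5 (CPUID_MWAIT_LEAF) for the deepest advertised C-state and packs it into the EAX hint that __mwait() consumes: with MWAIT_SUBSTATE_SIZE being 4 bits, the C-state index lands in bits 7:4 and the sub-state in bits 3:0. A sketch of the encoding, assuming the constants from <asm/mwait.h>:

	/* Sketch: build the MWAIT hint used above; e.g. cstate 2 with
	 * substate 1 encodes as 0x20.                                  */
	static unsigned int mwait_hint(unsigned int cstate, unsigned int substate)
	{
		return (cstate << MWAIT_SUBSTATE_SIZE) | (substate - 1);
	}

The clflush()/__monitor()/__mwait() loop then re-arms the monitor after every spurious wakeup, which is why the single wbinvd() before the loop is not sufficient on its own.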
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index d5e06624e34a..0b0cb5fede19 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
@@ -33,8 +33,8 @@ int kernel_execve(const char *filename, | |||
33 | const char *const envp[]) | 33 | const char *const envp[]) |
34 | { | 34 | { |
35 | long __res; | 35 | long __res; |
36 | asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" | 36 | asm volatile ("int $0x80" |
37 | : "=a" (__res) | 37 | : "=a" (__res) |
38 | : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory"); | 38 | : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory"); |
39 | return __res; | 39 | return __res; |
40 | } | 40 | } |
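The rewritten kernel_execve() hands the first argument straight to a "b" constraint instead of shuffling it through %ebx by hand; the manual save/restore guarded %ebx for PIC builds, where it holds the GOT pointer, and the kernel is not compiled PIC. For reference, a sketch of the i386 int $0x80 convention (__NR_write is 4 on i386; the helper name is illustrative):

	/* Sketch: syscall number in %eax, arguments in %ebx/%ecx/%edx/
	 * %esi/%edi/%ebp, result returned in %eax.                     */
	static long write_int80(int fd, const void *buf, unsigned long len)
	{
		long ret;
		asm volatile ("int $0x80"
			      : "=a" (ret)
			      : "0" (4), "b" (fd), "c" (buf), "d" (len)
			      : "memory");
		return ret;
	}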
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 60788dee0f8a..d43968503dd2 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -776,21 +776,10 @@ asmlinkage void math_state_restore(void) | |||
776 | } | 776 | } |
777 | EXPORT_SYMBOL_GPL(math_state_restore); | 777 | EXPORT_SYMBOL_GPL(math_state_restore); |
778 | 778 | ||
779 | #ifndef CONFIG_MATH_EMULATION | ||
780 | void math_emulate(struct math_emu_info *info) | ||
781 | { | ||
782 | printk(KERN_EMERG | ||
783 | "math-emulation not enabled and no coprocessor found.\n"); | ||
784 | printk(KERN_EMERG "killing %s.\n", current->comm); | ||
785 | force_sig(SIGFPE, current); | ||
786 | schedule(); | ||
787 | } | ||
788 | #endif /* CONFIG_MATH_EMULATION */ | ||
789 | |||
790 | dotraplinkage void __kprobes | 779 | dotraplinkage void __kprobes |
791 | do_device_not_available(struct pt_regs *regs, long error_code) | 780 | do_device_not_available(struct pt_regs *regs, long error_code) |
792 | { | 781 | { |
793 | #ifdef CONFIG_X86_32 | 782 | #ifdef CONFIG_MATH_EMULATION |
794 | if (read_cr0() & X86_CR0_EM) { | 783 | if (read_cr0() & X86_CR0_EM) { |
795 | struct math_emu_info info = { }; | 784 | struct math_emu_info info = { }; |
796 | 785 | ||
@@ -798,12 +787,12 @@ do_device_not_available(struct pt_regs *regs, long error_code) | |||
798 | 787 | ||
799 | info.regs = regs; | 788 | info.regs = regs; |
800 | math_emulate(&info); | 789 | math_emulate(&info); |
801 | } else { | 790 | return; |
802 | math_state_restore(); /* interrupts still off */ | ||
803 | conditional_sti(regs); | ||
804 | } | 791 | } |
805 | #else | 792 | #endif |
806 | math_state_restore(); | 793 | math_state_restore(); /* interrupts still off */ |
794 | #ifdef CONFIG_X86_32 | ||
795 | conditional_sti(regs); | ||
807 | #endif | 796 | #endif |
808 | } | 797 | } |
809 | 798 | ||
@@ -881,18 +870,6 @@ void __init trap_init(void) | |||
881 | #endif | 870 | #endif |
882 | 871 | ||
883 | #ifdef CONFIG_X86_32 | 872 | #ifdef CONFIG_X86_32 |
884 | if (cpu_has_fxsr) { | ||
885 | printk(KERN_INFO "Enabling fast FPU save and restore... "); | ||
886 | set_in_cr4(X86_CR4_OSFXSR); | ||
887 | printk("done.\n"); | ||
888 | } | ||
889 | if (cpu_has_xmm) { | ||
890 | printk(KERN_INFO | ||
891 | "Enabling unmasked SIMD FPU exception support... "); | ||
892 | set_in_cr4(X86_CR4_OSXMMEXCPT); | ||
893 | printk("done.\n"); | ||
894 | } | ||
895 | |||
896 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); | 873 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); |
897 | set_bit(SYSCALL_VECTOR, used_vectors); | 874 | set_bit(SYSCALL_VECTOR, used_vectors); |
898 | #endif | 875 | #endif |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 26a863a9c2a8..0c40d8b72416 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -104,10 +104,14 @@ int __init notsc_setup(char *str) | |||
104 | 104 | ||
105 | __setup("notsc", notsc_setup); | 105 | __setup("notsc", notsc_setup); |
106 | 106 | ||
107 | static int no_sched_irq_time; | ||
108 | |||
107 | static int __init tsc_setup(char *str) | 109 | static int __init tsc_setup(char *str) |
108 | { | 110 | { |
109 | if (!strcmp(str, "reliable")) | 111 | if (!strcmp(str, "reliable")) |
110 | tsc_clocksource_reliable = 1; | 112 | tsc_clocksource_reliable = 1; |
113 | if (!strncmp(str, "noirqtime", 9)) | ||
114 | no_sched_irq_time = 1; | ||
111 | return 1; | 115 | return 1; |
112 | } | 116 | } |
113 | 117 | ||
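With this hunk the existing tsc= early parameter grows a second keyword alongside "reliable": tsc=noirqtime keeps fine-grained IRQ-time accounting off even when the TSC is usable, and mark_tsc_unstable() below now disables it automatically once the TSC stops being trustworthy. On the kernel command line:

	tsc=reliable	mark the TSC reliable (skip clocksource watchdog checks)
	tsc=noirqtime	do not use the TSC for IRQ-time accounting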
@@ -801,6 +805,7 @@ void mark_tsc_unstable(char *reason) | |||
801 | if (!tsc_unstable) { | 805 | if (!tsc_unstable) { |
802 | tsc_unstable = 1; | 806 | tsc_unstable = 1; |
803 | sched_clock_stable = 0; | 807 | sched_clock_stable = 0; |
808 | disable_sched_clock_irqtime(); | ||
804 | printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); | 809 | printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); |
805 | /* Change only the rating, when not registered */ | 810 | /* Change only the rating, when not registered */ |
806 | if (clocksource_tsc.mult) | 811 | if (clocksource_tsc.mult) |
@@ -892,60 +897,6 @@ static void __init init_tsc_clocksource(void) | |||
892 | clocksource_register_khz(&clocksource_tsc, tsc_khz); | 897 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
893 | } | 898 | } |
894 | 899 | ||
895 | #ifdef CONFIG_X86_64 | ||
896 | /* | ||
897 | * calibrate_cpu is used on systems with fixed rate TSCs to determine | ||
898 | * processor frequency | ||
899 | */ | ||
900 | #define TICK_COUNT 100000000 | ||
901 | static unsigned long __init calibrate_cpu(void) | ||
902 | { | ||
903 | int tsc_start, tsc_now; | ||
904 | int i, no_ctr_free; | ||
905 | unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0; | ||
906 | unsigned long flags; | ||
907 | |||
908 | for (i = 0; i < 4; i++) | ||
909 | if (avail_to_resrv_perfctr_nmi_bit(i)) | ||
910 | break; | ||
911 | no_ctr_free = (i == 4); | ||
912 | if (no_ctr_free) { | ||
913 | WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... " | ||
914 | "cpu_khz value may be incorrect.\n"); | ||
915 | i = 3; | ||
916 | rdmsrl(MSR_K7_EVNTSEL3, evntsel3); | ||
917 | wrmsrl(MSR_K7_EVNTSEL3, 0); | ||
918 | rdmsrl(MSR_K7_PERFCTR3, pmc3); | ||
919 | } else { | ||
920 | reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
921 | reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
922 | } | ||
923 | local_irq_save(flags); | ||
924 | /* start measuring cycles, incrementing from 0 */ | ||
925 | wrmsrl(MSR_K7_PERFCTR0 + i, 0); | ||
926 | wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76); | ||
927 | rdtscl(tsc_start); | ||
928 | do { | ||
929 | rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now); | ||
930 | tsc_now = get_cycles(); | ||
931 | } while ((tsc_now - tsc_start) < TICK_COUNT); | ||
932 | |||
933 | local_irq_restore(flags); | ||
934 | if (no_ctr_free) { | ||
935 | wrmsrl(MSR_K7_EVNTSEL3, 0); | ||
936 | wrmsrl(MSR_K7_PERFCTR3, pmc3); | ||
937 | wrmsrl(MSR_K7_EVNTSEL3, evntsel3); | ||
938 | } else { | ||
939 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
940 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
941 | } | ||
942 | |||
943 | return pmc_now * tsc_khz / (tsc_now - tsc_start); | ||
944 | } | ||
945 | #else | ||
946 | static inline unsigned long calibrate_cpu(void) { return cpu_khz; } | ||
947 | #endif | ||
948 | |||
949 | void __init tsc_init(void) | 900 | void __init tsc_init(void) |
950 | { | 901 | { |
951 | u64 lpj; | 902 | u64 lpj; |
@@ -964,10 +915,6 @@ void __init tsc_init(void) | |||
964 | return; | 915 | return; |
965 | } | 916 | } |
966 | 917 | ||
967 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && | ||
968 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) | ||
969 | cpu_khz = calibrate_cpu(); | ||
970 | |||
971 | printk("Detected %lu.%03lu MHz processor.\n", | 918 | printk("Detected %lu.%03lu MHz processor.\n", |
972 | (unsigned long)cpu_khz / 1000, | 919 | (unsigned long)cpu_khz / 1000, |
973 | (unsigned long)cpu_khz % 1000); | 920 | (unsigned long)cpu_khz % 1000); |
@@ -987,6 +934,9 @@ void __init tsc_init(void) | |||
987 | /* now allow native_sched_clock() to use rdtsc */ | 934 | /* now allow native_sched_clock() to use rdtsc */ |
988 | tsc_disabled = 0; | 935 | tsc_disabled = 0; |
989 | 936 | ||
937 | if (!no_sched_irq_time) | ||
938 | enable_sched_clock_irqtime(); | ||
939 | |||
990 | lpj = ((u64)tsc_khz * 1000); | 940 | lpj = ((u64)tsc_khz * 1000); |
991 | do_div(lpj, HZ); | 941 | do_div(lpj, HZ); |
992 | lpj_fine = lpj; | 942 | lpj_fine = lpj; |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 1132129db792..7b24460917d5 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
@@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{ | |||
28 | static spinlock_t uv_irq_lock; | 28 | static spinlock_t uv_irq_lock; |
29 | static struct rb_root uv_irq_root; | 29 | static struct rb_root uv_irq_root; |
30 | 30 | ||
31 | static int uv_set_irq_affinity(unsigned int, const struct cpumask *); | 31 | static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); |
32 | 32 | ||
33 | static void uv_noop(unsigned int irq) | 33 | static void uv_noop(struct irq_data *data) { } |
34 | { | ||
35 | } | ||
36 | |||
37 | static unsigned int uv_noop_ret(unsigned int irq) | ||
38 | { | ||
39 | return 0; | ||
40 | } | ||
41 | 34 | ||
42 | static void uv_ack_apic(unsigned int irq) | 35 | static void uv_ack_apic(struct irq_data *data) |
43 | { | 36 | { |
44 | ack_APIC_irq(); | 37 | ack_APIC_irq(); |
45 | } | 38 | } |
46 | 39 | ||
47 | static struct irq_chip uv_irq_chip = { | 40 | static struct irq_chip uv_irq_chip = { |
48 | .name = "UV-CORE", | 41 | .name = "UV-CORE", |
49 | .startup = uv_noop_ret, | 42 | .irq_mask = uv_noop, |
50 | .shutdown = uv_noop, | 43 | .irq_unmask = uv_noop, |
51 | .enable = uv_noop, | 44 | .irq_eoi = uv_ack_apic, |
52 | .disable = uv_noop, | 45 | .irq_set_affinity = uv_set_irq_affinity, |
53 | .ack = uv_noop, | ||
54 | .mask = uv_noop, | ||
55 | .unmask = uv_noop, | ||
56 | .eoi = uv_ack_apic, | ||
57 | .end = uv_noop, | ||
58 | .set_affinity = uv_set_irq_affinity, | ||
59 | }; | 46 | }; |
60 | 47 | ||
61 | /* | 48 | /* |
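The uv_irq_chip conversion is part of the tree-wide move to irq_data-based chip methods: each callback now receives a struct irq_data carrying both the irq number and the chip_data pointer, and the explicit no-op startup/shutdown/enable/disable stubs can go because the genirq core supplies defaults. A hedged sketch of the new callback shape (the function and message are illustrative):

	/* Sketch: an irq_data-based callback recovers everything from its
	 * single argument instead of taking a bare irq number.           */
	static void example_irq_mask(struct irq_data *data)
	{
		struct irq_cfg *cfg = data->chip_data;

		pr_debug("mask irq %u (vector %u)\n",
			 data->irq, (unsigned int)cfg->vector);
	}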
@@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
144 | unsigned long mmr_offset, int limit) | 131 | unsigned long mmr_offset, int limit) |
145 | { | 132 | { |
146 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
147 | struct irq_desc *desc = irq_to_desc(irq); | 134 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
148 | struct irq_cfg *cfg; | ||
149 | int mmr_pnode; | ||
150 | unsigned long mmr_value; | 135 | unsigned long mmr_value; |
151 | struct uv_IO_APIC_route_entry *entry; | 136 | struct uv_IO_APIC_route_entry *entry; |
152 | int err; | 137 | int mmr_pnode, err; |
153 | 138 | ||
154 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | 139 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != |
155 | sizeof(unsigned long)); | 140 | sizeof(unsigned long)); |
156 | 141 | ||
157 | cfg = irq_cfg(irq); | ||
158 | |||
159 | err = assign_irq_vector(irq, cfg, eligible_cpu); | 142 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
160 | if (err != 0) | 143 | if (err != 0) |
161 | return err; | 144 | return err; |
162 | 145 | ||
163 | if (limit == UV_AFFINITY_CPU) | 146 | if (limit == UV_AFFINITY_CPU) |
164 | desc->status |= IRQ_NO_BALANCING; | 147 | irq_set_status_flags(irq, IRQ_NO_BALANCING); |
165 | else | 148 | else |
166 | desc->status |= IRQ_MOVE_PCNTXT; | 149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
167 | 150 | ||
168 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | 151 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, |
169 | irq_name); | 152 | irq_name); |
@@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) | |||
206 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 189 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
207 | } | 190 | } |
208 | 191 | ||
209 | static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | 192 | static int |
193 | uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, | ||
194 | bool force) | ||
210 | { | 195 | { |
211 | struct irq_desc *desc = irq_to_desc(irq); | 196 | struct irq_cfg *cfg = data->chip_data; |
212 | struct irq_cfg *cfg = desc->chip_data; | ||
213 | unsigned int dest; | 197 | unsigned int dest; |
214 | unsigned long mmr_value; | 198 | unsigned long mmr_value, mmr_offset; |
215 | struct uv_IO_APIC_route_entry *entry; | 199 | struct uv_IO_APIC_route_entry *entry; |
216 | unsigned long mmr_offset; | ||
217 | int mmr_pnode; | 200 | int mmr_pnode; |
218 | 201 | ||
219 | if (set_desc_affinity(desc, mask, &dest)) | 202 | if (__ioapic_set_affinity(data, mask, &dest)) |
220 | return -1; | 203 | return -1; |
221 | 204 | ||
222 | mmr_value = 0; | 205 | mmr_value = 0; |
@@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
231 | entry->dest = dest; | 214 | entry->dest = dest; |
232 | 215 | ||
233 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ | 216 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ |
234 | if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) | 217 | if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) |
235 | return -1; | 218 | return -1; |
236 | 219 | ||
237 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 220 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index e680ea52db9b..3371bd053b89 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -66,10 +66,7 @@ static void __init visws_time_init(void) | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /* Replaces the default init_ISA_irqs in the generic setup */ | 68 | /* Replaces the default init_ISA_irqs in the generic setup */ |
69 | static void __init visws_pre_intr_init(void) | 69 | static void __init visws_pre_intr_init(void); |
70 | { | ||
71 | init_VISWS_APIC_irqs(); | ||
72 | } | ||
73 | 70 | ||
74 | /* Quirk for machine specific memory setup. */ | 71 | /* Quirk for machine specific memory setup. */ |
75 | 72 | ||
@@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq) | |||
429 | /* | 426 | /* |
430 | * This is the SGI Cobalt (IO-)APIC: | 427 | * This is the SGI Cobalt (IO-)APIC: |
431 | */ | 428 | */ |
432 | 429 | static void enable_cobalt_irq(struct irq_data *data) | |
433 | static void enable_cobalt_irq(unsigned int irq) | ||
434 | { | 430 | { |
435 | co_apic_set(is_co_apic(irq), irq); | 431 | co_apic_set(is_co_apic(data->irq), data->irq); |
436 | } | 432 | } |
437 | 433 | ||
438 | static void disable_cobalt_irq(unsigned int irq) | 434 | static void disable_cobalt_irq(struct irq_data *data) |
439 | { | 435 | { |
440 | int entry = is_co_apic(irq); | 436 | int entry = is_co_apic(data->irq); |
441 | 437 | ||
442 | co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); | 438 | co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); |
443 | co_apic_read(CO_APIC_LO(entry)); | 439 | co_apic_read(CO_APIC_LO(entry)); |
444 | } | 440 | } |
445 | 441 | ||
446 | /* | 442 | static void ack_cobalt_irq(struct irq_data *data) |
447 | * "irq" really just serves to identify the device. Here is where we | ||
448 | * map this to the Cobalt APIC entry where it's physically wired. | ||
449 | * This is called via request_irq -> setup_irq -> irq_desc->startup() | ||
450 | */ | ||
451 | static unsigned int startup_cobalt_irq(unsigned int irq) | ||
452 | { | 443 | { |
453 | unsigned long flags; | 444 | unsigned long flags; |
454 | struct irq_desc *desc = irq_to_desc(irq); | ||
455 | 445 | ||
456 | spin_lock_irqsave(&cobalt_lock, flags); | 446 | spin_lock_irqsave(&cobalt_lock, flags); |
457 | if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) | 447 | disable_cobalt_irq(data); |
458 | desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); | ||
459 | enable_cobalt_irq(irq); | ||
460 | spin_unlock_irqrestore(&cobalt_lock, flags); | ||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | static void ack_cobalt_irq(unsigned int irq) | ||
465 | { | ||
466 | unsigned long flags; | ||
467 | |||
468 | spin_lock_irqsave(&cobalt_lock, flags); | ||
469 | disable_cobalt_irq(irq); | ||
470 | apic_write(APIC_EOI, APIC_EIO_ACK); | 448 | apic_write(APIC_EOI, APIC_EIO_ACK); |
471 | spin_unlock_irqrestore(&cobalt_lock, flags); | 449 | spin_unlock_irqrestore(&cobalt_lock, flags); |
472 | } | 450 | } |
473 | 451 | ||
474 | static void end_cobalt_irq(unsigned int irq) | ||
475 | { | ||
476 | unsigned long flags; | ||
477 | struct irq_desc *desc = irq_to_desc(irq); | ||
478 | |||
479 | spin_lock_irqsave(&cobalt_lock, flags); | ||
480 | if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
481 | enable_cobalt_irq(irq); | ||
482 | spin_unlock_irqrestore(&cobalt_lock, flags); | ||
483 | } | ||
484 | |||
485 | static struct irq_chip cobalt_irq_type = { | 452 | static struct irq_chip cobalt_irq_type = { |
486 | .name = "Cobalt-APIC", | 453 | .name = "Cobalt-APIC", |
487 | .startup = startup_cobalt_irq, | 454 | .irq_enable = enable_cobalt_irq, |
488 | .shutdown = disable_cobalt_irq, | 455 | .irq_disable = disable_cobalt_irq, |
489 | .enable = enable_cobalt_irq, | 456 | .irq_ack = ack_cobalt_irq, |
490 | .disable = disable_cobalt_irq, | ||
491 | .ack = ack_cobalt_irq, | ||
492 | .end = end_cobalt_irq, | ||
493 | }; | 457 | }; |
494 | 458 | ||
495 | 459 | ||
@@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = { | |||
503 | * interrupt controller type, and through a special virtual interrupt- | 467 | * interrupt controller type, and through a special virtual interrupt- |
504 | * controller. Device drivers only see the virtual interrupt sources. | 468 | * controller. Device drivers only see the virtual interrupt sources. |
505 | */ | 469 | */ |
506 | static unsigned int startup_piix4_master_irq(unsigned int irq) | 470 | static unsigned int startup_piix4_master_irq(struct irq_data *data) |
507 | { | 471 | { |
508 | legacy_pic->init(0); | 472 | legacy_pic->init(0); |
509 | 473 | enable_cobalt_irq(data); | |
510 | return startup_cobalt_irq(irq); | ||
511 | } | 474 | } |
512 | 475 | ||
513 | static void end_piix4_master_irq(unsigned int irq) | 476 | static void end_piix4_master_irq(struct irq_data *data) |
514 | { | 477 | { |
515 | unsigned long flags; | 478 | unsigned long flags; |
516 | 479 | ||
517 | spin_lock_irqsave(&cobalt_lock, flags); | 480 | spin_lock_irqsave(&cobalt_lock, flags); |
518 | enable_cobalt_irq(irq); | 481 | enable_cobalt_irq(data); |
519 | spin_unlock_irqrestore(&cobalt_lock, flags); | 482 | spin_unlock_irqrestore(&cobalt_lock, flags); |
520 | } | 483 | } |
521 | 484 | ||
522 | static struct irq_chip piix4_master_irq_type = { | 485 | static struct irq_chip piix4_master_irq_type = { |
523 | .name = "PIIX4-master", | 486 | .name = "PIIX4-master", |
524 | .startup = startup_piix4_master_irq, | 487 | .irq_startup = startup_piix4_master_irq, |
525 | .ack = ack_cobalt_irq, | 488 | .irq_ack = ack_cobalt_irq, |
526 | .end = end_piix4_master_irq, | ||
527 | }; | 489 | }; |
528 | 490 | ||
491 | static void pii4_mask(struct irq_data *data) { } | ||
529 | 492 | ||
530 | static struct irq_chip piix4_virtual_irq_type = { | 493 | static struct irq_chip piix4_virtual_irq_type = { |
531 | .name = "PIIX4-virtual", | 494 | .name = "PIIX4-virtual", |
495 | .mask = pii4_mask, | ||
532 | }; | 496 | }; |
533 | 497 | ||
534 | |||
535 | /* | 498 | /* |
536 | * PIIX4-8259 master/virtual functions to handle interrupt requests | 499 | * PIIX4-8259 master/virtual functions to handle interrupt requests |
537 | * from legacy devices: floppy, parallel, serial, rtc. | 500 | * from legacy devices: floppy, parallel, serial, rtc. |
@@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = { | |||
549 | */ | 512 | */ |
550 | static irqreturn_t piix4_master_intr(int irq, void *dev_id) | 513 | static irqreturn_t piix4_master_intr(int irq, void *dev_id) |
551 | { | 514 | { |
552 | int realirq; | ||
553 | struct irq_desc *desc; | ||
554 | unsigned long flags; | 515 | unsigned long flags; |
516 | int realirq; | ||
555 | 517 | ||
556 | raw_spin_lock_irqsave(&i8259A_lock, flags); | 518 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
557 | 519 | ||
@@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) | |||
592 | 554 | ||
593 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 555 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
594 | 556 | ||
595 | desc = irq_to_desc(realirq); | ||
596 | |||
597 | /* | 557 | /* |
598 | * handle this 'virtual interrupt' as a Cobalt one now. | 558 | * handle this 'virtual interrupt' as a Cobalt one now. |
599 | */ | 559 | */ |
600 | kstat_incr_irqs_this_cpu(realirq, desc); | 560 | generic_handle_irq(realirq); |
601 | |||
602 | if (likely(desc->action != NULL)) | ||
603 | handle_IRQ_event(realirq, desc->action); | ||
604 | |||
605 | if (!(desc->status & IRQ_DISABLED)) | ||
606 | legacy_pic->chip->unmask(realirq); | ||
607 | 561 | ||
608 | return IRQ_HANDLED; | 562 | return IRQ_HANDLED; |
609 | 563 | ||
@@ -624,41 +578,35 @@ static struct irqaction cascade_action = { | |||
624 | 578 | ||
625 | static inline void set_piix4_virtual_irq_type(void) | 579 | static inline void set_piix4_virtual_irq_type(void) |
626 | { | 580 | { |
627 | piix4_virtual_irq_type.shutdown = i8259A_chip.mask; | ||
628 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; | 581 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; |
629 | piix4_virtual_irq_type.disable = i8259A_chip.mask; | 582 | piix4_virtual_irq_type.disable = i8259A_chip.mask; |
583 | piix4_virtual_irq_type.unmask = i8259A_chip.unmask; | ||
630 | } | 584 | } |
631 | 585 | ||
632 | void init_VISWS_APIC_irqs(void) | 586 | static void __init visws_pre_intr_init(void) |
633 | { | 587 | { |
634 | int i; | 588 | int i; |
635 | 589 | ||
636 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { | 590 | set_piix4_virtual_irq_type(); |
637 | struct irq_desc *desc = irq_to_desc(i); | ||
638 | |||
639 | desc->status = IRQ_DISABLED; | ||
640 | desc->action = 0; | ||
641 | desc->depth = 1; | ||
642 | 591 | ||
643 | if (i == 0) { | 592 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { |
644 | desc->chip = &cobalt_irq_type; | 593 | struct irq_chip *chip = NULL; |
645 | } | 594 | |
646 | else if (i == CO_IRQ_IDE0) { | 595 | if (i == 0) |
647 | desc->chip = &cobalt_irq_type; | 596 | chip = &cobalt_irq_type; |
648 | } | 597 | else if (i == CO_IRQ_IDE0) |
649 | else if (i == CO_IRQ_IDE1) { | 598 | chip = &cobalt_irq_type; |
650 | desc->chip = &cobalt_irq_type; | 599 | else if (i == CO_IRQ_IDE1) |
651 | } | 600 | chip = &cobalt_irq_type; |
652 | else if (i == CO_IRQ_8259) { | 601 | else if (i == CO_IRQ_8259) |
653 | desc->chip = &piix4_master_irq_type; | 602 | chip = &piix4_master_irq_type; |
654 | } | 603 | else if (i < CO_IRQ_APIC0) |
655 | else if (i < CO_IRQ_APIC0) { | 604 | chip = &piix4_virtual_irq_type; |
656 | set_piix4_virtual_irq_type(); | 605 | else if (IS_CO_APIC(i)) |
657 | desc->chip = &piix4_virtual_irq_type; | 606 | chip = &cobalt_irq_type; |
658 | } | 607 | |
659 | else if (IS_CO_APIC(i)) { | 608 | if (chip) |
660 | desc->chip = &cobalt_irq_type; | 609 | set_irq_chip(i, chip); |
661 | } | ||
662 | } | 610 | } |
663 | 611 | ||
664 | setup_irq(CO_IRQ_8259, &master_action); | 612 | setup_irq(CO_IRQ_8259, &master_action); |
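The rewritten visws_pre_intr_init() stops poking irq_desc fields (status, action, depth, chip) by hand: it only chooses a chip per interrupt and registers it with set_irq_chip(), leaving all descriptor state to the genirq core. The registration pattern it converges on, as a sketch (helper name illustrative):

	/* Sketch: install a quirk chip without touching irq_desc directly. */
	static void __init install_quirk_chip(unsigned int irq, struct irq_chip *chip)
	{
		if (chip)
			set_irq_chip(irq, chip);
	}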
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c deleted file mode 100644 index ce9fbacb7526..000000000000 --- a/arch/x86/kernel/vmi_32.c +++ /dev/null | |||
@@ -1,893 +0,0 @@ | |||
1 | /* | ||
2 | * VMI specific paravirt-ops implementation | ||
3 | * | ||
4 | * Copyright (C) 2005, VMware, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | * Send feedback to zach@vmware.com | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/module.h> | ||
26 | #include <linux/cpu.h> | ||
27 | #include <linux/bootmem.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/gfp.h> | ||
32 | #include <asm/vmi.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/fixmap.h> | ||
35 | #include <asm/apicdef.h> | ||
36 | #include <asm/apic.h> | ||
37 | #include <asm/pgalloc.h> | ||
38 | #include <asm/processor.h> | ||
39 | #include <asm/timer.h> | ||
40 | #include <asm/vmi_time.h> | ||
41 | #include <asm/kmap_types.h> | ||
42 | #include <asm/setup.h> | ||
43 | |||
44 | /* Convenient for calling VMI functions indirectly in the ROM */ | ||
45 | typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); | ||
46 | typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int); | ||
47 | |||
48 | #define call_vrom_func(rom,func) \ | ||
49 | (((VROMFUNC *)(rom->func))()) | ||
50 | |||
51 | #define call_vrom_long_func(rom,func,arg) \ | ||
52 | (((VROMLONGFUNC *)(rom->func)) (arg)) | ||
53 | |||
54 | static struct vrom_header *vmi_rom; | ||
55 | static int disable_pge; | ||
56 | static int disable_pse; | ||
57 | static int disable_sep; | ||
58 | static int disable_tsc; | ||
59 | static int disable_mtrr; | ||
60 | static int disable_noidle; | ||
61 | static int disable_vmi_timer; | ||
62 | |||
63 | /* Cached VMI operations */ | ||
64 | static struct { | ||
65 | void (*cpuid)(void /* non-c */); | ||
66 | void (*_set_ldt)(u32 selector); | ||
67 | void (*set_tr)(u32 selector); | ||
68 | void (*write_idt_entry)(struct desc_struct *, int, u32, u32); | ||
69 | void (*write_gdt_entry)(struct desc_struct *, int, u32, u32); | ||
70 | void (*write_ldt_entry)(struct desc_struct *, int, u32, u32); | ||
71 | void (*set_kernel_stack)(u32 selector, u32 sp0); | ||
72 | void (*allocate_page)(u32, u32, u32, u32, u32); | ||
73 | void (*release_page)(u32, u32); | ||
74 | void (*set_pte)(pte_t, pte_t *, unsigned); | ||
75 | void (*update_pte)(pte_t *, unsigned); | ||
76 | void (*set_linear_mapping)(int, void *, u32, u32); | ||
77 | void (*_flush_tlb)(int); | ||
78 | void (*set_initial_ap_state)(int, int); | ||
79 | void (*halt)(void); | ||
80 | void (*set_lazy_mode)(int mode); | ||
81 | } vmi_ops; | ||
82 | |||
83 | /* Cached VMI operations */ | ||
84 | struct vmi_timer_ops vmi_timer_ops; | ||
85 | |||
86 | /* | ||
87 | * VMI patching routines. | ||
88 | */ | ||
89 | #define MNEM_CALL 0xe8 | ||
90 | #define MNEM_JMP 0xe9 | ||
91 | #define MNEM_RET 0xc3 | ||
92 | |||
93 | #define IRQ_PATCH_INT_MASK 0 | ||
94 | #define IRQ_PATCH_DISABLE 5 | ||
95 | |||
96 | static inline void patch_offset(void *insnbuf, | ||
97 | unsigned long ip, unsigned long dest) | ||
98 | { | ||
99 | *(unsigned long *)(insnbuf+1) = dest-ip-5; | ||
100 | } | ||
101 | |||
102 | static unsigned patch_internal(int call, unsigned len, void *insnbuf, | ||
103 | unsigned long ip) | ||
104 | { | ||
105 | u64 reloc; | ||
106 | struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; | ||
107 | reloc = call_vrom_long_func(vmi_rom, get_reloc, call); | ||
108 | switch(rel->type) { | ||
109 | case VMI_RELOCATION_CALL_REL: | ||
110 | BUG_ON(len < 5); | ||
111 | *(char *)insnbuf = MNEM_CALL; | ||
112 | patch_offset(insnbuf, ip, (unsigned long)rel->eip); | ||
113 | return 5; | ||
114 | |||
115 | case VMI_RELOCATION_JUMP_REL: | ||
116 | BUG_ON(len < 5); | ||
117 | *(char *)insnbuf = MNEM_JMP; | ||
118 | patch_offset(insnbuf, ip, (unsigned long)rel->eip); | ||
119 | return 5; | ||
120 | |||
121 | case VMI_RELOCATION_NOP: | ||
122 | /* obliterate the whole thing */ | ||
123 | return 0; | ||
124 | |||
125 | case VMI_RELOCATION_NONE: | ||
126 | /* leave native code in place */ | ||
127 | break; | ||
128 | |||
129 | default: | ||
130 | BUG(); | ||
131 | } | ||
132 | return len; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Apply patch if appropriate, return length of new instruction | ||
137 | * sequence. The callee does nop padding for us. | ||
138 | */ | ||
139 | static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, | ||
140 | unsigned long ip, unsigned len) | ||
141 | { | ||
142 | switch (type) { | ||
143 | case PARAVIRT_PATCH(pv_irq_ops.irq_disable): | ||
144 | return patch_internal(VMI_CALL_DisableInterrupts, len, | ||
145 | insns, ip); | ||
146 | case PARAVIRT_PATCH(pv_irq_ops.irq_enable): | ||
147 | return patch_internal(VMI_CALL_EnableInterrupts, len, | ||
148 | insns, ip); | ||
149 | case PARAVIRT_PATCH(pv_irq_ops.restore_fl): | ||
150 | return patch_internal(VMI_CALL_SetInterruptMask, len, | ||
151 | insns, ip); | ||
152 | case PARAVIRT_PATCH(pv_irq_ops.save_fl): | ||
153 | return patch_internal(VMI_CALL_GetInterruptMask, len, | ||
154 | insns, ip); | ||
155 | case PARAVIRT_PATCH(pv_cpu_ops.iret): | ||
156 | return patch_internal(VMI_CALL_IRET, len, insns, ip); | ||
157 | case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit): | ||
158 | return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip); | ||
159 | default: | ||
160 | break; | ||
161 | } | ||
162 | return len; | ||
163 | } | ||
164 | |||
165 | /* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */ | ||
166 | static void vmi_cpuid(unsigned int *ax, unsigned int *bx, | ||
167 | unsigned int *cx, unsigned int *dx) | ||
168 | { | ||
169 | int override = 0; | ||
170 | if (*ax == 1) | ||
171 | override = 1; | ||
172 | asm volatile ("call *%6" | ||
173 | : "=a" (*ax), | ||
174 | "=b" (*bx), | ||
175 | "=c" (*cx), | ||
176 | "=d" (*dx) | ||
177 | : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid)); | ||
178 | if (override) { | ||
179 | if (disable_pse) | ||
180 | *dx &= ~X86_FEATURE_PSE; | ||
181 | if (disable_pge) | ||
182 | *dx &= ~X86_FEATURE_PGE; | ||
183 | if (disable_sep) | ||
184 | *dx &= ~X86_FEATURE_SEP; | ||
185 | if (disable_tsc) | ||
186 | *dx &= ~X86_FEATURE_TSC; | ||
187 | if (disable_mtrr) | ||
188 | *dx &= ~X86_FEATURE_MTRR; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new) | ||
193 | { | ||
194 | if (gdt[nr].a != new->a || gdt[nr].b != new->b) | ||
195 | write_gdt_entry(gdt, nr, new, 0); | ||
196 | } | ||
197 | |||
198 | static void vmi_load_tls(struct thread_struct *t, unsigned int cpu) | ||
199 | { | ||
200 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
201 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]); | ||
202 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]); | ||
203 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]); | ||
204 | } | ||
205 | |||
206 | static void vmi_set_ldt(const void *addr, unsigned entries) | ||
207 | { | ||
208 | unsigned cpu = smp_processor_id(); | ||
209 | struct desc_struct desc; | ||
210 | |||
211 | pack_descriptor(&desc, (unsigned long)addr, | ||
212 | entries * sizeof(struct desc_struct) - 1, | ||
213 | DESC_LDT, 0); | ||
214 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT); | ||
215 | vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0); | ||
216 | } | ||
217 | |||
218 | static void vmi_set_tr(void) | ||
219 | { | ||
220 | vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct)); | ||
221 | } | ||
222 | |||
223 | static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) | ||
224 | { | ||
225 | u32 *idt_entry = (u32 *)g; | ||
226 | vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]); | ||
227 | } | ||
228 | |||
229 | static void vmi_write_gdt_entry(struct desc_struct *dt, int entry, | ||
230 | const void *desc, int type) | ||
231 | { | ||
232 | u32 *gdt_entry = (u32 *)desc; | ||
233 | vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]); | ||
234 | } | ||
235 | |||
236 | static void vmi_write_ldt_entry(struct desc_struct *dt, int entry, | ||
237 | const void *desc) | ||
238 | { | ||
239 | u32 *ldt_entry = (u32 *)desc; | ||
240 | vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); | ||
241 | } | ||
242 | |||
243 | static void vmi_load_sp0(struct tss_struct *tss, | ||
244 | struct thread_struct *thread) | ||
245 | { | ||
246 | tss->x86_tss.sp0 = thread->sp0; | ||
247 | |||
248 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
249 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | ||
250 | tss->x86_tss.ss1 = thread->sysenter_cs; | ||
251 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
252 | } | ||
253 | vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0); | ||
254 | } | ||
255 | |||
256 | static void vmi_flush_tlb_user(void) | ||
257 | { | ||
258 | vmi_ops._flush_tlb(VMI_FLUSH_TLB); | ||
259 | } | ||
260 | |||
261 | static void vmi_flush_tlb_kernel(void) | ||
262 | { | ||
263 | vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL); | ||
264 | } | ||
265 | |||
266 | /* Stub to do nothing at all; used for delays and unimplemented calls */ | ||
267 | static void vmi_nop(void) | ||
268 | { | ||
269 | } | ||
270 | |||
271 | static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn) | ||
272 | { | ||
273 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); | ||
274 | } | ||
275 | |||
276 | static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn) | ||
277 | { | ||
278 | /* | ||
279 | * This call comes in very early, before mem_map is setup. | ||
280 | * It is called only for swapper_pg_dir, which already has | ||
281 | * data on it. | ||
282 | */ | ||
283 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); | ||
284 | } | ||
285 | |||
286 | static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count) | ||
287 | { | ||
288 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); | ||
289 | } | ||
290 | |||
291 | static void vmi_release_pte(unsigned long pfn) | ||
292 | { | ||
293 | vmi_ops.release_page(pfn, VMI_PAGE_L1); | ||
294 | } | ||
295 | |||
296 | static void vmi_release_pmd(unsigned long pfn) | ||
297 | { | ||
298 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * We use the pgd_free hook for releasing the pgd page: | ||
303 | */ | ||
304 | static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
305 | { | ||
306 | unsigned long pfn = __pa(pgd) >> PAGE_SHIFT; | ||
307 | |||
308 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Helper macros for MMU update flags. We can defer updates until a flush | ||
313 | * or page invalidation only if the update is to the current address space | ||
314 | * (otherwise, there is no flush). We must check against init_mm, since | ||
315 | * this could be a kernel update, which usually passes init_mm, although | ||
316 | * sometimes this check can be skipped if we know the particular function | ||
317 | * is only called on user mode PTEs. We could change the kernel to pass | ||
318 | * current->active_mm here, but in particular, I was unsure if changing | ||
319 | * mm/highmem.c to do this would still be correct on other architectures. | ||
320 | */ | ||
321 | #define is_current_as(mm, mustbeuser) ((mm) == current->active_mm || \ | ||
322 | (!mustbeuser && (mm) == &init_mm)) | ||
323 | #define vmi_flags_addr(mm, addr, level, user) \ | ||
324 | ((level) | (is_current_as(mm, user) ? \ | ||
325 | (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) | ||
326 | #define vmi_flags_addr_defer(mm, addr, level, user) \ | ||
327 | ((level) | (is_current_as(mm, user) ? \ | ||
328 | (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) | ||
329 | |||
330 | static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
331 | { | ||
332 | vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | ||
333 | } | ||
334 | |||
335 | static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
336 | { | ||
337 | vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0)); | ||
338 | } | ||
339 | |||
340 | static void vmi_set_pte(pte_t *ptep, pte_t pte) | ||
341 | { | ||
342 | /* XXX because of set_pmd_pte, this can be called on PT or PD layers */ | ||
343 | vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT); | ||
344 | } | ||
345 | |||
346 | static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | ||
347 | { | ||
348 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | ||
349 | } | ||
350 | |||
351 | static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval) | ||
352 | { | ||
353 | #ifdef CONFIG_X86_PAE | ||
354 | const pte_t pte = { .pte = pmdval.pmd }; | ||
355 | #else | ||
356 | const pte_t pte = { pmdval.pud.pgd.pgd }; | ||
357 | #endif | ||
358 | vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD); | ||
359 | } | ||
360 | |||
361 | #ifdef CONFIG_X86_PAE | ||
362 | |||
363 | static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval) | ||
364 | { | ||
365 | /* | ||
366 | * XXX This is called from set_pmd_pte, but at both PT | ||
367 | * and PD layers so the VMI_PAGE_PT flag is wrong. But | ||
368 | * it is only called for large page mapping changes, | ||
369 | * the Xen backend, doesn't support large pages, and the | ||
370 | * ESX backend doesn't depend on the flag. | ||
371 | */ | ||
372 | set_64bit((unsigned long long *)ptep,pte_val(pteval)); | ||
373 | vmi_ops.update_pte(ptep, VMI_PAGE_PT); | ||
374 | } | ||
375 | |||
376 | static void vmi_set_pud(pud_t *pudp, pud_t pudval) | ||
377 | { | ||
378 | /* Um, eww */ | ||
379 | const pte_t pte = { .pte = pudval.pgd.pgd }; | ||
380 | vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP); | ||
381 | } | ||
382 | |||
383 | static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
384 | { | ||
385 | const pte_t pte = { .pte = 0 }; | ||
386 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | ||
387 | } | ||
388 | |||
389 | static void vmi_pmd_clear(pmd_t *pmd) | ||
390 | { | ||
391 | const pte_t pte = { .pte = 0 }; | ||
392 | vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); | ||
393 | } | ||
394 | #endif | ||
395 | |||
396 | #ifdef CONFIG_SMP | ||
397 | static void __devinit | ||
398 | vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | ||
399 | unsigned long start_esp) | ||
400 | { | ||
401 | struct vmi_ap_state ap; | ||
402 | |||
403 | /* Default everything to zero. This is fine for most GPRs. */ | ||
404 | memset(&ap, 0, sizeof(struct vmi_ap_state)); | ||
405 | |||
406 | ap.gdtr_limit = GDT_SIZE - 1; | ||
407 | ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid); | ||
408 | |||
409 | ap.idtr_limit = IDT_ENTRIES * 8 - 1; | ||
410 | ap.idtr_base = (unsigned long) idt_table; | ||
411 | |||
412 | ap.ldtr = 0; | ||
413 | |||
414 | ap.cs = __KERNEL_CS; | ||
415 | ap.eip = (unsigned long) start_eip; | ||
416 | ap.ss = __KERNEL_DS; | ||
417 | ap.esp = (unsigned long) start_esp; | ||
418 | |||
419 | ap.ds = __USER_DS; | ||
420 | ap.es = __USER_DS; | ||
421 | ap.fs = __KERNEL_PERCPU; | ||
422 | ap.gs = __KERNEL_STACK_CANARY; | ||
423 | |||
424 | ap.eflags = 0; | ||
425 | |||
426 | #ifdef CONFIG_X86_PAE | ||
427 | /* efer should match BSP efer. */ | ||
428 | if (cpu_has_nx) { | ||
429 | unsigned l, h; | ||
430 | rdmsr(MSR_EFER, l, h); | ||
431 | ap.efer = (unsigned long long) h << 32 | l; | ||
432 | } | ||
433 | #endif | ||
434 | |||
435 | ap.cr3 = __pa(swapper_pg_dir); | ||
436 | /* Protected mode, paging, AM, WP, NE, MP. */ | ||
437 | ap.cr0 = 0x80050023; | ||
438 | ap.cr4 = mmu_cr4_features; | ||
439 | vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid); | ||
440 | } | ||
441 | #endif | ||
442 | |||
443 | static void vmi_start_context_switch(struct task_struct *prev) | ||
444 | { | ||
445 | paravirt_start_context_switch(prev); | ||
446 | vmi_ops.set_lazy_mode(2); | ||
447 | } | ||
448 | |||
449 | static void vmi_end_context_switch(struct task_struct *next) | ||
450 | { | ||
451 | vmi_ops.set_lazy_mode(0); | ||
452 | paravirt_end_context_switch(next); | ||
453 | } | ||
454 | |||
455 | static void vmi_enter_lazy_mmu(void) | ||
456 | { | ||
457 | paravirt_enter_lazy_mmu(); | ||
458 | vmi_ops.set_lazy_mode(1); | ||
459 | } | ||
460 | |||
461 | static void vmi_leave_lazy_mmu(void) | ||
462 | { | ||
463 | vmi_ops.set_lazy_mode(0); | ||
464 | paravirt_leave_lazy_mmu(); | ||
465 | } | ||
466 | |||
467 | static inline int __init check_vmi_rom(struct vrom_header *rom) | ||
468 | { | ||
469 | struct pci_header *pci; | ||
470 | struct pnp_header *pnp; | ||
471 | const char *manufacturer = "UNKNOWN"; | ||
472 | const char *product = "UNKNOWN"; | ||
473 | const char *license = "unspecified"; | ||
474 | |||
475 | if (rom->rom_signature != 0xaa55) | ||
476 | return 0; | ||
477 | if (rom->vrom_signature != VMI_SIGNATURE) | ||
478 | return 0; | ||
479 | if (rom->api_version_maj != VMI_API_REV_MAJOR || | ||
480 | rom->api_version_min+1 < VMI_API_REV_MINOR+1) { | ||
481 | printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n", | ||
482 | rom->api_version_maj, | ||
483 | rom->api_version_min); | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | /* | ||
488 | * Relying on the VMI_SIGNATURE field is not 100% safe, so check | ||
489 | * the PCI header and device type to make sure this is really a | ||
490 | * VMI device. | ||
491 | */ | ||
492 | if (!rom->pci_header_offs) { | ||
493 | printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n"); | ||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | pci = (struct pci_header *)((char *)rom+rom->pci_header_offs); | ||
498 | if (pci->vendorID != PCI_VENDOR_ID_VMWARE || | ||
499 | pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) { | ||
500 | /* Allow it to run... anyways, but warn */ | ||
501 | printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n"); | ||
502 | } | ||
503 | |||
504 | if (rom->pnp_header_offs) { | ||
505 | pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs); | ||
506 | if (pnp->manufacturer_offset) | ||
507 | manufacturer = (const char *)rom+pnp->manufacturer_offset; | ||
508 | if (pnp->product_offset) | ||
509 | product = (const char *)rom+pnp->product_offset; | ||
510 | } | ||
511 | |||
512 | if (rom->license_offs) | ||
513 | license = (char *)rom+rom->license_offs; | ||
514 | |||
515 | printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n", | ||
516 | manufacturer, product, | ||
517 | rom->api_version_maj, rom->api_version_min, | ||
518 | pci->rom_version_maj, pci->rom_version_min); | ||
519 | |||
520 | /* Don't allow BSD/MIT here for now because we don't want to end up | ||
521 | with any binary only shim layers */ | ||
522 | if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) { | ||
523 | printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n", | ||
524 | license); | ||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | return 1; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * Probe for the VMI option ROM | ||
533 | */ | ||
534 | static inline int __init probe_vmi_rom(void) | ||
535 | { | ||
536 | unsigned long base; | ||
537 | |||
538 | /* VMI ROM is in option ROM area, check signature */ | ||
539 | for (base = 0xC0000; base < 0xE0000; base += 2048) { | ||
540 | struct vrom_header *romstart; | ||
541 | romstart = (struct vrom_header *)isa_bus_to_virt(base); | ||
542 | if (check_vmi_rom(romstart)) { | ||
543 | vmi_rom = romstart; | ||
544 | return 1; | ||
545 | } | ||
546 | } | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | /* | ||
551 | * VMI setup common to all processors | ||
552 | */ | ||
553 | void vmi_bringup(void) | ||
554 | { | ||
555 | /* We must establish the lowmem mapping for MMU ops to work */ | ||
556 | if (vmi_ops.set_linear_mapping) | ||
557 | vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0); | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * Return a pointer to a VMI function or NULL if unimplemented | ||
562 | */ | ||
563 | static void *vmi_get_function(int vmicall) | ||
564 | { | ||
565 | u64 reloc; | ||
566 | const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; | ||
567 | reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall); | ||
568 | BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); | ||
569 | if (rel->type == VMI_RELOCATION_CALL_REL) | ||
570 | return (void *)rel->eip; | ||
571 | else | ||
572 | return NULL; | ||
573 | } | ||
574 | |||
575 | /* | ||
576 | * Helper macro for making the VMI paravirt-ops fill code readable. | ||
577 | * For unimplemented operations, fall back to default, unless nop | ||
578 | * is returned by the ROM. | ||
579 | */ | ||
580 | #define para_fill(opname, vmicall) \ | ||
581 | do { \ | ||
582 | reloc = call_vrom_long_func(vmi_rom, get_reloc, \ | ||
583 | VMI_CALL_##vmicall); \ | ||
584 | if (rel->type == VMI_RELOCATION_CALL_REL) \ | ||
585 | opname = (void *)rel->eip; \ | ||
586 | else if (rel->type == VMI_RELOCATION_NOP) \ | ||
587 | opname = (void *)vmi_nop; \ | ||
588 | else if (rel->type != VMI_RELOCATION_NONE) \ | ||
589 | printk(KERN_WARNING "VMI: Unknown relocation " \ | ||
590 | "type %d for " #vmicall"\n",\ | ||
591 | rel->type); \ | ||
592 | } while (0) | ||
593 | |||
594 | /* | ||
595 | * Helper macro for making the VMI paravirt-ops fill code readable. | ||
596 | * For cached operations which do not match the VMI ROM ABI and must | ||
597 | * go through a translation stub. Ignore NOPs, since it is not clear | ||
598 | * a NOP * VMI function corresponds to a NOP paravirt-op when the | ||
599 | * functions are not in 1-1 correspondence. | ||
600 | */ | ||
601 | #define para_wrap(opname, wrapper, cache, vmicall) \ | ||
602 | do { \ | ||
603 | reloc = call_vrom_long_func(vmi_rom, get_reloc, \ | ||
604 | VMI_CALL_##vmicall); \ | ||
605 | BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \ | ||
606 | if (rel->type == VMI_RELOCATION_CALL_REL) { \ | ||
607 | opname = wrapper; \ | ||
608 | vmi_ops.cache = (void *)rel->eip; \ | ||
609 | } \ | ||
610 | } while (0) | ||
611 | |||
612 | /* | ||
613 | * Activate the VMI interface and switch into paravirtualized mode | ||
614 | */ | ||
615 | static inline int __init activate_vmi(void) | ||
616 | { | ||
617 | short kernel_cs; | ||
618 | u64 reloc; | ||
619 | const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; | ||
620 | |||
621 | /* | ||
622 | * Prevent page tables from being allocated in highmem, even if | ||
623 | * CONFIG_HIGHPTE is enabled. | ||
624 | */ | ||
625 | __userpte_alloc_gfp &= ~__GFP_HIGHMEM; | ||
626 | |||
627 | if (call_vrom_func(vmi_rom, vmi_init) != 0) { | ||
628 | printk(KERN_ERR "VMI ROM failed to initialize!"); | ||
629 | return 0; | ||
630 | } | ||
631 | savesegment(cs, kernel_cs); | ||
632 | |||
633 | pv_info.paravirt_enabled = 1; | ||
634 | pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; | ||
635 | pv_info.name = "vmi [deprecated]"; | ||
636 | |||
637 | pv_init_ops.patch = vmi_patch; | ||
638 | |||
639 | /* | ||
640 | * Many of these operations are ABI compatible with VMI. | ||
641 | * This means we can fill in the paravirt-ops with direct | ||
642 | * pointers into the VMI ROM. If the calling convention for | ||
643 | * these operations changes, this code needs to be updated. | ||
644 | * | ||
645 | * Exceptions | ||
646 | * CPUID paravirt-op uses pointers, not the native ISA | ||
647 | * halt has no VMI equivalent; all VMI halts are "safe" | ||
648 | * no MSR support yet - just trap and emulate. VMI uses the | ||
649 | * same ABI as the native ISA, but Linux wants exceptions | ||
650 | * from bogus MSR read / write handled | ||
651 | * rdpmc is not yet used in Linux | ||
652 | */ | ||
653 | |||
654 | /* CPUID is special, so very special it gets wrapped like a present */ | ||
655 | para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID); | ||
656 | |||
657 | para_fill(pv_cpu_ops.clts, CLTS); | ||
658 | para_fill(pv_cpu_ops.get_debugreg, GetDR); | ||
659 | para_fill(pv_cpu_ops.set_debugreg, SetDR); | ||
660 | para_fill(pv_cpu_ops.read_cr0, GetCR0); | ||
661 | para_fill(pv_mmu_ops.read_cr2, GetCR2); | ||
662 | para_fill(pv_mmu_ops.read_cr3, GetCR3); | ||
663 | para_fill(pv_cpu_ops.read_cr4, GetCR4); | ||
664 | para_fill(pv_cpu_ops.write_cr0, SetCR0); | ||
665 | para_fill(pv_mmu_ops.write_cr2, SetCR2); | ||
666 | para_fill(pv_mmu_ops.write_cr3, SetCR3); | ||
667 | para_fill(pv_cpu_ops.write_cr4, SetCR4); | ||
668 | |||
669 | para_fill(pv_irq_ops.save_fl.func, GetInterruptMask); | ||
670 | para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask); | ||
671 | para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts); | ||
672 | para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts); | ||
673 | |||
674 | para_fill(pv_cpu_ops.wbinvd, WBINVD); | ||
675 | para_fill(pv_cpu_ops.read_tsc, RDTSC); | ||
676 | |||
677 | /* The following we emulate with trap and emulate for now */ | ||
678 | /* paravirt_ops.read_msr = vmi_rdmsr */ | ||
679 | /* paravirt_ops.write_msr = vmi_wrmsr */ | ||
680 | /* paravirt_ops.rdpmc = vmi_rdpmc */ | ||
681 | |||
682 | /* TR interface doesn't pass TR value, wrap */ | ||
683 | para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR); | ||
684 | |||
685 | /* LDT is special, too */ | ||
686 | para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT); | ||
687 | |||
688 | para_fill(pv_cpu_ops.load_gdt, SetGDT); | ||
689 | para_fill(pv_cpu_ops.load_idt, SetIDT); | ||
690 | para_fill(pv_cpu_ops.store_gdt, GetGDT); | ||
691 | para_fill(pv_cpu_ops.store_idt, GetIDT); | ||
692 | para_fill(pv_cpu_ops.store_tr, GetTR); | ||
693 | pv_cpu_ops.load_tls = vmi_load_tls; | ||
694 | para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry, | ||
695 | write_ldt_entry, WriteLDTEntry); | ||
696 | para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry, | ||
697 | write_gdt_entry, WriteGDTEntry); | ||
698 | para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry, | ||
699 | write_idt_entry, WriteIDTEntry); | ||
700 | para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack); | ||
701 | para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask); | ||
702 | para_fill(pv_cpu_ops.io_delay, IODelay); | ||
703 | |||
704 | para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch, | ||
705 | set_lazy_mode, SetLazyMode); | ||
706 | para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch, | ||
707 | set_lazy_mode, SetLazyMode); | ||
708 | |||
709 | para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu, | ||
710 | set_lazy_mode, SetLazyMode); | ||
711 | para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu, | ||
712 | set_lazy_mode, SetLazyMode); | ||
713 | |||
714 | /* user and kernel flush are just handled with different flags to FlushTLB */ | ||
715 | para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); | ||
716 | para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB); | ||
717 | para_fill(pv_mmu_ops.flush_tlb_single, InvalPage); | ||
718 | |||
719 | /* | ||
720 | * Until a standard flag format can be agreed on, we need to | ||
721 | * implement these as wrappers in Linux. Get the VMI ROM | ||
722 | * function pointers for the two backend calls. | ||
723 | */ | ||
724 | #ifdef CONFIG_X86_PAE | ||
725 | vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong); | ||
726 | vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong); | ||
727 | #else | ||
728 | vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE); | ||
729 | vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE); | ||
730 | #endif | ||
731 | |||
732 | if (vmi_ops.set_pte) { | ||
733 | pv_mmu_ops.set_pte = vmi_set_pte; | ||
734 | pv_mmu_ops.set_pte_at = vmi_set_pte_at; | ||
735 | pv_mmu_ops.set_pmd = vmi_set_pmd; | ||
736 | #ifdef CONFIG_X86_PAE | ||
737 | pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic; | ||
738 | pv_mmu_ops.set_pud = vmi_set_pud; | ||
739 | pv_mmu_ops.pte_clear = vmi_pte_clear; | ||
740 | pv_mmu_ops.pmd_clear = vmi_pmd_clear; | ||
741 | #endif | ||
742 | } | ||
743 | |||
744 | if (vmi_ops.update_pte) { | ||
745 | pv_mmu_ops.pte_update = vmi_update_pte; | ||
746 | pv_mmu_ops.pte_update_defer = vmi_update_pte_defer; | ||
747 | } | ||
748 | |||
749 | vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); | ||
750 | if (vmi_ops.allocate_page) { | ||
751 | pv_mmu_ops.alloc_pte = vmi_allocate_pte; | ||
752 | pv_mmu_ops.alloc_pmd = vmi_allocate_pmd; | ||
753 | pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone; | ||
754 | } | ||
755 | |||
756 | vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); | ||
757 | if (vmi_ops.release_page) { | ||
758 | pv_mmu_ops.release_pte = vmi_release_pte; | ||
759 | pv_mmu_ops.release_pmd = vmi_release_pmd; | ||
760 | pv_mmu_ops.pgd_free = vmi_pgd_free; | ||
761 | } | ||
762 | |||
763 | /* Set linear is needed in all cases */ | ||
764 | vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); | ||
765 | |||
766 | /* | ||
767 | * These MUST always be patched. Don't support indirect jumps | ||
768 | * through these operations, as the VMI interface may use either | ||
769 | * a jump or a call to get to these operations, depending on | ||
770 | * the backend. They are performance critical anyway, so requiring | ||
771 | * a patch is not a big problem. | ||
772 | */ | ||
773 | pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0; | ||
774 | pv_cpu_ops.iret = (void *)0xbadbab0; | ||
775 | |||
776 | #ifdef CONFIG_SMP | ||
777 | para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState); | ||
778 | #endif | ||
779 | |||
780 | #ifdef CONFIG_X86_LOCAL_APIC | ||
781 | para_fill(apic->read, APICRead); | ||
782 | para_fill(apic->write, APICWrite); | ||
783 | #endif | ||
784 | |||
785 | /* | ||
786 | * Check for VMI timer functionality by probing for a cycle frequency method | ||
787 | */ | ||
788 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency); | ||
789 | if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) { | ||
790 | vmi_timer_ops.get_cycle_frequency = (void *)rel->eip; | ||
791 | vmi_timer_ops.get_cycle_counter = | ||
792 | vmi_get_function(VMI_CALL_GetCycleCounter); | ||
793 | vmi_timer_ops.get_wallclock = | ||
794 | vmi_get_function(VMI_CALL_GetWallclockTime); | ||
795 | vmi_timer_ops.wallclock_updated = | ||
796 | vmi_get_function(VMI_CALL_WallclockUpdated); | ||
797 | vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); | ||
798 | vmi_timer_ops.cancel_alarm = | ||
799 | vmi_get_function(VMI_CALL_CancelAlarm); | ||
800 | x86_init.timers.timer_init = vmi_time_init; | ||
801 | #ifdef CONFIG_X86_LOCAL_APIC | ||
802 | x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init; | ||
803 | x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init; | ||
804 | #endif | ||
805 | pv_time_ops.sched_clock = vmi_sched_clock; | ||
806 | x86_platform.calibrate_tsc = vmi_tsc_khz; | ||
807 | x86_platform.get_wallclock = vmi_get_wallclock; | ||
808 | x86_platform.set_wallclock = vmi_set_wallclock; | ||
809 | |||
810 | /* We have true wallclock functions; disable CMOS clock sync */ | ||
811 | no_sync_cmos_clock = 1; | ||
812 | } else { | ||
813 | disable_noidle = 1; | ||
814 | disable_vmi_timer = 1; | ||
815 | } | ||
816 | |||
817 | para_fill(pv_irq_ops.safe_halt, Halt); | ||
818 | |||
819 | /* | ||
820 | * Alternative instruction rewriting doesn't happen soon enough | ||
821 | * to convert VMI_IRET to a call instead of a jump; so we have | ||
822 | * to do this before IRQs get reenabled. Fortunately, it is | ||
823 | * idempotent. | ||
824 | */ | ||
825 | apply_paravirt(__parainstructions, __parainstructions_end); | ||
826 | |||
827 | vmi_bringup(); | ||
828 | |||
829 | return 1; | ||
830 | } | ||
831 | |||
832 | #undef para_fill | ||
833 | |||
834 | void __init vmi_init(void) | ||
835 | { | ||
836 | if (!vmi_rom) | ||
837 | probe_vmi_rom(); | ||
838 | else | ||
839 | check_vmi_rom(vmi_rom); | ||
840 | |||
841 | /* In case probing for or validating the ROM failed, bail */ | ||
842 | if (!vmi_rom) | ||
843 | return; | ||
844 | |||
845 | reserve_top_address(-vmi_rom->virtual_top); | ||
846 | |||
847 | #ifdef CONFIG_X86_IO_APIC | ||
848 | /* This is virtual hardware; timer routing is wired correctly */ | ||
849 | no_timer_check = 1; | ||
850 | #endif | ||
851 | } | ||
852 | |||
853 | void __init vmi_activate(void) | ||
854 | { | ||
855 | unsigned long flags; | ||
856 | |||
857 | if (!vmi_rom) | ||
858 | return; | ||
859 | |||
860 | local_irq_save(flags); | ||
861 | activate_vmi(); | ||
862 | local_irq_restore(flags & X86_EFLAGS_IF); | ||
863 | } | ||
864 | |||
865 | static int __init parse_vmi(char *arg) | ||
866 | { | ||
867 | if (!arg) | ||
868 | return -EINVAL; | ||
869 | |||
870 | if (!strcmp(arg, "disable_pge")) { | ||
871 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); | ||
872 | disable_pge = 1; | ||
873 | } else if (!strcmp(arg, "disable_pse")) { | ||
874 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE); | ||
875 | disable_pse = 1; | ||
876 | } else if (!strcmp(arg, "disable_sep")) { | ||
877 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP); | ||
878 | disable_sep = 1; | ||
879 | } else if (!strcmp(arg, "disable_tsc")) { | ||
880 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC); | ||
881 | disable_tsc = 1; | ||
882 | } else if (!strcmp(arg, "disable_mtrr")) { | ||
883 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR); | ||
884 | disable_mtrr = 1; | ||
885 | } else if (!strcmp(arg, "disable_timer")) { | ||
886 | disable_vmi_timer = 1; | ||
887 | disable_noidle = 1; | ||
888 | } else if (!strcmp(arg, "disable_noidle")) | ||
889 | disable_noidle = 1; | ||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | early_param("vmi", parse_vmi); | ||
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c deleted file mode 100644 index 5e1ff66ecd73..000000000000 --- a/arch/x86/kernel/vmiclock_32.c +++ /dev/null | |||
@@ -1,317 +0,0 @@ | |||
1 | /* | ||
2 | * VMI paravirtual timer support routines. | ||
3 | * | ||
4 | * Copyright (C) 2007, VMware, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/smp.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/cpumask.h> | ||
26 | #include <linux/clocksource.h> | ||
27 | #include <linux/clockchips.h> | ||
28 | |||
29 | #include <asm/vmi.h> | ||
30 | #include <asm/vmi_time.h> | ||
31 | #include <asm/apicdef.h> | ||
32 | #include <asm/apic.h> | ||
33 | #include <asm/timer.h> | ||
34 | #include <asm/i8253.h> | ||
35 | #include <asm/irq_vectors.h> | ||
36 | |||
37 | #define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) | ||
38 | #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) | ||
39 | |||
40 | static DEFINE_PER_CPU(struct clock_event_device, local_events); | ||
41 | |||
42 | static inline u32 vmi_counter(u32 flags) | ||
43 | { | ||
44 | /* Given VMI_ONESHOT or VMI_PERIODIC, return the corresponding | ||
45 | * cycle counter. */ | ||
46 | return flags & VMI_ALARM_COUNTER_MASK; | ||
47 | } | ||
48 | |||
49 | /* paravirt_ops.get_wallclock = vmi_get_wallclock */ | ||
50 | unsigned long vmi_get_wallclock(void) | ||
51 | { | ||
52 | unsigned long long wallclock; | ||
53 | wallclock = vmi_timer_ops.get_wallclock(); // nsec | ||
54 | (void)do_div(wallclock, 1000000000); // sec | ||
55 | |||
56 | return wallclock; | ||
57 | } | ||
58 | |||
59 | /* paravirt_ops.set_wallclock = vmi_set_wallclock */ | ||
60 | int vmi_set_wallclock(unsigned long now) | ||
61 | { | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* paravirt_ops.sched_clock = vmi_sched_clock */ | ||
66 | unsigned long long vmi_sched_clock(void) | ||
67 | { | ||
68 | return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE)); | ||
69 | } | ||
70 | |||
71 | /* x86_platform.calibrate_tsc = vmi_tsc_khz */ | ||
72 | unsigned long vmi_tsc_khz(void) | ||
73 | { | ||
74 | unsigned long long khz; | ||
75 | khz = vmi_timer_ops.get_cycle_frequency(); | ||
76 | (void)do_div(khz, 1000); | ||
77 | return khz; | ||
78 | } | ||
79 | |||
80 | static inline unsigned int vmi_get_timer_vector(void) | ||
81 | { | ||
82 | return IRQ0_VECTOR; | ||
83 | } | ||
84 | |||
85 | /** vmi clockchip */ | ||
86 | #ifdef CONFIG_X86_LOCAL_APIC | ||
87 | static unsigned int startup_timer_irq(unsigned int irq) | ||
88 | { | ||
89 | unsigned long val = apic_read(APIC_LVTT); | ||
90 | apic_write(APIC_LVTT, vmi_get_timer_vector()); | ||
91 | |||
92 | return (val & APIC_SEND_PENDING); | ||
93 | } | ||
94 | |||
95 | static void mask_timer_irq(unsigned int irq) | ||
96 | { | ||
97 | unsigned long val = apic_read(APIC_LVTT); | ||
98 | apic_write(APIC_LVTT, val | APIC_LVT_MASKED); | ||
99 | } | ||
100 | |||
101 | static void unmask_timer_irq(unsigned int irq) | ||
102 | { | ||
103 | unsigned long val = apic_read(APIC_LVTT); | ||
104 | apic_write(APIC_LVTT, val & ~APIC_LVT_MASKED); | ||
105 | } | ||
106 | |||
107 | static void ack_timer_irq(unsigned int irq) | ||
108 | { | ||
109 | ack_APIC_irq(); | ||
110 | } | ||
111 | |||
112 | static struct irq_chip vmi_chip __read_mostly = { | ||
113 | .name = "VMI-LOCAL", | ||
114 | .startup = startup_timer_irq, | ||
115 | .mask = mask_timer_irq, | ||
116 | .unmask = unmask_timer_irq, | ||
117 | .ack = ack_timer_irq | ||
118 | }; | ||
119 | #endif | ||
120 | |||
121 | /** vmi clockevent */ | ||
122 | #define VMI_ALARM_WIRED_IRQ0 0x00000000 | ||
123 | #define VMI_ALARM_WIRED_LVTT 0x00010000 | ||
124 | static int vmi_wiring = VMI_ALARM_WIRED_IRQ0; | ||
125 | |||
126 | static inline int vmi_get_alarm_wiring(void) | ||
127 | { | ||
128 | return vmi_wiring; | ||
129 | } | ||
130 | |||
131 | static void vmi_timer_set_mode(enum clock_event_mode mode, | ||
132 | struct clock_event_device *evt) | ||
133 | { | ||
134 | cycle_t now, cycles_per_hz; | ||
135 | BUG_ON(!irqs_disabled()); | ||
136 | |||
137 | switch (mode) { | ||
138 | case CLOCK_EVT_MODE_ONESHOT: | ||
139 | case CLOCK_EVT_MODE_RESUME: | ||
140 | break; | ||
141 | case CLOCK_EVT_MODE_PERIODIC: | ||
142 | cycles_per_hz = vmi_timer_ops.get_cycle_frequency(); | ||
143 | (void)do_div(cycles_per_hz, HZ); | ||
144 | now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_PERIODIC)); | ||
145 | vmi_timer_ops.set_alarm(VMI_PERIODIC, now, cycles_per_hz); | ||
146 | break; | ||
147 | case CLOCK_EVT_MODE_UNUSED: | ||
148 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
149 | switch (evt->mode) { | ||
150 | case CLOCK_EVT_MODE_ONESHOT: | ||
151 | vmi_timer_ops.cancel_alarm(VMI_ONESHOT); | ||
152 | break; | ||
153 | case CLOCK_EVT_MODE_PERIODIC: | ||
154 | vmi_timer_ops.cancel_alarm(VMI_PERIODIC); | ||
155 | break; | ||
156 | default: | ||
157 | break; | ||
158 | } | ||
159 | break; | ||
160 | default: | ||
161 | break; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | static int vmi_timer_next_event(unsigned long delta, | ||
166 | struct clock_event_device *evt) | ||
167 | { | ||
168 | /* Unfortunately, set_next_event interface only passes relative | ||
169 | * expiry, but we want absolute expiry. It'd be better if we | ||
170 | * were passed an absolute expiry, since a bunch of time may | ||
171 | * have been stolen between the time the delta is computed and | ||
172 | * when we set the alarm below. */ | ||
173 | cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT)); | ||
174 | |||
175 | BUG_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); | ||
176 | vmi_timer_ops.set_alarm(VMI_ONESHOT, now + delta, 0); | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static struct clock_event_device vmi_clockevent = { | ||
181 | .name = "vmi-timer", | ||
182 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
183 | .shift = 22, | ||
184 | .set_mode = vmi_timer_set_mode, | ||
185 | .set_next_event = vmi_timer_next_event, | ||
186 | .rating = 1000, | ||
187 | .irq = 0, | ||
188 | }; | ||
189 | |||
190 | static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id) | ||
191 | { | ||
192 | struct clock_event_device *evt = &__get_cpu_var(local_events); | ||
193 | evt->event_handler(evt); | ||
194 | return IRQ_HANDLED; | ||
195 | } | ||
196 | |||
197 | static struct irqaction vmi_clock_action = { | ||
198 | .name = "vmi-timer", | ||
199 | .handler = vmi_timer_interrupt, | ||
200 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, | ||
201 | }; | ||
202 | |||
203 | static void __devinit vmi_time_init_clockevent(void) | ||
204 | { | ||
205 | cycle_t cycles_per_msec; | ||
206 | struct clock_event_device *evt; | ||
207 | |||
208 | int cpu = smp_processor_id(); | ||
209 | evt = &__get_cpu_var(local_events); | ||
210 | |||
211 | /* Use cycles_per_msec since div_sc params are 32-bits. */ | ||
212 | cycles_per_msec = vmi_timer_ops.get_cycle_frequency(); | ||
213 | (void)do_div(cycles_per_msec, 1000); | ||
214 | |||
215 | memcpy(evt, &vmi_clockevent, sizeof(*evt)); | ||
216 | /* Must pick .shift such that .mult fits in 32-bits. Choosing | ||
217 | * .shift to be 22 allows 2^(32-22) cycles per nanosecond | ||
218 | * before overflow. */ | ||
219 | evt->mult = div_sc(cycles_per_msec, NSEC_PER_MSEC, evt->shift); | ||
220 | /* Upper bound is clockevent's use of ulong for cycle deltas. */ | ||
221 | evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); | ||
222 | evt->min_delta_ns = clockevent_delta2ns(1, evt); | ||
223 | evt->cpumask = cpumask_of(cpu); | ||
224 | |||
225 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%u shift=%u\n", | ||
226 | evt->name, evt->mult, evt->shift); | ||
227 | clockevents_register_device(evt); | ||
228 | } | ||
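A worked example of the .shift comment in vmi_time_init_clockevent() above, assuming (hypothetically) a 1 GHz cycle counter:

	cycles_per_msec = 1,000,000
	mult = div_sc(cycles_per_msec, NSEC_PER_MSEC, 22)
	     = (1,000,000 << 22) / 1,000,000 = 1 << 22

The clockevents core then programs the timer with cycles = (ns * mult) >> shift, i.e. exactly one cycle per nanosecond here; mult stays within 32 bits for any counter rate up to 2^(32-22) = 1024 cycles per nanosecond.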
229 | |||
230 | void __init vmi_time_init(void) | ||
231 | { | ||
232 | unsigned int cpu; | ||
233 | /* Disable PIT: BIOSes start PIT CH0 in 18.2 Hz periodic mode. */ | ||
234 | outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */ | ||
235 | |||
236 | vmi_time_init_clockevent(); | ||
237 | setup_irq(0, &vmi_clock_action); | ||
238 | for_each_possible_cpu(cpu) | ||
239 | per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0; | ||
240 | } | ||
241 | |||
242 | #ifdef CONFIG_X86_LOCAL_APIC | ||
243 | void __devinit vmi_time_bsp_init(void) | ||
244 | { | ||
245 | /* | ||
246 | * On APIC systems, we want local timers to fire on each cpu. We do | ||
247 | * this by programming LVTT to deliver timer events to the IRQ handler | ||
248 | * for IRQ-0, since we can't re-use the APIC local timer handler | ||
249 | * without interfering with that code. | ||
250 | */ | ||
251 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); | ||
252 | local_irq_disable(); | ||
253 | #ifdef CONFIG_SMP | ||
254 | /* | ||
255 | * XXX handle_percpu_irq only defined for SMP; we need to switch over | ||
256 | * to using it, since this is a local interrupt, which each CPU must | ||
257 | * handle individually without locking out or dropping simultaneous | ||
258 | * local timers on other CPUs. We also don't want to trigger the | ||
259 | * quirk workaround code for interrupts which gets invoked from | ||
260 | * handle_percpu_irq via eoi, so we use our own IRQ chip. | ||
261 | */ | ||
262 | set_irq_chip_and_handler_name(0, &vmi_chip, handle_percpu_irq, "lvtt"); | ||
263 | #else | ||
264 | set_irq_chip_and_handler_name(0, &vmi_chip, handle_edge_irq, "lvtt"); | ||
265 | #endif | ||
266 | vmi_wiring = VMI_ALARM_WIRED_LVTT; | ||
267 | apic_write(APIC_LVTT, vmi_get_timer_vector()); | ||
268 | local_irq_enable(); | ||
269 | clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); | ||
270 | } | ||
271 | |||
272 | void __devinit vmi_time_ap_init(void) | ||
273 | { | ||
274 | vmi_time_init_clockevent(); | ||
275 | apic_write(APIC_LVTT, vmi_get_timer_vector()); | ||
276 | } | ||
277 | #endif | ||
278 | |||
279 | /** vmi clocksource */ | ||
280 | static struct clocksource clocksource_vmi; | ||
281 | |||
282 | static cycle_t read_real_cycles(struct clocksource *cs) | ||
283 | { | ||
284 | cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); | ||
285 | return max(ret, clocksource_vmi.cycle_last); | ||
286 | } | ||
287 | |||
288 | static struct clocksource clocksource_vmi = { | ||
289 | .name = "vmi-timer", | ||
290 | .rating = 450, | ||
291 | .read = read_real_cycles, | ||
292 | .mask = CLOCKSOURCE_MASK(64), | ||
293 | .mult = 0, /* to be set */ | ||
294 | .shift = 22, | ||
295 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
296 | }; | ||
297 | |||
298 | static int __init init_vmi_clocksource(void) | ||
299 | { | ||
300 | cycle_t cycles_per_msec; | ||
301 | |||
302 | if (!vmi_timer_ops.get_cycle_frequency) | ||
303 | return 0; | ||
304 | /* Use khz2mult rather than hz2mult since hz arg is only 32-bits. */ | ||
305 | cycles_per_msec = vmi_timer_ops.get_cycle_frequency(); | ||
306 | (void)do_div(cycles_per_msec, 1000); | ||
307 | |||
308 | /* Note that clocksource.{mult, shift} converts in the opposite direction | ||
309 | * from clockevents. */ | ||
310 | clocksource_vmi.mult = clocksource_khz2mult(cycles_per_msec, | ||
311 | clocksource_vmi.shift); | ||
312 | |||
313 | printk(KERN_WARNING "vmi: registering clock source khz=%lld\n", cycles_per_msec); | ||
314 | return clocksource_register(&clocksource_vmi); | ||
315 | |||
316 | } | ||
317 | module_init(init_vmi_clocksource); | ||
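A sketch of the "opposite direction" note in init_vmi_clocksource() above; these are the conversions the timekeeping cores apply, not code from this file:

	/* clocksource (cycles -> ns):  ns     = (cycles * cs->mult)  >> cs->shift  */
	/* clockevent  (ns -> cycles):  cycles = (ns     * evt->mult) >> evt->shift */

which is why the clocksource mult is derived with clocksource_khz2mult() while the clockevent mult earlier was computed with div_sc() and the arguments effectively swapped.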
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 77d8c0f4817d..22b06f7660f4 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1056,14 +1056,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) | |||
1056 | 1056 | ||
1057 | vcpu->arch.apic = apic; | 1057 | vcpu->arch.apic = apic; |
1058 | 1058 | ||
1059 | apic->regs_page = alloc_page(GFP_KERNEL); | 1059 | apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO); |
1060 | if (apic->regs_page == NULL) { | 1060 | if (apic->regs_page == NULL) { |
1061 | printk(KERN_ERR "malloc apic regs error for vcpu %x\n", | 1061 | printk(KERN_ERR "malloc apic regs error for vcpu %x\n", |
1062 | vcpu->vcpu_id); | 1062 | vcpu->vcpu_id); |
1063 | goto nomem_free_apic; | 1063 | goto nomem_free_apic; |
1064 | } | 1064 | } |
1065 | apic->regs = page_address(apic->regs_page); | 1065 | apic->regs = page_address(apic->regs_page); |
1066 | memset(apic->regs, 0, PAGE_SIZE); | ||
1067 | apic->vcpu = vcpu; | 1066 | apic->vcpu = vcpu; |
1068 | 1067 | ||
1069 | hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, | 1068 | hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bc5b9b8d4a33..8a3f9f64f86f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -766,7 +766,6 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
766 | 766 | ||
767 | control->iopm_base_pa = iopm_base; | 767 | control->iopm_base_pa = iopm_base; |
768 | control->msrpm_base_pa = __pa(svm->msrpm); | 768 | control->msrpm_base_pa = __pa(svm->msrpm); |
769 | control->tsc_offset = 0; | ||
770 | control->int_ctl = V_INTR_MASKING_MASK; | 769 | control->int_ctl = V_INTR_MASKING_MASK; |
771 | 770 | ||
772 | init_seg(&save->es); | 771 | init_seg(&save->es); |
@@ -902,6 +901,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
902 | svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; | 901 | svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; |
903 | svm->asid_generation = 0; | 902 | svm->asid_generation = 0; |
904 | init_vmcb(svm); | 903 | init_vmcb(svm); |
904 | svm->vmcb->control.tsc_offset = 0-native_read_tsc(); | ||
905 | 905 | ||
906 | err = fx_init(&svm->vcpu); | 906 | err = fx_init(&svm->vcpu); |
907 | if (err) | 907 | if (err) |
@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3163 | sync_lapic_to_cr8(vcpu); | 3163 | sync_lapic_to_cr8(vcpu); |
3164 | 3164 | ||
3165 | save_host_msrs(vcpu); | 3165 | save_host_msrs(vcpu); |
3166 | fs_selector = kvm_read_fs(); | 3166 | savesegment(fs, fs_selector); |
3167 | gs_selector = kvm_read_gs(); | 3167 | savesegment(gs, gs_selector); |
3168 | ldt_selector = kvm_read_ldt(); | 3168 | ldt_selector = kvm_read_ldt(); |
3169 | svm->vmcb->save.cr2 = vcpu->arch.cr2; | 3169 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
3170 | /* required for live migration with NPT */ | 3170 | /* required for live migration with NPT */ |
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3251 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; | 3251 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; |
3252 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; | 3252 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
3253 | 3253 | ||
3254 | kvm_load_fs(fs_selector); | ||
3255 | kvm_load_gs(gs_selector); | ||
3256 | kvm_load_ldt(ldt_selector); | ||
3257 | load_host_msrs(vcpu); | 3254 | load_host_msrs(vcpu); |
3255 | loadsegment(fs, fs_selector); | ||
3256 | #ifdef CONFIG_X86_64 | ||
3257 | load_gs_index(gs_selector); | ||
3258 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
3259 | #else | ||
3260 | loadsegment(gs, gs_selector); | ||
3261 | #endif | ||
3262 | kvm_load_ldt(ldt_selector); | ||
3258 | 3263 | ||
3259 | reload_tss(vcpu); | 3264 | reload_tss(vcpu); |
3260 | 3265 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 49b25eee25ac..7bddfab12013 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
803 | */ | 803 | */ |
804 | vmx->host_state.ldt_sel = kvm_read_ldt(); | 804 | vmx->host_state.ldt_sel = kvm_read_ldt(); |
805 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | 805 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; |
806 | vmx->host_state.fs_sel = kvm_read_fs(); | 806 | savesegment(fs, vmx->host_state.fs_sel); |
807 | if (!(vmx->host_state.fs_sel & 7)) { | 807 | if (!(vmx->host_state.fs_sel & 7)) { |
808 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | 808 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); |
809 | vmx->host_state.fs_reload_needed = 0; | 809 | vmx->host_state.fs_reload_needed = 0; |
@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
811 | vmcs_write16(HOST_FS_SELECTOR, 0); | 811 | vmcs_write16(HOST_FS_SELECTOR, 0); |
812 | vmx->host_state.fs_reload_needed = 1; | 812 | vmx->host_state.fs_reload_needed = 1; |
813 | } | 813 | } |
814 | vmx->host_state.gs_sel = kvm_read_gs(); | 814 | savesegment(gs, vmx->host_state.gs_sel); |
815 | if (!(vmx->host_state.gs_sel & 7)) | 815 | if (!(vmx->host_state.gs_sel & 7)) |
816 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); | 816 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); |
817 | else { | 817 | else { |
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
841 | 841 | ||
842 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) | 842 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) |
843 | { | 843 | { |
844 | unsigned long flags; | ||
845 | |||
846 | if (!vmx->host_state.loaded) | 844 | if (!vmx->host_state.loaded) |
847 | return; | 845 | return; |
848 | 846 | ||
849 | ++vmx->vcpu.stat.host_state_reload; | 847 | ++vmx->vcpu.stat.host_state_reload; |
850 | vmx->host_state.loaded = 0; | 848 | vmx->host_state.loaded = 0; |
851 | if (vmx->host_state.fs_reload_needed) | 849 | if (vmx->host_state.fs_reload_needed) |
852 | kvm_load_fs(vmx->host_state.fs_sel); | 850 | loadsegment(fs, vmx->host_state.fs_sel); |
853 | if (vmx->host_state.gs_ldt_reload_needed) { | 851 | if (vmx->host_state.gs_ldt_reload_needed) { |
854 | kvm_load_ldt(vmx->host_state.ldt_sel); | 852 | kvm_load_ldt(vmx->host_state.ldt_sel); |
855 | /* | ||
856 | * If we have to reload gs, we must take care to | ||
857 | * preserve our gs base. | ||
858 | */ | ||
859 | local_irq_save(flags); | ||
860 | kvm_load_gs(vmx->host_state.gs_sel); | ||
861 | #ifdef CONFIG_X86_64 | 853 | #ifdef CONFIG_X86_64 |
862 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | 854 | load_gs_index(vmx->host_state.gs_sel); |
855 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
856 | #else | ||
857 | loadsegment(gs, vmx->host_state.gs_sel); | ||
863 | #endif | 858 | #endif |
864 | local_irq_restore(flags); | ||
865 | } | 859 | } |
866 | reload_tss(); | 860 | reload_tss(); |
867 | #ifdef CONFIG_X86_64 | 861 | #ifdef CONFIG_X86_64 |
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
2589 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | 2583 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
2590 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 2584 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
2591 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 2585 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
2592 | vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ | 2586 | vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ |
2593 | vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ | 2587 | vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ |
2594 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 2588 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
2595 | #ifdef CONFIG_X86_64 | 2589 | #ifdef CONFIG_X86_64 |
2596 | rdmsrl(MSR_FS_BASE, a); | 2590 | rdmsrl(MSR_FS_BASE, a); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3a09c625d526..6c2ecf0a806d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1991,13 +1991,14 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
1991 | 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | | 1991 | 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | |
1992 | 0 /* Reserved, DCA */ | F(XMM4_1) | | 1992 | 0 /* Reserved, DCA */ | F(XMM4_1) | |
1993 | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | | 1993 | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | |
1994 | 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX); | 1994 | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | |
1995 | F(F16C); | ||
1995 | /* cpuid 0x80000001.ecx */ | 1996 | /* cpuid 0x80000001.ecx */ |
1996 | const u32 kvm_supported_word6_x86_features = | 1997 | const u32 kvm_supported_word6_x86_features = |
1997 | F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | | 1998 | F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | |
1998 | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | | 1999 | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | |
1999 | F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) | | 2000 | F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | |
2000 | 0 /* SKINIT */ | 0 /* WDT */; | 2001 | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); |
2001 | 2002 | ||
2002 | /* all calls to cpuid_count() should be made on the same cpu */ | 2003 | /* all calls to cpuid_count() should be made on the same cpu */ |
2003 | get_cpu(); | 2004 | get_cpu(); |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9d5f55848455..73b1e1a1f489 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void) | |||
791 | * simple as setting a bit. We don't actually "ack" interrupts as such, we | 791 | * simple as setting a bit. We don't actually "ack" interrupts as such, we |
792 | * just mask and unmask them. I wonder if we should be cleverer? | 792 | * just mask and unmask them. I wonder if we should be cleverer? |
793 | */ | 793 | */ |
794 | static void disable_lguest_irq(unsigned int irq) | 794 | static void disable_lguest_irq(struct irq_data *data) |
795 | { | 795 | { |
796 | set_bit(irq, lguest_data.blocked_interrupts); | 796 | set_bit(data->irq, lguest_data.blocked_interrupts); |
797 | } | 797 | } |
798 | 798 | ||
799 | static void enable_lguest_irq(unsigned int irq) | 799 | static void enable_lguest_irq(struct irq_data *data) |
800 | { | 800 | { |
801 | clear_bit(irq, lguest_data.blocked_interrupts); | 801 | clear_bit(data->irq, lguest_data.blocked_interrupts); |
802 | } | 802 | } |
803 | 803 | ||
804 | /* This structure describes the lguest IRQ controller. */ | 804 | /* This structure describes the lguest IRQ controller. */ |
805 | static struct irq_chip lguest_irq_controller = { | 805 | static struct irq_chip lguest_irq_controller = { |
806 | .name = "lguest", | 806 | .name = "lguest", |
807 | .mask = disable_lguest_irq, | 807 | .irq_mask = disable_lguest_irq, |
808 | .mask_ack = disable_lguest_irq, | 808 | .irq_mask_ack = disable_lguest_irq, |
809 | .unmask = enable_lguest_irq, | 809 | .irq_unmask = enable_lguest_irq, |
810 | }; | 810 | }; |
811 | 811 | ||
812 | /* | 812 | /* |
@@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void) | |||
838 | * rather than set them in lguest_init_IRQ we are called here every time an | 838 | * rather than set them in lguest_init_IRQ we are called here every time an |
839 | * lguest device needs an interrupt. | 839 | * lguest device needs an interrupt. |
840 | * | 840 | * |
841 | * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should | 841 | * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should |
842 | * pass that up! | 842 | * pass that up! |
843 | */ | 843 | */ |
844 | void lguest_setup_irq(unsigned int irq) | 844 | void lguest_setup_irq(unsigned int irq) |
845 | { | 845 | { |
846 | irq_to_desc_alloc_node(irq, 0); | 846 | irq_alloc_desc_at(irq, 0); |
847 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, | 847 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, |
848 | handle_level_irq, "level"); | 848 | handle_level_irq, "level"); |
849 | } | 849 | } |
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index 5415a9d06f53..b908a59eccf5 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c | |||
@@ -22,22 +22,187 @@ EXPORT_SYMBOL(memset); | |||
22 | 22 | ||
23 | void *memmove(void *dest, const void *src, size_t n) | 23 | void *memmove(void *dest, const void *src, size_t n) |
24 | { | 24 | { |
25 | int d0, d1, d2; | 25 | int d0,d1,d2,d3,d4,d5; |
26 | 26 | char *ret = dest; | |
27 | if (dest < src) { | 27 | |
28 | memcpy(dest, src, n); | 28 | __asm__ __volatile__( |
29 | } else { | 29 | /* Handle 16 bytes or more in a loop */ |
30 | __asm__ __volatile__( | 30 | "cmp $0x10, %0\n\t" |
31 | "std\n\t" | 31 | "jb 1f\n\t" |
32 | "rep\n\t" | 32 | |
33 | "movsb\n\t" | 33 | /* Decide forward/backward copy mode */ |
34 | "cld" | 34 | "cmp %2, %1\n\t" |
35 | : "=&c" (d0), "=&S" (d1), "=&D" (d2) | 35 | "jb 2f\n\t" |
36 | :"0" (n), | 36 | |
37 | "1" (n-1+src), | 37 | /* |
38 | "2" (n-1+dest) | 38 | * movs instruction have many startup latency |
39 | :"memory"); | 39 | * so we handle small size by general register. |
40 | } | 40 | */ |
41 | return dest; | 41 | "cmp $680, %0\n\t" |
42 | "jb 3f\n\t" | ||
43 | /* | ||
45 | * the movs instruction is only good for the aligned case. | ||
45 | */ | ||
46 | "mov %1, %3\n\t" | ||
47 | "xor %2, %3\n\t" | ||
48 | "and $0xff, %3\n\t" | ||
49 | "jz 4f\n\t" | ||
50 | "3:\n\t" | ||
51 | "sub $0x10, %0\n\t" | ||
52 | |||
53 | /* | ||
54 | * We gobble 16 bytes forward in each loop. | ||
55 | */ | ||
56 | "3:\n\t" | ||
57 | "sub $0x10, %0\n\t" | ||
58 | "mov 0*4(%1), %3\n\t" | ||
59 | "mov 1*4(%1), %4\n\t" | ||
60 | "mov %3, 0*4(%2)\n\t" | ||
61 | "mov %4, 1*4(%2)\n\t" | ||
62 | "mov 2*4(%1), %3\n\t" | ||
63 | "mov 3*4(%1), %4\n\t" | ||
64 | "mov %3, 2*4(%2)\n\t" | ||
65 | "mov %4, 3*4(%2)\n\t" | ||
66 | "lea 0x10(%1), %1\n\t" | ||
67 | "lea 0x10(%2), %2\n\t" | ||
68 | "jae 3b\n\t" | ||
69 | "add $0x10, %0\n\t" | ||
70 | "jmp 1f\n\t" | ||
71 | |||
72 | /* | ||
73 | * Handle data forward by movs. | ||
74 | */ | ||
75 | ".p2align 4\n\t" | ||
76 | "4:\n\t" | ||
77 | "mov -4(%1, %0), %3\n\t" | ||
78 | "lea -4(%2, %0), %4\n\t" | ||
79 | "shr $2, %0\n\t" | ||
80 | "rep movsl\n\t" | ||
81 | "mov %3, (%4)\n\t" | ||
82 | "jmp 11f\n\t" | ||
83 | /* | ||
84 | * Handle data backward by movs. | ||
85 | */ | ||
86 | ".p2align 4\n\t" | ||
87 | "6:\n\t" | ||
88 | "mov (%1), %3\n\t" | ||
89 | "mov %2, %4\n\t" | ||
90 | "lea -4(%1, %0), %1\n\t" | ||
91 | "lea -4(%2, %0), %2\n\t" | ||
92 | "shr $2, %0\n\t" | ||
93 | "std\n\t" | ||
94 | "rep movsl\n\t" | ||
95 | "mov %3,(%4)\n\t" | ||
96 | "cld\n\t" | ||
97 | "jmp 11f\n\t" | ||
98 | |||
99 | /* | ||
100 | * Start to prepare for backward copy. | ||
101 | */ | ||
102 | ".p2align 4\n\t" | ||
103 | "2:\n\t" | ||
104 | "cmp $680, %0\n\t" | ||
105 | "jb 5f\n\t" | ||
106 | "mov %1, %3\n\t" | ||
107 | "xor %2, %3\n\t" | ||
108 | "and $0xff, %3\n\t" | ||
109 | "jz 6b\n\t" | ||
110 | |||
111 | /* | ||
112 | * Calculate copy position to tail. | ||
113 | */ | ||
114 | "5:\n\t" | ||
115 | "add %0, %1\n\t" | ||
116 | "add %0, %2\n\t" | ||
117 | "sub $0x10, %0\n\t" | ||
118 | |||
119 | /* | ||
120 | * We gobble 16 bytes backward in each loop. | ||
121 | */ | ||
122 | "7:\n\t" | ||
123 | "sub $0x10, %0\n\t" | ||
124 | |||
125 | "mov -1*4(%1), %3\n\t" | ||
126 | "mov -2*4(%1), %4\n\t" | ||
127 | "mov %3, -1*4(%2)\n\t" | ||
128 | "mov %4, -2*4(%2)\n\t" | ||
129 | "mov -3*4(%1), %3\n\t" | ||
130 | "mov -4*4(%1), %4\n\t" | ||
131 | "mov %3, -3*4(%2)\n\t" | ||
132 | "mov %4, -4*4(%2)\n\t" | ||
133 | "lea -0x10(%1), %1\n\t" | ||
134 | "lea -0x10(%2), %2\n\t" | ||
135 | "jae 7b\n\t" | ||
136 | /* | ||
137 | * Calculate copy position to head. | ||
138 | */ | ||
139 | "add $0x10, %0\n\t" | ||
140 | "sub %0, %1\n\t" | ||
141 | "sub %0, %2\n\t" | ||
142 | |||
143 | /* | ||
144 | * Move data from 8 bytes to 15 bytes. | ||
145 | */ | ||
146 | ".p2align 4\n\t" | ||
147 | "1:\n\t" | ||
148 | "cmp $8, %0\n\t" | ||
149 | "jb 8f\n\t" | ||
150 | "mov 0*4(%1), %3\n\t" | ||
151 | "mov 1*4(%1), %4\n\t" | ||
152 | "mov -2*4(%1, %0), %5\n\t" | ||
153 | "mov -1*4(%1, %0), %1\n\t" | ||
154 | |||
155 | "mov %3, 0*4(%2)\n\t" | ||
156 | "mov %4, 1*4(%2)\n\t" | ||
157 | "mov %5, -2*4(%2, %0)\n\t" | ||
158 | "mov %1, -1*4(%2, %0)\n\t" | ||
159 | "jmp 11f\n\t" | ||
160 | |||
161 | /* | ||
162 | * Move data from 4 bytes to 7 bytes. | ||
163 | */ | ||
164 | ".p2align 4\n\t" | ||
165 | "8:\n\t" | ||
166 | "cmp $4, %0\n\t" | ||
167 | "jb 9f\n\t" | ||
168 | "mov 0*4(%1), %3\n\t" | ||
169 | "mov -1*4(%1, %0), %4\n\t" | ||
170 | "mov %3, 0*4(%2)\n\t" | ||
171 | "mov %4, -1*4(%2, %0)\n\t" | ||
172 | "jmp 11f\n\t" | ||
173 | |||
174 | /* | ||
175 | * Move data from 2 bytes to 3 bytes. | ||
176 | */ | ||
177 | ".p2align 4\n\t" | ||
178 | "9:\n\t" | ||
179 | "cmp $2, %0\n\t" | ||
180 | "jb 10f\n\t" | ||
181 | "movw 0*2(%1), %%dx\n\t" | ||
182 | "movw -1*2(%1, %0), %%bx\n\t" | ||
183 | "movw %%dx, 0*2(%2)\n\t" | ||
184 | "movw %%bx, -1*2(%2, %0)\n\t" | ||
185 | "jmp 11f\n\t" | ||
186 | |||
187 | /* | ||
188 | * Move data for 1 byte. | ||
189 | */ | ||
190 | ".p2align 4\n\t" | ||
191 | "10:\n\t" | ||
192 | "cmp $1, %0\n\t" | ||
193 | "jb 11f\n\t" | ||
194 | "movb (%1), %%cl\n\t" | ||
195 | "movb %%cl, (%2)\n\t" | ||
196 | ".p2align 4\n\t" | ||
197 | "11:" | ||
198 | : "=&c" (d0), "=&S" (d1), "=&D" (d2), | ||
199 | "=r" (d3),"=r" (d4), "=r"(d5) | ||
200 | :"0" (n), | ||
201 | "1" (src), | ||
202 | "2" (dest) | ||
203 | :"memory"); | ||
204 | |||
205 | return ret; | ||
206 | |||
42 | } | 207 | } |
43 | EXPORT_SYMBOL(memmove); | 208 | EXPORT_SYMBOL(memmove); |
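The control flow of the new 32-bit memmove above is easier to see in C. The following hedged sketch (memmove_sketch is an illustrative name, not kernel code) captures only the forward/backward decision; the real routine stays in inline assembly, moves 16-byte blocks through registers, handles 1-15 byte tails with dedicated word/byte moves, and uses rep movsl only for copies of at least 680 bytes whose low address bytes match:

	#include <stddef.h>

	/* Rough C equivalent of the decision tree in the asm above. */
	static void *memmove_sketch(void *dest, const void *src, size_t n)
	{
		unsigned char *d = dest;
		const unsigned char *s = src;

		if (d < s) {		/* forward copy is overlap-safe here */
			while (n--)
				*d++ = *s++;
		} else {		/* overlap possible: copy from the tail */
			while (n--)
				d[n] = s[n];
		}
		return dest;
	}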
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index bcbcd1e0f7d5..75ef61e35e38 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S | |||
@@ -40,84 +40,132 @@ | |||
40 | ENTRY(__memcpy) | 40 | ENTRY(__memcpy) |
41 | ENTRY(memcpy) | 41 | ENTRY(memcpy) |
42 | CFI_STARTPROC | 42 | CFI_STARTPROC |
43 | movq %rdi, %rax | ||
43 | 44 | ||
44 | /* | 45 | /* |
45 | * Put the number of full 64-byte blocks into %ecx. | 46 | * Use 32bit CMP here to avoid long NOP padding. |
46 | * Tail portion is handled at the end: | ||
47 | */ | 47 | */ |
48 | movq %rdi, %rax | 48 | cmp $0x20, %edx |
49 | movl %edx, %ecx | 49 | jb .Lhandle_tail |
50 | shrl $6, %ecx | ||
51 | jz .Lhandle_tail | ||
52 | 50 | ||
53 | .p2align 4 | ||
54 | .Lloop_64: | ||
55 | /* | 51 | /* |
56 | * We decrement the loop index here - and the zero-flag is | 52 | * We check whether a memory false dependence could occur, |
57 | * checked at the end of the loop (instructions inbetween do | 53 | * then jump to the corresponding copy mode. |
58 | * not change the zero flag): | ||
59 | */ | 54 | */ |
60 | decl %ecx | 55 | cmp %dil, %sil |
56 | jl .Lcopy_backward | ||
57 | subl $0x20, %edx | ||
58 | .Lcopy_forward_loop: | ||
59 | subq $0x20, %rdx | ||
61 | 60 | ||
62 | /* | 61 | /* |
63 | * Move in blocks of 4x16 bytes: | 62 | * Move in blocks of 4x8 bytes: |
64 | */ | 63 | */ |
65 | movq 0*8(%rsi), %r11 | 64 | movq 0*8(%rsi), %r8 |
66 | movq 1*8(%rsi), %r8 | 65 | movq 1*8(%rsi), %r9 |
67 | movq %r11, 0*8(%rdi) | 66 | movq 2*8(%rsi), %r10 |
68 | movq %r8, 1*8(%rdi) | 67 | movq 3*8(%rsi), %r11 |
69 | 68 | leaq 4*8(%rsi), %rsi | |
70 | movq 2*8(%rsi), %r9 | 69 | |
71 | movq 3*8(%rsi), %r10 | 70 | movq %r8, 0*8(%rdi) |
72 | movq %r9, 2*8(%rdi) | 71 | movq %r9, 1*8(%rdi) |
73 | movq %r10, 3*8(%rdi) | 72 | movq %r10, 2*8(%rdi) |
74 | 73 | movq %r11, 3*8(%rdi) | |
75 | movq 4*8(%rsi), %r11 | 74 | leaq 4*8(%rdi), %rdi |
76 | movq 5*8(%rsi), %r8 | 75 | jae .Lcopy_forward_loop |
77 | movq %r11, 4*8(%rdi) | 76 | addq $0x20, %rdx |
78 | movq %r8, 5*8(%rdi) | 77 | jmp .Lhandle_tail |
79 | 78 | ||
80 | movq 6*8(%rsi), %r9 | 79 | .Lcopy_backward: |
81 | movq 7*8(%rsi), %r10 | 80 | /* |
82 | movq %r9, 6*8(%rdi) | 81 | * Calculate copy position to tail. |
83 | movq %r10, 7*8(%rdi) | 82 | */ |
84 | 83 | addq %rdx, %rsi | |
85 | leaq 64(%rsi), %rsi | 84 | addq %rdx, %rdi |
86 | leaq 64(%rdi), %rdi | 85 | subq $0x20, %rdx |
87 | 86 | /* | |
88 | jnz .Lloop_64 | 87 | * At most 3 ALU operations in one cycle, |
88 | * so append NOPs in the same 16-byte chunk. | ||
89 | */ | ||
90 | .p2align 4 | ||
91 | .Lcopy_backward_loop: | ||
92 | subq $0x20, %rdx | ||
93 | movq -1*8(%rsi), %r8 | ||
94 | movq -2*8(%rsi), %r9 | ||
95 | movq -3*8(%rsi), %r10 | ||
96 | movq -4*8(%rsi), %r11 | ||
97 | leaq -4*8(%rsi), %rsi | ||
98 | movq %r8, -1*8(%rdi) | ||
99 | movq %r9, -2*8(%rdi) | ||
100 | movq %r10, -3*8(%rdi) | ||
101 | movq %r11, -4*8(%rdi) | ||
102 | leaq -4*8(%rdi), %rdi | ||
103 | jae .Lcopy_backward_loop | ||
89 | 104 | ||
105 | /* | ||
106 | * Calculate copy position to head. | ||
107 | */ | ||
108 | addq $0x20, %rdx | ||
109 | subq %rdx, %rsi | ||
110 | subq %rdx, %rdi | ||
90 | .Lhandle_tail: | 111 | .Lhandle_tail: |
91 | movl %edx, %ecx | 112 | cmpq $16, %rdx |
92 | andl $63, %ecx | 113 | jb .Lless_16bytes |
93 | shrl $3, %ecx | ||
94 | jz .Lhandle_7 | ||
95 | 114 | ||
115 | /* | ||
116 | * Move data from 16 bytes to 31 bytes. | ||
117 | */ | ||
118 | movq 0*8(%rsi), %r8 | ||
119 | movq 1*8(%rsi), %r9 | ||
120 | movq -2*8(%rsi, %rdx), %r10 | ||
121 | movq -1*8(%rsi, %rdx), %r11 | ||
122 | movq %r8, 0*8(%rdi) | ||
123 | movq %r9, 1*8(%rdi) | ||
124 | movq %r10, -2*8(%rdi, %rdx) | ||
125 | movq %r11, -1*8(%rdi, %rdx) | ||
126 | retq | ||
96 | .p2align 4 | 127 | .p2align 4 |
97 | .Lloop_8: | 128 | .Lless_16bytes: |
98 | decl %ecx | 129 | cmpq $8, %rdx |
99 | movq (%rsi), %r8 | 130 | jb .Lless_8bytes |
100 | movq %r8, (%rdi) | 131 | /* |
101 | leaq 8(%rdi), %rdi | 132 | * Move data from 8 bytes to 15 bytes. |
102 | leaq 8(%rsi), %rsi | 133 | */ |
103 | jnz .Lloop_8 | 134 | movq 0*8(%rsi), %r8 |
104 | 135 | movq -1*8(%rsi, %rdx), %r9 | |
105 | .Lhandle_7: | 136 | movq %r8, 0*8(%rdi) |
106 | movl %edx, %ecx | 137 | movq %r9, -1*8(%rdi, %rdx) |
107 | andl $7, %ecx | 138 | retq |
108 | jz .Lend | 139 | .p2align 4 |
140 | .Lless_8bytes: | ||
141 | cmpq $4, %rdx | ||
142 | jb .Lless_3bytes | ||
109 | 143 | ||
144 | /* | ||
145 | * Move data from 4 bytes to 7 bytes. | ||
146 | */ | ||
147 | movl (%rsi), %ecx | ||
148 | movl -4(%rsi, %rdx), %r8d | ||
149 | movl %ecx, (%rdi) | ||
150 | movl %r8d, -4(%rdi, %rdx) | ||
151 | retq | ||
110 | .p2align 4 | 152 | .p2align 4 |
153 | .Lless_3bytes: | ||
154 | cmpl $0, %edx | ||
155 | je .Lend | ||
156 | /* | ||
157 | * Move data from 1 bytes to 3 bytes. | ||
158 | */ | ||
111 | .Lloop_1: | 159 | .Lloop_1: |
112 | movb (%rsi), %r8b | 160 | movb (%rsi), %r8b |
113 | movb %r8b, (%rdi) | 161 | movb %r8b, (%rdi) |
114 | incq %rdi | 162 | incq %rdi |
115 | incq %rsi | 163 | incq %rsi |
116 | decl %ecx | 164 | decl %edx |
117 | jnz .Lloop_1 | 165 | jnz .Lloop_1 |
118 | 166 | ||
119 | .Lend: | 167 | .Lend: |
120 | ret | 168 | retq |
121 | CFI_ENDPROC | 169 | CFI_ENDPROC |
122 | ENDPROC(memcpy) | 170 | ENDPROC(memcpy) |
123 | ENDPROC(__memcpy) | 171 | ENDPROC(__memcpy) |
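The .Lhandle_tail cases above avoid byte loops with a head-plus-tail trick: load the first and last words of the range up front, then store both, letting the stores overlap in the middle — harmless, since the overlapping bytes are written twice with the same value. A hedged C rendering of the 8-to-15-byte case (copy_8_to_15 is an illustrative name; each 8-byte memcpy stands in for one movq):

	#include <stdint.h>
	#include <string.h>

	/* Sketch of the overlapping head/tail copy used for 8 <= n <= 15. */
	static void copy_8_to_15(void *dst, const void *src, size_t n)
	{
		uint64_t head, tail;

		memcpy(&head, src, 8);				/* first 8 bytes */
		memcpy(&tail, (const char *)src + n - 8, 8);	/* last 8 bytes  */
		memcpy(dst, &head, 8);
		memcpy((char *)dst + n - 8, &tail, 8);	/* may overlap the head store */
	}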
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c index 0a33909bf122..6d0f0ec41b34 100644 --- a/arch/x86/lib/memmove_64.c +++ b/arch/x86/lib/memmove_64.c | |||
@@ -8,14 +8,185 @@ | |||
8 | #undef memmove | 8 | #undef memmove |
9 | void *memmove(void *dest, const void *src, size_t count) | 9 | void *memmove(void *dest, const void *src, size_t count) |
10 | { | 10 | { |
11 | if (dest < src) { | 11 | unsigned long d0,d1,d2,d3,d4,d5,d6,d7; |
12 | return memcpy(dest, src, count); | 12 | char *ret; |
13 | } else { | 13 | |
14 | char *p = dest + count; | 14 | __asm__ __volatile__( |
15 | const char *s = src + count; | 15 | /* Handle more 32bytes in loop */ |
16 | while (count--) | 16 | "mov %2, %3\n\t" |
17 | *--p = *--s; | 17 | "cmp $0x20, %0\n\t" |
18 | } | 18 | "jb 1f\n\t" |
19 | return dest; | 19 | |
20 | /* Decide forward/backward copy mode */ | ||
21 | "cmp %2, %1\n\t" | ||
22 | "jb 2f\n\t" | ||
23 | |||
24 | /* | ||
25 | * the movsq instruction has a high startup latency, | ||
26 | * so we handle small sizes with general registers. | ||
27 | */ | ||
28 | "cmp $680, %0\n\t" | ||
29 | "jb 3f\n\t" | ||
30 | /* | ||
31 | * the movsq instruction is only good for the aligned case. | ||
32 | */ | ||
33 | "cmpb %%dil, %%sil\n\t" | ||
34 | "je 4f\n\t" | ||
35 | "3:\n\t" | ||
36 | "sub $0x20, %0\n\t" | ||
37 | /* | ||
38 | * We gobble 32 bytes forward in each loop. | ||
39 | */ | ||
40 | "5:\n\t" | ||
41 | "sub $0x20, %0\n\t" | ||
42 | "movq 0*8(%1), %4\n\t" | ||
43 | "movq 1*8(%1), %5\n\t" | ||
44 | "movq 2*8(%1), %6\n\t" | ||
45 | "movq 3*8(%1), %7\n\t" | ||
46 | "leaq 4*8(%1), %1\n\t" | ||
47 | |||
48 | "movq %4, 0*8(%2)\n\t" | ||
49 | "movq %5, 1*8(%2)\n\t" | ||
50 | "movq %6, 2*8(%2)\n\t" | ||
51 | "movq %7, 3*8(%2)\n\t" | ||
52 | "leaq 4*8(%2), %2\n\t" | ||
53 | "jae 5b\n\t" | ||
54 | "addq $0x20, %0\n\t" | ||
55 | "jmp 1f\n\t" | ||
56 | /* | ||
57 | * Handle data forward by movsq. | ||
58 | */ | ||
59 | ".p2align 4\n\t" | ||
60 | "4:\n\t" | ||
61 | "movq %0, %8\n\t" | ||
62 | "movq -8(%1, %0), %4\n\t" | ||
63 | "lea -8(%2, %0), %5\n\t" | ||
64 | "shrq $3, %8\n\t" | ||
65 | "rep movsq\n\t" | ||
66 | "movq %4, (%5)\n\t" | ||
67 | "jmp 13f\n\t" | ||
68 | /* | ||
69 | * Handle data backward by movsq. | ||
70 | */ | ||
71 | ".p2align 4\n\t" | ||
72 | "7:\n\t" | ||
73 | "movq %0, %8\n\t" | ||
74 | "movq (%1), %4\n\t" | ||
75 | "movq %2, %5\n\t" | ||
76 | "leaq -8(%1, %0), %1\n\t" | ||
77 | "leaq -8(%2, %0), %2\n\t" | ||
78 | "shrq $3, %8\n\t" | ||
79 | "std\n\t" | ||
80 | "rep movsq\n\t" | ||
81 | "cld\n\t" | ||
82 | "movq %4, (%5)\n\t" | ||
83 | "jmp 13f\n\t" | ||
84 | |||
85 | /* | ||
86 | * Start to prepare for backward copy. | ||
87 | */ | ||
88 | ".p2align 4\n\t" | ||
89 | "2:\n\t" | ||
90 | "cmp $680, %0\n\t" | ||
91 | "jb 6f \n\t" | ||
92 | "cmp %%dil, %%sil\n\t" | ||
93 | "je 7b \n\t" | ||
94 | "6:\n\t" | ||
95 | /* | ||
96 | * Calculate copy position to tail. | ||
97 | */ | ||
98 | "addq %0, %1\n\t" | ||
99 | "addq %0, %2\n\t" | ||
100 | "subq $0x20, %0\n\t" | ||
101 | /* | ||
102 | * We gobble 32 bytes backward in each loop. | ||
103 | */ | ||
104 | "8:\n\t" | ||
105 | "subq $0x20, %0\n\t" | ||
106 | "movq -1*8(%1), %4\n\t" | ||
107 | "movq -2*8(%1), %5\n\t" | ||
108 | "movq -3*8(%1), %6\n\t" | ||
109 | "movq -4*8(%1), %7\n\t" | ||
110 | "leaq -4*8(%1), %1\n\t" | ||
111 | |||
112 | "movq %4, -1*8(%2)\n\t" | ||
113 | "movq %5, -2*8(%2)\n\t" | ||
114 | "movq %6, -3*8(%2)\n\t" | ||
115 | "movq %7, -4*8(%2)\n\t" | ||
116 | "leaq -4*8(%2), %2\n\t" | ||
117 | "jae 8b\n\t" | ||
118 | /* | ||
119 | * Calculate copy position to head. | ||
120 | */ | ||
121 | "addq $0x20, %0\n\t" | ||
122 | "subq %0, %1\n\t" | ||
123 | "subq %0, %2\n\t" | ||
124 | "1:\n\t" | ||
125 | "cmpq $16, %0\n\t" | ||
126 | "jb 9f\n\t" | ||
127 | /* | ||
128 | * Move data from 16 bytes to 31 bytes. | ||
129 | */ | ||
130 | "movq 0*8(%1), %4\n\t" | ||
131 | "movq 1*8(%1), %5\n\t" | ||
132 | "movq -2*8(%1, %0), %6\n\t" | ||
133 | "movq -1*8(%1, %0), %7\n\t" | ||
134 | "movq %4, 0*8(%2)\n\t" | ||
135 | "movq %5, 1*8(%2)\n\t" | ||
136 | "movq %6, -2*8(%2, %0)\n\t" | ||
137 | "movq %7, -1*8(%2, %0)\n\t" | ||
138 | "jmp 13f\n\t" | ||
139 | ".p2align 4\n\t" | ||
140 | "9:\n\t" | ||
141 | "cmpq $8, %0\n\t" | ||
142 | "jb 10f\n\t" | ||
143 | /* | ||
144 | * Move data from 8 bytes to 15 bytes. | ||
145 | */ | ||
146 | "movq 0*8(%1), %4\n\t" | ||
147 | "movq -1*8(%1, %0), %5\n\t" | ||
148 | "movq %4, 0*8(%2)\n\t" | ||
149 | "movq %5, -1*8(%2, %0)\n\t" | ||
150 | "jmp 13f\n\t" | ||
151 | "10:\n\t" | ||
152 | "cmpq $4, %0\n\t" | ||
153 | "jb 11f\n\t" | ||
154 | /* | ||
155 | * Move data from 4 bytes to 7 bytes. | ||
156 | */ | ||
157 | "movl (%1), %4d\n\t" | ||
158 | "movl -4(%1, %0), %5d\n\t" | ||
159 | "movl %4d, (%2)\n\t" | ||
160 | "movl %5d, -4(%2, %0)\n\t" | ||
161 | "jmp 13f\n\t" | ||
162 | "11:\n\t" | ||
163 | "cmp $2, %0\n\t" | ||
164 | "jb 12f\n\t" | ||
165 | /* | ||
166 | * Move data from 2 bytes to 3 bytes. | ||
167 | */ | ||
168 | "movw (%1), %4w\n\t" | ||
169 | "movw -2(%1, %0), %5w\n\t" | ||
170 | "movw %4w, (%2)\n\t" | ||
171 | "movw %5w, -2(%2, %0)\n\t" | ||
172 | "jmp 13f\n\t" | ||
173 | "12:\n\t" | ||
174 | "cmp $1, %0\n\t" | ||
175 | "jb 13f\n\t" | ||
176 | /* | ||
177 | * Move data for 1 byte. | ||
178 | */ | ||
179 | "movb (%1), %4b\n\t" | ||
180 | "movb %4b, (%2)\n\t" | ||
181 | "13:\n\t" | ||
182 | : "=&d" (d0), "=&S" (d1), "=&D" (d2), "=&a" (ret) , | ||
183 | "=r"(d3), "=r"(d4), "=r"(d5), "=r"(d6), "=&c" (d7) | ||
184 | :"0" (count), | ||
185 | "1" (src), | ||
186 | "2" (dest) | ||
187 | :"memory"); | ||
188 | |||
189 | return ret; | ||
190 | |||
20 | } | 191 | } |
21 | EXPORT_SYMBOL(memmove); | 192 | EXPORT_SYMBOL(memmove); |
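One detail in the backward-movsq path above (label 7:): it is the only point where the direction flag gets set, and the std ... rep movsq ... cld sequence restores DF=0 before the asm statement ends, preserving the kernel-wide assumption that the flag stays clear.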
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index a24c6cfdccc4..79b0b372d2d0 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -229,7 +229,16 @@ void vmalloc_sync_all(void) | |||
229 | 229 | ||
230 | spin_lock_irqsave(&pgd_lock, flags); | 230 | spin_lock_irqsave(&pgd_lock, flags); |
231 | list_for_each_entry(page, &pgd_list, lru) { | 231 | list_for_each_entry(page, &pgd_list, lru) { |
232 | if (!vmalloc_sync_one(page_address(page), address)) | 232 | spinlock_t *pgt_lock; |
233 | pmd_t *ret; | ||
234 | |||
235 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | ||
236 | |||
237 | spin_lock(pgt_lock); | ||
238 | ret = vmalloc_sync_one(page_address(page), address); | ||
239 | spin_unlock(pgt_lock); | ||
240 | |||
241 | if (!ret) | ||
233 | break; | 242 | break; |
234 | } | 243 | } |
235 | spin_unlock_irqrestore(&pgd_lock, flags); | 244 | spin_unlock_irqrestore(&pgd_lock, flags); |
@@ -328,29 +337,7 @@ out: | |||
328 | 337 | ||
329 | void vmalloc_sync_all(void) | 338 | void vmalloc_sync_all(void) |
330 | { | 339 | { |
331 | unsigned long address; | 340 | sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); |
332 | |||
333 | for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; | ||
334 | address += PGDIR_SIZE) { | ||
335 | |||
336 | const pgd_t *pgd_ref = pgd_offset_k(address); | ||
337 | unsigned long flags; | ||
338 | struct page *page; | ||
339 | |||
340 | if (pgd_none(*pgd_ref)) | ||
341 | continue; | ||
342 | |||
343 | spin_lock_irqsave(&pgd_lock, flags); | ||
344 | list_for_each_entry(page, &pgd_list, lru) { | ||
345 | pgd_t *pgd; | ||
346 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | ||
347 | if (pgd_none(*pgd)) | ||
348 | set_pgd(pgd, *pgd_ref); | ||
349 | else | ||
350 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
351 | } | ||
352 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
353 | } | ||
354 | } | 341 | } |
355 | 342 | ||
356 | /* | 343 | /* |
@@ -898,8 +885,14 @@ spurious_fault(unsigned long error_code, unsigned long address) | |||
898 | if (pmd_large(*pmd)) | 885 | if (pmd_large(*pmd)) |
899 | return spurious_fault_check(error_code, (pte_t *) pmd); | 886 | return spurious_fault_check(error_code, (pte_t *) pmd); |
900 | 887 | ||
888 | /* | ||
889 | * Note: don't use pte_present() here, since it returns true | ||
890 | * if the _PAGE_PROTNONE bit is set. However, this aliases the | ||
891 | * _PAGE_GLOBAL bit, which for kernel pages give false positives | ||
892 | * when CONFIG_DEBUG_PAGEALLOC is used. | ||
893 | */ | ||
901 | pte = pte_offset_kernel(pmd, address); | 894 | pte = pte_offset_kernel(pmd, address); |
902 | if (!pte_present(*pte)) | 895 | if (!(pte_flags(*pte) & _PAGE_PRESENT)) |
903 | return 0; | 896 | return 0; |
904 | 897 | ||
905 | ret = spurious_fault_check(error_code, pte); | 898 | ret = spurious_fault_check(error_code, pte); |
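Background for the pte_present() change above, summarizing the flag layout rather than quoting this patch: on x86, _PAGE_PROTNONE and _PAGE_GLOBAL occupy the same PTE bit, with PROTNONE meaningful only while the present bit is clear and GLOBAL only while it is set. Because pte_present() tests _PAGE_PRESENT | _PAGE_PROTNONE, a kernel page that CONFIG_DEBUG_PAGEALLOC has unmapped but that still carries _PAGE_GLOBAL would wrongly look present; checking _PAGE_PRESENT alone avoids that false positive.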
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bca79091b9d6..558f2d332076 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -67,7 +67,7 @@ static __init void *alloc_low_page(void) | |||
67 | panic("alloc_low_page: ran out of memory"); | 67 | panic("alloc_low_page: ran out of memory"); |
68 | 68 | ||
69 | adr = __va(pfn * PAGE_SIZE); | 69 | adr = __va(pfn * PAGE_SIZE); |
70 | memset(adr, 0, PAGE_SIZE); | 70 | clear_page(adr); |
71 | return adr; | 71 | return adr; |
72 | } | 72 | } |
73 | 73 | ||
@@ -558,7 +558,7 @@ char swsusp_pg_dir[PAGE_SIZE] | |||
558 | 558 | ||
559 | static inline void save_pg_dir(void) | 559 | static inline void save_pg_dir(void) |
560 | { | 560 | { |
561 | memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE); | 561 | copy_page(swsusp_pg_dir, swapper_pg_dir); |
562 | } | 562 | } |
563 | #else /* !CONFIG_ACPI_SLEEP */ | 563 | #else /* !CONFIG_ACPI_SLEEP */ |
564 | static inline void save_pg_dir(void) | 564 | static inline void save_pg_dir(void) |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9a6674689a20..c55f900fbf89 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -98,6 +98,43 @@ static int __init nonx32_setup(char *str) | |||
98 | __setup("noexec32=", nonx32_setup); | 98 | __setup("noexec32=", nonx32_setup); |
99 | 99 | ||
100 | /* | 100 | /* |
101 | * When memory was added/removed make sure all the processes MM have | ||
102 | * suitable PGD entries in the local PGD level page. | ||
103 | */ | ||
104 | void sync_global_pgds(unsigned long start, unsigned long end) | ||
105 | { | ||
106 | unsigned long address; | ||
107 | |||
108 | for (address = start; address <= end; address += PGDIR_SIZE) { | ||
109 | const pgd_t *pgd_ref = pgd_offset_k(address); | ||
110 | unsigned long flags; | ||
111 | struct page *page; | ||
112 | |||
113 | if (pgd_none(*pgd_ref)) | ||
114 | continue; | ||
115 | |||
116 | spin_lock_irqsave(&pgd_lock, flags); | ||
117 | list_for_each_entry(page, &pgd_list, lru) { | ||
118 | pgd_t *pgd; | ||
119 | spinlock_t *pgt_lock; | ||
120 | |||
121 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | ||
122 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | ||
123 | spin_lock(pgt_lock); | ||
124 | |||
125 | if (pgd_none(*pgd)) | ||
126 | set_pgd(pgd, *pgd_ref); | ||
127 | else | ||
128 | BUG_ON(pgd_page_vaddr(*pgd) | ||
129 | != pgd_page_vaddr(*pgd_ref)); | ||
130 | |||
131 | spin_unlock(pgt_lock); | ||
132 | } | ||
133 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
134 | } | ||
135 | } | ||
136 | |||
137 | /* | ||
101 | * NOTE: This function is marked __ref because it calls __init function | 138 | * NOTE: This function is marked __ref because it calls __init function |
102 | * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. | 139 | * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. |
103 | */ | 140 | */ |
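For readers skimming the new sync_global_pgds(): on x86-64 each process carries its own copy of the kernel half of the PGD, taken at fork time, so a kernel top-level entry created later (memory hotplug or vmemmap growth, the two call sites added below) exists only in init_mm until it is copied into every mm on pgd_list. Here is a minimal user-space toy of the copy-if-none rule; the locking (pgd_lock, then each mm's page_table_lock via pgd_page_get_mm()) is elided, and all sizes and values are invented:

#include <stdio.h>

#define PTRS_PER_PGD 8		/* toy size; real x86-64 uses 512 */
#define NPROC 3

typedef unsigned long pgd_t;	/* 0 means "none" */

/* reference table: stand-in for init_mm's top level */
static pgd_t pgd_ref[PTRS_PER_PGD];
/* stand-ins for every mm on pgd_list */
static pgd_t process_pgd[NPROC][PTRS_PER_PGD];

/* copy any entry present in the reference table but missing locally */
static void sync_global_pgds_toy(int lo, int hi)
{
	for (int i = lo; i <= hi; i++) {
		if (!pgd_ref[i])
			continue;
		for (int p = 0; p < NPROC; p++)
			if (!process_pgd[p][i])
				process_pgd[p][i] = pgd_ref[i];
	}
}

int main(void)
{
	pgd_ref[5] = 0xabc;	/* a new kernel mapping, e.g. from hotplug */
	sync_global_pgds_toy(0, PTRS_PER_PGD - 1);
	printf("process 2, slot 5: %#lx\n", process_pgd[2][5]);
	return 0;
}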
@@ -293,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys) | |||
293 | panic("alloc_low_page: ran out of memory"); | 330 | panic("alloc_low_page: ran out of memory"); |
294 | 331 | ||
295 | adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); | 332 | adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); |
296 | memset(adr, 0, PAGE_SIZE); | 333 | clear_page(adr); |
297 | *phys = pfn * PAGE_SIZE; | 334 | *phys = pfn * PAGE_SIZE; |
298 | return adr; | 335 | return adr; |
299 | } | 336 | } |
@@ -534,11 +571,13 @@ kernel_physical_mapping_init(unsigned long start, | |||
534 | unsigned long end, | 571 | unsigned long end, |
535 | unsigned long page_size_mask) | 572 | unsigned long page_size_mask) |
536 | { | 573 | { |
537 | 574 | bool pgd_changed = false; | |
538 | unsigned long next, last_map_addr = end; | 575 | unsigned long next, last_map_addr = end; |
576 | unsigned long addr; | ||
539 | 577 | ||
540 | start = (unsigned long)__va(start); | 578 | start = (unsigned long)__va(start); |
541 | end = (unsigned long)__va(end); | 579 | end = (unsigned long)__va(end); |
580 | addr = start; | ||
542 | 581 | ||
543 | for (; start < end; start = next) { | 582 | for (; start < end; start = next) { |
544 | pgd_t *pgd = pgd_offset_k(start); | 583 | pgd_t *pgd = pgd_offset_k(start); |
@@ -563,7 +602,12 @@ kernel_physical_mapping_init(unsigned long start, | |||
563 | spin_lock(&init_mm.page_table_lock); | 602 | spin_lock(&init_mm.page_table_lock); |
564 | pgd_populate(&init_mm, pgd, __va(pud_phys)); | 603 | pgd_populate(&init_mm, pgd, __va(pud_phys)); |
565 | spin_unlock(&init_mm.page_table_lock); | 604 | spin_unlock(&init_mm.page_table_lock); |
605 | pgd_changed = true; | ||
566 | } | 606 | } |
607 | |||
608 | if (pgd_changed) | ||
609 | sync_global_pgds(addr, end); | ||
610 | |||
567 | __flush_tlb_all(); | 611 | __flush_tlb_all(); |
568 | 612 | ||
569 | return last_map_addr; | 613 | return last_map_addr; |
@@ -1003,6 +1047,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) | |||
1003 | } | 1047 | } |
1004 | 1048 | ||
1005 | } | 1049 | } |
1050 | sync_global_pgds((unsigned long)start_page, end); | ||
1006 | return 0; | 1051 | return 0; |
1007 | } | 1052 | } |
1008 | 1053 | ||
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed579d4e4..52d54bfc1ebb 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/numa.h> | 22 | #include <asm/numa.h> |
23 | #include <asm/mpspec.h> | 23 | #include <asm/mpspec.h> |
24 | #include <asm/apic.h> | 24 | #include <asm/apic.h> |
25 | #include <asm/k8.h> | 25 | #include <asm/amd_nb.h> |
26 | 26 | ||
27 | static struct bootnode __initdata nodes[8]; | 27 | static struct bootnode __initdata nodes[8]; |
28 | static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; | 28 | static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; |
@@ -54,8 +54,8 @@ static __init int find_northbridge(void) | |||
54 | static __init void early_get_boot_cpu_id(void) | 54 | static __init void early_get_boot_cpu_id(void) |
55 | { | 55 | { |
56 | /* | 56 | /* |
57 | * need to get boot_cpu_id so can use that to create apicid_to_node | 57 | * need to get the APIC ID of the BSP so we can use it to |
58 | * in k8_scan_nodes() | 58 | * create apicid_to_node in k8_scan_nodes() |
59 | */ | 59 | */ |
60 | #ifdef CONFIG_X86_MPPARSE | 60 | #ifdef CONFIG_X86_MPPARSE |
61 | /* | 61 | /* |
@@ -212,7 +212,7 @@ int __init k8_scan_nodes(void) | |||
212 | bits = boot_cpu_data.x86_coreid_bits; | 212 | bits = boot_cpu_data.x86_coreid_bits; |
213 | cores = (1<<bits); | 213 | cores = (1<<bits); |
214 | apicid_base = 0; | 214 | apicid_base = 0; |
215 | /* need to get boot_cpu_id early for system with apicid lifting */ | 215 | /* get the APIC ID of the BSP early for systems with apicid lifting */ |
216 | early_get_boot_cpu_id(); | 216 | early_get_boot_cpu_id(); |
217 | if (boot_cpu_physical_apicid > 0) { | 217 | if (boot_cpu_physical_apicid > 0) { |
218 | pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); | 218 | pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); |
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c index 63c19e27aa6f..324aa3f07237 100644 --- a/arch/x86/mm/kmemcheck/opcode.c +++ b/arch/x86/mm/kmemcheck/opcode.c | |||
@@ -9,7 +9,7 @@ static bool opcode_is_prefix(uint8_t b) | |||
9 | b == 0xf0 || b == 0xf2 || b == 0xf3 | 9 | b == 0xf0 || b == 0xf2 || b == 0xf3 |
10 | /* Group 2 */ | 10 | /* Group 2 */ |
11 | || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 | 11 | || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 |
12 | || b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e | 12 | || b == 0x64 || b == 0x65 |
13 | /* Group 3 */ | 13 | /* Group 3 */ |
14 | || b == 0x66 | 14 | || b == 0x66 |
15 | /* Group 4 */ | 15 | /* Group 4 */ |
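The kmemcheck fix simply drops the duplicated 0x2e and 0x3e from the Group 2 (segment override) test; both already appear earlier in the same expression. A standalone check of the corrected predicate against the six standard x86 segment override prefixes (CS 2e, SS 36, DS 3e, ES 26, FS 64, GS 65):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_group2_prefix(uint8_t b)
{
	return b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
	    || b == 0x64 || b == 0x65;
}

int main(void)
{
	const uint8_t overrides[] = { 0x2e, 0x36, 0x3e, 0x26, 0x64, 0x65 };

	for (unsigned i = 0; i < sizeof(overrides); i++)
		printf("%#x -> %d\n", overrides[i],
		       is_group2_prefix(overrides[i]));
	return 0;
}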
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a7bcc23ef96c..4962f1aeda6f 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/dma.h> | 18 | #include <asm/dma.h> |
19 | #include <asm/numa.h> | 19 | #include <asm/numa.h> |
20 | #include <asm/acpi.h> | 20 | #include <asm/acpi.h> |
21 | #include <asm/k8.h> | 21 | #include <asm/amd_nb.h> |
22 | 22 | ||
23 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | 23 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; |
24 | EXPORT_SYMBOL(node_data); | 24 | EXPORT_SYMBOL(node_data); |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5c4ee422590e..8be8c7d7bc89 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -87,7 +87,19 @@ static inline void pgd_list_del(pgd_t *pgd) | |||
87 | #define UNSHARED_PTRS_PER_PGD \ | 87 | #define UNSHARED_PTRS_PER_PGD \ |
88 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) | 88 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) |
89 | 89 | ||
90 | static void pgd_ctor(pgd_t *pgd) | 90 | |
91 | static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) | ||
92 | { | ||
93 | BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); | ||
94 | virt_to_page(pgd)->index = (pgoff_t)mm; | ||
95 | } | ||
96 | |||
97 | struct mm_struct *pgd_page_get_mm(struct page *page) | ||
98 | { | ||
99 | return (struct mm_struct *)page->index; | ||
100 | } | ||
101 | |||
102 | static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) | ||
91 | { | 103 | { |
92 | /* If the pgd points to a shared pagetable level (either the | 104 | /* If the pgd points to a shared pagetable level (either the |
93 | ptes in non-PAE, or shared PMD in PAE), then just copy the | 105 | ptes in non-PAE, or shared PMD in PAE), then just copy the |
@@ -98,15 +110,13 @@ static void pgd_ctor(pgd_t *pgd) | |||
98 | clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, | 110 | clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, |
99 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | 111 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
100 | KERNEL_PGD_PTRS); | 112 | KERNEL_PGD_PTRS); |
101 | paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT, | ||
102 | __pa(swapper_pg_dir) >> PAGE_SHIFT, | ||
103 | KERNEL_PGD_BOUNDARY, | ||
104 | KERNEL_PGD_PTRS); | ||
105 | } | 113 | } |
106 | 114 | ||
107 | /* list required to sync kernel mapping updates */ | 115 | /* list required to sync kernel mapping updates */ |
108 | if (!SHARED_KERNEL_PMD) | 116 | if (!SHARED_KERNEL_PMD) { |
117 | pgd_set_mm(pgd, mm); | ||
109 | pgd_list_add(pgd); | 118 | pgd_list_add(pgd); |
119 | } | ||
110 | } | 120 | } |
111 | 121 | ||
112 | static void pgd_dtor(pgd_t *pgd) | 122 | static void pgd_dtor(pgd_t *pgd) |
@@ -272,7 +282,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
272 | */ | 282 | */ |
273 | spin_lock_irqsave(&pgd_lock, flags); | 283 | spin_lock_irqsave(&pgd_lock, flags); |
274 | 284 | ||
275 | pgd_ctor(pgd); | 285 | pgd_ctor(mm, pgd); |
276 | pgd_prepopulate_pmd(mm, pgd, pmds); | 286 | pgd_prepopulate_pmd(mm, pgd, pmds); |
277 | 287 | ||
278 | spin_unlock_irqrestore(&pgd_lock, flags); | 288 | spin_unlock_irqrestore(&pgd_lock, flags); |
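The pgtable.c hunk stashes the owning mm in the PGD page's otherwise-unused struct page index field, so sync_global_pgds() can find the right page_table_lock from a pgd_list entry alone; the BUILD_BUG_ON guards against the field being narrower than a pointer. A standalone sketch of the same back-pointer trick, with simplified stand-in types for struct page and struct mm_struct:

#include <stdio.h>

struct mm { const char *name; };

struct page_desc {
	unsigned long index;	/* unused for PGD pages; reused here */
};

static void pgd_set_mm(struct page_desc *pg, struct mm *mm)
{
	/* mirrors the kernel's BUILD_BUG_ON() width check */
	_Static_assert(sizeof(((struct page_desc *)0)->index)
		       >= sizeof(struct mm *),
		       "index field too narrow for a pointer");
	pg->index = (unsigned long)mm;
}

static struct mm *pgd_page_get_mm(struct page_desc *pg)
{
	return (struct mm *)pg->index;
}

int main(void)
{
	struct mm init_mm = { "init_mm" };
	struct page_desc pgd_page = { 0 };

	pgd_set_mm(&pgd_page, &init_mm);
	printf("owner: %s\n", pgd_page_get_mm(&pgd_page)->name);
	return 0;
}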
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index f9897f7a9ef1..9c0d0d399c30 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) | |||
420 | return -1; | 420 | return -1; |
421 | } | 421 | } |
422 | 422 | ||
423 | for_each_node_mask(i, nodes_parsed) | 423 | for (i = 0; i < num_node_memblks; i++) |
424 | e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, | 424 | e820_register_active_regions(memblk_nodeid[i], |
425 | nodes[i].end >> PAGE_SHIFT); | 425 | node_memblk_range[i].start >> PAGE_SHIFT, |
426 | node_memblk_range[i].end >> PAGE_SHIFT); | ||
427 | |||
426 | /* for out of order entries in SRAT */ | 428 | /* for out of order entries in SRAT */ |
427 | sort_node_map(); | 429 | sort_node_map(); |
428 | if (!nodes_cover_memory(nodes)) { | 430 | if (!nodes_cover_memory(nodes)) { |
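The srat_64.c change registers e820 active regions per memory block rather than per node: SRAT may describe one node with several disjoint ranges, and registering a single start..end span per node would also claim the holes between them. A standalone illustration with invented addresses and a made-up two-block node:

#include <stdio.h>

struct range { unsigned long start, end; };

int main(void)
{
	/* node 0 owns two blocks with a hole in the middle */
	struct range node_memblk_range[] = {
		{ 0x00000000, 0x20000000 },
		{ 0x40000000, 0x60000000 },
	};
	int memblk_nodeid[] = { 0, 0 };
	int num_node_memblks = 2;

	for (int i = 0; i < num_node_memblks; i++)
		printf("register node %d: %#lx-%#lx\n",
		       memblk_nodeid[i],
		       node_memblk_range[i].start,
		       node_memblk_range[i].end);
	return 0;
}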
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index c03f14ab6667..49358481c733 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/smp.h> | 5 | #include <linux/smp.h> |
6 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/cpu.h> | ||
8 | 9 | ||
9 | #include <asm/tlbflush.h> | 10 | #include <asm/tlbflush.h> |
10 | #include <asm/mmu_context.h> | 11 | #include <asm/mmu_context.h> |
@@ -52,6 +53,8 @@ union smp_flush_state { | |||
52 | want false sharing in the per cpu data segment. */ | 53 | want false sharing in the per cpu data segment. */ |
53 | static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS]; | 54 | static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS]; |
54 | 55 | ||
56 | static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset); | ||
57 | |||
55 | /* | 58 | /* |
56 | * We cannot call mmdrop() because we are in interrupt context, | 59 | * We cannot call mmdrop() because we are in interrupt context, |
57 | * instead update mm->cpu_vm_mask. | 60 | * instead update mm->cpu_vm_mask. |
@@ -173,7 +176,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, | |||
173 | union smp_flush_state *f; | 176 | union smp_flush_state *f; |
174 | 177 | ||
175 | /* Caller has disabled preemption */ | 178 | /* Caller has disabled preemption */ |
176 | sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; | 179 | sender = this_cpu_read(tlb_vector_offset); |
177 | f = &flush_state[sender]; | 180 | f = &flush_state[sender]; |
178 | 181 | ||
179 | /* | 182 | /* |
@@ -218,6 +221,47 @@ void native_flush_tlb_others(const struct cpumask *cpumask, | |||
218 | flush_tlb_others_ipi(cpumask, mm, va); | 221 | flush_tlb_others_ipi(cpumask, mm, va); |
219 | } | 222 | } |
220 | 223 | ||
224 | static void __cpuinit calculate_tlb_offset(void) | ||
225 | { | ||
226 | int cpu, node, nr_node_vecs; | ||
227 | /* | ||
228 | * We change tlb_vector_offset for each CPU at runtime, but this will | ||
229 | * not cause inconsistency, as the write is atomic on x86. We might see | ||
230 | * more lock contention for a short time, but once every CPU's | ||
231 | * tlb_vector_offset has been updated, everything returns to normal. | ||
232 | * | ||
233 | * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we might | ||
234 | * waste some vectors. | ||
235 | */ | ||
236 | if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS) | ||
237 | nr_node_vecs = 1; | ||
238 | else | ||
239 | nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; | ||
240 | |||
241 | for_each_online_node(node) { | ||
242 | int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) * | ||
243 | nr_node_vecs; | ||
244 | int cpu_offset = 0; | ||
245 | for_each_cpu(cpu, cpumask_of_node(node)) { | ||
246 | per_cpu(tlb_vector_offset, cpu) = node_offset + | ||
247 | cpu_offset; | ||
248 | cpu_offset++; | ||
249 | cpu_offset = cpu_offset % nr_node_vecs; | ||
250 | } | ||
251 | } | ||
252 | } | ||
253 | |||
254 | static int tlb_cpuhp_notify(struct notifier_block *n, | ||
255 | unsigned long action, void *hcpu) | ||
256 | { | ||
257 | switch (action & 0xf) { | ||
258 | case CPU_ONLINE: | ||
259 | case CPU_DEAD: | ||
260 | calculate_tlb_offset(); | ||
261 | } | ||
262 | return NOTIFY_OK; | ||
263 | } | ||
264 | |||
221 | static int __cpuinit init_smp_flush(void) | 265 | static int __cpuinit init_smp_flush(void) |
222 | { | 266 | { |
223 | int i; | 267 | int i; |
@@ -225,6 +269,8 @@ static int __cpuinit init_smp_flush(void) | |||
225 | for (i = 0; i < ARRAY_SIZE(flush_state); i++) | 269 | for (i = 0; i < ARRAY_SIZE(flush_state); i++) |
226 | raw_spin_lock_init(&flush_state[i].tlbstate_lock); | 270 | raw_spin_lock_init(&flush_state[i].tlbstate_lock); |
227 | 271 | ||
272 | calculate_tlb_offset(); | ||
273 | hotcpu_notifier(tlb_cpuhp_notify, 0); | ||
228 | return 0; | 274 | return 0; |
229 | } | 275 | } |
230 | core_initcall(init_smp_flush); | 276 | core_initcall(init_smp_flush); |
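calculate_tlb_offset() partitions the NUM_INVALIDATE_TLB_VECTORS (8 on this kernel) invalidate vectors across online NUMA nodes, so CPUs on different nodes contend on different flush_state locks, with CPUs inside a node round-robining over their node's share. A standalone demo of the same arithmetic; the 2-node, 4-CPUs-per-node topology is invented for the example:

#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS 8

int main(void)
{
	int nr_online_nodes = 2;
	int cpus_per_node = 4;
	int nr_node_vecs = nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS
		? 1 : NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes;

	for (int node = 0; node < nr_online_nodes; node++) {
		int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS)
				  * nr_node_vecs;

		/* CPUs of a node cycle through the node's vector share */
		for (int cpu = 0; cpu < cpus_per_node; cpu++)
			printf("node %d cpu %d -> vector offset %d\n",
			       node, cpu,
			       node_offset + cpu % nr_node_vecs);
	}
	return 0;
}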
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 65f0a1eb6b86..8d17db266bbf 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -71,7 +71,7 @@ static struct ibs_state ibs_state; | |||
71 | * IBS cpuid feature detection | 71 | * IBS cpuid feature detection |
72 | */ | 72 | */ |
73 | 73 | ||
74 | #define IBS_CPUID_FEATURES 0x8000001b | 74 | #define IBS_CPUID_FEATURES 0x8000001b |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but | 77 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but |
@@ -314,6 +314,74 @@ static void op_amd_stop_ibs(void) | |||
314 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | 314 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); |
315 | } | 315 | } |
316 | 316 | ||
317 | static inline int eilvt_is_available(int offset) | ||
318 | { | ||
319 | /* check if we may assign a vector */ | ||
320 | return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); | ||
321 | } | ||
322 | |||
323 | static inline int ibs_eilvt_valid(void) | ||
324 | { | ||
325 | u64 val; | ||
326 | int offset; | ||
327 | |||
328 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
329 | offset = val & IBSCTL_LVT_OFFSET_MASK; | ||
330 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) { | ||
331 | pr_err(FW_BUG "cpu %d, invalid IBS " | ||
332 | "interrupt offset %d (MSR%08X=0x%016llx)\n", | ||
333 | smp_processor_id(), offset, | ||
334 | MSR_AMD64_IBSCTL, val); | ||
335 | return 0; | ||
336 | } | ||
338 | |||
339 | if (eilvt_is_available(offset)) | ||
340 | return 1; | ||
341 | |||
342 | pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " | ||
343 | "not available (MSR%08X=0x%016llx)", | ||
344 | smp_processor_id(), offset, | ||
345 | MSR_AMD64_IBSCTL, val); | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | static inline int get_ibs_offset(void) | ||
351 | { | ||
352 | u64 val; | ||
353 | |||
354 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
355 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) | ||
356 | return -EINVAL; | ||
357 | |||
358 | return val & IBSCTL_LVT_OFFSET_MASK; | ||
359 | } | ||
360 | |||
361 | static void setup_APIC_ibs(void) | ||
362 | { | ||
363 | int offset; | ||
364 | |||
365 | offset = get_ibs_offset(); | ||
366 | if (offset < 0) | ||
367 | goto failed; | ||
368 | |||
369 | if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) | ||
370 | return; | ||
371 | failed: | ||
372 | pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", | ||
373 | smp_processor_id()); | ||
374 | } | ||
375 | |||
376 | static void clear_APIC_ibs(void) | ||
377 | { | ||
378 | int offset; | ||
379 | |||
380 | offset = get_ibs_offset(); | ||
381 | if (offset >= 0) | ||
382 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | ||
383 | } | ||
384 | |||
317 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 385 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
318 | 386 | ||
319 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | 387 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, |
@@ -424,13 +492,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | |||
424 | } | 492 | } |
425 | 493 | ||
426 | if (ibs_caps) | 494 | if (ibs_caps) |
427 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); | 495 | setup_APIC_ibs(); |
428 | } | 496 | } |
429 | 497 | ||
430 | static void op_amd_cpu_shutdown(void) | 498 | static void op_amd_cpu_shutdown(void) |
431 | { | 499 | { |
432 | if (ibs_caps) | 500 | if (ibs_caps) |
433 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | 501 | clear_APIC_ibs(); |
434 | } | 502 | } |
435 | 503 | ||
436 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 504 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
@@ -493,16 +561,11 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
493 | op_amd_stop_ibs(); | 561 | op_amd_stop_ibs(); |
494 | } | 562 | } |
495 | 563 | ||
496 | static int __init_ibs_nmi(void) | 564 | static int setup_ibs_ctl(int ibs_eilvt_off) |
497 | { | 565 | { |
498 | #define IBSCTL_LVTOFFSETVAL (1 << 8) | ||
499 | #define IBSCTL 0x1cc | ||
500 | struct pci_dev *cpu_cfg; | 566 | struct pci_dev *cpu_cfg; |
501 | int nodes; | 567 | int nodes; |
502 | u32 value = 0; | 568 | u32 value = 0; |
503 | u8 ibs_eilvt_off; | ||
504 | |||
505 | ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | ||
506 | 569 | ||
507 | nodes = 0; | 570 | nodes = 0; |
508 | cpu_cfg = NULL; | 571 | cpu_cfg = NULL; |
@@ -514,21 +577,60 @@ static int __init_ibs_nmi(void) | |||
514 | break; | 577 | break; |
515 | ++nodes; | 578 | ++nodes; |
516 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off | 579 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off |
517 | | IBSCTL_LVTOFFSETVAL); | 580 | | IBSCTL_LVT_OFFSET_VALID); |
518 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 581 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
519 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { | 582 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { |
520 | pci_dev_put(cpu_cfg); | 583 | pci_dev_put(cpu_cfg); |
521 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 584 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " |
522 | "IBSCTL = 0x%08x", value); | 585 | "IBSCTL = 0x%08x\n", value); |
523 | return 1; | 586 | return -EINVAL; |
524 | } | 587 | } |
525 | } while (1); | 588 | } while (1); |
526 | 589 | ||
527 | if (!nodes) { | 590 | if (!nodes) { |
528 | printk(KERN_DEBUG "No CPU node configured for IBS"); | 591 | printk(KERN_DEBUG "No CPU node configured for IBS\n"); |
529 | return 1; | 592 | return -ENODEV; |
593 | } | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static int force_ibs_eilvt_setup(void) | ||
599 | { | ||
600 | int i; | ||
601 | int ret; | ||
602 | |||
603 | /* find the next free available EILVT entry */ | ||
604 | for (i = 1; i < 4; i++) { | ||
605 | if (!eilvt_is_available(i)) | ||
606 | continue; | ||
607 | ret = setup_ibs_ctl(i); | ||
608 | if (ret) | ||
609 | return ret; | ||
610 | return 0; | ||
530 | } | 611 | } |
531 | 612 | ||
613 | printk(KERN_DEBUG "No EILVT entry available\n"); | ||
614 | |||
615 | return -EBUSY; | ||
616 | } | ||
617 | |||
618 | static int __init_ibs_nmi(void) | ||
619 | { | ||
620 | int ret; | ||
621 | |||
622 | if (ibs_eilvt_valid()) | ||
623 | return 0; | ||
624 | |||
625 | ret = force_ibs_eilvt_setup(); | ||
626 | if (ret) | ||
627 | return ret; | ||
628 | |||
629 | if (!ibs_eilvt_valid()) | ||
630 | return -EFAULT; | ||
631 | |||
632 | pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); | ||
633 | |||
532 | return 0; | 634 | return 0; |
533 | } | 635 | } |
534 | 636 | ||
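The reworked __init_ibs_nmi() is a check/force/re-check sequence: accept the BIOS-programmed LVT offset when ibs_eilvt_valid() says it is usable, otherwise claim a free EILVT entry, program IBSCTL on every northbridge, verify once more, and only then log the FW_BUG workaround. A standalone sketch of that control flow; both helpers are stubs standing in for ibs_eilvt_valid() and force_ibs_eilvt_setup(), and the -1 stands in for -EFAULT:

#include <stdio.h>

static int fixed_up;	/* pretend firmware state, set by the workaround */

static int state_valid(void)      { return fixed_up; }
static int apply_workaround(void) { fixed_up = 1; return 0; }

static int init_ibs_like(void)
{
	int ret;

	if (state_valid())
		return 0;	/* fast path: BIOS set things up correctly */

	ret = apply_workaround();
	if (ret)
		return ret;	/* no free entry or write-back failed */

	if (!state_valid())
		return -1;	/* workaround did not take */

	fprintf(stderr, "FW_BUG workaround enabled\n");
	return 0;
}

int main(void)
{
	return init_ibs_like() ? 1 : 0;
}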
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c index b34815408f58..13700ec8e2e4 100644 --- a/arch/x86/pci/olpc.c +++ b/arch/x86/pci/olpc.c | |||
@@ -304,7 +304,7 @@ static struct pci_raw_ops pci_olpc_conf = { | |||
304 | 304 | ||
305 | int __init pci_olpc_init(void) | 305 | int __init pci_olpc_init(void) |
306 | { | 306 | { |
307 | printk(KERN_INFO "PCI: Using configuration type OLPC\n"); | 307 | printk(KERN_INFO "PCI: Using configuration type OLPC XO-1\n"); |
308 | raw_pci_ops = &pci_olpc_conf; | 308 | raw_pci_ops = &pci_olpc_conf; |
309 | is_lx = is_geode_lx(); | 309 | is_lx = is_geode_lx(); |
310 | return 0; | 310 | return 0; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42086ac406af..b2363fcbcd0f 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1969,7 +1969,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
1969 | .alloc_pte = xen_alloc_pte_init, | 1969 | .alloc_pte = xen_alloc_pte_init, |
1970 | .release_pte = xen_release_pte_init, | 1970 | .release_pte = xen_release_pte_init, |
1971 | .alloc_pmd = xen_alloc_pmd_init, | 1971 | .alloc_pmd = xen_alloc_pmd_init, |
1972 | .alloc_pmd_clone = paravirt_nop, | ||
1973 | .release_pmd = xen_release_pmd_init, | 1972 | .release_pmd = xen_release_pmd_init, |
1974 | 1973 | ||
1975 | #ifdef CONFIG_X86_64 | 1974 | #ifdef CONFIG_X86_64 |
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index c64a5d387de5..87508886cbbd 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
92 | for_each_online_cpu(j) | 92 | for_each_online_cpu(j) |
93 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 93 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
94 | #endif | 94 | #endif |
95 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 95 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
96 | seq_printf(p, " %s", action->name); | 96 | seq_printf(p, " %s", action->name); |
97 | 97 | ||
98 | for (action=action->next; action; action = action->next) | 98 | for (action=action->next; action; action = action->next) |