Diffstat (limited to 'arch/x86/kernel')
29 files changed, 820 insertions, 824 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index de5657c039e9..95f216bbfaf1 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
-obj-y += vsmp_64.o
+obj-$(CONFIG_X86_VSMP) += vsmp_64.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_MODULES) += module_$(BITS).o
 obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 3b002995e145..f933822dba18 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -222,7 +222,6 @@ struct apic apic_flat = {
        .send_IPI_all = flat_send_IPI_all,
        .send_IPI_self = apic_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
@@ -373,7 +372,6 @@ struct apic apic_physflat = {
        .send_IPI_all = physflat_send_IPI_all,
        .send_IPI_self = apic_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 0b1093394fdf..d806ecaa948f 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -16,17 +16,17 @@
 #include <asm/apic.h>
 #include <asm/ipi.h>

-static inline unsigned bigsmp_get_apic_id(unsigned long x)
+static unsigned bigsmp_get_apic_id(unsigned long x)
 {
        return (x >> 24) & 0xFF;
 }

-static inline int bigsmp_apic_id_registered(void)
+static int bigsmp_apic_id_registered(void)
 {
        return 1;
 }

-static inline const cpumask_t *bigsmp_target_cpus(void)
+static const cpumask_t *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
        return &cpu_online_map;
@@ -35,13 +35,12 @@ static inline const cpumask_t *bigsmp_target_cpus(void)
 #endif
 }

-static inline unsigned long
-bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
 {
        return 0;
 }

-static inline unsigned long bigsmp_check_apicid_present(int bit)
+static unsigned long bigsmp_check_apicid_present(int bit)
 {
        return 1;
 }
@@ -64,7 +63,7 @@ static inline unsigned long calculate_ldr(int cpu)
  * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
  * document number 292116). So here it goes...
  */
-static inline void bigsmp_init_apic_ldr(void)
+static void bigsmp_init_apic_ldr(void)
 {
        unsigned long val;
        int cpu = smp_processor_id();
@@ -74,19 +73,19 @@ static inline void bigsmp_init_apic_ldr(void)
        apic_write(APIC_LDR, val);
 }

-static inline void bigsmp_setup_apic_routing(void)
+static void bigsmp_setup_apic_routing(void)
 {
        printk(KERN_INFO
                "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
                nr_ioapics);
 }

-static inline int bigsmp_apicid_to_node(int logical_apicid)
+static int bigsmp_apicid_to_node(int logical_apicid)
 {
        return apicid_2_node[hard_smp_processor_id()];
 }

-static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
+static int bigsmp_cpu_present_to_apicid(int mps_cpu)
 {
        if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -94,7 +93,7 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
        return BAD_APICID;
 }

-static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
+static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
 {
        return physid_mask_of_physid(phys_apicid);
 }
@@ -107,29 +106,24 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu)
        return cpu_physical_id(cpu);
 }

-static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
 {
        /* For clustered we don't have a good way to do this yet - hack */
        return physids_promote(0xFFL);
 }

-static inline void bigsmp_setup_portio_remap(void)
-{
-}
-
-static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 {
        return 1;
 }

 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
        return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
 }

-static inline unsigned int
-bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask)
 {
        int cpu;
@@ -148,7 +142,7 @@ bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
        return BAD_APICID;
 }

-static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
+static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
 {
        return cpuid_apic >> index_msb;
 }
@@ -158,12 +152,12 @@ static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
        default_send_IPI_mask_sequence_phys(mask, vector);
 }

-static inline void bigsmp_send_IPI_allbutself(int vector)
+static void bigsmp_send_IPI_allbutself(int vector)
 {
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
 }

-static inline void bigsmp_send_IPI_all(int vector)
+static void bigsmp_send_IPI_all(int vector)
 {
        bigsmp_send_IPI_mask(cpu_online_mask, vector);
 }
@@ -256,7 +250,6 @@ struct apic apic_bigsmp = {
        .send_IPI_all = bigsmp_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,

diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 320f2d2e4e54..19588f2770ee 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -163,22 +163,17 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
        return 0;
 }

-static int __init es7000_update_apic(void)
+static int es7000_apic_is_cluster(void)
 {
-       apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
-
        /* MPENTIUMIII */
        if (boot_cpu_data.x86 == 6 &&
-           (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
-               es7000_update_apic_to_cluster();
-               apic->wait_for_init_deassert = NULL;
-               apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
-       }
+           (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
+               return 1;

        return 0;
 }

-static void __init setup_unisys(void)
+static void setup_unisys(void)
 {
        /*
         * Determine the generation of the ES7000 currently running.
@@ -192,14 +187,12 @@ static void __init setup_unisys(void)
        else
                es7000_plat = ES7000_CLASSIC;
        ioapic_renumber_irq = es7000_rename_gsi;
-
-       x86_quirks->update_apic = es7000_update_apic;
 }

 /*
  * Parse the OEM Table:
  */
-static int __init parse_unisys_oem(char *oemptr)
+static int parse_unisys_oem(char *oemptr)
 {
        int i;
        int success = 0;
@@ -261,7 +254,7 @@ static int __init parse_unisys_oem(char *oemptr)
 }

 #ifdef CONFIG_ACPI
-static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
+static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
        struct acpi_table_header *header = NULL;
        struct es7000_oem_table *table;
@@ -292,7 +285,7 @@ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
        return 0;
 }

-static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
        if (!oem_addr)
                return;
@@ -310,8 +303,10 @@ static int es7000_check_dsdt(void)
        return 0;
 }

+static int es7000_acpi_ret;
+
 /* Hook from generic ACPI tables.c */
-static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
        unsigned long oem_addr = 0;
        int check_dsdt;
@@ -332,10 +327,26 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         */
                unmap_unisys_acpi_oem_table(oem_addr);
        }
-       return ret;
+
+       es7000_acpi_ret = ret;
+
+       return ret && !es7000_apic_is_cluster();
 }
+
+static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
+{
+       int ret = es7000_acpi_ret;
+
+       return ret && es7000_apic_is_cluster();
+}
+
 #else /* !CONFIG_ACPI: */
-static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return 0;
+}
+
+static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
 {
        return 0;
 }
@@ -349,8 +360,7 @@ static void es7000_spin(int n)
                rep_nop();
 }

-static int __init
-es7000_mip_write(struct mip_reg *mip_reg)
+static int es7000_mip_write(struct mip_reg *mip_reg)
 {
        int status = 0;
        int spin;
@@ -383,7 +393,7 @@ es7000_mip_write(struct mip_reg *mip_reg)
        return status;
 }

-static void __init es7000_enable_apic_mode(void)
+static void es7000_enable_apic_mode(void)
 {
        struct mip_reg es7000_mip_reg;
        int mip_status;
@@ -416,11 +426,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)

 static void es7000_wait_for_init_deassert(atomic_t *deassert)
 {
-#ifndef CONFIG_ES7000_CLUSTERED_APIC
        while (!atomic_read(deassert))
                cpu_relax();
-#endif
-       return;
 }

 static unsigned int es7000_get_apic_id(unsigned long x)
@@ -565,72 +572,24 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
        return 1;
 }

-static unsigned int
-es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
-{
-       int cpus_found = 0;
-       int num_bits_set;
-       int apicid;
-       int cpu;
-
-       num_bits_set = cpumask_weight(cpumask);
-       /* Return id to all */
-       if (num_bits_set == nr_cpu_ids)
-               return 0xFF;
-       /*
-        * The cpus in the mask must all be on the apic cluster. If are not
-        * on the same apicid cluster return default value of target_cpus():
-        */
-       cpu = cpumask_first(cpumask);
-       apicid = es7000_cpu_to_logical_apicid(cpu);
-
-       while (cpus_found < num_bits_set) {
-               if (cpumask_test_cpu(cpu, cpumask)) {
-                       int new_apicid = es7000_cpu_to_logical_apicid(cpu);
-
-                       if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
-                               WARN(1, "Not a valid mask!");
-
-                               return 0xFF;
-                       }
-                       apicid = new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
 static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-       int cpus_found = 0;
-       int num_bits_set;
-       int apicid;
-       int cpu;
+       unsigned int round = 0;
+       int cpu, uninitialized_var(apicid);

-       num_bits_set = cpus_weight(*cpumask);
-       /* Return id to all */
-       if (num_bits_set == nr_cpu_ids)
-               return es7000_cpu_to_logical_apicid(0);
        /*
-        * The cpus in the mask must all be on the apic cluster. If are not
-        * on the same apicid cluster return default value of target_cpus():
+        * The cpus in the mask must all be on the apic cluster.
         */
-       cpu = first_cpu(*cpumask);
-       apicid = es7000_cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpu_isset(cpu, *cpumask)) {
-                       int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+       for_each_cpu(cpu, cpumask) {
+               int new_apicid = es7000_cpu_to_logical_apicid(cpu);

-                       if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
-                               printk("%s: Not a valid mask!\n", __func__);
+               if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
+                       WARN(1, "Not a valid mask!");

-                               return es7000_cpu_to_logical_apicid(0);
-                       }
-                       apicid = new_apicid;
-                       cpus_found++;
+                       return BAD_APICID;
                }
-               cpu++;
+               apicid = new_apicid;
+               round++;
        }
        return apicid;
 }
@@ -659,37 +618,103 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
        return cpuid_apic >> index_msb;
 }

-void __init es7000_update_apic_to_cluster(void)
-{
-       apic->target_cpus = target_cpus_cluster;
-       apic->irq_delivery_mode = dest_LowestPrio;
-       /* logical delivery broadcast to all procs: */
-       apic->irq_dest_mode = 1;
-
-       apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
-
-       apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
-}
-
 static int probe_es7000(void)
 {
        /* probed later in mptable/ACPI hooks */
        return 0;
 }

-static __init int
-es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+static int es7000_mps_ret;
+static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem,
+                               char *productid)
 {
+       int ret = 0;
+
        if (mpc->oemptr) {
                struct mpc_oemtable *oem_table =
                        (struct mpc_oemtable *)mpc->oemptr;

                if (!strncmp(oem, "UNISYS", 6))
-                       return parse_unisys_oem((char *)oem_table);
+                       ret = parse_unisys_oem((char *)oem_table);
        }
-       return 0;
+
+       es7000_mps_ret = ret;
+
+       return ret && !es7000_apic_is_cluster();
 }

+static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
+                                       char *productid)
+{
+       int ret = es7000_mps_ret;
+
+       return ret && es7000_apic_is_cluster();
+}
+
+struct apic apic_es7000_cluster = {
+
+       .name = "es7000",
+       .probe = probe_es7000,
+       .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster,
+       .apic_id_registered = es7000_apic_id_registered,
+
+       .irq_delivery_mode = dest_LowestPrio,
+       /* logical delivery broadcast to all procs: */
+       .irq_dest_mode = 1,
+
+       .target_cpus = target_cpus_cluster,
+       .disable_esr = 1,
+       .dest_logical = 0,
+       .check_apicid_used = es7000_check_apicid_used,
+       .check_apicid_present = es7000_check_apicid_present,
+
+       .vector_allocation_domain = es7000_vector_allocation_domain,
+       .init_apic_ldr = es7000_init_apic_ldr_cluster,
+
+       .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
+       .setup_apic_routing = es7000_setup_apic_routing,
+       .multi_timer_check = NULL,
+       .apicid_to_node = es7000_apicid_to_node,
+       .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
+       .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
+       .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
+       .setup_portio_remap = NULL,
+       .check_phys_apicid_present = es7000_check_phys_apicid_present,
+       .enable_apic_mode = es7000_enable_apic_mode,
+       .phys_pkg_id = es7000_phys_pkg_id,
+       .mps_oem_check = es7000_mps_oem_check_cluster,
+
+       .get_apic_id = es7000_get_apic_id,
+       .set_apic_id = NULL,
+       .apic_id_mask = 0xFF << 24,
+
+       .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask = es7000_send_IPI_mask,
+       .send_IPI_mask_allbutself = NULL,
+       .send_IPI_allbutself = es7000_send_IPI_allbutself,
+       .send_IPI_all = es7000_send_IPI_all,
+       .send_IPI_self = default_send_IPI_self,
+
+       .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip,
+
+       .trampoline_phys_low = 0x467,
+       .trampoline_phys_high = 0x469,
+
+       .wait_for_init_deassert = NULL,
+
+       /* Nothing to do for most platforms, since cleared by the INIT cycle: */
+       .smp_callin_clear_local_apic = NULL,
+       .inquire_remote_apic = default_inquire_remote_apic,
+
+       .read = native_apic_mem_read,
+       .write = native_apic_mem_write,
+       .icr_read = native_apic_icr_read,
+       .icr_write = native_apic_icr_write,
+       .wait_icr_idle = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+};

 struct apic apic_es7000 = {

@@ -737,8 +762,6 @@ struct apic apic_es7000 = {
        .send_IPI_all = es7000_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

-       .wakeup_cpu = NULL,
-
        .trampoline_phys_low = 0x467,
        .trampoline_phys_high = 0x469,

diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d9d6d61eed82..ba2fc6465534 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -69,7 +69,7 @@ struct mpc_trans {
 /* x86_quirks member */
 static int mpc_record;

-static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY];
+static struct mpc_trans *translation_table[MAX_MPC_ENTRY];

 int mp_bus_id_to_node[MAX_MP_BUSSES];
 int mp_bus_id_to_local[MAX_MP_BUSSES];
@@ -256,13 +256,6 @@ static int __init numaq_setup_ioapic_ids(void)
        return 1;
 }

-static int __init numaq_update_apic(void)
-{
-       apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
-
-       return 0;
-}
-
 static struct x86_quirks numaq_x86_quirks __initdata = {
        .arch_pre_time_init = numaq_pre_time_init,
        .arch_time_init = NULL,
@@ -278,7 +271,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
        .mpc_oem_pci_bus = mpc_oem_pci_bus,
        .smp_read_mpc_oem = smp_read_mpc_oem,
        .setup_ioapic_ids = numaq_setup_ioapic_ids,
-       .update_apic = numaq_update_apic,
 };

 static __init void early_check_numaq(void)
@@ -546,7 +538,7 @@ struct apic apic_numaq = {
        .send_IPI_all = numaq_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

-       .wakeup_cpu = NULL,
+       .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi,
        .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,

diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 3a730fa574bb..141c99a1c264 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -138,7 +138,6 @@ struct apic apic_default = {
        .send_IPI_all = default_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,

@@ -159,6 +158,7 @@ extern struct apic apic_numaq;
 extern struct apic apic_summit;
 extern struct apic apic_bigsmp;
 extern struct apic apic_es7000;
+extern struct apic apic_es7000_cluster;
 extern struct apic apic_default;

 struct apic *apic = &apic_default;
@@ -176,6 +176,7 @@ static struct apic *apic_probe[] __initdata = {
 #endif
 #ifdef CONFIG_X86_ES7000
        &apic_es7000,
+       &apic_es7000_cluster,
 #endif
        &apic_default,  /* must be last */
        NULL,
@@ -197,9 +198,6 @@ static int __init parse_apic(char *arg)
                }
        }

-       if (x86_quirks->update_apic)
-               x86_quirks->update_apic();
-
        /* Parsed again by __setup for debug/verbose */
        return 0;
 }
@@ -218,8 +216,6 @@ void __init generic_bigsmp_probe(void)
        if (!cmdline_apic && apic == &apic_default) {
                if (apic_bigsmp.probe()) {
                        apic = &apic_bigsmp;
-                       if (x86_quirks->update_apic)
-                               x86_quirks->update_apic();
                        printk(KERN_INFO "Overriding APIC driver with %s\n",
                               apic->name);
                }
@@ -240,9 +236,6 @@ void __init generic_apic_probe(void)
                /* Not visible without early console */
                if (!apic_probe[i])
                        panic("Didn't find an APIC driver");
-
-               if (x86_quirks->update_apic)
-                       x86_quirks->update_apic();
        }
        printk(KERN_INFO "Using APIC driver %s\n", apic->name);
 }
@@ -262,8 +255,6 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)

                if (!cmdline_apic) {
                        apic = apic_probe[i];
-                       if (x86_quirks->update_apic)
-                               x86_quirks->update_apic();
                        printk(KERN_INFO "Switched to APIC driver `%s'.\n",
                               apic->name);
                }
@@ -284,8 +275,6 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)

                if (!cmdline_apic) {
                        apic = apic_probe[i];
-                       if (x86_quirks->update_apic)
-                               x86_quirks->update_apic();
                        printk(KERN_INFO "Switched to APIC driver `%s'.\n",
                               apic->name);
                }
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index e7c163661c77..8d7748efe6a8 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -68,9 +68,6 @@ void __init default_setup_apic_routing(void)
                apic = &apic_physflat;
                printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
        }
-
-       if (x86_quirks->update_apic)
-               x86_quirks->update_apic();
 }

 /* Same for both flat and physical. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 32838b57a945..aac52fa873ff 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -77,9 +77,9 @@ static void summit_send_IPI_all(int vector)
 extern int use_cyclone;

 #ifdef CONFIG_X86_SUMMIT_NUMA
-extern void setup_summit(void);
+static void setup_summit(void);
 #else
-#define setup_summit() {}
+static inline void setup_summit(void) {}
 #endif

 static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
@@ -291,33 +291,21 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)

 static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-       int cpus_found = 0;
-       int num_bits_set;
-       int apicid;
-       int cpu;
+       unsigned int round = 0;
+       int cpu, apicid = 0;

-       num_bits_set = cpus_weight(*cpumask);
-       if (num_bits_set >= nr_cpu_ids)
-               return BAD_APICID;
        /*
         * The cpus in the mask must all be on the apic cluster.
         */
-       cpu = first_cpu(*cpumask);
-       apicid = summit_cpu_to_logical_apicid(cpu);
-
-       while (cpus_found < num_bits_set) {
-               if (cpu_isset(cpu, *cpumask)) {
-                       int new_apicid = summit_cpu_to_logical_apicid(cpu);
+       for_each_cpu(cpu, cpumask) {
+               int new_apicid = summit_cpu_to_logical_apicid(cpu);

-                       if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
-                               printk("%s: Not a valid mask!\n", __func__);
-
-                               return BAD_APICID;
-                       }
-                       apicid = apicid | new_apicid;
-                       cpus_found++;
+               if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
+                       printk("%s: Not a valid mask!\n", __func__);
+                       return BAD_APICID;
                }
-               cpu++;
+               apicid |= new_apicid;
+               round++;
        }
        return apicid;
 }
@@ -372,15 +360,15 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
 }

 #ifdef CONFIG_X86_SUMMIT_NUMA
-static struct rio_table_hdr *rio_table_hdr __initdata;
-static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
-static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;
+static struct rio_table_hdr *rio_table_hdr;
+static struct scal_detail *scal_devs[MAX_NUMNODES];
+static struct rio_detail *rio_devs[MAX_NUMNODES*4];

 #ifndef CONFIG_X86_NUMAQ
-static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata;
+static int mp_bus_id_to_node[MAX_MP_BUSSES];
 #endif

-static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
+static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
 {
        int twister = 0, node = 0;
        int i, bus, num_buses;
@@ -442,7 +430,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
        return bus;
 }

-static int __init build_detail_arrays(void)
+static int build_detail_arrays(void)
 {
        unsigned long ptr;
        int i, scal_detail_size, rio_detail_size;
@@ -476,7 +464,7 @@ static int __init build_detail_arrays(void)
        return 1;
 }

-void __init setup_summit(void)
+void setup_summit(void)
 {
        unsigned long ptr;
        unsigned short offset;
@@ -574,7 +562,6 @@ struct apic apic_summit = {
        .send_IPI_all = summit_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,

diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 354b9c45601d..8fb87b6dd633 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -224,7 +224,6 @@ struct apic apic_x2apic_cluster = {
        .send_IPI_all = x2apic_send_IPI_all,
        .send_IPI_self = x2apic_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 5bcb174409bc..23625b9f98b2 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -213,7 +213,6 @@ struct apic apic_x2apic_phys = {
        .send_IPI_all = x2apic_send_IPI_all,
        .send_IPI_self = x2apic_send_IPI_self,

-       .wakeup_cpu = NULL,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 20b4ad07c3a1..1bd6da1f8fad 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -7,28 +7,28 @@
  *
  * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/hardirq.h>
+#include <linux/proc_fs.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
 #include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <asm/current.h>
-#include <asm/smp.h>
-#include <asm/apic.h>
-#include <asm/ipi.h>
-#include <asm/pgtable.h>
-#include <asm/uv/uv.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+#include <asm/smp.h>

 DEFINE_PER_CPU(int, x2apic_extra_bits);

@@ -91,24 +91,28 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
        cpumask_set_cpu(cpu, retmask);
 }

-int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
+#ifdef CONFIG_SMP
        unsigned long val;
        int pnode;

        pnode = uv_apicid_to_pnode(phys_apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
-             (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+             ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
              APIC_DM_INIT;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        mdelay(10);

        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
-             (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+             ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
              APIC_DM_STARTUP;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+
+       atomic_set(&init_deasserted, 1);
+#endif
        return 0;
 }

@@ -285,7 +289,7 @@ struct apic apic_x2apic_uv_x = {
        .send_IPI_all = uv_send_IPI_all,
        .send_IPI_self = uv_send_IPI_self,

-       .wakeup_cpu = NULL,
+       .wakeup_secondary_cpu = uv_wakeup_secondary,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
@@ -365,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift,
        paddr = base << shift;
        bytes = (1UL << shift) * (max_pnode + 1);
        printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
-                                               paddr + bytes);
+              paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
@@ -528,7 +532,7 @@ late_initcall(uv_init_heartbeat);

 /*
  * Called on each cpu to initialize the per_cpu UV data area.
- * ZZZ hotplug not supported yet
+ * FIXME: hotplug not supported yet
  */
 void __cpuinit uv_cpu_init(void)
 {
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4b1c319d30c3..22590cf688ae 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (!data)
                return -ENOMEM;

-       data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+       data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
        per_cpu(drv_data, cpu) = data;

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 01b1244ef1c0..d67e0e48bc2d 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -7,11 +7,10 @@
 /*
  * Get CPU information for use by the procfs.
  */
-#ifdef CONFIG_X86_32
 static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
                              unsigned int cpu)
 {
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        if (c->x86_max_cores * smp_num_siblings > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 #endif
 }

+#ifdef CONFIG_X86_32
 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
        /*
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
                   c->wp_works_ok ? "yes" : "no");
 }
 #else
-static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
-                             unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-       if (c->x86_max_cores * smp_num_siblings > 1) {
-               seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n",
-                          cpus_weight(per_cpu(cpu_core_map, cpu)));
-               seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-               seq_printf(m, "apicid\t\t: %d\n", c->apicid);
-               seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
-       }
-#endif
-}
-
 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
        seq_printf(m,
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index b205272ad394..1736acc4d7aa 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -469,7 +469,7 @@ void __init efi_enter_virtual_mode(void)
        efi_memory_desc_t *md;
        efi_status_t status;
        unsigned long size;
-       u64 end, systab, addr, npages;
+       u64 end, systab, addr, npages, end_pfn;
        void *p, *va;

        efi.systab = NULL;
@@ -481,7 +481,10 @@ void __init efi_enter_virtual_mode(void)
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;

-               if (PFN_UP(end) <= max_low_pfn_mapped)
+               end_pfn = PFN_UP(end);
+               if (end_pfn <= max_low_pfn_mapped
+                   || (end_pfn > (1UL << (32 - PAGE_SHIFT))
+                       && end_pfn <= max_pfn_mapped))
                        va = __va(md->phys_addr);
                else
                        va = efi_ioremap(md->phys_addr, size);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index a4ee29127fdf..22c3b7828c50 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -100,24 +100,11 @@ void __init efi_call_phys_epilog(void)

 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-       static unsigned pages_mapped __initdata;
-       unsigned i, pages;
-       unsigned long offset;
+       unsigned long last_map_pfn;

-       pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-
-       if (pages_mapped + pages > MAX_EFI_IO_PAGES)
+       last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+       if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
                return NULL;

-       for (i = 0; i < pages; i++) {
-               __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-                            phys_addr, PAGE_KERNEL);
-               phys_addr += PAGE_SIZE;
-               pages_mapped++;
-       }
-
-       return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-                                            (pages_mapped - pages)) + offset;
+       return (void __iomem *)__va(phys_addr);
 }
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b0f61f0dcd0a..f2f8540a7f3d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk)
 #ifdef CONFIG_X86_32
        if (!HAVE_HWFP) {
                memset(tsk->thread.xstate, 0, xstate_size);
-               finit();
+               finit_task(tsk);
                set_stopped_child_used_math(tsk);
                return 0;
        }
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index e41980a373ab..99c4d308f16b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)

        t->io_bitmap_max = bytes;

-#ifdef CONFIG_X86_32
-       /*
-        * Sets the lazy trigger so that the next I/O operation will
-        * reload the correct bitmap.
-        * Reset the owner so that a process switch will not set
-        * tss->io_bitmap_base to IO_BITMAP_OFFSET.
-        */
-       tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-       tss->io_bitmap_owner = NULL;
-#else
        /* Update the TSS: */
        memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
-#endif

        put_cpu();

diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9dc6b2b24275..3b09634a5153 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
+#include <linux/percpu.h>

 #include <asm/apic.h>

@@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { }
 union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE/sizeof(u32)];
-};
+} __attribute__((aligned(PAGE_SIZE)));

-static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
-static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
+static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);

 static void call_on_stack(void *func, void *stack)
 {
@@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
-       irqctx = hardirq_ctx[smp_processor_id()];
+       irqctx = __get_cpu_var(hardirq_ctx);

        /*
         * this is where we switch to the IRQ stack. However, if we are
@@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu)
 {
        union irq_ctx *irqctx;

-       if (hardirq_ctx[cpu])
+       if (per_cpu(hardirq_ctx, cpu))
                return;

-       irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+       irqctx = &per_cpu(hardirq_stack, cpu);
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

-       hardirq_ctx[cpu] = irqctx;
+       per_cpu(hardirq_ctx, cpu) = irqctx;

-       irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
+       irqctx = &per_cpu(softirq_stack, cpu);
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

-       softirq_ctx[cpu] = irqctx;
+       per_cpu(softirq_ctx, cpu) = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-              cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+              cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }

 void irq_ctx_exit(int cpu)
 {
-       hardirq_ctx[cpu] = NULL;
+       per_cpu(hardirq_ctx, cpu) = NULL;
 }

 asmlinkage void do_softirq(void)
@@ -169,7 +170,7 @@ asmlinkage void do_softirq(void)

        if (local_softirq_pending()) {
                curctx = current_thread_info();
-               irqctx = softirq_ctx[smp_processor_id()];
+               irqctx = __get_cpu_var(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 87b69d4fac16..6afa5232dbb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/idle.h>
 #include <linux/smp.h>
+#include <linux/prctl.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/module.h>
@@ -11,6 +11,9 @@
 #include <linux/ftrace.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>

 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
 }

 /*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+       struct task_struct *me = current;
+       struct thread_struct *t = &me->thread;
+
+       if (me->thread.io_bitmap_ptr) {
+               struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+               kfree(t->io_bitmap_ptr);
+               t->io_bitmap_ptr = NULL;
+               clear_thread_flag(TIF_IO_BITMAP);
+               /*
+                * Careful, clear this in the TSS too:
+                */
+               memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+               t->io_bitmap_max = 0;
+               put_cpu();
+       }
+
+       ds_exit_thread(current);
+}
+
+void flush_thread(void)
+{
+       struct task_struct *tsk = current;
+
+#ifdef CONFIG_X86_64
+       if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+               clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+               if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+                       clear_tsk_thread_flag(tsk, TIF_IA32);
+               } else {
+                       set_tsk_thread_flag(tsk, TIF_IA32);
+                       current_thread_info()->status |= TS_COMPAT;
+               }
+       }
+#endif
+
+       clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+       tsk->thread.debugreg0 = 0;
+       tsk->thread.debugreg1 = 0;
+       tsk->thread.debugreg2 = 0;
+       tsk->thread.debugreg3 = 0;
+       tsk->thread.debugreg6 = 0;
+       tsk->thread.debugreg7 = 0;
+       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+       /*
+        * Forget coprocessor state..
+        */
+       tsk->fpu_counter = 0;
+       clear_fpu(tsk);
+       clear_used_math();
+}
+
+static void hard_disable_TSC(void)
+{
+       write_cr4(read_cr4() | X86_CR4_TSD);
+}
+
+void disable_TSC(void)
+{
+       preempt_disable();
+       if (!test_and_set_thread_flag(TIF_NOTSC))
+               /*
+                * Must flip the CPU state synchronously with
+                * TIF_NOTSC in the current running context.
+                */
+               hard_disable_TSC();
+       preempt_enable();
+}
+
+static void hard_enable_TSC(void)
+{
+       write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+
+static void enable_TSC(void)
+{
+       preempt_disable();
+       if (test_and_clear_thread_flag(TIF_NOTSC))
+               /*
+                * Must flip the CPU state synchronously with
+                * TIF_NOTSC in the current running context.
+                */
+               hard_enable_TSC();
+       preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+       unsigned int val;
+
+       if (test_thread_flag(TIF_NOTSC))
+               val = PR_TSC_SIGSEGV;
+       else
+               val = PR_TSC_ENABLE;
+
+       return put_user(val, (unsigned int __user *)adr);
163 | } | ||
164 | |||
165 | int set_tsc_mode(unsigned int val) | ||
166 | { | ||
167 | if (val == PR_TSC_SIGSEGV) | ||
168 | disable_TSC(); | ||
169 | else if (val == PR_TSC_ENABLE) | ||
170 | enable_TSC(); | ||
171 | else | ||
172 | return -EINVAL; | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
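
get_tsc_mode() and set_tsc_mode() above are the kernel side of the PR_GET_TSC/PR_SET_TSC prctl interface. A minimal user-space sketch, not part of this patch, of how a task would exercise them:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int mode;

	/* PR_GET_TSC stores PR_TSC_ENABLE or PR_TSC_SIGSEGV into mode */
	if (prctl(PR_GET_TSC, &mode, 0, 0, 0) == 0)
		printf("tsc mode: %d\n", mode);

	/* after this, executing RDTSC in this task raises SIGSEGV */
	return prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
}
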
177 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
178 | struct tss_struct *tss) | ||
179 | { | ||
180 | struct thread_struct *prev, *next; | ||
181 | |||
182 | prev = &prev_p->thread; | ||
183 | next = &next_p->thread; | ||
184 | |||
185 | if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || | ||
186 | test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) | ||
187 | ds_switch_to(prev_p, next_p); | ||
188 | else if (next->debugctlmsr != prev->debugctlmsr) | ||
189 | update_debugctlmsr(next->debugctlmsr); | ||
190 | |||
191 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
192 | set_debugreg(next->debugreg0, 0); | ||
193 | set_debugreg(next->debugreg1, 1); | ||
194 | set_debugreg(next->debugreg2, 2); | ||
195 | set_debugreg(next->debugreg3, 3); | ||
196 | /* no 4 and 5 */ | ||
197 | set_debugreg(next->debugreg6, 6); | ||
198 | set_debugreg(next->debugreg7, 7); | ||
199 | } | ||
200 | |||
201 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | ||
202 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | ||
203 | /* prev and next are different */ | ||
204 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | ||
205 | hard_disable_TSC(); | ||
206 | else | ||
207 | hard_enable_TSC(); | ||
208 | } | ||
209 | |||
210 | if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | ||
211 | /* | ||
212 | * Copy the relevant range of the IO bitmap. | ||
213 | * Normally this is 128 bytes or less: | ||
214 | */ | ||
215 | memcpy(tss->io_bitmap, next->io_bitmap_ptr, | ||
216 | max(prev->io_bitmap_max, next->io_bitmap_max)); | ||
217 | } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { | ||
218 | /* | ||
219 | * Clear any possible leftover bits: | ||
220 | */ | ||
221 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | ||
222 | } | ||
223 | } | ||
224 | |||
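
The '^' in the TIF_NOTSC test above works because test_tsk_thread_flag() returns 0 or 1, so the XOR is non-zero exactly when prev and next disagree and CR4.TSD actually needs flipping. A trivial stand-alone check of that reasoning (sketch only):

#include <assert.h>

int main(void)
{
	int prev, next;

	for (prev = 0; prev <= 1; prev++)
		for (next = 0; next <= 1; next++)
			assert((prev ^ next) == (prev != next));
	return 0;
}
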
225 | int sys_fork(struct pt_regs *regs) | ||
226 | { | ||
227 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * This is trivial, and on the face of it looks like it | ||
232 | * could equally well be done in user mode. | ||
233 | * | ||
234 | * Not so, for quite unobvious reasons - register pressure. | ||
235 | * In user mode vfork() cannot have a stack frame, and if | ||
236 | * done by calling the "clone()" system call directly, you | ||
237 | * do not have enough call-clobbered registers to hold all | ||
238 | * the information you need. | ||
239 | */ | ||
240 | int sys_vfork(struct pt_regs *regs) | ||
241 | { | ||
242 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, | ||
243 | NULL, NULL); | ||
244 | } | ||
245 | |||
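
The comment above explains why vfork() must stay a real system call. For context, the intended user-space pattern (a sketch, not part of this patch) is an immediate exec or _exit in the child, which borrows the parent's address space until then:

#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>

int main(void)
{
	pid_t pid = vfork();

	if (pid == 0) {
		/* child: exec right away; must not return or unwind the stack */
		execl("/bin/true", "true", (char *)NULL);
		_exit(127);
	}
	return pid < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
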
246 | |||
247 | /* | ||
59 | * Idle related variables and functions | 248 | * Idle related variables and functions |
60 | */ | 249 | */ |
61 | unsigned long boot_option_idle_override = 0; | 250 | unsigned long boot_option_idle_override = 0; |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 646da41a620a..14014d766cad 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -230,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | |||
230 | } | 230 | } |
231 | EXPORT_SYMBOL(kernel_thread); | 231 | EXPORT_SYMBOL(kernel_thread); |
232 | 232 | ||
233 | /* | ||
234 | * Free current thread data structures etc.. | ||
235 | */ | ||
236 | void exit_thread(void) | ||
237 | { | ||
238 | /* The process may have allocated an io port bitmap... nuke it. */ | ||
239 | if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { | ||
240 | struct task_struct *tsk = current; | ||
241 | struct thread_struct *t = &tsk->thread; | ||
242 | int cpu = get_cpu(); | ||
243 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | ||
244 | |||
245 | kfree(t->io_bitmap_ptr); | ||
246 | t->io_bitmap_ptr = NULL; | ||
247 | clear_thread_flag(TIF_IO_BITMAP); | ||
248 | /* | ||
249 | * Careful, clear this in the TSS too: | ||
250 | */ | ||
251 | memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); | ||
252 | t->io_bitmap_max = 0; | ||
253 | tss->io_bitmap_owner = NULL; | ||
254 | tss->io_bitmap_max = 0; | ||
255 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; | ||
256 | put_cpu(); | ||
257 | } | ||
258 | |||
259 | ds_exit_thread(current); | ||
260 | } | ||
261 | |||
262 | void flush_thread(void) | ||
263 | { | ||
264 | struct task_struct *tsk = current; | ||
265 | |||
266 | tsk->thread.debugreg0 = 0; | ||
267 | tsk->thread.debugreg1 = 0; | ||
268 | tsk->thread.debugreg2 = 0; | ||
269 | tsk->thread.debugreg3 = 0; | ||
270 | tsk->thread.debugreg6 = 0; | ||
271 | tsk->thread.debugreg7 = 0; | ||
272 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | ||
273 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
274 | /* | ||
275 | * Forget coprocessor state.. | ||
276 | */ | ||
277 | tsk->fpu_counter = 0; | ||
278 | clear_fpu(tsk); | ||
279 | clear_used_math(); | ||
280 | } | ||
281 | |||
282 | void release_thread(struct task_struct *dead_task) | 233 | void release_thread(struct task_struct *dead_task) |
283 | { | 234 | { |
284 | BUG_ON(dead_task->mm); | 235 | BUG_ON(dead_task->mm); |
@@ -366,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | |||
366 | } | 317 | } |
367 | EXPORT_SYMBOL_GPL(start_thread); | 318 | EXPORT_SYMBOL_GPL(start_thread); |
368 | 319 | ||
369 | static void hard_disable_TSC(void) | ||
370 | { | ||
371 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
372 | } | ||
373 | |||
374 | void disable_TSC(void) | ||
375 | { | ||
376 | preempt_disable(); | ||
377 | if (!test_and_set_thread_flag(TIF_NOTSC)) | ||
378 | /* | ||
379 | * Must flip the CPU state synchronously with | ||
380 | * TIF_NOTSC in the current running context. | ||
381 | */ | ||
382 | hard_disable_TSC(); | ||
383 | preempt_enable(); | ||
384 | } | ||
385 | |||
386 | static void hard_enable_TSC(void) | ||
387 | { | ||
388 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
389 | } | ||
390 | |||
391 | static void enable_TSC(void) | ||
392 | { | ||
393 | preempt_disable(); | ||
394 | if (test_and_clear_thread_flag(TIF_NOTSC)) | ||
395 | /* | ||
396 | * Must flip the CPU state synchronously with | ||
397 | * TIF_NOTSC in the current running context. | ||
398 | */ | ||
399 | hard_enable_TSC(); | ||
400 | preempt_enable(); | ||
401 | } | ||
402 | |||
403 | int get_tsc_mode(unsigned long adr) | ||
404 | { | ||
405 | unsigned int val; | ||
406 | |||
407 | if (test_thread_flag(TIF_NOTSC)) | ||
408 | val = PR_TSC_SIGSEGV; | ||
409 | else | ||
410 | val = PR_TSC_ENABLE; | ||
411 | |||
412 | return put_user(val, (unsigned int __user *)adr); | ||
413 | } | ||
414 | |||
415 | int set_tsc_mode(unsigned int val) | ||
416 | { | ||
417 | if (val == PR_TSC_SIGSEGV) | ||
418 | disable_TSC(); | ||
419 | else if (val == PR_TSC_ENABLE) | ||
420 | enable_TSC(); | ||
421 | else | ||
422 | return -EINVAL; | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | static noinline void | ||
428 | __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
429 | struct tss_struct *tss) | ||
430 | { | ||
431 | struct thread_struct *prev, *next; | ||
432 | |||
433 | prev = &prev_p->thread; | ||
434 | next = &next_p->thread; | ||
435 | |||
436 | if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || | ||
437 | test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) | ||
438 | ds_switch_to(prev_p, next_p); | ||
439 | else if (next->debugctlmsr != prev->debugctlmsr) | ||
440 | update_debugctlmsr(next->debugctlmsr); | ||
441 | |||
442 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
443 | set_debugreg(next->debugreg0, 0); | ||
444 | set_debugreg(next->debugreg1, 1); | ||
445 | set_debugreg(next->debugreg2, 2); | ||
446 | set_debugreg(next->debugreg3, 3); | ||
447 | /* no 4 and 5 */ | ||
448 | set_debugreg(next->debugreg6, 6); | ||
449 | set_debugreg(next->debugreg7, 7); | ||
450 | } | ||
451 | |||
452 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | ||
453 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | ||
454 | /* prev and next are different */ | ||
455 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | ||
456 | hard_disable_TSC(); | ||
457 | else | ||
458 | hard_enable_TSC(); | ||
459 | } | ||
460 | |||
461 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | ||
462 | /* | ||
463 | * Disable the bitmap via an invalid offset. We still cache | ||
464 | * the previous bitmap owner and the IO bitmap contents: | ||
465 | */ | ||
466 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; | ||
467 | return; | ||
468 | } | ||
469 | |||
470 | if (likely(next == tss->io_bitmap_owner)) { | ||
471 | /* | ||
472 | * Previous owner of the bitmap (hence the bitmap content) | ||
473 | * matches the next task, we don't have to do anything but | ||
474 | * set a valid offset in the TSS: | ||
475 | */ | ||
476 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; | ||
477 | return; | ||
478 | } | ||
479 | /* | ||
480 | * Lazy TSS's I/O bitmap copy. We set an invalid offset here | ||
481 | * and let the task get a GPF if an I/O instruction is | ||
482 | * performed. The GPF handler verifies that the faulting task | ||
483 | * has a valid I/O bitmap and, if true, does the real copy and | ||
484 | * restarts the instruction. This saves us redundant copies | ||
485 | * when the currently switched task does not perform any I/O | ||
486 | * during its timeslice. | ||
487 | */ | ||
488 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; | ||
489 | } | ||
490 | 320 | ||
491 | /* | 321 | /* |
492 | * switch_to(x,y) should switch tasks from x to y. | 322 |
@@ -600,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
600 | return prev_p; | 430 | return prev_p; |
601 | } | 431 | } |
602 | 432 | ||
603 | int sys_fork(struct pt_regs *regs) | ||
604 | { | ||
605 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
606 | } | ||
607 | |||
608 | int sys_clone(struct pt_regs *regs) | 433 | int sys_clone(struct pt_regs *regs) |
609 | { | 434 | { |
610 | unsigned long clone_flags; | 435 | unsigned long clone_flags; |
@@ -621,21 +446,6 @@ int sys_clone(struct pt_regs *regs) | |||
621 | } | 446 | } |
622 | 447 | ||
623 | /* | 448 | /* |
624 | * This is trivial, and on the face of it looks like it | ||
625 | * could equally well be done in user mode. | ||
626 | * | ||
627 | * Not so, for quite unobvious reasons - register pressure. | ||
628 | * In user mode vfork() cannot have a stack frame, and if | ||
629 | * done by calling the "clone()" system call directly, you | ||
630 | * do not have enough call-clobbered registers to hold all | ||
631 | * the information you need. | ||
632 | */ | ||
633 | int sys_vfork(struct pt_regs *regs) | ||
634 | { | ||
635 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
636 | } | ||
637 | |||
638 | /* | ||
639 | * sys_execve() executes a new program. | 449 | * sys_execve() executes a new program. |
640 | */ | 450 | */ |
641 | int sys_execve(struct pt_regs *regs) | 451 | int sys_execve(struct pt_regs *regs) |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 836ef6575f01..abb7e6a7f0c6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs) | |||
237 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | 237 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* | ||
241 | * Free current thread data structures etc.. | ||
242 | */ | ||
243 | void exit_thread(void) | ||
244 | { | ||
245 | struct task_struct *me = current; | ||
246 | struct thread_struct *t = &me->thread; | ||
247 | |||
248 | if (me->thread.io_bitmap_ptr) { | ||
249 | struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); | ||
250 | |||
251 | kfree(t->io_bitmap_ptr); | ||
252 | t->io_bitmap_ptr = NULL; | ||
253 | clear_thread_flag(TIF_IO_BITMAP); | ||
254 | /* | ||
255 | * Careful, clear this in the TSS too: | ||
256 | */ | ||
257 | memset(tss->io_bitmap, 0xff, t->io_bitmap_max); | ||
258 | t->io_bitmap_max = 0; | ||
259 | put_cpu(); | ||
260 | } | ||
261 | |||
262 | ds_exit_thread(current); | ||
263 | } | ||
264 | |||
265 | void flush_thread(void) | ||
266 | { | ||
267 | struct task_struct *tsk = current; | ||
268 | |||
269 | if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { | ||
270 | clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); | ||
271 | if (test_tsk_thread_flag(tsk, TIF_IA32)) { | ||
272 | clear_tsk_thread_flag(tsk, TIF_IA32); | ||
273 | } else { | ||
274 | set_tsk_thread_flag(tsk, TIF_IA32); | ||
275 | current_thread_info()->status |= TS_COMPAT; | ||
276 | } | ||
277 | } | ||
278 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
279 | |||
280 | tsk->thread.debugreg0 = 0; | ||
281 | tsk->thread.debugreg1 = 0; | ||
282 | tsk->thread.debugreg2 = 0; | ||
283 | tsk->thread.debugreg3 = 0; | ||
284 | tsk->thread.debugreg6 = 0; | ||
285 | tsk->thread.debugreg7 = 0; | ||
286 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | ||
287 | /* | ||
288 | * Forget coprocessor state.. | ||
289 | */ | ||
290 | tsk->fpu_counter = 0; | ||
291 | clear_fpu(tsk); | ||
292 | clear_used_math(); | ||
293 | } | ||
294 | |||
295 | void release_thread(struct task_struct *dead_task) | 240 | void release_thread(struct task_struct *dead_task) |
296 | { | 241 | { |
297 | if (dead_task->mm) { | 242 | if (dead_task->mm) { |
@@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | |||
425 | } | 370 | } |
426 | EXPORT_SYMBOL_GPL(start_thread); | 371 | EXPORT_SYMBOL_GPL(start_thread); |
427 | 372 | ||
428 | static void hard_disable_TSC(void) | ||
429 | { | ||
430 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
431 | } | ||
432 | |||
433 | void disable_TSC(void) | ||
434 | { | ||
435 | preempt_disable(); | ||
436 | if (!test_and_set_thread_flag(TIF_NOTSC)) | ||
437 | /* | ||
438 | * Must flip the CPU state synchronously with | ||
439 | * TIF_NOTSC in the current running context. | ||
440 | */ | ||
441 | hard_disable_TSC(); | ||
442 | preempt_enable(); | ||
443 | } | ||
444 | |||
445 | static void hard_enable_TSC(void) | ||
446 | { | ||
447 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
448 | } | ||
449 | |||
450 | static void enable_TSC(void) | ||
451 | { | ||
452 | preempt_disable(); | ||
453 | if (test_and_clear_thread_flag(TIF_NOTSC)) | ||
454 | /* | ||
455 | * Must flip the CPU state synchronously with | ||
456 | * TIF_NOTSC in the current running context. | ||
457 | */ | ||
458 | hard_enable_TSC(); | ||
459 | preempt_enable(); | ||
460 | } | ||
461 | |||
462 | int get_tsc_mode(unsigned long adr) | ||
463 | { | ||
464 | unsigned int val; | ||
465 | |||
466 | if (test_thread_flag(TIF_NOTSC)) | ||
467 | val = PR_TSC_SIGSEGV; | ||
468 | else | ||
469 | val = PR_TSC_ENABLE; | ||
470 | |||
471 | return put_user(val, (unsigned int __user *)adr); | ||
472 | } | ||
473 | |||
474 | int set_tsc_mode(unsigned int val) | ||
475 | { | ||
476 | if (val == PR_TSC_SIGSEGV) | ||
477 | disable_TSC(); | ||
478 | else if (val == PR_TSC_ENABLE) | ||
479 | enable_TSC(); | ||
480 | else | ||
481 | return -EINVAL; | ||
482 | |||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * This special macro can be used to load a debugging register | ||
488 | */ | ||
489 | #define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r) | ||
490 | |||
491 | static inline void __switch_to_xtra(struct task_struct *prev_p, | ||
492 | struct task_struct *next_p, | ||
493 | struct tss_struct *tss) | ||
494 | { | ||
495 | struct thread_struct *prev, *next; | ||
496 | |||
497 | prev = &prev_p->thread; | ||
498 | next = &next_p->thread; | ||
499 | |||
500 | if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || | ||
501 | test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) | ||
502 | ds_switch_to(prev_p, next_p); | ||
503 | else if (next->debugctlmsr != prev->debugctlmsr) | ||
504 | update_debugctlmsr(next->debugctlmsr); | ||
505 | |||
506 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
507 | loaddebug(next, 0); | ||
508 | loaddebug(next, 1); | ||
509 | loaddebug(next, 2); | ||
510 | loaddebug(next, 3); | ||
511 | /* no 4 and 5 */ | ||
512 | loaddebug(next, 6); | ||
513 | loaddebug(next, 7); | ||
514 | } | ||
515 | |||
516 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | ||
517 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | ||
518 | /* prev and next are different */ | ||
519 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | ||
520 | hard_disable_TSC(); | ||
521 | else | ||
522 | hard_enable_TSC(); | ||
523 | } | ||
524 | |||
525 | if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | ||
526 | /* | ||
527 | * Copy the relevant range of the IO bitmap. | ||
528 | * Normally this is 128 bytes or less: | ||
529 | */ | ||
530 | memcpy(tss->io_bitmap, next->io_bitmap_ptr, | ||
531 | max(prev->io_bitmap_max, next->io_bitmap_max)); | ||
532 | } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { | ||
533 | /* | ||
534 | * Clear any possible leftover bits: | ||
535 | */ | ||
536 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | ||
537 | } | ||
538 | } | ||
539 | |||
540 | /* | 373 | /* |
541 | * switch_to(x,y) should switch tasks from x to y. | 374 | * switch_to(x,y) should switch tasks from x to y. |
542 | * | 375 | * |
@@ -694,11 +527,6 @@ void set_personality_64bit(void) | |||
694 | current->personality &= ~READ_IMPLIES_EXEC; | 527 | current->personality &= ~READ_IMPLIES_EXEC; |
695 | } | 528 | } |
696 | 529 | ||
697 | asmlinkage long sys_fork(struct pt_regs *regs) | ||
698 | { | ||
699 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
700 | } | ||
701 | |||
702 | asmlinkage long | 530 | asmlinkage long |
703 | sys_clone(unsigned long clone_flags, unsigned long newsp, | 531 | sys_clone(unsigned long clone_flags, unsigned long newsp, |
704 | void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) | 532 | void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) |
@@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp, | |||
708 | return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); | 536 | return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); |
709 | } | 537 | } |
710 | 538 | ||
711 | /* | ||
712 | * This is trivial, and on the face of it looks like it | ||
713 | * could equally well be done in user mode. | ||
714 | * | ||
715 | * Not so, for quite unobvious reasons - register pressure. | ||
716 | * In user mode vfork() cannot have a stack frame, and if | ||
717 | * done by calling the "clone()" system call directly, you | ||
718 | * do not have enough call-clobbered registers to hold all | ||
719 | * the information you need. | ||
720 | */ | ||
721 | asmlinkage long sys_vfork(struct pt_regs *regs) | ||
722 | { | ||
723 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, | ||
724 | NULL, NULL); | ||
725 | } | ||
726 | |||
727 | unsigned long get_wchan(struct task_struct *p) | 539 | unsigned long get_wchan(struct task_struct *p) |
728 | { | 540 | { |
729 | unsigned long stack; | 541 | unsigned long stack; |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index fb2159a5c817..3d9672e59c16 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1383,7 +1383,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | |||
1383 | #ifdef CONFIG_X86_32 | 1383 | #ifdef CONFIG_X86_32 |
1384 | # define IS_IA32 1 | 1384 | # define IS_IA32 1 |
1385 | #elif defined CONFIG_IA32_EMULATION | 1385 | #elif defined CONFIG_IA32_EMULATION |
1386 | # define IS_IA32 test_thread_flag(TIF_IA32) | 1386 | # define IS_IA32 is_compat_task() |
1387 | #else | 1387 | #else |
1388 | # define IS_IA32 0 | 1388 | # define IS_IA32 0 |
1389 | #endif | 1389 | #endif |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 1cc18d439bbb..2aef36d8aca2 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -216,6 +216,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
216 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), | 216 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), |
217 | }, | 217 | }, |
218 | }, | 218 | }, |
219 | { /* Handle problems with rebooting on Dell XPS710 */ | ||
220 | .callback = set_bios_reboot, | ||
221 | .ident = "Dell XPS710", | ||
222 | .matches = { | ||
223 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
224 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), | ||
225 | }, | ||
226 | }, | ||
219 | { } | 227 | { } |
220 | }; | 228 | }; |
221 | 229 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5b85759e7972..b746deb9ebc6 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -600,19 +600,7 @@ static int __init setup_elfcorehdr(char *arg) | |||
600 | early_param("elfcorehdr", setup_elfcorehdr); | 600 | early_param("elfcorehdr", setup_elfcorehdr); |
601 | #endif | 601 | #endif |
602 | 602 | ||
603 | static int __init default_update_apic(void) | 603 | static struct x86_quirks default_x86_quirks __initdata; |
604 | { | ||
605 | #ifdef CONFIG_SMP | ||
606 | if (!apic->wakeup_cpu) | ||
607 | apic->wakeup_cpu = wakeup_secondary_cpu_via_init; | ||
608 | #endif | ||
609 | |||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | static struct x86_quirks default_x86_quirks __initdata = { | ||
614 | .update_apic = default_update_apic, | ||
615 | }; | ||
616 | 604 | ||
617 | struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; | 605 | struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; |
618 | 606 | ||
@@ -782,6 +770,9 @@ void __init setup_arch(char **cmdline_p) | |||
782 | 770 | ||
783 | finish_e820_parsing(); | 771 | finish_e820_parsing(); |
784 | 772 | ||
773 | if (efi_enabled) | ||
774 | efi_init(); | ||
775 | |||
785 | dmi_scan_machine(); | 776 | dmi_scan_machine(); |
786 | 777 | ||
787 | dmi_check_system(bad_bios_dmi_table); | 778 | dmi_check_system(bad_bios_dmi_table); |
@@ -801,8 +792,6 @@ void __init setup_arch(char **cmdline_p) | |||
801 | insert_resource(&iomem_resource, &data_resource); | 792 | insert_resource(&iomem_resource, &data_resource); |
802 | insert_resource(&iomem_resource, &bss_resource); | 793 | insert_resource(&iomem_resource, &bss_resource); |
803 | 794 | ||
804 | if (efi_enabled) | ||
805 | efi_init(); | ||
806 | 795 | ||
807 | #ifdef CONFIG_X86_32 | 796 | #ifdef CONFIG_X86_32 |
808 | if (ppro_with_ram_bug()) { | 797 | if (ppro_with_ram_bug()) { |
@@ -875,9 +864,7 @@ void __init setup_arch(char **cmdline_p) | |||
875 | 864 | ||
876 | reserve_initrd(); | 865 | reserve_initrd(); |
877 | 866 | ||
878 | #ifdef CONFIG_X86_64 | ||
879 | vsmp_init(); | 867 | vsmp_init(); |
880 | #endif | ||
881 | 868 | ||
882 | io_delay_init(); | 869 | io_delay_init(); |
883 | 870 | ||
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d992e6cff730..c29f301d3885 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/crash_dump.h> | 7 | #include <linux/crash_dump.h> |
8 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
9 | #include <linux/topology.h> | 9 | #include <linux/topology.h> |
10 | #include <linux/pfn.h> | ||
10 | #include <asm/sections.h> | 11 | #include <asm/sections.h> |
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/setup.h> | 13 | #include <asm/setup.h> |
@@ -41,6 +42,321 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { | |||
41 | }; | 42 | }; |
42 | EXPORT_SYMBOL(__per_cpu_offset); | 43 | EXPORT_SYMBOL(__per_cpu_offset); |
43 | 44 | ||
45 | /** | ||
46 | * pcpu_need_numa - determine whether percpu allocation must consider NUMA | ||
47 | * | ||
48 | * If NUMA is not configured or there is only one NUMA node available, | ||
49 | * there is no reason to consider NUMA. This function determines | ||
50 | * whether percpu allocation should consider NUMA or not. | ||
51 | * | ||
52 | * RETURNS: | ||
53 | * true if NUMA should be considered; otherwise, false. | ||
54 | */ | ||
55 | static bool __init pcpu_need_numa(void) | ||
56 | { | ||
57 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
58 | pg_data_t *last = NULL; | ||
59 | unsigned int cpu; | ||
60 | |||
61 | for_each_possible_cpu(cpu) { | ||
62 | int node = early_cpu_to_node(cpu); | ||
63 | |||
64 | if (node_online(node) && NODE_DATA(node) && | ||
65 | last && last != NODE_DATA(node)) | ||
66 | return true; | ||
67 | |||
68 | last = NODE_DATA(node); | ||
69 | } | ||
70 | #endif | ||
71 | return false; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu | ||
76 | * @cpu: cpu to allocate for | ||
77 | * @size: size of allocation in bytes | ||
78 | * @align: alignment | ||
79 | * | ||
80 | * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper | ||
81 | * does the right thing for NUMA regardless of the current | ||
82 | * configuration. | ||
83 | * | ||
84 | * RETURNS: | ||
85 | * Pointer to the allocated area on success, NULL on failure. | ||
86 | */ | ||
87 | static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | ||
88 | unsigned long align) | ||
89 | { | ||
90 | const unsigned long goal = __pa(MAX_DMA_ADDRESS); | ||
91 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
92 | int node = early_cpu_to_node(cpu); | ||
93 | void *ptr; | ||
94 | |||
95 | if (!node_online(node) || !NODE_DATA(node)) { | ||
96 | ptr = __alloc_bootmem_nopanic(size, align, goal); | ||
97 | pr_info("cpu %d has no node %d or node-local memory\n", | ||
98 | cpu, node); | ||
99 | pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", | ||
100 | cpu, size, __pa(ptr)); | ||
101 | } else { | ||
102 | ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), | ||
103 | size, align, goal); | ||
104 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at " | ||
105 | "%016lx\n", cpu, size, node, __pa(ptr)); | ||
106 | } | ||
107 | return ptr; | ||
108 | #else | ||
109 | return __alloc_bootmem_nopanic(size, align, goal); | ||
110 | #endif | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Remap allocator | ||
115 | * | ||
116 | * This allocator uses a PMD page as its unit. A PMD page is allocated | ||
117 | * for each cpu and remapped into the vmalloc area using a PMD mapping. | ||
118 | * As a PMD page is quite large, only part of it is used for the first | ||
119 | * chunk. The unused part is returned to the bootmem allocator. | ||
120 | * | ||
121 | * So, the PMD pages are mapped twice - once in the linear physical | ||
122 | * mapping and once into the vmalloc area for the first percpu chunk. | ||
123 | * The double mapping costs one extra PMD TLB entry, but that is still | ||
124 | * much better than using only 4k mappings while staying NUMA friendly. | ||
125 | */ | ||
126 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
127 | static size_t pcpur_size __initdata; | ||
128 | static void **pcpur_ptrs __initdata; | ||
129 | |||
130 | static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) | ||
131 | { | ||
132 | size_t off = (size_t)pageno << PAGE_SHIFT; | ||
133 | |||
134 | if (off >= pcpur_size) | ||
135 | return NULL; | ||
136 | |||
137 | return virt_to_page(pcpur_ptrs[cpu] + off); | ||
138 | } | ||
139 | |||
140 | static ssize_t __init setup_pcpu_remap(size_t static_size) | ||
141 | { | ||
142 | static struct vm_struct vm; | ||
143 | pg_data_t *last; | ||
144 | size_t ptrs_size; | ||
145 | unsigned int cpu; | ||
146 | ssize_t ret; | ||
147 | |||
148 | /* | ||
149 | * If large page isn't supported, there's no benefit in doing | ||
150 | * this. Also, on non-NUMA, embedding is better. | ||
151 | */ | ||
152 | if (!cpu_has_pse || pcpu_need_numa()) | ||
153 | return -EINVAL; | ||
154 | |||
155 | last = NULL; | ||
156 | for_each_possible_cpu(cpu) { | ||
157 | int node = early_cpu_to_node(cpu); | ||
158 | |||
159 | if (node_online(node) && NODE_DATA(node) && | ||
160 | last && last != NODE_DATA(node)) | ||
161 | goto proceed; | ||
162 | |||
163 | last = NODE_DATA(node); | ||
164 | } | ||
165 | return -EINVAL; | ||
166 | |||
167 | proceed: | ||
168 | /* | ||
169 | * Currently supports only a single page. Supporting multiple | ||
170 | * pages won't be too difficult if it ever becomes necessary. | ||
171 | */ | ||
172 | pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); | ||
173 | if (pcpur_size > PMD_SIZE) { | ||
174 | pr_warning("PERCPU: static data is larger than large page, " | ||
175 | "can't use large page\n"); | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | /* allocate pointer array and alloc large pages */ | ||
180 | ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); | ||
181 | pcpur_ptrs = alloc_bootmem(ptrs_size); | ||
182 | |||
183 | for_each_possible_cpu(cpu) { | ||
184 | pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE); | ||
185 | if (!pcpur_ptrs[cpu]) | ||
186 | goto enomem; | ||
187 | |||
188 | /* | ||
189 | * Only use pcpur_size bytes and give back the rest. | ||
190 | * | ||
191 | * Ingo: The 2MB up-rounding bootmem is needed to make | ||
192 | * sure the partial 2MB page is still fully RAM - it's | ||
193 | * not well-specified to have a PAT-incompatible area | ||
194 | * (unmapped RAM, device memory, etc.) in that hole. | ||
195 | */ | ||
196 | free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), | ||
197 | PMD_SIZE - pcpur_size); | ||
198 | |||
199 | memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); | ||
200 | } | ||
201 | |||
202 | /* allocate address and map */ | ||
203 | vm.flags = VM_ALLOC; | ||
204 | vm.size = num_possible_cpus() * PMD_SIZE; | ||
205 | vm_area_register_early(&vm, PMD_SIZE); | ||
206 | |||
207 | for_each_possible_cpu(cpu) { | ||
208 | pmd_t *pmd; | ||
209 | |||
210 | pmd = populate_extra_pmd((unsigned long)vm.addr | ||
211 | + cpu * PMD_SIZE); | ||
212 | set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])), | ||
213 | PAGE_KERNEL_LARGE)); | ||
214 | } | ||
215 | |||
216 | /* we're ready, commit */ | ||
217 | pr_info("PERCPU: Remapped at %p with large pages, static data " | ||
218 | "%zu bytes\n", vm.addr, static_size); | ||
219 | |||
220 | ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE, | ||
221 | pcpur_size - static_size, vm.addr, NULL); | ||
222 | goto out_free_ar; | ||
223 | |||
224 | enomem: | ||
225 | for_each_possible_cpu(cpu) | ||
226 | if (pcpur_ptrs[cpu]) | ||
227 | free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE); | ||
228 | ret = -ENOMEM; | ||
229 | out_free_ar: | ||
230 | free_bootmem(__pa(pcpur_ptrs), ptrs_size); | ||
231 | return ret; | ||
232 | } | ||
233 | #else | ||
234 | static ssize_t __init setup_pcpu_remap(size_t static_size) | ||
235 | { | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | #endif | ||
239 | |||
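
To make the "give back the rest" step above concrete, here is a small stand-alone arithmetic sketch with illustrative numbers (the static size and dynamic reserve below are assumptions, not values from this patch):

#include <stdio.h>

int main(void)
{
	unsigned long pmd_size    = 2UL << 20;		/* one 2MB large page */
	unsigned long static_size = 500UL << 10;	/* hypothetical */
	unsigned long dyn_reserve = 20UL << 10;		/* hypothetical */
	unsigned long page_size   = 4096UL;
	/* PFN_ALIGN(): round the first chunk up to a page boundary */
	unsigned long pcpur_size  = (static_size + dyn_reserve + page_size - 1)
				    & ~(page_size - 1);

	printf("returned to bootmem per cpu: %lu bytes\n",
	       pmd_size - pcpur_size);
	return 0;
}
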
240 | /* | ||
241 | * Embedding allocator | ||
242 | * | ||
243 | * The first chunk is sized to just contain the static area plus | ||
244 | * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using | ||
245 | * the bootmem allocator, and used as-is without being mapped into the | ||
246 | * vmalloc area. This lets the first chunk piggyback on the linear | ||
247 | * physical PMD mapping and adds no additional pressure to the | ||
248 | * TLB. | ||
249 | */ | ||
250 | static void *pcpue_ptr __initdata; | ||
251 | static size_t pcpue_unit_size __initdata; | ||
252 | |||
253 | static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | ||
254 | { | ||
255 | return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size | ||
256 | + ((size_t)pageno << PAGE_SHIFT)); | ||
257 | } | ||
258 | |||
259 | static ssize_t __init setup_pcpu_embed(size_t static_size) | ||
260 | { | ||
261 | unsigned int cpu; | ||
262 | |||
263 | /* | ||
264 | * If large page isn't supported, there's no benefit in doing | ||
265 | * this. Also, embedding allocation doesn't play well with | ||
266 | * NUMA. | ||
267 | */ | ||
268 | if (!cpu_has_pse || pcpu_need_numa()) | ||
269 | return -EINVAL; | ||
270 | |||
271 | /* allocate and copy */ | ||
272 | pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); | ||
273 | pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE); | ||
274 | pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, | ||
275 | PAGE_SIZE); | ||
276 | if (!pcpue_ptr) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | for_each_possible_cpu(cpu) | ||
280 | memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load, | ||
281 | static_size); | ||
282 | |||
283 | /* we're ready, commit */ | ||
284 | pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", | ||
285 | pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size); | ||
286 | |||
287 | return pcpu_setup_first_chunk(pcpue_get_page, static_size, | ||
288 | pcpue_unit_size, | ||
289 | pcpue_unit_size - static_size, pcpue_ptr, | ||
290 | NULL); | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * 4k page allocator | ||
295 | * | ||
296 | * This is the basic allocator. The static percpu area is allocated | ||
297 | * page-by-page and most of the initialization is done by the generic | ||
298 | * setup function. | ||
299 | */ | ||
300 | static struct page **pcpu4k_pages __initdata; | ||
301 | static int pcpu4k_nr_static_pages __initdata; | ||
302 | |||
303 | static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno) | ||
304 | { | ||
305 | if (pageno < pcpu4k_nr_static_pages) | ||
306 | return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno]; | ||
307 | return NULL; | ||
308 | } | ||
309 | |||
310 | static void __init pcpu4k_populate_pte(unsigned long addr) | ||
311 | { | ||
312 | populate_extra_pte(addr); | ||
313 | } | ||
314 | |||
315 | static ssize_t __init setup_pcpu_4k(size_t static_size) | ||
316 | { | ||
317 | size_t pages_size; | ||
318 | unsigned int cpu; | ||
319 | int i, j; | ||
320 | ssize_t ret; | ||
321 | |||
322 | pcpu4k_nr_static_pages = PFN_UP(static_size); | ||
323 | |||
324 | /* unaligned allocations can't be freed, round up to page size */ | ||
325 | pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus() | ||
326 | * sizeof(pcpu4k_pages[0])); | ||
327 | pcpu4k_pages = alloc_bootmem(pages_size); | ||
328 | |||
329 | /* allocate and copy */ | ||
330 | j = 0; | ||
331 | for_each_possible_cpu(cpu) | ||
332 | for (i = 0; i < pcpu4k_nr_static_pages; i++) { | ||
333 | void *ptr; | ||
334 | |||
335 | ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); | ||
336 | if (!ptr) | ||
337 | goto enomem; | ||
338 | |||
339 | memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); | ||
340 | pcpu4k_pages[j++] = virt_to_page(ptr); | ||
341 | } | ||
342 | |||
343 | /* we're ready, commit */ | ||
344 | pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", | ||
345 | pcpu4k_nr_static_pages, static_size); | ||
346 | |||
347 | ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL, | ||
348 | pcpu4k_populate_pte); | ||
349 | goto out_free_ar; | ||
350 | |||
351 | enomem: | ||
352 | while (--j >= 0) | ||
353 | free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE); | ||
354 | ret = -ENOMEM; | ||
355 | out_free_ar: | ||
356 | free_bootmem(__pa(pcpu4k_pages), pages_size); | ||
357 | return ret; | ||
358 | } | ||
359 | |||
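
All three allocators above feed pcpu_setup_first_chunk() the same kind of callback: given (cpu, pageno), return the struct page backing that page of the cpu's first-chunk area, or NULL past the populated part. A sketch of that contract with hypothetical demo_* names, not from this patch:

static struct page **demo_pages __initdata;	/* hypothetical backing array */
static int demo_pages_per_cpu __initdata;

static struct page * __init demo_get_page(unsigned int cpu, int pageno)
{
	if (pageno >= demo_pages_per_cpu)
		return NULL;	/* nothing populated beyond the first chunk */
	return demo_pages[cpu * demo_pages_per_cpu + pageno];
}
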
44 | static inline void setup_percpu_segment(int cpu) | 360 | static inline void setup_percpu_segment(int cpu) |
45 | { | 361 | { |
46 | #ifdef CONFIG_X86_32 | 362 | #ifdef CONFIG_X86_32 |
@@ -61,38 +377,35 @@ static inline void setup_percpu_segment(int cpu) | |||
61 | */ | 377 | */ |
62 | void __init setup_per_cpu_areas(void) | 378 | void __init setup_per_cpu_areas(void) |
63 | { | 379 | { |
64 | ssize_t size; | 380 | size_t static_size = __per_cpu_end - __per_cpu_start; |
65 | char *ptr; | 381 | unsigned int cpu; |
66 | int cpu; | 382 | unsigned long delta; |
67 | 383 | size_t pcpu_unit_size; | |
68 | /* Copy section for each CPU (we discard the original) */ | 384 | ssize_t ret; |
69 | size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE); | ||
70 | 385 | ||
71 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", | 386 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", |
72 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); | 387 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); |
73 | 388 | ||
74 | pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); | 389 | /* |
390 | * Allocate percpu area. If PSE is supported, try to make use | ||
391 | * of large page mappings. Please read comments on top of | ||
392 | * each allocator for details. | ||
393 | */ | ||
394 | ret = setup_pcpu_remap(static_size); | ||
395 | if (ret < 0) | ||
396 | ret = setup_pcpu_embed(static_size); | ||
397 | if (ret < 0) | ||
398 | ret = setup_pcpu_4k(static_size); | ||
399 | if (ret < 0) | ||
400 | panic("cannot allocate static percpu area (%zu bytes, err=%zd)", | ||
401 | static_size, ret); | ||
75 | 402 | ||
76 | for_each_possible_cpu(cpu) { | 403 | pcpu_unit_size = ret; |
77 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
78 | ptr = alloc_bootmem_pages(size); | ||
79 | #else | ||
80 | int node = early_cpu_to_node(cpu); | ||
81 | if (!node_online(node) || !NODE_DATA(node)) { | ||
82 | ptr = alloc_bootmem_pages(size); | ||
83 | pr_info("cpu %d has no node %d or node-local memory\n", | ||
84 | cpu, node); | ||
85 | pr_debug("per cpu data for cpu%d at %016lx\n", | ||
86 | cpu, __pa(ptr)); | ||
87 | } else { | ||
88 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
89 | pr_debug("per cpu data for cpu%d on node%d at %016lx\n", | ||
90 | cpu, node, __pa(ptr)); | ||
91 | } | ||
92 | #endif | ||
93 | 404 | ||
94 | memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); | 405 | /* alrighty, percpu areas up and running */ |
95 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 406 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
407 | for_each_possible_cpu(cpu) { | ||
408 | per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; | ||
96 | per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); | 409 | per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); |
97 | per_cpu(cpu_number, cpu) = cpu; | 410 | per_cpu(cpu_number, cpu) = cpu; |
98 | setup_percpu_segment(cpu); | 411 | setup_percpu_segment(cpu); |
@@ -125,8 +438,6 @@ void __init setup_per_cpu_areas(void) | |||
125 | */ | 438 | */ |
126 | if (cpu == boot_cpu_id) | 439 | if (cpu == boot_cpu_id) |
127 | switch_to_new_gdt(cpu); | 440 | switch_to_new_gdt(cpu); |
128 | |||
129 | DBG("PERCPU: cpu %4d %p\n", cpu, ptr); | ||
130 | } | 441 | } |
131 | 442 | ||
132 | /* indicate the early static arrays will soon be gone */ | 443 | /* indicate the early static arrays will soon be gone */ |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 7cdcd16885ed..d2cc6428c587 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -187,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, | |||
187 | /* | 187 | /* |
188 | * Set up a signal frame. | 188 | * Set up a signal frame. |
189 | */ | 189 | */ |
190 | #ifdef CONFIG_X86_32 | ||
191 | static const struct { | ||
192 | u16 poplmovl; | ||
193 | u32 val; | ||
194 | u16 int80; | ||
195 | } __attribute__((packed)) retcode = { | ||
196 | 0xb858, /* popl %eax; movl $..., %eax */ | ||
197 | __NR_sigreturn, | ||
198 | 0x80cd, /* int $0x80 */ | ||
199 | }; | ||
200 | |||
201 | static const struct { | ||
202 | u8 movl; | ||
203 | u32 val; | ||
204 | u16 int80; | ||
205 | u8 pad; | ||
206 | } __attribute__((packed)) rt_retcode = { | ||
207 | 0xb8, /* movl $..., %eax */ | ||
208 | __NR_rt_sigreturn, | ||
209 | 0x80cd, /* int $0x80 */ | ||
210 | 0 | ||
211 | }; | ||
212 | 190 | ||
213 | /* | 191 | /* |
214 | * Determine which stack to use.. | 192 | * Determine which stack to use.. |
215 | */ | 193 | */ |
194 | static unsigned long align_sigframe(unsigned long sp) | ||
195 | { | ||
196 | #ifdef CONFIG_X86_32 | ||
197 | /* | ||
198 | * Align the stack pointer according to the i386 ABI, | ||
199 | * i.e. so that on function entry ((sp + 4) & 15) == 0. | ||
200 | */ | ||
201 | sp = ((sp + 4) & -16ul) - 4; | ||
202 | #else /* !CONFIG_X86_32 */ | ||
203 | sp = round_down(sp, 16) - 8; | ||
204 | #endif | ||
205 | return sp; | ||
206 | } | ||
207 | |||
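
A worked check of the i386 branch of align_sigframe(), as a stand-alone sketch: starting from an arbitrary sp, the formula yields a frame address such that sp + 4 is 16-byte aligned at function entry, exactly as the comment requires.

#include <assert.h>

int main(void)
{
	unsigned long sp = 0xbfffe123ul;	/* arbitrary example value */

	sp = ((sp + 4) & -16ul) - 4;		/* the X86_32 branch above */
	assert(sp == 0xbfffe11cul);
	assert(((sp + 4) & 15) == 0);		/* aligned at function entry */
	return 0;
}
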
216 | static inline void __user * | 208 | static inline void __user * |
217 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | 209 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, |
218 | void **fpstate) | 210 | void __user **fpstate) |
219 | { | 211 | { |
220 | unsigned long sp; | ||
221 | |||
222 | /* Default to using normal stack */ | 212 | /* Default to using normal stack */ |
223 | sp = regs->sp; | 213 | unsigned long sp = regs->sp; |
214 | |||
215 | #ifdef CONFIG_X86_64 | ||
216 | /* redzone */ | ||
217 | sp -= 128; | ||
218 | #endif /* CONFIG_X86_64 */ | ||
224 | 219 | ||
225 | /* | 220 | /* |
226 | * If we are on the alternate signal stack and would overflow it, don't. | 221 | * If we are on the alternate signal stack and would overflow it, don't. |
@@ -234,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
234 | if (sas_ss_flags(sp) == 0) | 229 | if (sas_ss_flags(sp) == 0) |
235 | sp = current->sas_ss_sp + current->sas_ss_size; | 230 | sp = current->sas_ss_sp + current->sas_ss_size; |
236 | } else { | 231 | } else { |
232 | #ifdef CONFIG_X86_32 | ||
237 | /* This is the legacy signal stack switching. */ | 233 | /* This is the legacy signal stack switching. */ |
238 | if ((regs->ss & 0xffff) != __USER_DS && | 234 | if ((regs->ss & 0xffff) != __USER_DS && |
239 | !(ka->sa.sa_flags & SA_RESTORER) && | 235 | !(ka->sa.sa_flags & SA_RESTORER) && |
240 | ka->sa.sa_restorer) | 236 | ka->sa.sa_restorer) |
241 | sp = (unsigned long) ka->sa.sa_restorer; | 237 | sp = (unsigned long) ka->sa.sa_restorer; |
238 | #endif /* CONFIG_X86_32 */ | ||
242 | } | 239 | } |
243 | 240 | ||
244 | if (used_math()) { | 241 | if (used_math()) { |
245 | sp = sp - sig_xstate_size; | 242 | sp -= sig_xstate_size; |
246 | *fpstate = (struct _fpstate *) sp; | 243 | #ifdef CONFIG_X86_64 |
244 | sp = round_down(sp, 64); | ||
245 | #endif /* CONFIG_X86_64 */ | ||
246 | *fpstate = (void __user *)sp; | ||
247 | |||
247 | if (save_i387_xstate(*fpstate) < 0) | 248 | if (save_i387_xstate(*fpstate) < 0) |
248 | return (void __user *)-1L; | 249 | return (void __user *)-1L; |
249 | } | 250 | } |
250 | 251 | ||
251 | sp -= frame_size; | 252 | return (void __user *)align_sigframe(sp - frame_size); |
252 | /* | ||
253 | * Align the stack pointer according to the i386 ABI, | ||
254 | * i.e. so that on function entry ((sp + 4) & 15) == 0. | ||
255 | */ | ||
256 | sp = ((sp + 4) & -16ul) - 4; | ||
257 | |||
258 | return (void __user *) sp; | ||
259 | } | 253 | } |
260 | 254 | ||
255 | #ifdef CONFIG_X86_32 | ||
256 | static const struct { | ||
257 | u16 poplmovl; | ||
258 | u32 val; | ||
259 | u16 int80; | ||
260 | } __attribute__((packed)) retcode = { | ||
261 | 0xb858, /* popl %eax; movl $..., %eax */ | ||
262 | __NR_sigreturn, | ||
263 | 0x80cd, /* int $0x80 */ | ||
264 | }; | ||
265 | |||
266 | static const struct { | ||
267 | u8 movl; | ||
268 | u32 val; | ||
269 | u16 int80; | ||
270 | u8 pad; | ||
271 | } __attribute__((packed)) rt_retcode = { | ||
272 | 0xb8, /* movl $..., %eax */ | ||
273 | __NR_rt_sigreturn, | ||
274 | 0x80cd, /* int $0x80 */ | ||
275 | 0 | ||
276 | }; | ||
277 | |||
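
The two packed structs lay down the classic int $0x80 sigreturn trampolines on the user stack. A stand-alone sketch verifying the byte layout on a little-endian host (119 is assumed here as the i386 __NR_sigreturn value):

#include <assert.h>
#include <string.h>
#include <stdint.h>

struct demo_retcode {
	uint16_t poplmovl;
	uint32_t val;
	uint16_t int80;
} __attribute__((packed));

int main(void)
{
	const struct demo_retcode rc = { 0xb858, 119, 0x80cd };
	unsigned char b[sizeof(rc)];

	memcpy(b, &rc, sizeof(rc));
	assert(b[0] == 0x58 && b[1] == 0xb8);	/* popl %eax; movl $..., %eax */
	assert(b[6] == 0xcd && b[7] == 0x80);	/* int $0x80 */
	return 0;
}
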
261 | static int | 278 | static int |
262 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | 279 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, |
263 | struct pt_regs *regs) | 280 | struct pt_regs *regs) |
@@ -388,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
388 | return 0; | 405 | return 0; |
389 | } | 406 | } |
390 | #else /* !CONFIG_X86_32 */ | 407 | #else /* !CONFIG_X86_32 */ |
391 | /* | ||
392 | * Determine which stack to use.. | ||
393 | */ | ||
394 | static void __user * | ||
395 | get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size) | ||
396 | { | ||
397 | /* Default to using normal stack - redzone*/ | ||
398 | sp -= 128; | ||
399 | |||
400 | /* This is the X/Open sanctioned signal stack switching. */ | ||
401 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
402 | if (sas_ss_flags(sp) == 0) | ||
403 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
404 | } | ||
405 | |||
406 | return (void __user *)round_down(sp - size, 64); | ||
407 | } | ||
408 | |||
409 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 408 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
410 | sigset_t *set, struct pt_regs *regs) | 409 | sigset_t *set, struct pt_regs *regs) |
411 | { | 410 | { |
@@ -414,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
414 | int err = 0; | 413 | int err = 0; |
415 | struct task_struct *me = current; | 414 | struct task_struct *me = current; |
416 | 415 | ||
417 | if (used_math()) { | 416 | frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); |
418 | fp = get_stack(ka, regs->sp, sig_xstate_size); | ||
419 | frame = (void __user *)round_down( | ||
420 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | ||
421 | |||
422 | if (save_i387_xstate(fp) < 0) | ||
423 | return -EFAULT; | ||
424 | } else | ||
425 | frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8; | ||
426 | 417 | ||
427 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 418 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
428 | return -EFAULT; | 419 | return -EFAULT; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9ce666387f37..249334f5080a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -112,7 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); | |||
112 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 112 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
113 | EXPORT_PER_CPU_SYMBOL(cpu_info); | 113 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
114 | 114 | ||
115 | static atomic_t init_deasserted; | 115 | atomic_t init_deasserted; |
116 | 116 | ||
117 | 117 | ||
118 | /* Set if we find a B stepping CPU */ | 118 | /* Set if we find a B stepping CPU */ |
@@ -614,12 +614,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | |||
614 | unsigned long send_status, accept_status = 0; | 614 | unsigned long send_status, accept_status = 0; |
615 | int maxlvt, num_starts, j; | 615 | int maxlvt, num_starts, j; |
616 | 616 | ||
617 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) { | ||
618 | send_status = uv_wakeup_secondary(phys_apicid, start_eip); | ||
619 | atomic_set(&init_deasserted, 1); | ||
620 | return send_status; | ||
621 | } | ||
622 | |||
623 | maxlvt = lapic_get_maxlvt(); | 617 | maxlvt = lapic_get_maxlvt(); |
624 | 618 | ||
625 | /* | 619 | /* |
@@ -748,7 +742,8 @@ static void __cpuinit do_fork_idle(struct work_struct *work) | |||
748 | /* | 742 | /* |
749 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad | 743 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad |
750 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. | 744 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. |
751 | * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. | 745 | * Returns zero if CPU booted OK, else error code from |
746 | * ->wakeup_secondary_cpu. | ||
752 | */ | 747 | */ |
753 | static int __cpuinit do_boot_cpu(int apicid, int cpu) | 748 | static int __cpuinit do_boot_cpu(int apicid, int cpu) |
754 | { | 749 | { |
@@ -835,9 +830,13 @@ do_rest: | |||
835 | } | 830 | } |
836 | 831 | ||
837 | /* | 832 | /* |
838 | * Starting actual IPI sequence... | 833 | * Kick the secondary CPU. Use the method in the APIC driver |
834 | * if it's defined - or use an INIT boot APIC message otherwise: | ||
839 | */ | 835 | */ |
840 | boot_error = apic->wakeup_cpu(apicid, start_ip); | 836 | if (apic->wakeup_secondary_cpu) |
837 | boot_error = apic->wakeup_secondary_cpu(apicid, start_ip); | ||
838 | else | ||
839 | boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip); | ||
841 | 840 | ||
842 | if (!boot_error) { | 841 | if (!boot_error) { |
843 | /* | 842 | /* |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index c05430ac1b44..a1d288327ff0 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -118,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) | |||
118 | if (!user_mode_vm(regs)) | 118 | if (!user_mode_vm(regs)) |
119 | die(str, regs, err); | 119 | die(str, regs, err); |
120 | } | 120 | } |
121 | |||
122 | /* | ||
123 | * Perform the lazy TSS's I/O bitmap copy. If the TSS has an | ||
124 | * invalid offset set (the LAZY one) and the faulting thread has | ||
125 | * a valid I/O bitmap pointer, we copy the I/O bitmap into the | ||
126 | * TSS, set the offset field correctly and return 1. | ||
127 | */ | ||
128 | static int lazy_iobitmap_copy(void) | ||
129 | { | ||
130 | struct thread_struct *thread; | ||
131 | struct tss_struct *tss; | ||
132 | int cpu; | ||
133 | |||
134 | cpu = get_cpu(); | ||
135 | tss = &per_cpu(init_tss, cpu); | ||
136 | thread = ¤t->thread; | ||
137 | |||
138 | if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY && | ||
139 | thread->io_bitmap_ptr) { | ||
140 | memcpy(tss->io_bitmap, thread->io_bitmap_ptr, | ||
141 | thread->io_bitmap_max); | ||
142 | /* | ||
143 | * If the previously set map extended to higher ports | ||
144 | * than the current one, pad extra space with 0xff (no access). | ||
145 | */ | ||
146 | if (thread->io_bitmap_max < tss->io_bitmap_max) { | ||
147 | memset((char *) tss->io_bitmap + | ||
148 | thread->io_bitmap_max, 0xff, | ||
149 | tss->io_bitmap_max - thread->io_bitmap_max); | ||
150 | } | ||
151 | tss->io_bitmap_max = thread->io_bitmap_max; | ||
152 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; | ||
153 | tss->io_bitmap_owner = thread; | ||
154 | put_cpu(); | ||
155 | |||
156 | return 1; | ||
157 | } | ||
158 | put_cpu(); | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | #endif | 121 | #endif |
163 | 122 | ||
164 | static void __kprobes | 123 | static void __kprobes |
@@ -309,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code) | |||
309 | conditional_sti(regs); | 268 | conditional_sti(regs); |
310 | 269 | ||
311 | #ifdef CONFIG_X86_32 | 270 | #ifdef CONFIG_X86_32 |
312 | if (lazy_iobitmap_copy()) { | ||
313 | /* restart the faulting instruction */ | ||
314 | return; | ||
315 | } | ||
316 | |||
317 | if (regs->flags & X86_VM_MASK) | 271 | if (regs->flags & X86_VM_MASK) |
318 | goto gp_in_vm86; | 272 | goto gp_in_vm86; |
319 | #endif | 273 | #endif |
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index c609205df594..74de562812cc 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/paravirt.h> | 22 | #include <asm/paravirt.h> |
23 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
24 | 24 | ||
25 | #if defined CONFIG_PCI && defined CONFIG_PARAVIRT | 25 | #ifdef CONFIG_PARAVIRT |
26 | /* | 26 | /* |
27 | * Interrupt control on vSMPowered systems: | 27 | * Interrupt control on vSMPowered systems: |
28 | * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' | 28 | * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' |
@@ -114,7 +114,6 @@ static void __init set_vsmp_pv_ops(void) | |||
114 | } | 114 | } |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | #ifdef CONFIG_PCI | ||
118 | static int is_vsmp = -1; | 117 | static int is_vsmp = -1; |
119 | 118 | ||
120 | static void __init detect_vsmp_box(void) | 119 | static void __init detect_vsmp_box(void) |
@@ -139,15 +138,6 @@ int is_vsmp_box(void) | |||
139 | return 0; | 138 | return 0; |
140 | } | 139 | } |
141 | } | 140 | } |
142 | #else | ||
143 | static void __init detect_vsmp_box(void) | ||
144 | { | ||
145 | } | ||
146 | int is_vsmp_box(void) | ||
147 | { | ||
148 | return 0; | ||
149 | } | ||
150 | #endif | ||
151 | 141 | ||
152 | void __init vsmp_init(void) | 142 | void __init vsmp_init(void) |
153 | { | 143 | { |