Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/apic/apic.c                |   6
 arch/x86/kernel/apic/apic_flat_64.c        |   4
 arch/x86/kernel/apic/io_apic.c             |   5
 arch/x86/kernel/apic/x2apic_uv_x.c         |  16
 arch/x86/kernel/bios_uv.c                  |   3
 arch/x86/kernel/cpu/addon_cpuid_features.c |   1
 arch/x86/kernel/cpu/cpu_debug.c            |   0
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |  81
 arch/x86/kernel/cpu/cpufreq/longhaul.c     |   1
 arch/x86/kernel/ftrace.c                   |   2
 arch/x86/kernel/irq.c                      |   2
 arch/x86/kernel/microcode_core.c           |  33
 arch/x86/kernel/mpparse.c                  |   7
 arch/x86/kernel/ptrace.c                   |   3
 arch/x86/kernel/reboot.c                   |   8
 arch/x86/kernel/tlb_uv.c                   | 189
 arch/x86/kernel/uv_sysfs.c                 |   4
 arch/x86/kernel/xsave.c                    |   2
 18 files changed, 232 insertions(+), 135 deletions(-)
 mode change 100755 => 100644 arch/x86/kernel/cpu/cpu_debug.c
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4b48ff9163ca..83e47febcc89 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -472,6 +472,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferrable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());
 
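
The hunk above keys off ARAT ("Always Running APIC Timer", CPUID leaf 6, EAX bit 2,
added to the scattered-feature table further down in this diff): when the local APIC
timer keeps ticking in deep C-states, the C3STOP workaround can be dropped and the
LAPIC timer outranks the per-cpu HPET channel. A minimal user-space sketch of probing
that bit (using the compiler's cpuid.h helper, not the kernel's cpu_has() path):

    #include <stdio.h>
    #include <cpuid.h>  /* GCC/Clang helper for the cpuid instruction */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 6: thermal and power management */
        if (!__get_cpuid(0x6, &eax, &ebx, &ecx, &edx))
            return 1;

        /* EAX bit 2 = ARAT: APIC timer runs in deep C-states */
        printf("ARAT: %s\n", (eax & (1 << 2)) ? "yes" : "no");
        return 0;
    }
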
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0014714ea97b..306e5e88fb6f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -212,7 +212,7 @@ struct apic apic_flat = {
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
-	.inquire_remote_apic = NULL,
+	.inquire_remote_apic = default_inquire_remote_apic,
 
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
@@ -362,7 +362,7 @@ struct apic apic_physflat = {
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
-	.inquire_remote_apic = NULL,
+	.inquire_remote_apic = default_inquire_remote_apic,
 
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7c9d045ac834..849900022407 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2519,7 +2519,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
 
-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
	int apic, pin;
@@ -2553,6 +2552,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
	struct irq_desc *desc = irq_to_desc(irq);
@@ -2629,6 +2629,9 @@ static void ack_apic_level(unsigned int irq)
	 */
	ack_APIC_irq();
 
+	if (irq_remapped(irq))
+		eoi_ioapic_irq(desc);
+
	/* Now we can move and renable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..de1a50af807b 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -549,7 +549,8 @@ void __init uv_system_init(void)
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
	map_low_mmrs();
 
@@ -592,6 +593,7 @@ void __init uv_system_init(void)
		}
	}
 
+	pnode_mask = (1 << n_val) - 1;
	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +617,7 @@ void __init uv_system_init(void)
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +633,16 @@ void __init uv_system_init(void)
			lcpu, blade);
	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_config_high(max_pnode);
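
The new loop above fills in blade/pnode data for memory-only nodes: on UV the global
physical address space encodes the node number above the per-node offset bits, so any
address on a node yields its pnode via (paddr >> m_val) & pnode_mask. A standalone
sketch of that arithmetic (the m_val/n_val widths here are illustrative; real values
come from the hub configuration MMRs):

    #include <stdio.h>

    #define M_VAL 40    /* illustrative: offset bits within a node */
    #define N_VAL 10    /* illustrative: node-number bits */

    static unsigned long pnode_of(unsigned long paddr)
    {
        unsigned long pnode_mask = (1UL << N_VAL) - 1;
        return (paddr >> M_VAL) & pnode_mask;
    }

    int main(void)
    {
        /* An address that lies on pnode 3 */
        unsigned long paddr = (3UL << M_VAL) | 0x1000;
        printf("pnode = %lu\n", pnode_of(paddr));   /* prints 3 */
        return 0;
    }
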
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+		uv_systab.revision);
 }
 #else /* !CONFIG_EFI */
 
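
The one-line fix above closes a use-after-unmap: the old printk read tab->revision
after iounmap(tab) had already torn down the mapping; the replacement reads the field
from the long-lived uv_systab copy made two lines earlier. The shape of the pattern,
as a user-space sketch with malloc/free standing in for ioremap/iounmap:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct systab { int revision; };

    static struct systab systab_copy;   /* long-lived copy */

    static void consume(struct systab *mapped)
    {
        /* Copy what we need while the mapping is still valid... */
        memcpy(&systab_copy, mapped, sizeof(systab_copy));
        free(mapped);                   /* stands in for iounmap() */

        /* ...and only touch the copy from here on. */
        printf("revision %d\n", systab_copy.revision);
    }

    int main(void)
    {
        struct systab *m = malloc(sizeof(*m));
        if (!m)
            return 1;
        m->revision = 3;
        consume(m);
        return 0;
    }
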
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849d..c965e5212714 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
		{ 0, 0, 0, 0 }
	};
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
old mode 100755
new mode 100644
index 46e29ab96c6a..46e29ab96c6a
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e83..ecdb682ab516 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,6 +68,7 @@ struct acpi_cpufreq_data {
	unsigned int max_freq;
	unsigned int resume;
	unsigned int cpu_feature;
+	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
@@ -152,7 +153,8 @@ struct drv_cmd {
	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
	struct drv_cmd *cmd = _cmd;
	u32 h;
@@ -169,10 +171,10 @@ static long do_drv_read(void *_cmd)
	default:
		break;
	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;
@@ -191,23 +193,24 @@ static long do_drv_write(void *_cmd)
	default:
		break;
	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -241,28 +244,23 @@ static u32 get_cur_val(const struct cpumask *mask)
	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
	union {
		struct {
			u32 lo;
			u32 hi;
		} split;
		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
+	struct perf_pair *cur = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,52 +279,57 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
	unsigned int perf_percent;
	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(drv_data, cpu)->saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(drv_data, cpu)->saved_mperf;
+	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
+	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
	/*
	 * We dont want to do 64 bit divide with 32 bit kernel
	 * Get an approximate value. Return failure in case we cannot get
	 * an approximate value.
	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
		int shift_count;
		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-				cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
	else
		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-				cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
	else
		perf_percent = 0;
 
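
Two things change above. First, work_on_cpu() is replaced with direct
smp_call_function_single()/smp_call_function_many() cross-calls, which run the MSR
accesses on the target CPUs without bouncing through a workqueue. Second, the driver
stops zeroing APERF/MPERF on every read (which raced with any other user of those
MSRs) and instead keeps per-cpu saved values and works on deltas: APERF advances at
the actual clock, MPERF at the maximum (TSC) rate, so the delta ratio gives busy-time
frequency as a percentage of maximum. A runnable sketch of the delta bookkeeping
(the counter values are fabricated; in the driver they come from rdmsr):

    #include <stdio.h>
    #include <stdint.h>

    struct perf_state { uint64_t saved_aperf, saved_mperf; };

    /* Estimated % of max frequency since the previous call */
    static unsigned int perf_percent(struct perf_state *s,
                                     uint64_t aperf, uint64_t mperf)
    {
        uint64_t da = aperf - s->saved_aperf; /* u64 math handles wrap */
        uint64_t dm = mperf - s->saved_mperf;

        s->saved_aperf = aperf;
        s->saved_mperf = mperf;

        return dm ? (unsigned int)((da * 100) / dm) : 0;
    }

    int main(void)
    {
        struct perf_state s = { 0, 0 };

        perf_percent(&s, 1000, 1000);   /* prime the saved values */
        /* CPU ran at half speed: APERF advanced half as much as MPERF */
        printf("%u%%\n", perf_percent(&s, 1500, 2000));   /* prints 50% */
        return 0;
    }
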
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 0bd48e65a0ca..ce2ed3e4aad9 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
-#include <linux/kernel.h>
 
 #include <asm/msr.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 70a10ca100f6..18dfa30795c9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <trace/syscall.h>
+
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index fd57bf35d0fc..c1739ac29708 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -63,7 +63,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
	seq_printf(p, "  Spurious interrupts\n");
 #endif
	if (generic_interrupt_extension) {
-		seq_printf(p, "PLT: ");
+		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
		seq_printf(p, "  Platform interrupts\n");
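
The fix above replaces the hard-coded "PLT: " label with "%*s: ", which takes its
field width from the next argument, so the platform-interrupt row lines up with the
numeric IRQ rows whose width (prec) is computed from nr_irqs. A quick illustration:

    #include <stdio.h>

    int main(void)
    {
        int prec = 4;   /* stand-in for the width the kernel computes */

        /* "%*s: " right-aligns the label in a prec-wide field */
        printf("%*s: %10u\n", prec, "PLT", 42u);
        printf("%*d: %10u\n", prec, 16, 7u);
        return 0;
    }
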
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..2e0eb4140951 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user *buf;
-	size_t size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
	int error = 0;
	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
		if (!uci->valid)
			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
	}
+out:
+	set_cpus_allowed_ptr(current, &old);
	return error;
 }
 
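
This reverts the old interface to the affinity-pinning approach: the task saves its
cpus_allowed mask, pins itself to each CPU in turn so request_microcode_user() (which
does copy_from_user and per-CPU MSR work) executes on the CPU being updated, and
restores the mask on the way out. A hedged user-space analogue of that
save/pin/restore shape, using the pthread affinity API:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    /* Run fn on each of ncpus CPUs by pinning ourselves there. */
    static int on_each_cpu(void (*fn)(int cpu), int ncpus)
    {
        cpu_set_t old, one;
        int cpu;

        if (pthread_getaffinity_np(pthread_self(), sizeof(old), &old))
            return -1;      /* like old = current->cpus_allowed */
        for (cpu = 0; cpu < ncpus; cpu++) {
            CPU_ZERO(&one);
            CPU_SET(cpu, &one);
            if (pthread_setaffinity_np(pthread_self(),
                                       sizeof(one), &one))
                break;
            fn(cpu);        /* now executing on `cpu` */
        }
        /* Restore, like set_cpus_allowed_ptr(current, &old) */
        pthread_setaffinity_np(pthread_self(), sizeof(old), &old);
        return 0;
    }

    static void hello(int cpu)
    {
        printf("cpu %d (sched_getcpu %d)\n", cpu, sched_getcpu());
    }

    int main(void)
    {
        return on_each_cpu(hello, 2);
    }
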
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
	__get_smp_config(0);
 }
 
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
	unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
 
 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
	int i;
 
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
	}
 }
 #else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */
 
 static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fe9345c967de..23b7c8f017e2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -35,6 +34,8 @@
 #include <asm/proto.h>
 #include <asm/ds.h>
 
+#include <trace/syscall.h>
+
 #include "tls.h"
 
 enum x86_regset {
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2aef36d8aca2..1340dad417f4 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -224,6 +224,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
		},
	},
+	{	/* Handle problems with rebooting on Dell DXP061 */
+		.callback = set_bios_reboot,
+		.ident = "Dell DXP061",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+		},
+	},
	{ }
 };
 
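
The new entry extends the DMI quirk table: machines whose SMBIOS vendor/product
strings match are switched to the BIOS reboot method at boot. Kernel DMI matching is
substring-based; a simplified sketch of the table-driven lookup (the strings mirror
the new entry; the matching loop is illustrative, not the kernel's dmi_check_system):

    #include <stdio.h>
    #include <string.h>

    struct quirk {
        const char *vendor, *product;
        void (*apply)(void);
    };

    static void set_bios_reboot(void) { puts("using BIOS reboot"); }

    /* Empty-terminated table, mirroring reboot_dmi_table[] */
    static const struct quirk quirks[] = {
        { "Dell Inc.", "Dell DXP061", set_bios_reboot },
        { NULL, NULL, NULL }
    };

    static void apply_quirks(const char *vendor, const char *product)
    {
        const struct quirk *q;

        /* DMI_MATCH is a substring test, hence strstr() */
        for (q = quirks; q->vendor; q++)
            if (strstr(vendor, q->vendor) &&
                strstr(product, q->product))
                q->apply();
    }

    int main(void)
    {
        /* Values that would come from the SMBIOS tables */
        apply_quirks("Dell Inc.", "Dell DXP061");
        return 0;
    }
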
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 
 /* position of pnode (which is nasid>>1): */
 static int uv_nshift __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
 
 static unsigned long uv_mmask __read_mostly;
 
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	return -1; /* shouldn't happen */
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
-	uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
 {
	int completion_status = 0;
	int right_shift;
	int tries = 0;
-	int blade;
+	int pnode;
	int bit;
	unsigned long mmr_offset;
	unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
	int i;
	int bit;
-	int blade;
+	int pnode;
	int uv_cpu;
-	int this_blade;
+	int this_pnode;
	int locals = 0;
	struct bau_desc *bau_desc;
 
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
	i = 0;
	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
			locals++;
			continue;
		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode - uv_partition_base_pnode,
+				&bau_desc->distribution);
		i++;
	}
	if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
	set_irq_regs(old_regs);
 }
 
+/*
+ * uv_enable_timeouts
+ *
+ * Each target blade (i.e. blades that have cpu's) needs to have
+ * shootdown message timeouts enabled. The timeout does not cause
+ * an interrupt, but causes an error message to be returned to
+ * the sender.
+ */
 static void uv_enable_timeouts(void)
 {
-	int i;
	int blade;
-	int last_blade;
+	int nblades;
	int pnode;
-	int cur_cpu = 0;
-	unsigned long apicid;
+	unsigned long mmr_image;
 
-	last_blade = -1;
-	for_each_online_node(i) {
-		blade = uv_node_to_blade_id(i);
-		if (blade == last_blade)
+	nblades = uv_num_possible_blades();
+
+	for (blade = 0; blade < nblades; blade++) {
+		if (!uv_blade_nr_possible_cpus(blade))
			continue;
-		last_blade = blade;
-		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+
		pnode = uv_blade_to_pnode(blade);
-		cur_cpu += uv_blade_nr_possible_cpus(i);
+		mmr_image =
+		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+		/*
+		 * Set the timeout period and then lock it in, in three
+		 * steps; captures and locks in the period.
+		 *
+		 * To program the period, the SOFT_ACK_MODE must be off.
+		 */
+		mmr_image &= ~((unsigned long)1 <<
+			       UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Set the 4-bit period.
+		 */
+		mmr_image &= ~((unsigned long)0xf <<
+			UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
+			UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Subsequent reversals of the timebase bit (3) cause an
+		 * immediate timeout of one or all INTD resources as
+		 * indicated in bits 2:0 (7 causes all of them to timeout).
+		 */
+		mmr_image |= ((unsigned long)1 <<
+			UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
 }
 
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
-			   uv_read_global_mmr64(uv_blade_to_pnode
-					(uv_cpu_to_blade_id(cpu)),
+			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
 * finish the initialization of the per-blade control structures
 */
 static void __init
-uv_table_bases_finish(int blade, int node, int cur_cpu,
+uv_table_bases_finish(int blade,
		      struct bau_control *bau_tablesp,
		      struct bau_desc *adp)
 {
	struct bau_control *bcp;
-	int i;
+	int cpu;
 
-	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
-		bcp = (struct bau_control *)&per_cpu(bau_control, i);
+	for_each_present_cpu(cpu) {
+		if (blade != uv_cpu_to_blade_id(cpu))
+			continue;
 
+		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
		bcp->bau_msg_head = bau_tablesp->va_queue_first;
		bcp->va_queue_first = bau_tablesp->va_queue_first;
		bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
	struct bau_desc *adp;
	struct bau_desc *ad2;
 
-	adp = (struct bau_desc *)
-	    kmalloc_node(16384, GFP_KERNEL, node);
+	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
	BUG_ON(!adp);
 
-	pa = __pa((unsigned long)adp);
+	pa = uv_gpa(adp); /* need the real nasid*/
	n = pa >> uv_nshift;
	m = pa & uv_mmask;
 
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
-		ad2->header.base_dest_nodeid =
-		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+		/*
+		 * base_dest_nodeid is the first node in the partition, so
+		 * the bit map will indicate partition-relative node numbers.
+		 * note that base_dest_nodeid is actually a nasid.
+		 */
+		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
 uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
	struct bau_payload_queue_entry *pqp;
+	unsigned long pa;
+	int pn;
	char *cp;
 
	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
+	/*
+	 * need the pnode of where the memory was really allocated
+	 */
+	pa = uv_gpa(pqp);
+	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
-			      ((unsigned long)pnode <<
-			       UV_PAYLOADQ_PNODE_SHIFT) |
+			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 /*
 * Initialization of each UV blade's structures
 */
-static int __init uv_init_blade(int blade, int node, int cur_cpu)
+static int __init uv_init_blade(int blade)
 {
+	int node;
	int pnode;
	unsigned long pa;
	unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;
 
+	node = blade_to_first_node(blade);
	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
-	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
+	uv_table_bases_finish(blade, bau_tablesp, adp);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
-	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+	apicid = blade_to_first_apicid(blade);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade)
 static int __init uv_bau_init(void)
 {
	int blade;
-	int node;
	int nblades;
-	int last_blade;
	int cur_cpu;
 
	if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
-	nblades = 0;
-	last_blade = -1;
-	cur_cpu = 0;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		nblades++;
-	}
+	nblades = uv_num_possible_blades();
+
	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);
 
-	last_blade = -1;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		uv_init_blade(blade, node, cur_cpu);
-		cur_cpu += uv_blade_nr_possible_cpus(blade);
-	}
+	uv_partition_base_pnode = 0x7fffffff;
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade) &&
+			(uv_blade_to_pnode(blade) < uv_partition_base_pnode))
+			uv_partition_base_pnode = uv_blade_to_pnode(blade);
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade))
+			uv_init_blade(blade);
+
	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();
 
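
The thread running through this file: BAU activation descriptors address their
targets by pnode, and on a partitioned system the distribution bitmap is indexed
relative to the partition's lowest pnode. uv_bau_init() therefore computes
uv_partition_base_pnode as the minimum pnode over blades that have CPUs, and
uv_flush_tlb_others() sets bit (pnode - base) per target. A small sketch of that
bitmap arithmetic (the blade-to-pnode values are made up):

    #include <stdio.h>

    #define NBLADES 4

    /* Made-up topology: this partition's pnodes start at 8 */
    static const int blade_pnode[NBLADES] = { 9, 8, 11, 10 };
    static const int blade_cpus[NBLADES]  = { 4, 4, 0, 4 };

    int main(void)
    {
        unsigned long distribution = 0;
        int base = 0x7fffffff, b;

        /* Lowest pnode among blades with cpus = partition base */
        for (b = 0; b < NBLADES; b++)
            if (blade_cpus[b] && blade_pnode[b] < base)
                base = blade_pnode[b];

        /* Partition-relative target bits, like bau_node_set() */
        for (b = 0; b < NBLADES; b++)
            if (blade_cpus[b])
                distribution |= 1UL << (blade_pnode[b] - base);

        printf("base pnode %d, distribution 0x%lx\n",
               base, distribution);    /* base 8, 0x7 */
        return 0;
    }
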
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
 
 #include <linux/sysdev.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
 
 struct kobject *sgi_uv_kobj;
 
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
 {
	unsigned long ret;
 
+	if (!is_uv_system())
+		return -ENODEV;
+
	if (!sgi_uv_kobj)
		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
	if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..0a5b04aa98f1 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void)
	}
 
	/*
-	 * for now OS knows only about FP/SSE
+	 * Support only the state known to OS.
	 */
	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
	xsave_init();