Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c           |  16
-rw-r--r--  arch/x86/kernel/bios_uv.c                    |   3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c   |  36
-rw-r--r--  arch/x86/kernel/hpet.c                       |   6
-rw-r--r--  arch/x86/kernel/i8253.c                      |   2
-rw-r--r--  arch/x86/kernel/kvmclock.c                   |   7
-rw-r--r--  arch/x86/kernel/microcode_core.c             |  35
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c                |   2
-rw-r--r--  arch/x86/kernel/tlb_uv.c                     | 189
-rw-r--r--  arch/x86/kernel/tsc.c                        |   2
-rw-r--r--  arch/x86/kernel/uv_sysfs.c                   |   4
-rw-r--r--  arch/x86/kernel/vmiclock_32.c                |   2
12 files changed, 194 insertions, 110 deletions
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..de1a50af807b 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -549,7 +549,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -592,6 +593,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +617,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +633,16 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
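
The new loop above gives cpuless nodes a blade by deriving a pnode from the node's first physical address. A minimal sketch of that mapping, assuming only that a UV physical address carries the pnode in the n_val bits above the m_val offset bits (the helper name is illustrative, not from this patch):

	/* Sketch: recover a pnode from a physical address, mirroring the
	 * cpuless-node fixup above. Illustrative helper, not kernel API. */
	static inline int paddr_to_pnode(unsigned long paddr, int m_val, int n_val)
	{
		unsigned long pnode_mask = (1UL << n_val) - 1;	/* low n_val bits */

		return (int)((paddr >> m_val) & pnode_mask);
	}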
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+		uv_systab.revision);
 }
 #else /* !CONFIG_EFI */
 
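
The one-line change above fixes a use after iounmap(): the revision was still being read through tab after the mapping had been dropped. A sketch of the safe ordering, using the same names as the hunk:

	/* Sketch: never dereference an ioremap()ed pointer after iounmap(). */
	memcpy(&uv_systab, tab, sizeof(struct uv_systab));	/* copy while mapped */
	iounmap(tab);						/* 'tab' is dead from here */
	printk(KERN_INFO "EFI UV System Table Revision %d\n",
		uv_systab.revision);				/* read the local copy */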
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 837c2c4cc203..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,14 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
-	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -204,7 +208,13 @@ static void drv_read(struct drv_cmd *cmd)
 
 static void drv_write(struct drv_cmd *cmd)
 {
+	int this_cpu;
+
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
 	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -281,11 +291,11 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 		return 0;
 
 	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(drv_data, cpu)->saved_aperf;
+				per_cpu(msr_data, cpu).saved_aperf;
 	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(drv_data, cpu)->saved_mperf;
-	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
-	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
 
 #ifdef __i386__
 	/*
@@ -329,7 +339,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
@@ -682,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-				" at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -710,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */
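
The drv_write() change above reflects that smp_call_function_many() runs the callback only on *other* CPUs and skips the caller, so the calling CPU must be covered locally while preemption is disabled. The general idiom, as a sketch (func, info and mask are placeholders standing in for do_drv_write, cmd and cmd->mask):

	/* Sketch: run func on every CPU in mask, including this one,
	 * which smp_call_function_many() does not call. */
	int cpu = get_cpu();			/* disables preemption */

	if (cpumask_test_cpu(cpu, mask))
		func(info);			/* current CPU: call directly */
	smp_call_function_many(mask, func, info, 1);	/* others, wait=1 */
	put_cpu();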
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..3f0019e0a229 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +756,7 @@ static int hpet_clocksource_register(void)
 	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
-	t1 = read_hpet();
+	t1 = hpet_readl(HPET_COUNTER);
 	rdtscll(start);
 
 	/*
@@ -770,7 +770,7 @@ static int hpet_clocksource_register(void)
 		rdtscll(now);
 	} while ((now - start) < 200000UL);
 
-	if (t1 == read_hpet()) {
+	if (t1 == hpet_readl(HPET_COUNTER)) {
 		printk(KERN_WARNING
 		       "HPET counter not counting. HPET disabled\n");
 		return -ENODEV;
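
The read_hpet() signature change here, and the matching pit_read(), kvm_clock_get_cycles(), read_tsc() and read_real_cycles() changes below, adapt to the clocksource API now passing the clocksource itself to the ->read() callback. A sketch of the shape such callbacks take after the change (all names here are illustrative):

	/* Sketch: ->read() now receives the clocksource, letting one
	 * callback serve several clocksource instances. */
	static cycle_t example_read(struct clocksource *cs)
	{
		return (cycle_t)example_counter_read();	/* placeholder hw read */
	}

	static struct clocksource example_cs = {
		.name = "example",
		.read = example_read,		/* new callback signature */
		.mask = CLOCKSOURCE_MASK(32),
	};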
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+	return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
-	.read = kvm_clock_read,
+	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user	*buf;
-	size_t			size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
 	int error = 0;
 	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
 	for_each_online_cpu(cpu) {
 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 		if (!uci->valid)
 			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
 		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
 	}
+out:
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
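
do_microcode_update() above returns to the pin-and-run idiom: save the caller's affinity, bind the task to the target CPU so the microcode request executes there, and restore the mask on every exit path. As a standalone sketch (fn is a placeholder for the per-CPU work):

	/* Sketch: run fn(cpu) on 'cpu' by temporarily pinning current. */
	cpumask_t saved = current->cpus_allowed;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	fn(cpu);				/* now running on 'cpu' */
	set_cpus_allowed_ptr(current, &saved);	/* always restore */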
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996ed..221a3853e268 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_map_ops swiotlb_dma_ops = {
+static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 
 /* position of pnode (which is nasid>>1): */
 static int uv_nshift __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
 
 static unsigned long uv_mmask __read_mostly;
 
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	return -1; /* shouldn't happen */
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
  * Free a software acknowledge hardware resource by clearing its Pending
  * bit. This will return a reply to the sender.
  * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
-	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
 	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * Returns @flush_mask if some remote flushing remains to be done. The
  * mask will have some bits still set.
  */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
 					     struct bau_desc *bau_desc,
 					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
 	int tries = 0;
-	int blade;
+	int pnode;
 	int bit;
 	unsigned long mmr_offset;
 	unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * use the IPI method of shootdown on them.
 	 */
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
 			continue;
 		cpumask_clear_cpu(bit, flush_mask);
 	}
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
 	int i;
 	int bit;
-	int blade;
+	int pnode;
 	int uv_cpu;
-	int this_blade;
+	int this_pnode;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
 	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	i = 0;
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
 			locals++;
 			continue;
 		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode - uv_partition_base_pnode,
+				&bau_desc->distribution);
 		i++;
 	}
 	if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc->payload.address = va;
 	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * uv_enable_timeouts
+ *
+ * Each target blade (i.e. blades that have cpu's) needs to have
+ * shootdown message timeouts enabled. The timeout does not cause
+ * an interrupt, but causes an error message to be returned to
+ * the sender.
+ */
 static void uv_enable_timeouts(void)
 {
-	int i;
 	int blade;
-	int last_blade;
+	int nblades;
 	int pnode;
-	int cur_cpu = 0;
-	unsigned long apicid;
+	unsigned long mmr_image;
 
-	last_blade = -1;
-	for_each_online_node(i) {
-		blade = uv_node_to_blade_id(i);
-		if (blade == last_blade)
+	nblades = uv_num_possible_blades();
+
+	for (blade = 0; blade < nblades; blade++) {
+		if (!uv_blade_nr_possible_cpus(blade))
 			continue;
-		last_blade = blade;
-		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+
 		pnode = uv_blade_to_pnode(blade);
-		cur_cpu += uv_blade_nr_possible_cpus(i);
+		mmr_image =
+		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+		/*
+		 * Set the timeout period and then lock it in, in three
+		 * steps; captures and locks in the period.
+		 *
+		 * To program the period, the SOFT_ACK_MODE must be off.
+		 */
+		mmr_image &= ~((unsigned long)1 <<
+		    UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Set the 4-bit period.
+		 */
+		mmr_image &= ~((unsigned long)0xf <<
+		    UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
+		    UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Subsequent reversals of the timebase bit (3) cause an
+		 * immediate timeout of one or all INTD resources as
+		 * indicated in bits 2:0 (7 causes all of them to timeout).
+		 */
+		mmr_image |= ((unsigned long)1 <<
+		    UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
 	}
 }
 
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 			   stat->requestee, stat->onetlb, stat->alltlb,
 			   stat->s_retry, stat->d_retry, stat->ptc_i);
 		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
-			   uv_read_global_mmr64(uv_blade_to_pnode
-					(uv_cpu_to_blade_id(cpu)),
+			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
 					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
 			   stat->sflush, stat->dflush,
 			   stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
  * finish the initialization of the per-blade control structures
  */
 static void __init
-uv_table_bases_finish(int blade, int node, int cur_cpu,
+uv_table_bases_finish(int blade,
 		      struct bau_control *bau_tablesp,
 		      struct bau_desc *adp)
 {
 	struct bau_control *bcp;
-	int i;
+	int cpu;
 
-	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
-		bcp = (struct bau_control *)&per_cpu(bau_control, i);
+	for_each_present_cpu(cpu) {
+		if (blade != uv_cpu_to_blade_id(cpu))
+			continue;
 
+		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
 		bcp->bau_msg_head = bau_tablesp->va_queue_first;
 		bcp->va_queue_first = bau_tablesp->va_queue_first;
 		bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
 	struct bau_desc *adp;
 	struct bau_desc *ad2;
 
-	adp = (struct bau_desc *)
-	    kmalloc_node(16384, GFP_KERNEL, node);
+	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
 	BUG_ON(!adp);
 
-	pa = __pa((unsigned long)adp);
+	pa = uv_gpa(adp); /* need the real nasid*/
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
 		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
-		ad2->header.base_dest_nodeid =
-		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+		/*
+		 * base_dest_nodeid is the first node in the partition, so
+		 * the bit map will indicate partition-relative node numbers.
+		 * note that base_dest_nodeid is actually a nasid.
+		 */
+		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
 		ad2->header.command = UV_NET_ENDPOINT_INTD;
 		ad2->header.int_both = 1;
 		/*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
 uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
 	struct bau_payload_queue_entry *pqp;
+	unsigned long pa;
+	int pn;
 	char *cp;
 
 	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 	cp = (char *)pqp + 31;
 	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 	bau_tablesp->va_queue_first = pqp;
+	/*
+	 * need the pnode of where the memory was really allocated
+	 */
+	pa = uv_gpa(pqp);
+	pn = pa >> uv_nshift;
 	uv_write_global_mmr64(pnode,
 			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
-			      ((unsigned long)pnode <<
-			       UV_PAYLOADQ_PNODE_SHIFT) |
+			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
 			      uv_physnodeaddr(pqp));
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
 			      uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 /*
  * Initialization of each UV blade's structures
  */
-static int __init uv_init_blade(int blade, int node, int cur_cpu)
+static int __init uv_init_blade(int blade)
 {
+	int node;
 	int pnode;
 	unsigned long pa;
 	unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 	struct bau_payload_queue_entry *pqp;
 	struct bau_control *bau_tablesp;
 
+	node = blade_to_first_node(blade);
 	bau_tablesp = uv_table_bases_init(blade, node);
 	pnode = uv_blade_to_pnode(blade);
 	adp = uv_activation_descriptor_init(node, pnode);
 	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
-	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
+	uv_table_bases_finish(blade, bau_tablesp, adp);
 	/*
 	 * the below initialization can't be in firmware because the
 	 * messaging IRQ will be determined by the OS
 	 */
-	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+	apicid = blade_to_first_apicid(blade);
 	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
 	if ((pa & 0xff) != UV_BAU_MESSAGE) {
 		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 static int __init uv_bau_init(void)
 {
 	int blade;
-	int node;
 	int nblades;
-	int last_blade;
 	int cur_cpu;
 
 	if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
 	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
-	nblades = 0;
-	last_blade = -1;
-	cur_cpu = 0;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		nblades++;
-	}
+	nblades = uv_num_possible_blades();
+
 	uv_bau_table_bases = (struct bau_control **)
 	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
 	BUG_ON(!uv_bau_table_bases);
 
-	last_blade = -1;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		uv_init_blade(blade, node, cur_cpu);
-		cur_cpu += uv_blade_nr_possible_cpus(blade);
-	}
+	uv_partition_base_pnode = 0x7fffffff;
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade) &&
+			(uv_blade_to_pnode(blade) < uv_partition_base_pnode))
+			uv_partition_base_pnode = uv_blade_to_pnode(blade);
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade))
+			uv_init_blade(blade);
+
 	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();
 
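
Two details in the tlb_uv.c changes above are worth spelling out. Distribution bits become partition-relative: a target is recorded as pnode - uv_partition_base_pnode, matching base_dest_nodeid (actually a nasid, i.e. pnode << 1) now pointing at the partition's lowest pnode. And descriptor/queue addresses are handed to the hub via uv_gpa() rather than __pa(), since on a partitioned system only the global address preserves the real nasid. A sketch of the bit-setting step this implies (the helper name is mine, not the kernel's):

	/* Sketch: record a target pnode relative to the partition base,
	 * as uv_flush_tlb_others() does above. Illustrative helper. */
	static inline void bau_set_target(struct bau_desc *desc, int pnode)
	{
		bau_node_set(pnode - uv_partition_base_pnode, &desc->distribution);
	}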
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
  */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)get_cycles();
 
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
 
 #include <linux/sysdev.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
 
 struct kobject *sgi_uv_kobj;
 
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
 {
 	unsigned long ret;
 
+	if (!is_uv_system())
+		return -ENODEV;
+
 	if (!sgi_uv_kobj)
 		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
 	if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
 	return max(ret, clocksource_vmi.cycle_last);