Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/apic/x2apic_uv_x.c        |  6
 arch/x86/kernel/cpu/mcheck/mce_amd.c      |  4
 arch/x86/kernel/cpu/mcheck/therm_throt.c  |  9
 arch/x86/kernel/cpu/perf_event.c          | 59
 arch/x86/kernel/cpu/perf_event_intel.c    | 15
 arch/x86/kernel/cpu/perf_event_p4.c       |  2
 arch/x86/kernel/early-quirks.c            | 18
 arch/x86/kernel/hpet.c                    | 31
 arch/x86/kernel/hw_breakpoint.c           | 40
 arch/x86/kernel/trampoline.c              |  3
 arch/x86/kernel/tsc.c                     |  2
 11 files changed, 104 insertions(+), 85 deletions(-)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7b598b84c902..f744f54cb248 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -698,9 +698,11 @@ void __init uv_system_init(void)
 		for (j = 0; j < 64; j++) {
 			if (!test_bit(j, &present))
 				continue;
-			uv_blade_info[blade].pnode = (i * 64 + j);
+			pnode = (i * 64 + j);
+			uv_blade_info[blade].pnode = pnode;
 			uv_blade_info[blade].nr_possible_cpus = 0;
 			uv_blade_info[blade].nr_online_cpus = 0;
+			max_pnode = max(pnode, max_pnode);
 			blade++;
 		}
 	}
@@ -738,7 +740,6 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
-		max_pnode = max(pnode, max_pnode);
 	}
 
 	/* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@ void __init uv_system_init(void)
 		pnode = (paddr >> m_val) & pnode_mask;
 		blade = boot_pnode_to_blade(pnode);
 		uv_node_to_blade[nid] = blade;
-		max_pnode = max(pnode, max_pnode);
 	}
 
 	map_gru_high(max_pnode);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d8fe8c..5e975298fa81 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
-	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
 		kfree(b);
 		err = -ENOMEM;
 		goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
 	cpumask_setall(b->cpus);
 #else
-	cpumask_copy(b->cpus, c->llc_shared_map);
+	cpumask_set_cpu(cpu, b->cpus);
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26d4fea..d9368eeda309 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+					      unsigned int cpu)
 {
 	int err;
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
 	if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		mutex_lock(&therm_cpu_lock);
-		err = thermal_throttle_add_dev(sys_dev);
+		err = thermal_throttle_add_dev(sys_dev, cpu);
 		mutex_unlock(&therm_cpu_lock);
 		WARN_ON(err);
 		break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
 	/* connect live CPUs to sysfs */
 	for_each_online_cpu(cpu) {
-		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
 		WARN_ON(err);
 	}
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..3efdf2870a35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		/*
 		 * event overflow
 		 */
-		handled = 1;
+		handled++;
 		data.period = event->hw.last_period;
 
 		if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+	unsigned int marked;
+	int handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
-	struct pt_regs *regs;
+	unsigned int this_nmi;
+	int handled;
 
 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
 	case DIE_NMI:
 	case DIE_NMI_IPI:
 		break;
-
+	case DIE_NMIUNKNOWN:
+		this_nmi = percpu_read(irq_stat.__nmi_count);
+		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+			/* let the kernel handle the unknown nmi */
+			return NOTIFY_DONE;
+		/*
+		 * This one is a PMU back-to-back nmi. Two events
+		 * trigger 'simultaneously' raising two back-to-back
+		 * NMIs. If the first NMI handles both, the latter
+		 * will be empty and daze the CPU. So, we drop it to
+		 * avoid false-positive 'unknown nmi' messages.
+		 */
+		return NOTIFY_STOP;
 	default:
 		return NOTIFY_DONE;
 	}
 
-	regs = args->regs;
-
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	/*
-	 * Can't rely on the handled return value to say it was our NMI, two
-	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
-	 *
-	 * If the first NMI handles both, the latter will be empty and daze
-	 * the CPU.
-	 */
-	x86_pmu.handle_irq(regs);
+
+	handled = x86_pmu.handle_irq(args->regs);
+	if (!handled)
+		return NOTIFY_DONE;
+
+	this_nmi = percpu_read(irq_stat.__nmi_count);
+	if ((handled > 1) ||
+		/* the next nmi could be a back-to-back nmi */
+	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+		/*
+		 * We could have two subsequent back-to-back nmis: The
+		 * first handles more than one counter, the 2nd
+		 * handles only one counter and the 3rd handles no
+		 * counter.
+		 *
+		 * This is the 2nd nmi because the previous was
+		 * handling more than one counter. We will mark the
+		 * next (3rd) and then drop it if unhandled.
+		 */
+		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+		__get_cpu_var(pmu_nmi).handled = handled;
+	}
 
 	return NOTIFY_STOP;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	int bit, loops;
-	u64 ack, status;
+	u64 status;
+	int handled = 0;
 
 	perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	loops = 0;
 again:
+	intel_pmu_ack_status(status);
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
 	}
 
 	inc_irq_stat(apic_perf_irqs);
-	ack = status;
 
 	intel_pmu_lbr_read();
 
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
-	if (__test_and_clear_bit(62, (unsigned long *)&status))
+	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		handled++;
 		x86_pmu.drain_pebs(regs);
+	}
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
+		handled++;
+
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -761,8 +766,6 @@ again:
 			x86_pmu_stop(event);
 	}
 
-	intel_pmu_ack_status(ack);
-
 	/*
 	 * Repeat if there is more work to be done:
 	 */
@@ -772,7 +775,7 @@ again:
 
 done:
 	intel_pmu_enable_all(0);
-	return 1;
+	return handled;
 }
 
 static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9cc58b..b560db3305be 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled > 0;
+	return handled;
 }
 
 /*
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index e5cc7e82e60d..ebdb85cf2686 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -18,7 +18,6 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
-#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -192,21 +191,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-/*
- * Force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
- *
- * We do this on all SMBUS incarnations for now until we have more
- * information about the affected chipsets.
- */
-static void __init ati_hpet_bugs(int num, int slot, int func)
-{
-#ifdef CONFIG_HPET_TIMER
-	hpet_readback_cmp = 1;
-#endif
-}
-
 #define QFLAG_APPLY_ONCE 0x1
 #define QFLAG_APPLIED 0x2
 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +220,6 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-	{ PCI_VENDOR_ID_ATI, PCI_ANY_ID,
-	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
 	{}
 };
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 351f9c0fea1f..410fdb3f1939 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -35,7 +35,6 @@
 unsigned long hpet_address;
 u8 hpet_blockid; /* OS timer block num */
 u8 hpet_msi_disable;
-u8 hpet_readback_cmp;
 
 #ifdef CONFIG_PCI_MSI
 static unsigned long hpet_num_timers;
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta,
 	 * at that point and we would wait for the next hpet interrupt
 	 * forever. We found out that reading the CMP register back
 	 * forces the transfer so we can rely on the comparison with
-	 * the counter register below.
+	 * the counter register below. If the read back from the
+	 * compare register does not match the value we programmed
+	 * then we might have a real hardware problem. We can not do
+	 * much about it here, but at least alert the user/admin with
+	 * a prominent warning.
 	 *
-	 * That works fine on those ATI chipsets, but on newer Intel
-	 * chipsets (ICH9...) this triggers due to an erratum: Reading
-	 * the comparator immediately following a write is returning
-	 * the old value.
+	 * An erratum on some chipsets (ICH9,..), results in
+	 * comparator read immediately following a write returning old
+	 * value. Workaround for this is to read this value second
+	 * time, when first read returns old value.
 	 *
-	 * We restrict the read back to the affected ATI chipsets (set
-	 * by quirks) and also run it with hpet=verbose for debugging
-	 * purposes.
+	 * In fact the write to the comparator register is delayed up
+	 * to two HPET cycles so the workaround we tried to restrict
+	 * the readback to those known to be borked ATI chipsets
+	 * failed miserably. So we give up on optimizations forever
+	 * and penalize all HPET incarnations unconditionally.
 	 */
-	if (hpet_readback_cmp || hpet_verbose) {
-		u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
-
-		if (cmp != cnt)
+	if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+		if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
 			printk_once(KERN_WARNING
 				"hpet: compare register read back failed.\n");
 	}
 
 	return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index a474ec37c32f..ff15c9dcc25d 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 int arch_bp_generic_fields(int x86_len, int x86_type,
 			   int *gen_len, int *gen_type)
 {
-	/* Len */
-	switch (x86_len) {
-	case X86_BREAKPOINT_LEN_X:
+	/* Type */
+	switch (x86_type) {
+	case X86_BREAKPOINT_EXECUTE:
+		if (x86_len != X86_BREAKPOINT_LEN_X)
+			return -EINVAL;
+
+		*gen_type = HW_BREAKPOINT_X;
 		*gen_len = sizeof(long);
+		return 0;
+	case X86_BREAKPOINT_WRITE:
+		*gen_type = HW_BREAKPOINT_W;
 		break;
+	case X86_BREAKPOINT_RW:
+		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Len */
+	switch (x86_len) {
 	case X86_BREAKPOINT_LEN_1:
 		*gen_len = HW_BREAKPOINT_LEN_1;
 		break;
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
 		return -EINVAL;
 	}
 
-	/* Type */
-	switch (x86_type) {
-	case X86_BREAKPOINT_EXECUTE:
-		*gen_type = HW_BREAKPOINT_X;
-		break;
-	case X86_BREAKPOINT_WRITE:
-		*gen_type = HW_BREAKPOINT_W;
-		break;
-	case X86_BREAKPOINT_RW:
-		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
-		break;
-	default:
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		ret = -EINVAL;
 
 	switch (info->len) {
-	case X86_BREAKPOINT_LEN_X:
-		align = sizeof(long) -1;
-		break;
 	case X86_BREAKPOINT_LEN_1:
 		align = 0;
 		break;
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495b3673..e2a595257390 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
 	/* Copy kernel address range */
 	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
 			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min_t(unsigned long, KERNEL_PGD_PTRS,
-			      KERNEL_PGD_BOUNDARY));
+			KERNEL_PGD_PTRS);
 
 	/* Initialize low mappings */
 	clone_pgd_range(trampoline_pg_dir,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d632934cb638..26a863a9c2a8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void)
 
 	local_irq_save(flags);
 
-	get_cpu_var(cyc2ns_offset) = 0;
+	__get_cpu_var(cyc2ns_offset) = 0;
 	offset = cyc2ns_suspend - sched_clock();
 
 	for_each_possible_cpu(cpu)