25 files changed, 59 insertions, 50 deletions
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index ea33236190b1..6a9b96b4624d 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
 
 	if (has_cpuflag(X86_FEATURE_TSC)) {
 		debug_putstr(" RDTSC");
-		raw = native_read_tsc();
+		raw = rdtsc();
 
 		random ^= raw;
 		use_i8254 = false;
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 972b488ac16a..0340d93c18ca 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)native_read_tsc();
+	ret = (cycle_t)rdtsc();
 
 	last = gtod->cycle_last;
 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c89ed6ceed02..ff0c120dafe5 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -109,7 +109,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer. The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect. The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
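
Note on usage (illustrative, not part of this patch): the kerneldoc above only guarantees program-order placement, so callers that care about ordering still pair the read with rdtsc_barrier(), as the vread_tsc(), trace_clock_x86_tsc() and delay_tsc() hunks elsewhere in this series do. A minimal sketch of that pattern, with a hypothetical helper name measure_cycles():

static inline unsigned long long measure_cycles(void (*fn)(void))
{
	unsigned long long start, end;

	rdtsc_barrier();	/* keep the first read from speculating ahead of prior work */
	start = rdtsc();
	fn();			/* the work being timed */
	rdtsc_barrier();	/* order the second read after the preceding work */
	end = rdtsc();

	return end - start;	/* cycle delta; only meaningful on a single CPU */
}
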
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 2bd69d62c623..5c490db62e32 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = native_read_tsc() - src->tsc_timestamp;
+	u64 delta = rdtsc() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index bc5fa2af112e..58505f01962f 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	canary += tsc + (tsc << 32UL);
 
 	current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index b4883902948b..3df7675debcf 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
 		return 0;
 #endif
 
-	return native_read_tsc();
+	return rdtsc();
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 25efa534c4e4..222a57076039 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
 
 	/* Verify whether apbt counter works */
 	t1 = dw_apb_clocksource_read(clocksource_apbt);
-	start = native_read_tsc();
+	start = rdtsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 
 	/* APBT is the only always on clocksource, it has to work! */
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
 
-	t1 = native_read_tsc();
+	t1 = rdtsc();
 
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
 
-	t2 = native_read_tsc();
+	t2 = rdtsc();
 
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 51af1ed1ae2e..0d71cd9b4a50 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
 {
 	u64 tsc;
 
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;
 }
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	unsigned long pm = acpi_pm_read_early();
 
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc();
 
 	switch (lapic_cal_loops++) {
 	case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc();
 
 	if (disable_apic) {
 		disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
 		}
 		if (queued) {
 			if (cpu_has_tsc && cpu_khz) {
-				ntsc = native_read_tsc();
+				ntsc = rdtsc();
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
 				max_loops--;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a69710db6112..51ad2af84a72 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -125,10 +125,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
 		n = K6_BUG_LOOP;
 		f_vide = vide;
-		d = native_read_tsc();
+		d = rdtsc();
 		while (n--)
 			f_vide();
-		d2 = native_read_tsc();
+		d2 = rdtsc();
 		d = d2-d;
 
 		if (d > 20*K6_BUG_LOOP)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a5283d2d0094..96cceccd11b4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
 {
 	memset(m, 0, sizeof(struct mce));
 	m->cpu = m->extcpu = smp_processor_id();
-	m->tsc = native_read_tsc();
+	m->tsc = rdtsc();
 	/* We hope get_seconds stays lockless */
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
 
-	cpu_tsc[smp_processor_id()] = native_read_tsc();
+	cpu_tsc[smp_processor_id()] = rdtsc();
 }
 
 static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 334a2a9c034d..67315cd0132c 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
 	 */
 	if (!arch_get_random_long(&rand)) {
 		/* The constant is an arbitrary large prime */
-		rand = native_read_tsc();
+		rand = rdtsc();
 		rand *= 0xc345c6b72fd16123UL;
 	}
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cc390fe69b71..f75c5908c7a6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
 
 	/* Verify whether hpet counter works */
 	t1 = hpet_readl(HPET_COUNTER);
-	start = native_read_tsc();
+	start = rdtsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 
 	if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index bd8f4d41bd56..67efb8c96fc4 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
 	u64 ret;
 
 	rdtsc_barrier();
-	ret = native_read_tsc();
+	ret = rdtsc();
 
 	return ret;
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e66f5dcaeb63..21d6e04e3e82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
 	data = cyc2ns_write_begin(cpu);
 
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	ns_now = cycles_2_ns(tsc_now);
 
 	/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
 	}
 
 	/* read the Time Stamp Counter: */
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 
 	/* return the value in ns */
 	return cycles_2_ns(tsc_now);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2f0ade48614f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
-	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
@@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		local_irq_save(flags);
 
 		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 602b974a60a6..8dfbad7a2c44 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1080,7 +1080,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
 	u64 tsc;
 
-	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+	tsc = svm_scale_tsc(vcpu, rdtsc());
 
 	return target_tsc - tsc;
 }
@@ -3079,7 +3079,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	switch (msr_info->index) {
 	case MSR_IA32_TSC: {
 		msr_info->data = svm->vmcb->control.tsc_offset +
-			svm_scale_tsc(vcpu, native_read_tsc());
+			svm_scale_tsc(vcpu, rdtsc());
 
 		break;
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4fa1ccad7beb..10d69a6df14f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
 {
 	u64 host_tsc, tsc_offset;
 
-	host_tsc = native_read_tsc();
+	host_tsc = rdtsc();
 	tsc_offset = vmcs_read64(TSC_OFFSET);
 	return host_tsc + tsc_offset;
 }
@@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-	return target_tsc - native_read_tsc();
+	return target_tsc - rdtsc();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f771058cfb5c..dfa97139282d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1455,7 +1455,7 @@ static cycle_t read_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)native_read_tsc();
+	ret = (cycle_t)rdtsc();
 
 	last = pvclock_gtod_data.clock.cycle_last;
 
@@ -1646,7 +1646,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		return 1;
 	}
 	if (!use_master_clock) {
-		host_tsc = native_read_tsc();
+		host_tsc = rdtsc();
 		kernel_ns = get_kernel_ns();
 	}
 
@@ -2810,7 +2810,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-				native_read_tsc() - vcpu->arch.last_host_tsc;
+				rdtsc() - vcpu->arch.last_host_tsc;
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
@@ -2838,7 +2838,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
-	vcpu->arch.last_host_tsc = native_read_tsc();
+	vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -6623,7 +6623,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		hw_breakpoint_restore();
 
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-							native_read_tsc());
+							rdtsc());
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
@@ -7437,7 +7437,7 @@ int kvm_arch_hardware_enable(void)
 	if (ret != 0)
 		return ret;
 
-	local_tsc = native_read_tsc();
+	local_tsc = rdtsc();
 	stable = !check_tsc_unstable();
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 35115f3786a9..f24bc59ab0a0 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -55,10 +55,10 @@ static void delay_tsc(unsigned long __loops)
 	preempt_disable();
 	cpu = smp_processor_id();
 	rdtsc_barrier();
-	bclock = native_read_tsc();
+	bclock = rdtsc();
 	for (;;) {
 		rdtsc_barrier();
-		now = native_read_tsc();
+		now = rdtsc();
 		if ((now - bclock) >= loops)
 			break;
 
@@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
 			loops -= (now - bclock);
 			cpu = smp_processor_id();
 			rdtsc_barrier();
-			bclock = native_read_tsc();
+			bclock = rdtsc();
 		}
 	}
 	preempt_enable();
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
 int read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		*timer_val = native_read_tsc();
+		*timer_val = rdtsc();
 		return 0;
 	}
 	return -1;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..7c56d7eaa671 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -765,7 +765,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	local_irq_restore(flags);
 
 	cpu->last_sample_time = cpu->sample.time;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index abc0cb22e750..4a2a9e370be7 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -149,9 +149,9 @@ static int old_gameport_measure_speed(struct gameport *gameport)
 
 	for(i = 0; i < 50; i++) {
 		local_irq_save(flags);
-		t1 = native_read_tsc();
+		t1 = rdtsc();
 		for (t = 0; t < 50; t++) gameport_read(gameport);
-		t2 = native_read_tsc();
+		t2 = rdtsc();
 		local_irq_restore(flags);
 		udelay(i * 10);
 		if (t2 - t1 < tx) tx = t2 - t1;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index f871b4f00056..6f8b084e13d0 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,7 +143,7 @@ struct analog_port {
 
 #include <linux/i8253.h>
 
-#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0)
+#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
 #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
 #define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
 static unsigned int get_time_pit(void)
@@ -160,7 +160,7 @@ static unsigned int get_time_pit(void)
 	return count;
 }
 #elif defined(__x86_64__)
-#define GET_TIME(x) do { x = (unsigned int)native_read_tsc(); } while (0)
+#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
 #define DELTA(x,y) ((y)-(x))
 #define TIME_NAME "TSC"
 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 44e5c3b5e0af..72c9f1f352b4 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -638,7 +638,7 @@ static int receive(struct net_device *dev, int cnt)
 #define GETTICK(x) \
 ({ \
 	if (cpu_has_tsc) \
-		x = (unsigned int)native_read_tsc(); \
+		x = (unsigned int)rdtsc(); \
 })
 #else /* __i386__ */
 #define GETTICK(x)
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index ab13448defcf..2ac0c704bcb8 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 
 	/* check result for the last window */
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 
 	/* calculate pkg cstate vs tsc ratio */
 	if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
 	u64 val64;
 
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	jiffies_now = jiffies;
 
 	/* calculate pkg cstate vs tsc ratio */
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index f02b0c0bff9b..6ff8383f2941 100644
--- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
 
 	printk(KERN_DEBUG "start--> \n");
 	then = read_pmtmr();
-	then_tsc = native_read_tsc();
+	then_tsc = rdtsc();
 	for (i=0;i<20;i++) {
 		mdelay(100);
 		now = read_pmtmr();
-		now_tsc = native_read_tsc();
+		now_tsc = rdtsc();
 		diff = (now - then) & 0xFFFFFF;
 		diff_tsc = now_tsc - then_tsc;
 		printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);