 arch/x86/kernel/time_64.c  |  2 +-
 arch/x86/kernel/tsc_64.c   | 12 ++++++------
 arch/x86/kernel/tsc_sync.c |  4 ++--
 include/asm-x86/tsc.h      | 60 ++++++++--------------------------------------
 4 files changed, 17 insertions(+), 61 deletions(-)
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 91d4d49..61b17f5 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -83,7 +83,7 @@ unsigned long __init native_calculate_cpu_khz(void)
 	rdtscl(tsc_start);
 	do {
 		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles_sync();
+		tsc_now = get_cycles();
 	} while ((tsc_now - tsc_start) < TICK_COUNT);
 
 	local_irq_restore(flags);
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 3723401..2cc55b7 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -181,12 +181,12 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
 	int i;
 
 	for (i = 0; i < MAX_RETRIES; i++) {
-		t1 = get_cycles_sync();
+		t1 = get_cycles();
 		if (hpet)
 			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
 		else
 			*pm = acpi_pm_read_early();
-		t2 = get_cycles_sync();
+		t2 = get_cycles();
 		if ((t2 - t1) < SMI_TRESHOLD)
 			return t2;
 	}
@@ -210,9 +210,9 @@ void __init tsc_calibrate(void)
 	outb(0xb0, 0x43);
 	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
 	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
-	tr1 = get_cycles_sync();
+	tr1 = get_cycles();
 	while ((inb(0x61) & 0x20) == 0);
-	tr2 = get_cycles_sync();
+	tr2 = get_cycles();
 
 	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
@@ -300,13 +300,13 @@ __setup("notsc", notsc_setup);
 
 /* clock source code: */
 static cycle_t read_tsc(void)
 {
-	cycle_t ret = (cycle_t)get_cycles_sync();
+	cycle_t ret = (cycle_t)get_cycles();
 	return ret;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
-	cycle_t ret = (cycle_t)vget_cycles_sync();
+	cycle_t ret = (cycle_t)vget_cycles();
 	return ret;
 }
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 05d8f25..ace3405 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,7 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
-	start = get_cycles_sync();
+	start = get_cycles();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +61,7 @@ static __cpuinit void check_tsc_warp(void)
 	 */
 	__raw_spin_lock(&sync_lock);
 	prev = last_tsc;
-	now = get_cycles_sync();
+	now = get_cycles();
 	last_tsc = now;
 	__raw_spin_unlock(&sync_lock);
 
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 4013037..f51a50d 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -36,62 +36,18 @@ static inline cycles_t get_cycles(void)
 	return ret;
 }
 
-/* Like get_cycles, but make sure the CPU is synchronized. */
-static __always_inline cycles_t __get_cycles_sync(void)
+static inline cycles_t vget_cycles(void)
 {
-	unsigned long long ret;
-	unsigned eax, edx;
-
 	/*
-	 * Use RDTSCP if possible; it is guaranteed to be synchronous
-	 * and doesn't cause a VMEXIT on Hypervisors
+	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
+	 * access boot_cpu_data (which is not VDSO-safe):
 	 */
-	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
-		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
-		       "a" (0U), "d" (0U) : "ecx", "memory");
-	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
-
-	if (ret)
-		return ret;
-
-	/*
-	 * Don't do an additional sync on CPUs where we know
-	 * RDTSC is already synchronous:
-	 */
-	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
-		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
-
-	return 0;
-}
-
-static __always_inline cycles_t get_cycles_sync(void)
-{
-	unsigned long long ret;
-	ret = __get_cycles_sync();
-	if (!ret)
-		rdtscll(ret);
-	return ret;
-}
-
-#ifdef CONFIG_PARAVIRT
-/*
- * For paravirt guests, some functionalities are executed through function
- * pointers in the various pvops structures.
- * These function pointers exist inside the kernel and can not
- * be accessed by user space. To avoid this, we make a copy of the
- * get_cycles_sync (called in kernel) but force the use of native_read_tsc.
- * Ideally, the guest should set up it's own clock and vread
- */
-static __always_inline long long vget_cycles_sync(void)
-{
-	unsigned long long ret;
-	ret = __get_cycles_sync();
-	if (!ret)
-		ret = native_read_tsc();
-	return ret;
-}
-#else
-# define vget_cycles_sync() get_cycles_sync()
+#ifndef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		return 0;
 #endif
+	return (cycles_t) native_read_tsc();
+}
 
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
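
Editor's note: the standalone userspace sketch below is an illustration, not part of this patch. It contrasts the two ways of reading the TSC that the patch is about: the plain RDTSC read that the surviving get_cycles()/vget_cycles() helpers rely on, and a serialized RDTSCP read like the one the removed get_cycles_sync() preferred when X86_FEATURE_RDTSCP was available. The names rdtsc_plain() and rdtsc_serialized() are invented for the example; RDTSCP raises SIGILL on CPUs that lack it.

/*
 * Userspace sketch (assumed names, not kernel code). Build on x86-64:
 *     gcc -O2 -o tsc_demo tsc_demo.c
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdtsc_plain(void)
{
	uint32_t lo, hi;

	/* Plain RDTSC: the CPU may execute it out of order. */
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

static inline uint64_t rdtsc_serialized(void)
{
	uint32_t lo, hi;

	/*
	 * RDTSCP waits for all prior instructions to retire before
	 * reading the counter; it also writes IA32_TSC_AUX into ECX,
	 * hence the clobber.
	 */
	__asm__ __volatile__("rdtscp" : "=a" (lo), "=d" (hi) : : "ecx");
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t t1 = rdtsc_plain();
	uint64_t t2 = rdtsc_serialized();

	printf("rdtsc: %llu  rdtscp: %llu  delta: %llu\n",
	       (unsigned long long)t1, (unsigned long long)t2,
	       (unsigned long long)(t2 - t1));
	return 0;
}

The serialization (via CPUID or RDTSCP) is what the deleted __get_cycles_sync() machinery provided; as the diff shows, all callers now take the plain, unserialized read, and the new vget_cycles() is just a bare native_read_tsc() guarded by a TSC-capability check on !CONFIG_X86_TSC builds.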