Diffstat (limited to 'arch/mips/kernel/time.c')
-rw-r--r-- | arch/mips/kernel/time.c | 43
1 files changed, 35 insertions, 8 deletions
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 787ed54..51273b7 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -163,7 +163,7 @@ void do_gettimeofday(struct timeval *tv)
         unsigned long seq;
         unsigned long lost;
         unsigned long usec, sec;
-        unsigned long max_ntp_tick = tick_usec - tickadj;
+        unsigned long max_ntp_tick;
 
         do {
                 seq = read_seqbegin(&xtime_lock);
@@ -178,12 +178,13 @@ void do_gettimeofday(struct timeval *tv)
                  * Better to lose some accuracy than have time go backwards..
                  */
                 if (unlikely(time_adjust < 0)) {
+                        max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
                         usec = min(usec, max_ntp_tick);
 
                         if (lost)
                                 usec += lost * max_ntp_tick;
                 } else if (unlikely(lost))
-                        usec += lost * tick_usec;
+                        usec += lost * (USEC_PER_SEC / HZ);
 
                 sec = xtime.tv_sec;
                 usec += (xtime.tv_nsec / 1000);
@@ -423,6 +424,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         unsigned long j;
         unsigned int count;
 
+        write_seqlock(&xtime_lock);
+
         count = mips_hpt_read();
         mips_timer_ack();
 
@@ -440,7 +443,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
          * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
          * called as close as possible to 500 ms before the new second starts.
          */
-        write_seqlock(&xtime_lock);
         if (ntp_synced() &&
             xtime.tv_sec > last_rtc_update + 660 &&
             (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
@@ -452,7 +454,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                         last_rtc_update = xtime.tv_sec - 600;
                 }
         }
-        write_sequnlock(&xtime_lock);
 
         /*
          * If jiffies has overflown in this timer_interrupt, we must
@@ -495,6 +496,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                 }
         }
 
+        write_sequnlock(&xtime_lock);
+
         /*
          * In UP mode, we call local_timer_interrupt() to do profiling
          * and process accouting.
@@ -507,14 +510,38 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         return IRQ_HANDLED;
 }
 
+int null_perf_irq(struct pt_regs *regs)
+{
+        return 0;
+}
+
+int (*perf_irq)(struct pt_regs *regs) = null_perf_irq;
+
+EXPORT_SYMBOL(null_perf_irq);
+EXPORT_SYMBOL(perf_irq);
+
 asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
 {
+        int r2 = cpu_has_mips_r2;
+
         irq_enter();
         kstat_this_cpu.irqs[irq]++;
 
+        /*
+         * Suckage alert:
+         * Before R2 of the architecture there was no way to see if a
+         * performance counter interrupt was pending, so we have to run the
+         * performance counter interrupt handler anyway.
+         */
+        if (!r2 || (read_c0_cause() & (1 << 26)))
+                if (perf_irq(regs))
+                        goto out;
+
         /* we keep interrupt disabled all the time */
-        timer_interrupt(irq, NULL, regs);
+        if (!r2 || (read_c0_cause() & (1 << 30)))
+                timer_interrupt(irq, NULL, regs);
 
+out:
         irq_exit();
 }
 
@@ -628,9 +655,9 @@ void __init time_init(void)
                 mips_hpt_init = c0_hpt_init;
         }
 
-        if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32) ||
-            (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
-            (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
+        if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
+            (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
+            (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
                 /*
                  * We need to calibrate the counter but we don't have
                  * 64-bit division.
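
Not part of the commit above, but for context: the newly exported perf_irq pointer is the hook a profiling driver (oprofile is the expected user) assigns to in order to handle performance-counter overflow interrupts, which on pre-R2 CPUs share the timer interrupt line; on R2 CPUs the handler is only called when Cause.PCI (bit 26) is pending and the timer path only runs when Cause.TI (bit 30) is pending. The sketch below is illustrative only; the names my_perf_handler, my_perf_init and my_perf_exit are invented for the example. A handler returns nonzero to claim the interrupt, which makes ll_timer_interrupt() skip the shared timer tick via its goto out.

/* Illustrative sketch only -- not from this patch. */
#include <linux/init.h>
#include <linux/module.h>
#include <asm/ptrace.h>

/* Hook and default handler exported by arch/mips/kernel/time.c above. */
extern int (*perf_irq)(struct pt_regs *regs);
extern int null_perf_irq(struct pt_regs *regs);

static int (*save_perf_irq)(struct pt_regs *regs);

/*
 * Hypothetical overflow handler: service the CP0 performance counters
 * here and return nonzero to claim the interrupt.
 */
static int my_perf_handler(struct pt_regs *regs)
{
        /* read/restart counters here */
        return 1;
}

static int __init my_perf_init(void)
{
        save_perf_irq = perf_irq;       /* usually null_perf_irq */
        perf_irq = my_perf_handler;
        return 0;
}

static void __exit my_perf_exit(void)
{
        perf_irq = save_perf_irq;
}

module_init(my_perf_init);
module_exit(my_perf_exit);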