*/
unsigned long long sched_clock(void)
{
- return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
+ return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
}
+/*
+ * Monotonic_clock - returns # of nanoseconds passed since time_init()
+ */
+unsigned long long monotonic_clock(void)
+{
+ return sched_clock();
+}
+EXPORT_SYMBOL(monotonic_clock);
+
void tod_to_timeval(__u64 todval, struct timespec *xtime)
{
unsigned long long sec;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- account_user_vtime(current);
+ account_tick_vtime(current);
#else
while (ticks--)
update_process_times(user_mode(regs));
unsigned long flags;
unsigned long seq, next;
__u64 timer, todval;
+ int cpu = smp_processor_id();
if (sysctl_hz_timer != 0)
return;
- cpu_set(smp_processor_id(), nohz_cpu_mask);
+ cpu_set(cpu, nohz_cpu_mask);
/*
* Leave the clock comparator set up for the next timer
* tick if either rcu or a softirq is pending.
*/
- if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
- cpu_clear(smp_processor_id(), nohz_cpu_mask);
+ if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
+ cpu_clear(cpu, nohz_cpu_mask);
return;
}
next = next_timer_interrupt();
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
- timer = (__u64)(next - jiffies) + jiffies_64;
+ timer = (__u64) next - (__u64) jiffies + jiffies_64;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
todval = -1ULL;
/* Be careful about overflows. */