/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);

	/*
	 * If in an NMI context then don't risk lockups and
	 * return the cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * If this CPU's clock is behind the last timestamp we handed
	 * out, nudge it forward so the global clock never goes
	 * backwards:
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
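/*
 * Illustrative sketch (not part of the kernel file above): the core
 * trick in trace_clock_global() -- clamping a per-CPU clock that may
 * have drifted backwards to "last returned timestamp + 1" under a
 * spinlock -- translated to a standalone userspace program. The
 * pthread spinlock, the artificial skew, and all demo_* names are
 * assumptions made for this demo; none of them appear in the kernel
 * source.
 *
 * build: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_spinlock_t demo_lock;
static uint64_t demo_prev_time;

/* Stand-in for cpu_clock(): a monotonic clock plus per-"CPU" skew. */
static uint64_t skewed_ns(int64_t skew)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL +
	       (uint64_t)ts.tv_nsec + (uint64_t)skew;
}

/*
 * Same clamp as trace_clock_global(): never hand out a timestamp
 * older than the previous one.
 */
static uint64_t demo_clock_global(int64_t skew)
{
	uint64_t now = skewed_ns(skew);

	pthread_spin_lock(&demo_lock);
	if ((int64_t)(now - demo_prev_time) < 0)
		now = demo_prev_time + 1;
	demo_prev_time = now;
	pthread_spin_unlock(&demo_lock);

	return now;
}

int main(void)
{
	uint64_t prev = 0;
	int i;

	pthread_spin_init(&demo_lock, PTHREAD_PROCESS_PRIVATE);

	/*
	 * Alternate between two fake CPUs whose clocks disagree by 1ms:
	 * the raw readings jump backwards, the clamped ones never do.
	 */
	for (i = 0; i < 8; i++) {
		uint64_t t = demo_clock_global(i & 1 ? -1000000 : 0);

		printf("%llu (monotonic: %s)\n", (unsigned long long)t,
		       t > prev ? "yes" : "no");
		prev = t;
	}
	pthread_spin_destroy(&demo_lock);
	return 0;
}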