author		Ingo Molnar <mingo@elte.hu>	2007-07-19 19:28:35 (GMT)
committer	Ingo Molnar <mingo@elte.hu>	2007-07-19 19:28:35 (GMT)
commit		e436d80085133858bf2613a630365e8a0459fd58
tree		c579410a12ed63d94e3a40a023634c7df4aebc76
parent		969bb4e4032dac67287951d8f6642a3b5119694e
download	linux-fsl-qoriq-e436d80085133858bf2613a630365e8a0459fd58.tar.xz
[PATCH] sched: implement cpu_clock(cpu) high-speed time source
Implement the cpu_clock(cpu) interface for kernel-internal use: a
high-speed (but slightly incorrect) per-cpu clock constructed from
sched_clock().
This API is unused at the moment; it will later be used by blktrace,
the softlockup watchdog, printk and lockstat.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
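
As a usage sketch (an editorial illustration, not part of this patch:
time_sample_section() and do_sample_work() are hypothetical names), a
kernel-internal caller would pin itself to a CPU and read the clock
around the section of interest:

/*
 * Hypothetical caller, for illustration only: time a section of kernel
 * code on the current CPU. Like sched_clock(), cpu_clock() returns
 * nanoseconds; unlike a raw sched_clock() call, it is serialized
 * against the runqueue lock and callable with interrupts enabled or
 * disabled.
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>

static void time_sample_section(void)
{
	int cpu = get_cpu();	/* disable preemption, get this CPU's id */
	unsigned long long t0, t1;

	t0 = cpu_clock(cpu);
	do_sample_work();	/* hypothetical workload being timed */
	t1 = cpu_clock(cpu);
	put_cpu();		/* re-enable preemption */

	printk(KERN_DEBUG "section took %llu ns on cpu %d\n", t1 - t0, cpu);
}

Taking both timestamps on the same CPU, as above, sidesteps the
cross-CPU drift behind the changelog's "slightly incorrect" caveat.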
-rw-r--r--	include/linux/sched.h	 7
-rw-r--r--	kernel/sched.c		17
2 files changed, 24 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624a..33b9b48 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long task_sched_runtime(struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index a35a92f..93cf241 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
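
A note on the implementation: cpu_clock() reads rq_clock() under the
runqueue lock with spin_lock_irqsave(), so each call pays a lock
round-trip but is safe from both process and interrupt context. The
"slightly incorrect" caveat refers to sched_clock() being a per-CPU
time source: deltas taken on one CPU are meaningful, while timestamps
taken on different CPUs can drift relative to each other.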