Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/tick-common.c   |  1 +
-rw-r--r--  kernel/time/tick-internal.h |  1 +
-rw-r--r--  kernel/time/tick-sched.c    |  1 +
-rw-r--r--  kernel/time/timekeeping.c   | 19 ++++---------------
4 files changed, 7 insertions(+), 15 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ec..91c5f27 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -70,6 +70,7 @@ static void tick_periodic(int cpu)
 
 		do_timer(1);
 		write_sequnlock(&jiffies_lock);
+		update_wall_time();
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index e2bced5..8329669 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -155,3 +155,4 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
+extern void update_wall_time(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2afd43f..c58b03d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,6 +86,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
 
 /*
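
For readability, here is a small self-contained C mock of the ordering that the three tick-side hunks above establish; the *_stub names are placeholders invented for this illustration, not kernel APIs. The jiffies bookkeeping stays under jiffies_lock, while the timekeeping update now runs only after the lock has been released.

#include <stdio.h>

/* Placeholder stand-ins for the kernel primitives touched above. */
static void write_seqlock_stub(void)           { puts("jiffies_lock taken"); }
static void write_sequnlock_stub(void)         { puts("jiffies_lock released"); }
static void do_timer_stub(unsigned long ticks) { (void)ticks; puts("jiffies advanced"); }
static void update_wall_time_stub(void)        { puts("wall time updated, lock not held"); }

int main(void)
{
	write_seqlock_stub();
	do_timer_stub(1);          /* only jiffies bookkeeping under the lock */
	write_sequnlock_stub();
	update_wall_time_stub();   /* timekeeping now runs outside jiffies_lock */
	return 0;
}
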
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d62682b..44b7e6b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1360,7 +1360,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  */
-static void update_wall_time(void)
+void update_wall_time(void)
 {
 	struct clocksource *clock;
 	struct timekeeper *real_tk = &timekeeper;
@@ -1441,19 +1441,8 @@ static void update_wall_time(void)
 	write_seqcount_end(&timekeeper_seq);
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-	if (clock_was_set) {
-		/*
-		 * XXX - I'd rather we just call clock_was_set(), but
-		 * since we're currently holding the jiffies lock, calling
-		 * clock_was_set would trigger an ipi which would then grab
-		 * the jiffies lock and we'd deadlock. :(
-		 * The right solution should probably be droping
-		 * the jiffies lock before calling update_wall_time
-		 * but that requires some rework of the tick sched
-		 * code.
-		 */
-		clock_was_set_delayed();
-	}
+	if (clock_set)
+		clock_was_set();
 }
 
 /**
@@ -1598,7 +1587,6 @@ struct timespec get_monotonic_coarse(void)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_wall_time();
 	calc_global_load(ticks);
 }
 
@@ -1756,4 +1744,5 @@ void xtime_update(unsigned long ticks)
 	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
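
The XXX comment deleted in the timekeeping.c hunk describes why clock_was_set() could not be called while update_wall_time() still ran under jiffies_lock: the notification kicks other CPUs via IPI, and the IPI path grabs jiffies_lock itself. The userspace sketch below models that dependency with a plain mutex (illustrative only; all *_stub names are invented for this example, they are not the kernel functions). Because the notifier needs the same lock the tick path used to hold, it is only safe to invoke it after the unlock, which is exactly where this patch moves update_wall_time().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t jiffies_lock_stub = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for clock_was_set(): it ends up needing the lock the tick path holds. */
static void clock_was_set_stub(void)
{
	pthread_mutex_lock(&jiffies_lock_stub);
	puts("clock change propagated");
	pthread_mutex_unlock(&jiffies_lock_stub);
}

int main(void)
{
	pthread_mutex_lock(&jiffies_lock_stub);
	puts("jiffies advanced under the lock");   /* the do_timer() step */
	pthread_mutex_unlock(&jiffies_lock_stub);

	/* With the lock dropped, the notifier can be called directly
	 * instead of being deferred with clock_was_set_delayed(). */
	clock_was_set_stub();
	return 0;
}
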