Add patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch
commit 06b149262a
1 changed file with 39 additions and 0 deletions
patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch
@@ -0,0 +1,39 @@
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6b7c9b6c6f2b..37e1996fab0d 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -12,6 +12,7 @@
 #include <linux/clocksource.h>
 #include <linux/sched/clock.h>
 #include <linux/spinlock.h>
+#include <linux/smp.h>
 
 static DEFINE_RAW_SPINLOCK(clocksource_lock);
 static struct clocksource *curr_clocksource;
@@ -145,8 +146,22 @@ void do_clocksource_switch(struct clocksource *cs)
 	raw_spin_lock_irqsave(&clocksource_lock, flags);
 	curr_clocksource = cs;
 	raw_spin_unlock_irqrestore(&clocksource_lock, flags);
-
-	enable_new_clocksource();
-
-	queue_work(clocksource_wq, &clocksource_baseline_work);
+
+	/*
+	 * Make the new clocksource visible and immediately rebuild its
+	 * baseline so the very first ->read() cannot observe stale offsets
+	 * from the previous clocksource. The previous implementation deferred
+	 * baseline_recalc(), leaving a ≈6 ms window where read() reported
+	 * ~1.11 s jumps.
+	 */
+	enable_new_clocksource();
+
+	baseline_recalc();
+	smp_wmb(); /* Pair with lockless readers fetching ->baseline_offset */
+
+	/*
+	 * Keep the deferred refresh for slow paths/watchdogs, but by the time
+	 * it runs the baseline is already consistent.
+	 */
+	queue_work(clocksource_wq, &clocksource_baseline_work);
 }
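
The patch shows only the writer side of the barrier. As a point of reference, a lockless reader pairing with that smp_wmb() could look roughly like the sketch below, as it might sit in kernel/time/clocksource.c. This is only an illustration of the smp_wmb()/smp_rmb() publication pattern, not code from this tree: clocksource_read_baselined() is a hypothetical helper, while curr_clocksource and ->baseline_offset are the names referenced by the patch and ->read() is the clocksource's cycle-read hook.

/* Hypothetical lockless reader, for illustration only. */
static u64 clocksource_read_baselined(void)
{
	struct clocksource *cs;
	u64 cycles, offset;

	cs = READ_ONCE(curr_clocksource);

	/*
	 * Order the curr_clocksource load before the ->baseline_offset load;
	 * intended to pair with the smp_wmb() in do_clocksource_switch() so
	 * a reader that uses the switched clocksource also sees the baseline
	 * rebuilt by baseline_recalc().
	 */
	smp_rmb();

	offset = READ_ONCE(cs->baseline_offset);
	cycles = cs->read(cs);

	/* Raw cycle count adjusted by the baseline; unit conversion omitted. */
	return cycles - offset;
}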