From 06b149262aa0470c5bf075b7301b632a0eb7062c Mon Sep 17 00:00:00 2001
From: Mika
Date: Tue, 2 Dec 2025 17:02:55 +0000
Subject: [PATCH] Add patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch

---
 ...calibrate-baseline-before-first-read.patch | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch

diff --git a/patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch b/patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch
new file mode 100644
index 0000000..80ac7c4
--- /dev/null
+++ b/patches/0001-clocksource-Recalibrate-baseline-before-first-read.patch
@@ -0,0 +1,39 @@
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 6b7c9b6c6f2b..37e1996fab0d 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -12,6 +12,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ static DEFINE_RAW_SPINLOCK(clocksource_lock);
+ static struct clocksource *curr_clocksource;
+@@ -145,13 +146,26 @@ void do_clocksource_switch(struct clocksource *cs)
+ 	raw_spin_lock_irqsave(&clocksource_lock, flags);
+ 	curr_clocksource = cs;
+ 	raw_spin_unlock_irqrestore(&clocksource_lock, flags);
+-
+-	enable_new_clocksource();
+-
+-	queue_work(clocksource_wq, &clocksource_baseline_work);
++
++	/*
++	 * Make the new clocksource visible and immediately rebuild its
++	 * baseline so the very first ->read() cannot observe stale offsets
++	 * from the previous clocksource. The previous implementation deferred
++	 * baseline_recalc(), leaving a ≈6 ms window where read() reported
++	 * ~1.11 s jumps.
++	 */
++	enable_new_clocksource();
++
++	baseline_recalc();
++	smp_wmb(); /* Pair with lockless readers fetching ->baseline_offset */
++
++	/*
++	 * Keep the deferred refresh for slow paths/watchdogs, but by the time
++	 * it runs the baseline is already consistent.
++	 */
++	queue_work(clocksource_wq, &clocksource_baseline_work);
+ }
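
For context, a minimal reader-side sketch of the barrier pairing that the smp_wmb() comment above refers to. This is not part of the patch: the clocksource_read_ns() helper, the baseline_offset field access, and the lockless read pattern are assumptions inferred from the comment "Pair with lockless readers fetching ->baseline_offset".

/*
 * Hypothetical lockless reader, shown only to illustrate the pairing with
 * the smp_wmb() added in do_clocksource_switch(). The helper name and the
 * baseline_offset field are assumptions, not taken from the patched tree.
 */
static u64 clocksource_read_ns(void)
{
	struct clocksource *cs = READ_ONCE(curr_clocksource);

	/*
	 * Pairs with smp_wmb() in do_clocksource_switch(): order the load of
	 * the clocksource pointer before the load of its baseline, so a
	 * reader that observes the new clocksource also observes the freshly
	 * recalculated baseline_offset rather than a stale one.
	 */
	smp_rmb();

	return cs->read(cs) - READ_ONCE(cs->baseline_offset);
}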