Skip to content

Commit 249d053

Browse files
a-darwish authored and Peter Zijlstra committed
timekeeping: Use seqcount_latch_t
Latch sequence counters are a multiversion concurrency control mechanism where the seqcount_t counter even/odd value is used to switch between two data storage copies. This allows the seqcount_t read path to safely interrupt its write side critical section (e.g. from NMIs). Initially, latch sequence counters were implemented as a single write function, raw_write_seqcount_latch(), above plain seqcount_t. The read path was expected to use plain seqcount_t raw_read_seqcount(). A specialized read function was later added, raw_read_seqcount_latch(), and became the standardized way for latch read paths. Having unique read and write APIs meant that latch sequence counters are basically a data type of their own -- just inappropriately overloading plain seqcount_t. The seqcount_latch_t data type was thus introduced at seqlock.h. Use that new data type instead of seqcount_raw_spinlock_t. This ensures that only latch-safe APIs are to be used with the sequence counter. Note that the use of seqcount_raw_spinlock_t was not very useful in the first place. Only the "raw_" subset of seqcount_t APIs were used at timekeeping.c. This subset was created for contexts where lockdep cannot be used. seqcount_LOCKTYPE_t's raison d'être -- verifying that the seqcount_t writer serialization lock is held -- cannot thus be done. References: 0c3351d ("seqlock: Use raw_ prefix instead of _no_lockdep") References: 55f3560 ("seqlock: Extend seqcount API with associated locks") Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20200827114044.11173-6-a.darwish@linutronix.de
1 parent a690ed0 commit 249d053

1 file changed

Lines changed: 5 additions & 5 deletions

File tree

kernel/time/timekeeping.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ static struct timekeeper shadow_timekeeper;
6464
* See @update_fast_timekeeper() below.
6565
*/
6666
struct tk_fast {
67-
seqcount_raw_spinlock_t seq;
67+
seqcount_latch_t seq;
6868
struct tk_read_base base[2];
6969
};
7070

@@ -81,13 +81,13 @@ static struct clocksource dummy_clock = {
8181
};
8282

8383
static struct tk_fast tk_fast_mono ____cacheline_aligned = {
84-
.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
84+
.seq = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
8585
.base[0] = { .clock = &dummy_clock, },
8686
.base[1] = { .clock = &dummy_clock, },
8787
};
8888

8989
static struct tk_fast tk_fast_raw ____cacheline_aligned = {
90-
.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
90+
.seq = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
9191
.base[0] = { .clock = &dummy_clock, },
9292
.base[1] = { .clock = &dummy_clock, },
9393
};
@@ -467,7 +467,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
467467
tk_clock_read(tkr),
468468
tkr->cycle_last,
469469
tkr->mask));
470-
} while (read_seqcount_retry(&tkf->seq, seq));
470+
} while (read_seqcount_latch_retry(&tkf->seq, seq));
471471

472472
return now;
473473
}
@@ -533,7 +533,7 @@ static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
533533
tk_clock_read(tkr),
534534
tkr->cycle_last,
535535
tkr->mask));
536-
} while (read_seqcount_retry(&tkf->seq, seq));
536+
} while (read_seqcount_latch_retry(&tkf->seq, seq));
537537

538538
return now;
539539
}

0 commit comments

Comments (0)