Skip to content

Commit 1909760

Browse files
a-darwish authored
and Peter Zijlstra committed
seqlock: PREEMPT_RT: Do not starve seqlock_t writers
On PREEMPT_RT, seqlock_t is transformed to a sleeping lock that does not disable preemption. A seqlock_t reader can thus preempt its write side section and spin for the entire scheduler tick. If that reader belongs to a real-time scheduling class, it can spin forever and the kernel will livelock. To break this livelock possibility on PREEMPT_RT, implement seqlock_t in terms of "seqcount_spinlock_t" instead of plain "seqcount_t". Besides its pure annotational value, this will leverage the existing seqcount_LOCKNAME_T PREEMPT_RT anti-livelock mechanisms, without adding any extra code. Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20200904153231.11994-6-a.darwish@linutronix.de
1 parent 8117ab5 commit 1909760

1 file changed

Lines changed: 21 additions & 11 deletions

File tree

include/linux/seqlock.h

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -790,13 +790,17 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
790790
* - Documentation/locking/seqlock.rst
791791
*/
792792
typedef struct {
793-
struct seqcount seqcount;
793+
/*
794+
* Make sure that readers don't starve writers on PREEMPT_RT: use
795+
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
796+
*/
797+
seqcount_spinlock_t seqcount;
794798
spinlock_t lock;
795799
} seqlock_t;
796800

797801
#define __SEQLOCK_UNLOCKED(lockname) \
798802
{ \
799-
.seqcount = SEQCNT_ZERO(lockname), \
803+
.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
800804
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
801805
}
802806

@@ -806,8 +810,8 @@ typedef struct {
806810
*/
807811
#define seqlock_init(sl) \
808812
do { \
809-
seqcount_init(&(sl)->seqcount); \
810813
spin_lock_init(&(sl)->lock); \
814+
seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
811815
} while (0)
812816

813817
/**
@@ -854,6 +858,12 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
854858
return read_seqcount_retry(&sl->seqcount, start);
855859
}
856860

861+
/*
862+
* For all seqlock_t write side functions, use write_seqcount_*t*_begin()
863+
* instead of the generic write_seqcount_begin(). This way, no redundant
864+
* lockdep_assert_held() checks are added.
865+
*/
866+
857867
/**
858868
* write_seqlock() - start a seqlock_t write side critical section
859869
* @sl: Pointer to seqlock_t
@@ -870,7 +880,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
870880
static inline void write_seqlock(seqlock_t *sl)
871881
{
872882
spin_lock(&sl->lock);
873-
write_seqcount_t_begin(&sl->seqcount);
883+
write_seqcount_t_begin(&sl->seqcount.seqcount);
874884
}
875885

876886
/**
@@ -882,7 +892,7 @@ static inline void write_seqlock(seqlock_t *sl)
882892
*/
883893
static inline void write_sequnlock(seqlock_t *sl)
884894
{
885-
write_seqcount_t_end(&sl->seqcount);
895+
write_seqcount_t_end(&sl->seqcount.seqcount);
886896
spin_unlock(&sl->lock);
887897
}
888898

@@ -896,7 +906,7 @@ static inline void write_sequnlock(seqlock_t *sl)
896906
static inline void write_seqlock_bh(seqlock_t *sl)
897907
{
898908
spin_lock_bh(&sl->lock);
899-
write_seqcount_t_begin(&sl->seqcount);
909+
write_seqcount_t_begin(&sl->seqcount.seqcount);
900910
}
901911

902912
/**
@@ -909,7 +919,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
909919
*/
910920
static inline void write_sequnlock_bh(seqlock_t *sl)
911921
{
912-
write_seqcount_t_end(&sl->seqcount);
922+
write_seqcount_t_end(&sl->seqcount.seqcount);
913923
spin_unlock_bh(&sl->lock);
914924
}
915925

@@ -923,7 +933,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
923933
static inline void write_seqlock_irq(seqlock_t *sl)
924934
{
925935
spin_lock_irq(&sl->lock);
926-
write_seqcount_t_begin(&sl->seqcount);
936+
write_seqcount_t_begin(&sl->seqcount.seqcount);
927937
}
928938

929939
/**
@@ -935,7 +945,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
935945
*/
936946
static inline void write_sequnlock_irq(seqlock_t *sl)
937947
{
938-
write_seqcount_t_end(&sl->seqcount);
948+
write_seqcount_t_end(&sl->seqcount.seqcount);
939949
spin_unlock_irq(&sl->lock);
940950
}
941951

@@ -944,7 +954,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
944954
unsigned long flags;
945955

946956
spin_lock_irqsave(&sl->lock, flags);
947-
write_seqcount_t_begin(&sl->seqcount);
957+
write_seqcount_t_begin(&sl->seqcount.seqcount);
948958
return flags;
949959
}
950960

@@ -973,7 +983,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
973983
static inline void
974984
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
975985
{
976-
write_seqcount_t_end(&sl->seqcount);
986+
write_seqcount_t_end(&sl->seqcount.seqcount);
977987
spin_unlock_irqrestore(&sl->lock, flags);
978988
}
979989

0 commit comments

Comments
 (0)