Skip to content

Commit 0c9794c

Browse files
a-darwish authored and Peter Zijlstra committed
seqlock: seqcount latch APIs: Only allow seqcount_latch_t
All latch sequence counter call-sites have now been converted from plain seqcount_t to the new seqcount_latch_t data type. Enforce type-safety by modifying the seqlock.h latch APIs to only accept seqcount_latch_t.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200827114044.11173-9-a.darwish@linutronix.de
1 parent 24bf401 commit 0c9794c

1 file changed

Lines changed: 15 additions & 21 deletions

File tree

include/linux/seqlock.h

Lines changed: 15 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -620,7 +620,7 @@ static inline void seqcount_latch_init(seqcount_latch_t *s)
620620

621621
/**
622622
* raw_read_seqcount_latch() - pick even/odd latch data copy
623-
* @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
623+
* @s: Pointer to seqcount_latch_t
624624
*
625625
* See raw_write_seqcount_latch() for details and a full reader/writer
626626
* usage example.
@@ -629,17 +629,14 @@ static inline void seqcount_latch_init(seqcount_latch_t *s)
629629
* picking which data copy to read. The full counter must then be checked
630630
* with read_seqcount_latch_retry().
631631
*/
632-
#define raw_read_seqcount_latch(s) \
633-
({ \
634-
/* \
635-
* Pairs with the first smp_wmb() in raw_write_seqcount_latch(). \
636-
* Due to the dependent load, a full smp_rmb() is not needed. \
637-
*/ \
638-
_Generic(*(s), \
639-
seqcount_t: READ_ONCE(((seqcount_t *)s)->sequence), \
640-
seqcount_raw_spinlock_t: READ_ONCE(((seqcount_raw_spinlock_t *)s)->seqcount.sequence), \
641-
seqcount_latch_t: READ_ONCE(((seqcount_latch_t *)s)->seqcount.sequence)); \
642-
})
632+
static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
633+
{
634+
/*
635+
* Pairs with the first smp_wmb() in raw_write_seqcount_latch().
636+
* Due to the dependent load, a full smp_rmb() is not needed.
637+
*/
638+
return READ_ONCE(s->seqcount.sequence);
639+
}
643640

644641
/**
645642
* read_seqcount_latch_retry() - end a seqcount_latch_t read section
@@ -656,7 +653,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
656653

657654
/**
658655
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
659-
* @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
656+
* @s: Pointer to seqcount_latch_t
660657
*
661658
* The latch technique is a multiversion concurrency control method that allows
662659
* queries during non-atomic modifications. If you can guarantee queries never
@@ -735,14 +732,11 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
735732
* When data is a dynamic data structure; one should use regular RCU
736733
* patterns to manage the lifetimes of the objects within.
737734
*/
738-
#define raw_write_seqcount_latch(s) \
739-
{ \
740-
smp_wmb(); /* prior stores before incrementing "sequence" */ \
741-
_Generic(*(s), \
742-
seqcount_t: ((seqcount_t *)s)->sequence++, \
743-
seqcount_raw_spinlock_t:((seqcount_raw_spinlock_t *)s)->seqcount.sequence++, \
744-
seqcount_latch_t: ((seqcount_latch_t *)s)->seqcount.sequence++); \
745-
smp_wmb(); /* increment "sequence" before following stores */ \
735+
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
736+
{
737+
smp_wmb(); /* prior stores before incrementing "sequence" */
738+
s->seqcount.sequence++;
739+
smp_wmb(); /* increment "sequence" before following stores */
746740
}
747741

748742
/*

0 commit comments

Comments (0)