Skip to content

Commit 52ac39e

Browse files
a-darwish authored and Peter Zijlstra committed
seqlock: seqcount_t: Implement all read APIs as statement expressions
The sequence counters read APIs are implemented as CPP macros, so they can take either seqcount_t or any of the seqcount_LOCKNAME_t variants. Such macros then get *directly* transformed to internal C functions that only take plain seqcount_t. Further commits need access to seqcount_LOCKNAME_t inside of the actual read APIs code. Thus transform all of the seqcount read APIs to pure GCC statement expressions instead. This will not break type-safety: all of the transformed APIs resolve to a _Generic() selection that does not have a "default" case. This will also not affect the transformed APIs readability: previously added kernel-doc above all of seqlock.h functions makes the expectations quite clear for call-site developers. Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20200904153231.11994-4-a.darwish@linutronix.de
1 parent 5cdd255 commit 52ac39e

1 file changed

Lines changed: 45 additions & 49 deletions

File tree

include/linux/seqlock.h

Lines changed: 45 additions & 49 deletions
Original file line number | Diff line number | Diff line change
@@ -184,6 +184,12 @@ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
184184
return &s->seqcount; \
185185
} \
186186
\
187+
static __always_inline unsigned \
188+
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
189+
{ \
190+
return READ_ONCE(s->seqcount.sequence); \
191+
} \
192+
\
187193
static __always_inline bool \
188194
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
189195
{ \
@@ -205,6 +211,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
205211
return s;
206212
}
207213

214+
static inline unsigned __seqprop_sequence(const seqcount_t *s)
215+
{
216+
return READ_ONCE(s->sequence);
217+
}
218+
208219
static inline bool __seqprop_preemptible(const seqcount_t *s)
209220
{
210221
return false;
@@ -250,6 +261,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
250261
__seqprop_case((s), ww_mutex, prop))
251262

252263
#define __seqcount_ptr(s) __seqprop(s, ptr)
264+
#define __seqcount_sequence(s) __seqprop(s, sequence)
253265
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
254266
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
255267

@@ -268,21 +280,15 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
268280
* Return: count to be passed to read_seqcount_retry()
269281
*/
270282
#define __read_seqcount_begin(s) \
271-
__read_seqcount_t_begin(__seqcount_ptr(s))
272-
273-
static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
274-
{
275-
unsigned ret;
276-
277-
repeat:
278-
ret = READ_ONCE(s->sequence);
279-
if (unlikely(ret & 1)) {
280-
cpu_relax();
281-
goto repeat;
282-
}
283-
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
284-
return ret;
285-
}
283+
({ \
284+
unsigned seq; \
285+
\
286+
while ((seq = __seqcount_sequence(s)) & 1) \
287+
cpu_relax(); \
288+
\
289+
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
290+
seq; \
291+
})
286292

287293
/**
288294
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
@@ -291,14 +297,12 @@ static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
291297
* Return: count to be passed to read_seqcount_retry()
292298
*/
293299
#define raw_read_seqcount_begin(s) \
294-
raw_read_seqcount_t_begin(__seqcount_ptr(s))
295-
296-
static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
297-
{
298-
unsigned ret = __read_seqcount_t_begin(s);
299-
smp_rmb();
300-
return ret;
301-
}
300+
({ \
301+
unsigned seq = __read_seqcount_begin(s); \
302+
\
303+
smp_rmb(); \
304+
seq; \
305+
})
302306

303307
/**
304308
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -307,13 +311,10 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
307311
* Return: count to be passed to read_seqcount_retry()
308312
*/
309313
#define read_seqcount_begin(s) \
310-
read_seqcount_t_begin(__seqcount_ptr(s))
311-
312-
static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
313-
{
314-
seqcount_lockdep_reader_access(s);
315-
return raw_read_seqcount_t_begin(s);
316-
}
314+
({ \
315+
seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
316+
raw_read_seqcount_begin(s); \
317+
})
317318

318319
/**
319320
* raw_read_seqcount() - read the raw seqcount_t counter value
@@ -327,15 +328,13 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
327328
* Return: count to be passed to read_seqcount_retry()
328329
*/
329330
#define raw_read_seqcount(s) \
330-
raw_read_seqcount_t(__seqcount_ptr(s))
331-
332-
static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
333-
{
334-
unsigned ret = READ_ONCE(s->sequence);
335-
smp_rmb();
336-
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
337-
return ret;
338-
}
331+
({ \
332+
unsigned seq = __seqcount_sequence(s); \
333+
\
334+
smp_rmb(); \
335+
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
336+
seq; \
337+
})
339338

340339
/**
341340
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
@@ -355,16 +354,13 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
355354
* Return: count to be passed to read_seqcount_retry()
356355
*/
357356
#define raw_seqcount_begin(s) \
358-
raw_seqcount_t_begin(__seqcount_ptr(s))
359-
360-
static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
361-
{
362-
/*
363-
* If the counter is odd, let read_seqcount_retry() fail
364-
* by decrementing the counter.
365-
*/
366-
return raw_read_seqcount_t(s) & ~1;
367-
}
357+
({ \
358+
/* \
359+
* If the counter is odd, let read_seqcount_retry() fail \
360+
* by decrementing the counter. \
361+
*/ \
362+
raw_read_seqcount(s) & ~1; \
363+
})
368364

369365
/**
370366
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier

0 commit comments

Comments (0)