Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r--  include/linux/seqlock.h  54
1 file changed, 10 insertions, 44 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 019a936..21a2093 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -152,30 +152,18 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void __write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- preempt_disable_rt();
- __write_seqcount_begin(s);
-}
-
-static inline void __write_seqcount_end(seqcount_t *s)
+static inline void write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
-static inline void write_seqcount_end(seqcount_t *s)
-{
- __write_seqcount_end(s);
- preempt_enable_rt();
-}
-
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
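(For context, not part of the patch: the comment in the first hunk notes that the seqcount-only API "assumes that callers are using their own mutexing". A minimal sketch of what that looks like with the plain write_seqcount_begin()/write_seqcount_end() pair restored above; the foo_* names and the mutex are hypothetical and not from seqlock.h.)

#include <linux/mutex.h>
#include <linux/seqlock.h>

static struct mutex foo_lock;   /* the caller's "own mutexing" */
static seqcount_t foo_seq;
static u64 foo_a, foo_b;        /* payload guarded by foo_seq */

static void foo_init(void)
{
	mutex_init(&foo_lock);
	seqcount_init(&foo_seq);
}

static void foo_update(u64 a, u64 b)
{
	mutex_lock(&foo_lock);            /* serialize writers ourselves */
	write_seqcount_begin(&foo_seq);   /* sequence goes odd: readers retry */
	foo_a = a;
	foo_b = b;
	write_seqcount_end(&foo_seq);     /* sequence goes even again */
	mutex_unlock(&foo_lock);
}

static u64 foo_read_sum(void)
{
	unsigned start;
	u64 sum;

	do {
		start = read_seqcount_begin(&foo_seq);
		sum = foo_a + foo_b;
	} while (read_seqcount_retry(&foo_seq, start));

	return sum;
}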
@@ -216,32 +204,10 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
-#else
-/*
- * Starvation safe read side for RT
- */
-static inline unsigned read_seqbegin(seqlock_t *sl)
-{
- unsigned ret;
-
-repeat:
- ret = ACCESS_ONCE(sl->seqcount.sequence);
- if (unlikely(ret & 1)) {
- /*
- * Take the lock and let the writer proceed (i.e. possibly
- * boost it), otherwise we could loop here forever.
- */
- spin_unlock_wait(&sl->lock);
- goto repeat;
- }
- return ret;
-}
-#endif
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
@@ -256,36 +222,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
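(For context, not part of the patch: the seqlock_t wrappers in this hunk pair the spinlock with the seqcount, so a writer only calls write_seqlock()/write_sequnlock() and a reader loops on read_seqbegin()/read_seqretry(). A minimal sketch; the bar_* names are hypothetical.)

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(bar_lock);
static unsigned long bar_value;    /* payload guarded by bar_lock */

static void bar_set(unsigned long v)
{
	write_seqlock(&bar_lock);      /* takes sl->lock, then bumps the count */
	bar_value = v;
	write_sequnlock(&bar_lock);
}

static unsigned long bar_get(void)
{
	unsigned start;
	unsigned long v;

	do {
		start = read_seqbegin(&bar_lock);
		v = bar_value;
	} while (read_seqretry(&bar_lock, start));

	return v;
}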
@@ -294,7 +260,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
return flags;
}
@@ -304,7 +270,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
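(For context, not part of the patch: __write_seqlock_irqsave() in the last two hunks is the helper behind the write_seqlock_irqsave(lock, flags) macro defined alongside it, for writers that may run with interrupts in an unknown state. A minimal sketch; the baz_* names are hypothetical.)

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(baz_lock);
static u64 baz_stamp;              /* also updated from interrupt context */

static void baz_touch(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&baz_lock, flags);     /* disables IRQs, saves state */
	baz_stamp++;
	write_sequnlock_irqrestore(&baz_lock, flags);
}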