#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
9 struct percpu_rw_semaphore
{
10 unsigned __percpu
*counters
;
/*
 * Asymmetric barrier pair: the hot read side pays only a compiler
 * barrier, while the cold write side uses synchronize_sched_expedited()
 * to force a full barrier on every CPU on its behalf.
 */
#define light_mb()	barrier()
#define heavy_mb()	synchronize_sched_expedited()
18 static inline void percpu_down_read(struct percpu_rw_semaphore
*p
)
20 rcu_read_lock_sched();
21 if (unlikely(p
->locked
)) {
22 rcu_read_unlock_sched();
24 this_cpu_inc(*p
->counters
);
25 mutex_unlock(&p
->mtx
);
28 this_cpu_inc(*p
->counters
);
29 rcu_read_unlock_sched();
30 light_mb(); /* A, between read of p->locked and read of data, paired with D */
33 static inline void percpu_up_read(struct percpu_rw_semaphore
*p
)
35 light_mb(); /* B, between read of the data and write to p->counter, paired with C */
36 this_cpu_dec(*p
->counters
);
39 static inline unsigned __percpu_count(unsigned __percpu
*counters
)
44 for_each_possible_cpu(cpu
)
45 total
+= ACCESS_ONCE(*per_cpu_ptr(counters
, cpu
));
50 static inline void percpu_down_write(struct percpu_rw_semaphore
*p
)
54 synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
55 while (__percpu_count(p
->counters
))
57 heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
60 static inline void percpu_up_write(struct percpu_rw_semaphore
*p
)
62 heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
64 mutex_unlock(&p
->mtx
);
67 static inline int percpu_init_rwsem(struct percpu_rw_semaphore
*p
)
69 p
->counters
= alloc_percpu(unsigned);
70 if (unlikely(!p
->counters
))
77 static inline void percpu_free_rwsem(struct percpu_rw_semaphore
*p
)
79 free_percpu(p
->counters
);
80 p
->counters
= NULL
; /* catch use after free bugs */
#endif /* _LINUX_PERCPU_RWSEM_H */