X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Frcupdate.h;h=cc24a01df940a67a18bce50a666d5a9080c7b387;hb=2d94dfc8c38edf63e91e48fd55c3a8822b6a9ced;hp=c6b7485eac7ce1e59c7f01cf71221e6637f524ce;hpb=513b046c96cc2fbce730a3474f6f7ff0c4fdd05c;p=pandora-kernel.git

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c6b7485eac7c..cc24a01df940 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,6 +41,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/lockdep.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -133,6 +134,15 @@ static inline void rcu_bh_qsctr_inc(int cpu)
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -166,6 +176,7 @@ extern int rcu_needs_cpu(int cpu);
 	do { \
 		preempt_disable(); \
 		__acquire(RCU); \
+		rcu_read_acquire(); \
 	} while(0)
 
 /**
@@ -175,6 +186,7 @@ extern int rcu_needs_cpu(int cpu);
  */
 #define rcu_read_unlock() \
 	do { \
+		rcu_read_release(); \
 		__release(RCU); \
 		preempt_enable(); \
 	} while(0)
@@ -204,6 +216,7 @@ extern int rcu_needs_cpu(int cpu);
 	do { \
 		local_bh_disable(); \
 		__acquire(RCU_BH); \
+		rcu_read_acquire(); \
 	} while(0)
 
 /*
@@ -213,10 +226,23 @@ extern int rcu_needs_cpu(int cpu);
  */
 #define rcu_read_unlock_bh() \
 	do { \
+		rcu_read_release(); \
 		__release(RCU_BH); \
 		local_bh_enable(); \
 	} while(0)
 
+/*
+ * Prevent the compiler from merging or refetching accesses. The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering. One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section. This pointer may later
@@ -228,7 +254,7 @@ extern int rcu_needs_cpu(int cpu);
  */
 
 #define rcu_dereference(p)     ({ \
-				typeof(p) _________p1 = p; \
+				typeof(p) _________p1 = ACCESS_ONCE(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})
@@ -281,7 +307,6 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
 extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)));
 extern void synchronize_rcu(void);
-void synchronize_idle(void);
 extern void rcu_barrier(void);
 
 #endif /* __KERNEL__ */
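
Usage sketch (not part of the diff): the hunks above hook RCU read-side critical sections into lockdep (rcu_read_acquire()/rcu_read_release() inside rcu_read_lock(), rcu_read_unlock() and their _bh variants, backed by rcu_lock_map when CONFIG_DEBUG_LOCK_ALLOC is set) and make rcu_dereference() load the protected pointer through ACCESS_ONCE() so the compiler cannot merge or refetch that load. The reader/updater pair below is a minimal illustration of the API these macros sit behind; struct foo, gp, read_a(), update_a() and free_foo() are hypothetical names, and updaters are assumed to be serialized externally (single writer or a separate lock).

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int a;
		struct rcu_head rcu;		/* used by call_rcu() for the deferred free */
	};

	static struct foo *gp;			/* hypothetical RCU-protected pointer */

	/* Reader: dereference and use happen inside one read-side critical section. */
	static int read_a(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock();		/* preempt_disable() + rcu_read_acquire() */
		p = rcu_dereference(gp);	/* ACCESS_ONCE() load + smp_read_barrier_depends() */
		if (p)
			val = p->a;
		rcu_read_unlock();		/* rcu_read_release() + preempt_enable() */
		return val;
	}

	/* call_rcu() callback: frees the old version once a grace period has elapsed. */
	static void free_foo(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	/* Updater: publish a new version, then defer freeing of the old one. */
	static int update_a(int new_a)
	{
		struct foo *new_p, *old_p;

		new_p = kmalloc(sizeof(*new_p), GFP_KERNEL);
		if (!new_p)
			return -ENOMEM;
		new_p->a = new_a;

		old_p = gp;
		rcu_assign_pointer(gp, new_p);	/* pairs with rcu_dereference() in read_a() */
		if (old_p)
			call_rcu(&old_p->rcu, free_foo);
		return 0;
	}

With CONFIG_DEBUG_LOCK_ALLOC disabled, rcu_read_acquire() and rcu_read_release() compile away to empty do { } while (0) statements, so code like the above is unaffected; with it enabled, lockdep tracks the read-side sections through rcu_lock_map.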