include/linux/lglock.h
/*
 * Specialised local-global spinlock. Can only be declared as global variables
 * to avoid overhead and keep things simple (and we don't want to start using
 * these inside dynamically allocated structures).
 *
 * "local/global locks" (lglocks) can be used to:
 *
 * - Provide fast exclusive access to per-CPU data, with exclusive access to
 *   another CPU's data allowed but possibly subject to contention, and to
 *   provide very slow exclusive access to all per-CPU data.
 * - Or to provide very fast and scalable read serialisation, and to provide
 *   very slow exclusive serialisation of data (not necessarily per-CPU data).
 *
 * Brlocks are also implemented as a short-hand notation for the latter use
 * case.
 *
 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
 */
#ifndef __LINUX_LGLOCK_H
#define __LINUX_LGLOCK_H

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name)	name##_lock_init()
#define br_read_lock(name)	name##_local_lock()
#define br_read_unlock(name)	name##_local_unlock()
#define br_write_lock(name)	name##_global_lock_online()
#define br_write_unlock(name)	name##_global_unlock_online()

#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
#define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
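
/*
 * Brlock usage sketch (illustrative only; "my_brlock" and the data it
 * protects are made-up names, not part of this API).  The read side takes
 * only the current CPU's spinlock, so readers scale; the write side takes
 * every online CPU's lock, so writers are slow but fully exclusive:
 *
 *	DECLARE_BRLOCK(my_brlock);		// in a shared header
 *	DEFINE_BRLOCK(my_brlock);		// in exactly one .c file
 *
 *	br_lock_init(my_brlock);		// once, before first use
 *
 *	br_read_lock(my_brlock);		// fast: this CPU's lock only
 *	// ... walk the read-mostly data ...
 *	br_read_unlock(my_brlock);
 *
 *	br_write_lock(my_brlock);		// slow: locks all online CPUs
 *	// ... modify the data ...
 *	br_write_unlock(my_brlock);
 */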
36
37
38 #define lg_lock_init(name)      name##_lock_init()
39 #define lg_local_lock(name)     name##_local_lock()
40 #define lg_local_unlock(name)   name##_local_unlock()
41 #define lg_local_lock_cpu(name, cpu)    name##_local_lock_cpu(cpu)
42 #define lg_local_unlock_cpu(name, cpu)  name##_local_unlock_cpu(cpu)
43 #define lg_global_lock(name)    name##_global_lock()
44 #define lg_global_unlock(name)  name##_global_unlock()
45 #define lg_global_lock_online(name) name##_global_lock_online()
46 #define lg_global_unlock_online(name) name##_global_unlock_online()
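
/*
 * Lglock usage sketch for the per-CPU-data case described at the top of
 * this file (illustrative only; "my_lglock", "my_data" and the code around
 * them are made-up names, not part of this API):
 *
 *	DEFINE_LGLOCK(my_lglock);
 *	DEFINE_PER_CPU(struct my_data, my_data);
 *
 *	lg_lock_init(my_lglock);		// once, before first use
 *
 *	lg_local_lock(my_lglock);		// this CPU's data: cheap
 *	// ... use __get_cpu_var(my_data) ...
 *	lg_local_unlock(my_lglock);
 *
 *	lg_local_lock_cpu(my_lglock, cpu);	// another CPU's data: may contend
 *	// ... use per_cpu(my_data, cpu) ...
 *	lg_local_unlock_cpu(my_lglock, cpu);
 *
 *	lg_global_lock(my_lglock);		// all CPUs' data: very slow
 *	// ... walk every CPU's my_data ...
 *	lg_global_unlock(my_lglock);
 */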

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map

#define DEFINE_LGLOCK_LOCKDEP(name)					\
 struct lock_class_key name##_lock_key;					\
 struct lockdep_map name##_lock_dep_map;				\
 EXPORT_SYMBOL(name##_lock_dep_map)

#else
#define LOCKDEP_INIT_MAP(a, b, c, d)

#define DEFINE_LGLOCK_LOCKDEP(name)
#endif
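
/*
 * Note on the lockdep annotations used below: the whole lglock is modelled
 * as a single rwlock for lockdep purposes.  The local (per-CPU) lock paths
 * use rwlock_acquire_read(), the global paths use rwlock_acquire(), so
 * lockdep sees "many readers or one writer" even though the implementation
 * is really a set of per-CPU arch spinlocks.
 */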


#define DECLARE_LGLOCK(name)						\
 extern void name##_lock_init(void);					\
 extern void name##_local_lock(void);					\
 extern void name##_local_unlock(void);					\
 extern void name##_local_lock_cpu(int cpu);				\
 extern void name##_local_unlock_cpu(int cpu);				\
 extern void name##_global_lock(void);					\
 extern void name##_global_unlock(void);				\
 extern void name##_global_lock_online(void);				\
 extern void name##_global_unlock_online(void);				\

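/*
 * DEFINE_LGLOCK() expands to the lock itself plus its accessor functions:
 * one arch_spinlock_t per CPU, a name##_cpus mask (maintained by a CPU
 * hotplug notifier) of the CPUs whose locks the *_online operations must
 * take, and a name##_cpu_lock spinlock that serialises mask updates against
 * the *_online critical sections.  The local paths disable preemption
 * themselves because raw arch_spin_lock() does not; the global paths either
 * hold name##_cpu_lock (the *_online variants) or disable preemption while
 * they sweep every possible CPU's lock.
 */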
#define DEFINE_LGLOCK(name)						\
									\
 DEFINE_SPINLOCK(name##_cpu_lock);					\
 cpumask_t name##_cpus __read_mostly;					\
 DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
 DEFINE_LGLOCK_LOCKDEP(name);						\
									\
 static int								\
 name##_lg_cpu_callback(struct notifier_block *nb,			\
				unsigned long action, void *hcpu)	\
 {									\
	switch (action & ~CPU_TASKS_FROZEN) {				\
	case CPU_UP_PREPARE:						\
		spin_lock(&name##_cpu_lock);				\
		cpu_set((unsigned long)hcpu, name##_cpus);		\
		spin_unlock(&name##_cpu_lock);				\
		break;							\
	case CPU_UP_CANCELED: case CPU_DEAD:				\
		spin_lock(&name##_cpu_lock);				\
		cpu_clear((unsigned long)hcpu, name##_cpus);		\
		spin_unlock(&name##_cpu_lock);				\
	}								\
	return NOTIFY_OK;						\
 }									\
 static struct notifier_block name##_lg_cpu_notifier = {		\
	.notifier_call = name##_lg_cpu_callback,			\
 };									\
 void name##_lock_init(void) {						\
	int i;								\
	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
	for_each_possible_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
	}								\
	register_hotcpu_notifier(&name##_lg_cpu_notifier);		\
	get_online_cpus();						\
	for_each_online_cpu(i)						\
		cpu_set(i, name##_cpus);				\
	put_online_cpus();						\
 }									\
 EXPORT_SYMBOL(name##_lock_init);					\
									\
 void name##_local_lock(void) {						\
	arch_spinlock_t *lock;						\
	preempt_disable();						\
	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
	lock = &__get_cpu_var(name##_lock);				\
	arch_spin_lock(lock);						\
 }									\
 EXPORT_SYMBOL(name##_local_lock);					\
									\
 void name##_local_unlock(void) {					\
	arch_spinlock_t *lock;						\
	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
	lock = &__get_cpu_var(name##_lock);				\
	arch_spin_unlock(lock);						\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_local_unlock);					\
									\
 void name##_local_lock_cpu(int cpu) {					\
	arch_spinlock_t *lock;						\
	preempt_disable();						\
	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
	lock = &per_cpu(name##_lock, cpu);				\
	arch_spin_lock(lock);						\
 }									\
 EXPORT_SYMBOL(name##_local_lock_cpu);					\
									\
 void name##_local_unlock_cpu(int cpu) {				\
	arch_spinlock_t *lock;						\
	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
	lock = &per_cpu(name##_lock, cpu);				\
	arch_spin_unlock(lock);						\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_local_unlock_cpu);				\
									\
 void name##_global_lock_online(void) {					\
	int i;								\
	spin_lock(&name##_cpu_lock);					\
	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
	for_each_cpu(i, &name##_cpus) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_lock(lock);					\
	}								\
 }									\
 EXPORT_SYMBOL(name##_global_lock_online);				\
									\
 void name##_global_unlock_online(void) {				\
	int i;								\
	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
	for_each_cpu(i, &name##_cpus) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_unlock(lock);					\
	}								\
	spin_unlock(&name##_cpu_lock);					\
 }									\
 EXPORT_SYMBOL(name##_global_unlock_online);				\
									\
 void name##_global_lock(void) {					\
	int i;								\
	preempt_disable();						\
	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
	for_each_possible_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_lock(lock);					\
	}								\
 }									\
 EXPORT_SYMBOL(name##_global_lock);					\
									\
 void name##_global_unlock(void) {					\
	int i;								\
	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
	for_each_possible_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_unlock(lock);					\
	}								\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_global_unlock);
#endif