#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; we don't
 * try to detect the ref hitting 0, which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, because when we go to
 * shutdown the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill(). Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
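
/*
 * A worked illustration of the modular-arithmetic point above (numbers are
 * hypothetical): with 8-bit counters, 300 gets on cpu0 would leave its
 * counter at 300 % 256 == 44, and 299 puts on cpu1 would leave its counter
 * at -299 % 256 == 213. Their sum, 44 + 213 == 257 == 1 (mod 256), equals
 * the true delta 300 - 299, exactly as if a single counter had been used.
 */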

#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
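
/*
 * Note that __PERCPU_REF_ATOMIC_DEAD lives in the low bits of the percpu
 * pointer itself - this is why percpu_ref_init() below allocates with at
 * least 1 << __PERCPU_REF_FLAG_BITS alignment, so that the low bits of the
 * pointer are guaranteed zero and are free to carry state.
 */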

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @gfp: allocation mask to use
 *
 * Initializes @ref in percpu mode with a refcount of 1; analogous to
 * atomic_long_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));

	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
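
/*
 * Typical usage (a hypothetical sketch - struct foo and foo_release() are
 * illustrative, not part of this file): embed the ref in an object and
 * check for allocation failure in the object's constructor.
 *
 *	if (percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL)) {
 *		kfree(foo);
 *		return NULL;
 *	}
 */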

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
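
/*
 * Example (a hypothetical sketch): calling percpu_ref_exit() from the
 * @ref->release() callback, the most common pattern.
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 */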

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
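
	/*
	 * Worked illustration (hypothetical numbers): after init,
	 * ref->count == PERCPU_COUNT_BIAS + 1 and the percpu counters sum to
	 * 0. If users have since done two more gets than puts, the percpu sum
	 * is 2 and the atomic_long_add() above leaves ref->count ==
	 * (BIAS + 1) + (2 - BIAS) == 3, the true number of outstanding refs.
	 */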

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after being killed",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_switch)
		ref->confirm_switch(ref);

	/*
	 * Now we're in single atomic_long_t mode with a consistent
	 * refcount, so it's safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed, but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC_DEAD;
	ref->confirm_switch = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
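
/*
 * Example (a hypothetical sketch): using @confirm_kill with a completion so
 * the caller can wait until no CPU can take new tryget_live() references.
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->kill_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->kill_done);
 */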

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(). @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following __PERCPU_REF_ATOMIC_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
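
/*
 * Example (a hypothetical sketch): a kill/reinit cycle as a user might drive
 * it, assuming foo_release() signals foo->released rather than freeing the
 * object (reinit requires the ref to have hit 0 but not been exited).
 *
 *	percpu_ref_kill(&foo->ref);
 *	wait_for_completion(&foo->released);
 *	percpu_ref_reinit(&foo->ref);
 */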