/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "async-thread.h"
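
/*
 * async-thread.h is not reproduced here.  Based on how its types are
 * used below, the two structs it declares are assumed to look roughly
 * like this (illustrative sketch only, not the authoritative layout):
 *
 *	struct btrfs_work {
 *		void (*func)(struct btrfs_work *work);
 *		unsigned long flags;		bit 0: already queued
 *		struct btrfs_worker_thread *worker;
 *		struct list_head list;		entry in a pending list
 *	};
 *
 *	struct btrfs_workers {
 *		int num_workers;		threads currently running
 *		int max_workers;		soft cap used by find_worker()
 *		int idle_thresh;		see check_idle/busy_worker()
 *		struct list_head worker_list;	busy threads
 *		struct list_head idle_list;	idle threads
 *		spinlock_t lock;		protects both lists
 *	};
 */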
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* the kthread servicing this worker's pending list */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
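
/*
 * Worked example of the hysteresis implemented by the two helpers
 * above, using the default idle_thresh of 32 set in
 * btrfs_init_workers(): a thread moves to the busy list once it has
 * 32 or more pending items and returns to the idle list only after
 * draining below 16 (32 / 2).  The gap between the two thresholds
 * keeps a thread hovering near the boundary from bouncing between
 * the lists on every queue/dequeue.
 */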
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;

	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			/* run the work function without the lock held */
			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* never sleep in the refrigerator with the lock held */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
}
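
/*
 * Minimal lifecycle sketch for the pool (illustrative only; the
 * variable names are hypothetical, and callers normally embed the
 * struct in a longer lived object):
 *
 *	struct btrfs_workers workers;
 *	int ret;
 *
 *	btrfs_init_workers(&workers, 8);	at most 8 threads
 *	ret = btrfs_start_workers(&workers, 1);	spawn the first one
 *	if (ret)
 *		return ret;
 *	...queue work, see btrfs_queue_worker() below...
 *	btrfs_stop_workers(&workers);		reap all the threads
 */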
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		/* publish the pool pointer before the thread can run */
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker, "btrfs");
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	list_move_tail(next, &workers->worker_list);
	return worker;
}
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any idle workers, just
			 * fall back to the first one we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
					    struct btrfs_worker_thread,
					    worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
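
/*
 * Illustrative sketch of the requeue pattern described above.  The
 * work function and helpers are hypothetical, not part of btrfs: the
 * func makes a bounded amount of progress, puts itself back on the
 * tail of its worker's list, and returns so other items get a turn:
 *
 *	static void csum_some_pages(struct btrfs_work *work)
 *	{
 *		struct my_job *job = container_of(work, struct my_job,
 *						  work);
 *
 *		if (!checksum_one_batch(job)) {
 *			btrfs_requeue_work(work);	more to do later
 *			return;
 *		}
 *		finish_job(job);
 *	}
 */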
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;
	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
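
/*
 * Queueing sketch (illustrative only; struct my_job and my_work_func
 * are hypothetical).  The caller embeds a struct btrfs_work in its own
 * job, sets func, zeroes flags, and hands it to the pool; the other
 * fields belong to the worker threads:
 *
 *	struct my_job {
 *		struct btrfs_work work;	must live until func has run
 *		...caller data...
 *	};
 *
 *	job->work.func = my_work_func;
 *	job->work.flags = 0;
 *	btrfs_queue_worker(&workers, &job->work);
 */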