Btrfs: keep irqs on more often in the worker threads
author     Chris Mason <chris.mason@oracle.com>
           Wed, 5 Aug 2009 20:36:45 +0000 (16:36 -0400)
committer  Chris Mason <chris.mason@oracle.com>
           Fri, 11 Sep 2009 17:31:04 +0000 (13:31 -0400)
The btrfs worker thread spinlock was being used both for the
queueing of IO and for the processing of ordered events.

The ordered events never happen from end_io handlers, and so they
don't need to use the _irq version of spinlocks.  This adds a
dedicated lock to the ordered lists so they don't have to run
with irqs off.
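
To illustrate (a minimal sketch, not part of the patch; the demo_* names are
made up): the main workers->lock can still be taken from end_io handlers in
interrupt context and therefore needs the _irqsave/_irqrestore variants, while
the new order_lock is only ever taken from process context, so a plain
spin_lock is enough:

/* illustrative sketch only; demo_* names are not btrfs code */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_workers {
	spinlock_t lock;		/* may be taken from end_io (irq) context */
	spinlock_t order_lock;		/* only taken from process context */
	struct list_head order_list;
};

static void demo_queue_io(struct demo_workers *w)
{
	unsigned long flags;

	/* callers may be in interrupt context, so irqs must be disabled */
	spin_lock_irqsave(&w->lock, flags);
	/* ... pick a worker thread to queue on ... */
	spin_unlock_irqrestore(&w->lock, flags);
}

static void demo_queue_ordered(struct demo_workers *w, struct list_head *entry)
{
	/* never called from an interrupt handler, so irqs stay on */
	spin_lock(&w->order_lock);
	list_add_tail(entry, &w->order_list);
	spin_unlock(&w->order_lock);
}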

Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h

diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index f10c895..4b4372d 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -124,14 +124,12 @@ out:
 static noinline int run_ordered_completions(struct btrfs_workers *workers,
                                            struct btrfs_work *work)
 {
-       unsigned long flags;
-
        if (!workers->ordered)
                return 0;
 
        set_bit(WORK_DONE_BIT, &work->flags);
 
-       spin_lock_irqsave(&workers->lock, flags);
+       spin_lock(&workers->order_lock);
 
        while (1) {
                if (!list_empty(&workers->prio_order_list)) {
@@ -154,17 +152,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
 
-               spin_unlock_irqrestore(&workers->lock, flags);
+               spin_unlock(&workers->order_lock);
 
                work->ordered_func(work);
 
                /* now take the lock again and call the freeing code */
-               spin_lock_irqsave(&workers->lock, flags);
+               spin_lock(&workers->order_lock);
                list_del(&work->order_list);
                work->ordered_free(work);
        }
 
-       spin_unlock_irqrestore(&workers->lock, flags);
+       spin_unlock(&workers->order_lock);
        return 0;
 }
 
@@ -345,6 +343,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
        INIT_LIST_HEAD(&workers->order_list);
        INIT_LIST_HEAD(&workers->prio_order_list);
        spin_lock_init(&workers->lock);
+       spin_lock_init(&workers->order_lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
@@ -374,6 +373,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
                INIT_LIST_HEAD(&worker->prio_pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
+
                atomic_set(&worker->num_pending, 0);
                atomic_set(&worker->refs, 1);
                worker->workers = workers;
@@ -453,10 +453,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
-       spin_unlock_irqrestore(&workers->lock, flags);
 
        if (!worker) {
-               spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        goto fallback;
                } else if (workers->atomic_worker_start) {
@@ -469,6 +467,7 @@ again:
                        goto again;
                }
        }
+       spin_unlock_irqrestore(&workers->lock, flags);
        return worker;
 
 fallback:
@@ -552,14 +551,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
        worker = find_worker(workers);
        if (workers->ordered) {
-               spin_lock_irqsave(&workers->lock, flags);
+               /*
+                * you're not allowed to do ordered queues from an
+                * interrupt handler
+                */
+               spin_lock(&workers->order_lock);
                if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
                        list_add_tail(&work->order_list,
                                      &workers->prio_order_list);
                } else {
                        list_add_tail(&work->order_list, &workers->order_list);
                }
-               spin_unlock_irqrestore(&workers->lock, flags);
+               spin_unlock(&workers->order_lock);
        } else {
                INIT_LIST_HEAD(&work->order_list);
        }
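
For reference, the shape of the ordered-completion walk that
run_ordered_completions() now does under the new lock: walk the list in
submission order, drop order_lock around the user callback, then retake it to
unlink and free the finished item.  A rough sketch reusing the demo_* types
from above (the real code also handles the priority list and the
WORK_DONE/WORK_ORDER_DONE bits):

/* rough sketch of the ordered completion walk; not the btrfs code */
struct demo_work {
	struct list_head order_list;
	bool done;			/* stands in for WORK_DONE_BIT */
	void (*ordered_func)(struct demo_work *);
	void (*ordered_free)(struct demo_work *);
};

static void demo_run_ordered(struct demo_workers *w)
{
	struct demo_work *work;

	spin_lock(&w->order_lock);
	while (!list_empty(&w->order_list)) {
		work = list_first_entry(&w->order_list,
					struct demo_work, order_list);
		if (!work->done)	/* stop at the first unfinished item */
			break;

		/* run the callback without holding the lock */
		spin_unlock(&w->order_lock);
		work->ordered_func(work);

		/* retake the lock to unlink and free the item */
		spin_lock(&w->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}
	spin_unlock(&w->order_lock);
}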
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index a562ad8..fc089b9 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -99,6 +99,9 @@ struct btrfs_workers {
        /* lock for finding the next worker thread to queue on */
        spinlock_t lock;
 
+       /* lock for the ordered lists */
+       spinlock_t order_lock;
+
        /* extra name for this worker, used for current->name */
        char *name;
 };
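
Finally, a hedged sketch of how the two locks would be set up and used
together, again with the illustrative demo_* helpers from above rather than
the real btrfs_init_workers()/btrfs_queue_worker(): in this sketch the
irq-safe lock and the plain order_lock are taken one after the other rather
than nested, so the ordered path no longer forces irqs off.

/* illustrative only; not the btrfs init/queue functions */
static void demo_init_workers(struct demo_workers *w)
{
	INIT_LIST_HEAD(&w->order_list);
	spin_lock_init(&w->lock);	/* users in irq context need _irqsave */
	spin_lock_init(&w->order_lock);	/* process-context only */
}

static void demo_queue(struct demo_workers *w, struct demo_work *work)
{
	demo_queue_io(w);				/* irq-safe lock */
	demo_queue_ordered(w, &work->order_list);	/* plain lock */
}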