Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/lrg/voltage-2.6
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 44c2b0e..649ade8 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -37,6 +37,7 @@
 
 #include "xfs_sb.h"
 #include "xfs_inum.h"
+#include "xfs_log.h"
 #include "xfs_ag.h"
 #include "xfs_dmapi.h"
 #include "xfs_mount.h"
@@ -850,6 +851,12 @@ xfs_buf_lock_value(
  *     Note that this in no way locks the underlying pages, so it is only
  *     useful for synchronizing concurrent use of buffer objects, not for
  *     synchronizing independent access to the underlying pages.
+ *
+ *     If we come across a stale, pinned, locked buffer, we know that we
+ *     are being asked to lock a buffer that has been reallocated. Because
+ *     it is pinned, we know that the log has not been pushed to disk and
+ *     hence it will still be locked. Rather than sleeping until someone
+ *     else pushes the log, push it ourselves before trying to get the lock.
  */
 void
 xfs_buf_lock(
@@ -857,6 +864,8 @@ xfs_buf_lock(
 {
        trace_xfs_buf_lock(bp, _RET_IP_);
 
+       if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+               xfs_log_force(bp->b_mount, 0);
        if (atomic_read(&bp->b_io_remaining))
                blk_run_address_space(bp->b_target->bt_mapping);
        down(&bp->b_sema);
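Assembled only from the context and added lines of this hunk, the post-patch lock path looks roughly like the sketch below; the xfs_buf_t parameter and the owner/trace bookkeeping after down() are not shown in the hunk and are assumed or elided here.

	void
	xfs_buf_lock(
		xfs_buf_t		*bp)	/* signature assumed, not visible in this hunk */
	{
		trace_xfs_buf_lock(bp, _RET_IP_);

		/*
		 * Stale + pinned: the buffer has been reallocated and the log
		 * that still holds it locked has not reached disk, so push the
		 * log ourselves instead of sleeping until someone else does.
		 */
		if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
			xfs_log_force(bp->b_mount, 0);

		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		down(&bp->b_sema);
		/* ... remainder of xfs_buf_lock() unchanged and not shown here ... */
	}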
@@ -1007,25 +1016,20 @@ xfs_bwrite(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
 {
-       int                     iowait = (bp->b_flags & XBF_ASYNC) == 0;
-       int                     error = 0;
+       int                     error;
 
        bp->b_strat = xfs_bdstrat_cb;
        bp->b_mount = mp;
        bp->b_flags |= XBF_WRITE;
-       if (!iowait)
-               bp->b_flags |= _XBF_RUN_QUEUES;
+       bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
 
        xfs_buf_delwri_dequeue(bp);
        xfs_buf_iostrategy(bp);
 
-       if (iowait) {
-               error = xfs_buf_iowait(bp);
-               if (error)
-                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-               xfs_buf_relse(bp);
-       }
-
+       error = xfs_buf_iowait(bp);
+       if (error)
+               xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+       xfs_buf_relse(bp);
        return error;
 }
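With the iowait/XBF_ASYNC branch removed, xfs_bwrite() is now unconditionally synchronous: it waits for the I/O, shuts the filesystem down on a metadata write error, and releases the buffer itself. A hypothetical call site under that assumption (not part of this patch) reduces to:

	/*
	 * bp must be locked and referenced on entry; xfs_bwrite() releases
	 * it before returning, so only the error code may be used afterwards.
	 */
	error = xfs_bwrite(mp, bp);
	if (error)
		return error;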
 
@@ -1614,7 +1618,8 @@ xfs_mapping_buftarg(
 
 STATIC int
 xfs_alloc_delwrite_queue(
-       xfs_buftarg_t           *btp)
+       xfs_buftarg_t           *btp,
+       const char              *fsname)
 {
        int     error = 0;
 
@@ -1622,7 +1627,7 @@ xfs_alloc_delwrite_queue(
        INIT_LIST_HEAD(&btp->bt_delwrite_queue);
        spin_lock_init(&btp->bt_delwrite_lock);
        btp->bt_flags = 0;
-       btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+       btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
        if (IS_ERR(btp->bt_task)) {
                error = PTR_ERR(btp->bt_task);
                goto out_error;
@@ -1635,7 +1640,8 @@ out_error:
 xfs_buftarg_t *
 xfs_alloc_buftarg(
        struct block_device     *bdev,
-       int                     external)
+       int                     external,
+       const char              *fsname)
 {
        xfs_buftarg_t           *btp;
 
@@ -1647,7 +1653,7 @@ xfs_alloc_buftarg(
                goto error;
        if (xfs_mapping_buftarg(btp, bdev))
                goto error;
-       if (xfs_alloc_delwrite_queue(btp))
+       if (xfs_alloc_delwrite_queue(btp, fsname))
                goto error;
        xfs_alloc_bufhash(btp, external);
        return btp;
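The new fsname argument exists only so kthread_run() can name each per-target flush thread "xfsbufd/<fsname>", making the threads distinguishable when several filesystems are mounted. A hedged sketch of the matching caller change (the mount-side code is outside this hunk; mp->m_fsname as the name source and the error label are assumptions for illustration):

	/* while setting up the data device buftarg at mount time */
	mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_devices;	/* illustrative error path only */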