Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[pandora-kernel.git] / fs / xfs / xfs_trans_ail.c
index cb3aeac..5fc2380 100644 (file)
 
 struct workqueue_struct        *xfs_ail_wq;    /* AIL workqueue */
 
-STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
-STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
-STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
-
 #ifdef DEBUG
-STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
-#else
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       xfs_log_item_t  *prev_lip;
+
+       if (list_empty(&ailp->xa_ail))
+               return;
+
+       /*
+        * Check the next and previous entries are valid.
+        */
+       ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+       prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+       if (&prev_lip->li_ail != &ailp->xa_ail)
+               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+
+       prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+       if (&prev_lip->li_ail != &ailp->xa_ail)
+               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+
+
+#ifdef XFS_TRANS_DEBUG
+       /*
+        * Walk the list checking lsn ordering, and that every entry has the
+        * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+        * when specifically debugging the transaction subsystem.
+        */
+       prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+       list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+               if (&prev_lip->li_ail != &ailp->xa_ail)
+                       ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+               ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+               prev_lip = lip;
+       }
+#endif /* XFS_TRANS_DEBUG */
+}
+#else /* !DEBUG */
 #define        xfs_ail_check(a,l)
 #endif /* DEBUG */
 
+/*
+ * Return a pointer to the first item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_min(
+       struct xfs_ail  *ailp)
+{
+       if (list_empty(&ailp->xa_ail))
+               return NULL;
+
+       return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the last item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+       struct xfs_ail  *ailp)
+{
+       if (list_empty(&ailp->xa_ail))
+               return NULL;
+
+       return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the item which follows the given item in the AIL.  If
+ * the given item is the last item in the list, then return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_next(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       if (lip->li_ail.next == &ailp->xa_ail)
+               return NULL;
+
+       return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
+}
 
 /*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log.  This is exactly the LSN of the first
- * item in the AIL.  If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
+ * is empty, then this function returns 0.
  *
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
  */
 xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
 {
-       xfs_lsn_t       lsn;
+       xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;
 
        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
-       if (lip == NULL) {
-               lsn = (xfs_lsn_t)0;
-       } else {
+       if (lip)
+               lsn = lip->li_lsn;
+       spin_unlock(&ailp->xa_lock);
+
+       return lsn;
+}
+
+/*
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
+ */
+static xfs_lsn_t
+xfs_ail_max_lsn(
+       struct xfs_ail  *ailp)
+{
+       xfs_lsn_t       lsn = 0;
+       xfs_log_item_t  *lip;
+
+       spin_lock(&ailp->xa_lock);
+       lip = xfs_ail_max(ailp);
+       if (lip)
                lsn = lip->li_lsn;
-       }
        spin_unlock(&ailp->xa_lock);
 
        return lsn;
@@ -207,26 +299,70 @@ out:
        return lip;
 }
 
+/*
+ * splice the log item list into the AIL at the given LSN.
+ */
+static void
+xfs_ail_splice(
+       struct xfs_ail  *ailp,
+       struct list_head *list,
+       xfs_lsn_t       lsn)
+{
+       xfs_log_item_t  *next_lip;
+
+       /* If the list is empty, just insert the item.  */
+       if (list_empty(&ailp->xa_ail)) {
+               list_splice(list, &ailp->xa_ail);
+               return;
+       }
+
+       list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+               if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+                       break;
+       }
+
+       ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+              XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+       list_splice_init(list, &next_lip->li_ail);
+}
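
The splice above walks the AIL backwards from its tail and stops at the last item whose LSN is less than or equal to the insertion LSN, then splices the new items in after it; since newly inserted log items usually carry LSNs at or near the current maximum, the backwards walk finds the insertion point quickly in the common case. Below is a minimal userspace sketch of the same reverse-walk idea, not XFS code: a sentinel-headed doubly linked list sorted by an integer key (standing in for xfs_lsn_t), simplified to inserting one node instead of splicing a whole sub-list. All names are illustrative only.

/*
 * Userspace sketch of the reverse-walk insertion used by xfs_ail_splice(),
 * reduced to a single node keyed by an int instead of a list of log items
 * keyed by xfs_lsn_t.
 */
#include <stdio.h>

struct node {
	int		key;
	struct node	*prev, *next;
};

/* head is a sentinel: head->next is the minimum entry, head->prev the maximum */
static void insert_sorted(struct node *head, struct node *new)
{
	struct node	*cur;

	/* Walk backwards; stop at the last entry with key <= new->key. */
	for (cur = head->prev; cur != head; cur = cur->prev)
		if (cur->key <= new->key)
			break;

	/* Insert after 'cur' (directly after the sentinel if none matched). */
	new->prev = cur;
	new->next = cur->next;
	cur->next->prev = new;
	cur->next = new;
}

int main(void)
{
	struct node	head = { 0, &head, &head };
	struct node	a = { 10 }, b = { 30 }, c = { 20 };
	struct node	*n;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);

	for (n = head.next; n != &head; n = n->next)
		printf("%d\n", n->key);		/* prints 10, 20, 30 */
	return 0;
}
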
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+       struct xfs_ail  *ailp,
+       xfs_log_item_t  *lip)
+{
+       xfs_ail_check(ailp, lip);
+       list_del(&lip->li_ail);
+       xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
 /*
  * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
  * to run at a later time if there is more work to do to complete the push.
  */
 STATIC void
 xfs_ail_worker(
-       struct work_struct *work)
+       struct work_struct      *work)
 {
-       struct xfs_ail  *ailp = container_of(to_delayed_work(work),
+       struct xfs_ail          *ailp = container_of(to_delayed_work(work),
                                        struct xfs_ail, xa_work);
-       long            tout;
-       xfs_lsn_t       target =  ailp->xa_target;
-       xfs_lsn_t       lsn;
-       xfs_log_item_t  *lip;
-       int             flush_log, count, stuck;
-       xfs_mount_t     *mp = ailp->xa_mount;
+       xfs_mount_t             *mp = ailp->xa_mount;
        struct xfs_ail_cursor   *cur = &ailp->xa_cursors;
-       int             push_xfsbufd = 0;
+       xfs_log_item_t          *lip;
+       xfs_lsn_t               lsn;
+       xfs_lsn_t               target;
+       long                    tout = 10;
+       int                     flush_log = 0;
+       int                     stuck = 0;
+       int                     count = 0;
+       int                     push_xfsbufd = 0;
 
        spin_lock(&ailp->xa_lock);
+       target = ailp->xa_target;
        xfs_trans_ail_cursor_init(ailp, cur);
        lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
        if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
@@ -235,8 +371,7 @@ xfs_ail_worker(
                 */
                xfs_trans_ail_cursor_done(ailp, cur);
                spin_unlock(&ailp->xa_lock);
-               ailp->xa_last_pushed_lsn = 0;
-               return;
+               goto out_done;
        }
 
        XFS_STATS_INC(xs_push_ail);
@@ -253,8 +388,7 @@ xfs_ail_worker(
         * lots of contention on the AIL lists.
         */
        lsn = lip->li_lsn;
-       flush_log = stuck = count = 0;
-       while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
+       while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;
                /*
                 * If we can lock the item without sleeping, unlock the AIL
@@ -347,21 +481,25 @@ xfs_ail_worker(
        }
 
        /* assume we have more work to do in a short while */
-       tout = 10;
+out_done:
        if (!count) {
                /* We're past our target or empty, so idle */
                ailp->xa_last_pushed_lsn = 0;
 
                /*
-                * Check for an updated push target before clearing the
-                * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
-                * work to do. Wait a bit longer before starting that work.
+                * We clear the XFS_AIL_PUSHING_BIT first before checking
+                * whether the target has changed. If the target has changed,
+                * this pushes the requeue race directly onto the result of the
+                * atomic test/set bit, so we are guaranteed that either the
+                * pusher that changed the target or ourselves will requeue
+                * the work (but not both).
                 */
+               clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
                smp_rmb();
-               if (ailp->xa_target == target) {
-                       clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+               if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
+                   test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
                        return;
-               }
+
                tout = 50;
        } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                /*
@@ -404,7 +542,7 @@ xfs_ail_worker(
  * any of the objects, so the lock is not needed.
  */
 void
-xfs_trans_ail_push(
+xfs_ail_push(
        struct xfs_ail  *ailp,
        xfs_lsn_t       threshold_lsn)
 {
@@ -420,11 +558,24 @@ xfs_trans_ail_push(
         * the XFS_AIL_PUSHING_BIT.
         */
        smp_wmb();
-       ailp->xa_target = threshold_lsn;
+       xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
        if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
                queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
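
xfs_ail_push() and the idle path of xfs_ail_worker() above together form a "single queued worker" handshake: the pusher publishes the new target and only queues the work if it wins XFS_AIL_PUSHING_BIT, while the worker, before going idle, clears the bit and then re-checks the target, re-taking the bit if the target moved in the meantime. Below is a compact userspace sketch of that handshake, using C11 sequentially consistent atomics in place of the kernel's atomic bitops and smp_wmb()/smp_rmb() barriers; names such as push_target and requeue_worker() are illustrative stand-ins, not XFS interfaces.

/*
 * Sketch of the single-queued-worker handshake, not XFS code.
 * push_target stands in for ailp->xa_target, the 'pushing' flag for
 * XFS_AIL_PUSHING_BIT, and requeue_worker() for queue_delayed_work().
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic long	push_target;			/* ailp->xa_target */
static atomic_flag	pushing = ATOMIC_FLAG_INIT;	/* XFS_AIL_PUSHING_BIT */

static void requeue_worker(void)
{
	printf("worker queued\n");	/* queue_delayed_work() stand-in */
}

/* Pusher side: publish the target, queue the worker only if we win the bit. */
static void push(long new_target)
{
	atomic_store(&push_target, new_target);
	if (!atomic_flag_test_and_set(&pushing))
		requeue_worker();
}

/* Worker side, after pushing everything up to 'target'. */
static void worker_done(long target)
{
	/*
	 * Drop the bit before re-checking the target.  If a racing pusher
	 * moved the target, either it saw the bit clear and requeued us,
	 * or we see the new target and win test_and_set below - exactly
	 * one of the two requeues the work.
	 */
	atomic_flag_clear(&pushing);
	if (atomic_load(&push_target) == target)
		return;				/* nothing new: go idle */
	if (atomic_flag_test_and_set(&pushing))
		return;				/* the pusher already requeued us */
	requeue_worker();
}

int main(void)
{
	push(100);		/* publishes a target, queues the worker once */
	push(100);		/* bit already set: no second queueing */
	worker_done(100);	/* target unchanged: worker goes idle */
	return 0;
}

With sequentially consistent atomics the explicit barriers are unnecessary; the property carried over from the kernel code is that after a target update, exactly one of the pusher or the finishing worker requeues the work, never both and never neither.
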
 
+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+       struct xfs_ail  *ailp)
+{
+       xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);
+
+       if (threshold_lsn)
+               xfs_ail_push(ailp, threshold_lsn);
+}
+
 /*
  * This is to be called when an item is unlocked that may have
  * been in the AIL.  It will wake up the first member of the AIL
@@ -657,121 +808,3 @@ xfs_trans_ail_destroy(
        cancel_delayed_work_sync(&ailp->xa_work);
        kmem_free(ailp);
 }
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
-       struct xfs_ail  *ailp,
-       struct list_head *list,
-       xfs_lsn_t       lsn)
-{
-       xfs_log_item_t  *next_lip;
-
-       /*
-        * If the list is empty, just insert the item.
-        */
-       if (list_empty(&ailp->xa_ail)) {
-               list_splice(list, &ailp->xa_ail);
-               return;
-       }
-
-       list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-               if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
-                       break;
-       }
-
-       ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-              (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
-       list_splice_init(list, &next_lip->li_ail);
-       return;
-}
-
-/*
- * Delete the given item from the AIL.  Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       xfs_ail_check(ailp, lip);
-       list_del(&lip->li_ail);
-       xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
-       struct xfs_ail  *ailp)
-{
-       if (list_empty(&ailp->xa_ail))
-               return NULL;
-
-       return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL.  If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       if (lip->li_ail.next == &ailp->xa_ail)
-               return NULL;
-
-       return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
-       struct xfs_ail  *ailp,
-       xfs_log_item_t  *lip)
-{
-       xfs_log_item_t  *prev_lip;
-
-       if (list_empty(&ailp->xa_ail))
-               return;
-
-       /*
-        * Check the next and previous entries are valid.
-        */
-       ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-       prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
-       if (&prev_lip->li_ail != &ailp->xa_ail)
-               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
-       prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
-       if (&prev_lip->li_ail != &ailp->xa_ail)
-               ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
-       /*
-        * Walk the list checking lsn ordering, and that every entry has the
-        * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
-        * when specifically debugging the transaction subsystem.
-        */
-       prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-       list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
-               if (&prev_lip->li_ail != &ailp->xa_ail)
-                       ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-               ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-               prev_lip = lip;
-       }
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */