diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa5..18c5a50 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -500,14 +500,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-       struct rpc_task *task, *next;
        struct list_head *head;
 
        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
-               list_for_each_entry_safe(task, next, head, u.tk_wait.list)
+               while (!list_empty(head)) {
+                       struct rpc_task *task;
+                       task = list_first_entry(head,
+                                       struct rpc_task,
+                                       u.tk_wait.list);
                        rpc_wake_up_task_queue_locked(queue, task);
+               }
                if (head == &queue->tasks[0])
                        break;
                head--;
@@ -525,13 +529,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-       struct rpc_task *task, *next;
        struct list_head *head;
 
        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
-               list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
+               while (!list_empty(head)) {
+                       struct rpc_task *task;
+                       task = list_first_entry(head,
+                                       struct rpc_task,
+                                       u.tk_wait.list);
                        task->tk_status = status;
                        rpc_wake_up_task_queue_locked(queue, task);
                }
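
Why both loops changed: rpc_wake_up_task_queue_locked() can remove more than the entry being visited. On a priority queue, a task co-queued behind the woken task (via u.tk_wait.links) may be moved or dequeued as a side effect, so the "next" pointer that list_for_each_entry_safe() caches can end up referencing an entry that is no longer on the list. Draining with list_first_entry() until the list is empty never trusts a stale pointer. A minimal, self-contained userspace sketch of the hazard (names hypothetical, list simplified, but the iteration bug is the same):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int id;
        struct node *next;
    };

    static struct node *head;

    /*
     * Stand-in for rpc_wake_up_task_queue_locked() on a priority queue:
     * waking one entry also dequeues (and frees) the entry behind it.
     */
    static void wake(struct node *n)
    {
        printf("woke %d\n", n->id);
        head = n->next;
        if (head) {
            struct node *linked = head;
            head = linked->next;
            printf("woke linked %d\n", linked->id);
            free(linked);           /* a cached "next" now dangles */
        }
        free(n);
    }

    int main(void)
    {
        for (int i = 3; i >= 1; i--) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }
        /*
         * Broken shape (what list_for_each_entry_safe() does):
         *     for (n = head; n; n = saved) { saved = n->next; wake(n); }
         * After wake() on node 1 frees node 2, saved points at freed memory.
         *
         * Safe shape (what the patch does): re-read the head each pass.
         */
        while (head)
            wake(head);
        return 0;
    }
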
@@ -590,6 +597,27 @@ void rpc_prepare_task(struct rpc_task *task)
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+       /* Initialize retry counters */
+       task->tk_garb_retry = 2;
+       task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
+
+       /* starting timestamp */
+       task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+       task->tk_timeouts = 0;
+       task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+       rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +630,7 @@ void rpc_exit_task(struct rpc_task *task)
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
+                       rpc_reset_task_statistics(task);
                }
        }
 }
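
The two new helpers split statistics setup out of rpc_init_task() (see the hunks below) so that rpc_exit_task() can re-seed them: when the rpc_call_done callback restarts the task, i.e. leaves tk_action non-NULL, the retry budgets, timeout count, and start timestamp begin afresh for the restarted run instead of carrying over state from the finished one. A compressed, self-contained sketch of that lifecycle, using stand-in fields rather than the real rpc_task layout:

    #include <stdio.h>

    struct task {                           /* stand-in, not struct rpc_task */
        int cred_retry;                     /* cf. tk_cred_retry */
        int timeouts;                       /* cf. tk_timeouts   */
        int runs;
        void (*action)(struct task *);      /* cf. tk_action     */
    };

    static void run(struct task *t);

    static void init_stats(struct task *t)  /* cf. rpc_init_task_statistics */
    {
        t->cred_retry = 2;
    }

    static void reset_stats(struct task *t) /* cf. rpc_reset_task_statistics */
    {
        t->timeouts = 0;
        init_stats(t);                      /* reuse the creation-time helper */
    }

    static void call_done(struct task *t)   /* restarts the task exactly once */
    {
        if (t->runs++ == 0)
            t->action = run;                /* cf. rpc_restart_call() */
    }

    static void exit_task(struct task *t)   /* cf. rpc_exit_task */
    {
        t->action = NULL;
        call_done(t);
        if (t->action)                      /* callback restarted the task... */
            reset_stats(t);                 /* ...so the next run starts fresh */
    }

    static void run(struct task *t)
    {
        t->cred_retry--;                    /* pretend the run burned a retry */
        t->timeouts++;                      /* ...and saw a timeout */
        exit_task(t);
    }

    int main(void)
    {
        struct task t = {0};

        init_stats(&t);
        t.action = run;
        while (t.action) {
            void (*a)(struct task *) = t.action;
            t.action = NULL;
            a(&t);
        }
        printf("runs=%d cred_retry=%d timeouts=%d\n",
               t.runs, t.cred_retry, t.timeouts);
        return 0;                   /* prints: runs=2 cred_retry=1 timeouts=1 */
    }
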
@@ -726,7 +755,9 @@ void rpc_execute(struct rpc_task *task)
 
 static void rpc_async_schedule(struct work_struct *work)
 {
+       current->flags |= PF_FSTRANS;
        __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+       current->flags &= ~PF_FSTRANS;
 }
 
 /**
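
Wrapping __rpc_execute() in PF_FSTRANS marks the kworker as running inside a filesystem transaction for the duration of the RPC; the companion NFS change uses the flag to avoid launching a blocking commit from rpciod context, which could deadlock against the very workqueue executing the RPC. Because a kworker owns its own flags, the unconditional set/clear above is safe; a save/restore shape (sketch below, not the patch's code) is the more defensive idiom, and later kernels express the same intent with memalloc_nofs_save()/memalloc_nofs_restore():

    /* Sketch only: preserve a PF_FSTRANS that an outer context set,
     * instead of clearing it unconditionally on the way out. */
    static void rpc_async_schedule(struct work_struct *work)
    {
        unsigned int saved = current->flags & PF_FSTRANS;

        current->flags |= PF_FSTRANS;
        __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
        current->flags = (current->flags & ~PF_FSTRANS) | saved;
    }
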
@@ -804,11 +835,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);
 
-       /* Initialize retry counters */
-       task->tk_garb_retry = 2;
-       task->tk_cred_retry = 2;
-       task->tk_rebind_retry = 2;
-
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
 
@@ -818,8 +844,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
 
-       /* starting timestamp */
-       task->tk_start = ktime_get();
+       rpc_init_task_statistics(task);
 
        dprintk("RPC:       new task initialized, procpid %u\n",
                                task_pid_nr(current));
@@ -855,16 +880,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
        return task;
 }
 
+/*
+ * rpc_free_task - release rpc task and perform cleanups
+ *
+ * Note that we free up the rpc_task _after_ rpc_release_calldata()
+ * in order to work around a workqueue dependency issue.
+ *
+ * Tejun Heo states:
+ * "Workqueue currently considers two work items to be the same if they're
+ * on the same address and won't execute them concurrently - ie. it
+ * makes a work item which is queued again while being executed wait
+ * for the previous execution to complete.
+ *
+ * If a work function frees the work item, and then waits for an event
+ * which should be performed by another work item and *that* work item
+ * recycles the freed work item, it can create a false dependency loop.
+ * There really is no reliable way to detect this short of verifying
+ * every memory free."
+ *
+ */
 static void rpc_free_task(struct rpc_task *task)
 {
-       const struct rpc_call_ops *tk_ops = task->tk_ops;
-       void *calldata = task->tk_calldata;
+       unsigned short tk_flags = task->tk_flags;
+
+       rpc_release_calldata(task->tk_ops, task->tk_calldata);
 
-       if (task->tk_flags & RPC_TASK_DYNAMIC) {
+       if (tk_flags & RPC_TASK_DYNAMIC) {
                dprintk("RPC: %5u freeing task\n", task->tk_pid);
                mempool_free(task, rpc_task_mempool);
        }
-       rpc_release_calldata(tk_ops, calldata);
 }
 
 static void rpc_async_release(struct work_struct *work)
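
The reordering in rpc_free_task() applies the rule Tejun Heo's quote implies: a work function must not free its own work item while anything that can block may still follow, because the workqueue identifies work items by address alone. If the freed rpc_task's memory is recycled for a new work item, waiting on that new item is indistinguishable from waiting on the still-running old one, creating a false dependency loop. A generic sketch of the bad and good orderings (names hypothetical, not sunrpc code):

    struct my_item {
        struct work_struct work;
        /* ... payload ... */
    };

    extern struct workqueue_struct *some_wq;   /* hypothetical */

    static void bad_work_fn(struct work_struct *work)
    {
        struct my_item *it = container_of(work, struct my_item, work);

        kfree(it);                  /* address may be recycled for a new
                                     * item queued on some_wq... */
        flush_workqueue(some_wq);   /* ...which then appears to be *this*
                                     * still-running item: possible hang */
    }

    static void good_work_fn(struct work_struct *work)
    {
        struct my_item *it = container_of(work, struct my_item, work);

        flush_workqueue(some_wq);   /* finish every wait first... */
        kfree(it);                  /* ...and free strictly last, as
                                     * rpc_free_task() now does */
    }
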
@@ -874,8 +918,7 @@ static void rpc_async_release(struct work_struct *work)
 
 static void rpc_release_resources_task(struct rpc_task *task)
 {
-       if (task->tk_rqstp)
-               xprt_release(task);
+       xprt_release(task);
        if (task->tk_msg.rpc_cred) {
                put_rpccred(task->tk_msg.rpc_cred);
                task->tk_msg.rpc_cred = NULL;
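
The last hunk drops the tk_rqstp guard so every task, including one killed before it ever obtained a request slot, goes through xprt_release(); the NULL handling moves into the callee, which can then do any transport-side cleanup in a single place. That presumes the paired xprt_release() change outside this file; the idiom it relies on looks roughly like this (sketch with a hypothetical body, not the actual xprt.c code):

    /* Make the callee tolerate "nothing to release" so that every
     * caller can invoke it unconditionally. */
    void xprt_release(struct rpc_task *task)
    {
        if (task->tk_rqstp == NULL) {
            /* task never got a slot: transport-level
             * cleanup only, if any, then done */
            return;
        }
        /* ... normal teardown of the request and slot ... */
    }
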