/* fs/dlm/recoverd.c */
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"


/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */
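
/* Illustrating the race described above (a sketch restating the comment,
   not part of the original source):

        dlm_recv                        dlm_recoverd
        --------                        ------------
        a) sees locking stopped
                                        enables locking (LSFL_RUNNING)
                                        clears the requestqueue
        b) queues msg on requestqueue   <-- msg is now stranded

   Taking ls_recv_active for write in enable_locking() below excludes
   dlm_recv between its steps a and b, closing the window. */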

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
        int error = -EINTR;

        down_write(&ls->ls_recv_active);

        spin_lock(&ls->ls_recover_lock);
        if (ls->ls_recover_seq == seq) {
                set_bit(LSFL_RUNNING, &ls->ls_flags);
                /* unblocks processes waiting to enter the dlm */
                up_write(&ls->ls_in_recovery);
                error = 0;
        }
        spin_unlock(&ls->ls_recover_lock);

        up_write(&ls->ls_recv_active);
        return error;
}
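
/* For context: the "processes waiting to enter the dlm" above block on
   the read side of ls_in_recovery (in this era's lock.c, via helpers
   along the lines of dlm_lock_recovery()/dlm_unlock_recovery()):

        down_read(&ls->ls_in_recovery);
        ... submit the lock request ...
        up_read(&ls->ls_in_recovery);

   so the up_write() in enable_locking() is what releases them, provided
   the recovery sequence is still current. */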

static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
        unsigned long start;
        int error, neg = 0;

        log_debug(ls, "dlm_recover %llx", (unsigned long long)rv->seq);

        mutex_lock(&ls->ls_recoverd_active);

        dlm_callback_suspend(ls);

        /*
         * Free non-master tossed rsb's.  Master rsb's are kept on toss
         * list and put on root list to be included in resdir recovery.
         */

        dlm_clear_toss_list(ls);

        /*
         * This list of root rsb's will be the basis of most of the recovery
         * routines.
         */

        dlm_create_root_list(ls);

        /*
         * Add or remove nodes from the lockspace's ls_nodes list.
         */

        error = dlm_recover_members(ls, rv, &neg);
        if (error) {
                log_debug(ls, "dlm_recover_members error %d", error);
                goto fail;
        }

        dlm_set_recover_status(ls, DLM_RS_NODES);

        error = dlm_recover_members_wait(ls);
        if (error) {
                log_debug(ls, "dlm_recover_members_wait error %d", error);
                goto fail;
        }

        start = jiffies;

        /*
         * Rebuild our own share of the directory by collecting from all other
         * nodes their master rsb names that hash to us.
         */

        error = dlm_recover_directory(ls);
        if (error) {
                log_debug(ls, "dlm_recover_directory error %d", error);
                goto fail;
        }

        dlm_set_recover_status(ls, DLM_RS_DIR);

        error = dlm_recover_directory_wait(ls);
        if (error) {
                log_debug(ls, "dlm_recover_directory_wait error %d", error);
                goto fail;
        }

        /*
         * We may have outstanding operations that are waiting for a reply from
         * a failed node.  Mark these to be resent after recovery.  Unlock and
         * cancel ops can just be completed.
         */

        dlm_recover_waiters_pre(ls);

        error = dlm_recovery_stopped(ls);
        if (error)
                goto fail;

        if (neg || dlm_no_directory(ls)) {
                /*
                 * Clear lkb's for departed nodes.
                 */

                dlm_purge_locks(ls);

                /*
                 * Get new master nodeid's for rsb's that were mastered on
                 * departed nodes.
                 */

                error = dlm_recover_masters(ls);
                if (error) {
                        log_debug(ls, "dlm_recover_masters error %d", error);
                        goto fail;
                }

                /*
                 * Send our locks on remastered rsb's to the new masters.
                 */

                error = dlm_recover_locks(ls);
                if (error) {
                        log_debug(ls, "dlm_recover_locks error %d", error);
                        goto fail;
                }

                dlm_set_recover_status(ls, DLM_RS_LOCKS);

                error = dlm_recover_locks_wait(ls);
                if (error) {
                        log_debug(ls, "dlm_recover_locks_wait error %d", error);
                        goto fail;
                }

                /*
                 * Finalize state in master rsb's now that all locks can be
                 * checked.  This includes conversion resolution and lvb
                 * settings.
                 */

                dlm_recover_rsbs(ls);
        } else {
                /*
                 * Other lockspace members may be going through the "neg" steps
                 * while also adding us to the lockspace, in which case they'll
                 * be doing the recover_locks (RS_LOCKS) barrier.
                 */
                dlm_set_recover_status(ls, DLM_RS_LOCKS);

                error = dlm_recover_locks_wait(ls);
                if (error) {
                        log_debug(ls, "dlm_recover_locks_wait error %d", error);
                        goto fail;
                }
        }

        dlm_release_root_list(ls);

        /*
         * Purge directory-related requests that are saved in requestqueue.
         * All dir requests from before recovery are invalid now due to the dir
         * rebuild and will be resent by the requesting nodes.
         */

        dlm_purge_requestqueue(ls);

        dlm_set_recover_status(ls, DLM_RS_DONE);

        error = dlm_recover_done_wait(ls);
        if (error) {
                log_debug(ls, "dlm_recover_done_wait error %d", error);
                goto fail;
        }

        dlm_clear_members_gone(ls);

        dlm_adjust_timeouts(ls);

        dlm_callback_resume(ls);

        error = enable_locking(ls, rv->seq);
        if (error) {
                log_debug(ls, "enable_locking error %d", error);
                goto fail;
        }

        error = dlm_process_requestqueue(ls);
        if (error) {
                log_debug(ls, "dlm_process_requestqueue error %d", error);
                goto fail;
        }

        error = dlm_recover_waiters_post(ls);
        if (error) {
                log_debug(ls, "dlm_recover_waiters_post error %d", error);
                goto fail;
        }

        dlm_grant_after_purge(ls);

        log_debug(ls, "dlm_recover %llx generation %u done: %u ms",
                  (unsigned long long)rv->seq, ls->ls_generation,
                  jiffies_to_msecs(jiffies - start));
        mutex_unlock(&ls->ls_recoverd_active);

        dlm_lsop_recover_done(ls);
        return 0;

 fail:
        dlm_release_root_list(ls);
        log_debug(ls, "dlm_recover %llx error %d",
                  (unsigned long long)rv->seq, error);
        mutex_unlock(&ls->ls_recoverd_active);
        return error;
}
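
/* Reading aid: the recovery status barriers above run in a fixed order,

        DLM_RS_NODES -> DLM_RS_DIR -> DLM_RS_LOCKS -> DLM_RS_DONE

   Each dlm_set_recover_status()/dlm_recover_*_wait() pair acts as a
   cluster-wide rendezvous: this node publishes that it finished a phase,
   then waits until every member reports the same status (the status
   exchange itself is implemented in recover.c).  An error in any phase
   jumps to the fail: path, leaving locking disabled until a newer
   recovery event runs ls_recover() again. */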

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
        struct dlm_recover *rv = NULL;

        spin_lock(&ls->ls_recover_lock);
        rv = ls->ls_recover_args;
        ls->ls_recover_args = NULL;
        if (rv && ls->ls_recover_seq == rv->seq)
                clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
        spin_unlock(&ls->ls_recover_lock);

        if (rv) {
                ls_recover(ls, rv);
                kfree(rv->nodes);
                kfree(rv);
        }
}
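
/* Ownership note (an assumption about the dlm_ls_start() side, which
   lives outside this file): the dlm_recover args and its nodes array
   are allocated by dlm_ls_start() and handed off through
   ls_recover_args; taking them under ls_recover_lock transfers
   ownership to this thread, which is why rv->nodes and rv are freed
   unconditionally once ls_recover() returns. */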

static int dlm_recoverd(void *arg)
{
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_local(arg);
        if (!ls) {
                log_print("dlm_recoverd: no lockspace %p", arg);
                return -1;
        }

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!test_bit(LSFL_WORK, &ls->ls_flags))
                        schedule();
                set_current_state(TASK_RUNNING);

                if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
                        do_ls_recovery(ls);
        }

        dlm_put_lockspace(ls);
        return 0;
}

void dlm_recoverd_kick(struct dlm_ls *ls)
{
        set_bit(LSFL_WORK, &ls->ls_flags);
        wake_up_process(ls->ls_recoverd_task);
}
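
/* Why a kick can't be lost: dlm_recoverd_kick() sets LSFL_WORK before
   wake_up_process(), while the thread re-tests LSFL_WORK only after
   moving itself to TASK_INTERRUPTIBLE.  If the kick lands between that
   test and schedule(), wake_up_process() flips the thread back to
   TASK_RUNNING, so schedule() returns without sleeping. */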

int dlm_recoverd_start(struct dlm_ls *ls)
{
        struct task_struct *p;
        int error = 0;

        p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                ls->ls_recoverd_task = p;
        return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
        kthread_stop(ls->ls_recoverd_task);
}

void dlm_recoverd_suspend(struct dlm_ls *ls)
{
        wake_up(&ls->ls_wait_general);
        mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
        mutex_unlock(&ls->ls_recoverd_active);
}