/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/gfs2_ondisk.h>

#include "incore.h"
#include "glock.h"
#include "util.h"

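/*
 * gdlm_ast - DLM completion callback (AST)
 *
 * Runs when a dlm_lock() or dlm_unlock() request submitted for a glock
 * completes.  On -DLM_EUNLOCK the glock memory is freed here; otherwise
 * the DLM status and flags are translated into a glock state (plus any
 * LM_OUT_* bits) and handed to gfs2_glock_complete().
 */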
static void gdlm_ast(void *arg)
{
        struct gfs2_glock *gl = arg;
        unsigned ret = gl->gl_state;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

        if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
                memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);

        switch (gl->gl_lksb.sb_status) {
        case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
                if (gl->gl_ops->go_flags & GLOF_ASPACE)
                        kmem_cache_free(gfs2_glock_aspace_cachep, gl);
                else
                        kmem_cache_free(gfs2_glock_cachep, gl);
                if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                        wake_up(&sdp->sd_glock_wait);
                return;
        case -DLM_ECANCEL: /* Cancel while getting lock */
                ret |= LM_OUT_CANCELED;
                goto out;
        case -EAGAIN: /* Try lock fails */
        case -EDEADLK: /* Deadlock detected */
                goto out;
        case -ETIMEDOUT: /* Canceled due to timeout */
                ret |= LM_OUT_ERROR;
                goto out;
        case 0: /* Success */
                break;
        default: /* Something unexpected */
                BUG();
        }

        ret = gl->gl_req;
        if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
                if (gl->gl_req == LM_ST_SHARED)
                        ret = LM_ST_DEFERRED;
                else if (gl->gl_req == LM_ST_DEFERRED)
                        ret = LM_ST_SHARED;
                else
                        BUG();
        }

        set_bit(GLF_INITIAL, &gl->gl_flags);
        gfs2_glock_complete(gl, ret);
        return;
out:
        if (!test_bit(GLF_INITIAL, &gl->gl_flags))
                gl->gl_lksb.sb_lkid = 0;
        gfs2_glock_complete(gl, ret);
}

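/*
 * gdlm_bast - DLM blocking callback (BAST)
 *
 * Another node has requested a lock that conflicts with the mode we hold.
 * Map the blocked request's DLM mode to the glock state we would need to
 * drop to, and ask the glock layer to demote via gfs2_glock_cb().
 */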
static void gdlm_bast(void *arg, int mode)
{
        struct gfs2_glock *gl = arg;

        switch (mode) {
        case DLM_LOCK_EX:
                gfs2_glock_cb(gl, LM_ST_UNLOCKED);
                break;
        case DLM_LOCK_CW:
                gfs2_glock_cb(gl, LM_ST_DEFERRED);
                break;
        case DLM_LOCK_PR:
                gfs2_glock_cb(gl, LM_ST_SHARED);
                break;
        default:
                printk(KERN_ERR "unknown bast mode %d\n", mode);
                BUG();
        }
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(const unsigned int lmstate)
{
        switch (lmstate) {
        case LM_ST_UNLOCKED:
                return DLM_LOCK_NL;
        case LM_ST_EXCLUSIVE:
                return DLM_LOCK_EX;
        case LM_ST_DEFERRED:
                return DLM_LOCK_CW;
        case LM_ST_SHARED:
                return DLM_LOCK_PR;
        }
        printk(KERN_ERR "unknown LM state %d\n", lmstate);
        BUG();
        return -1;
}

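/*
 * make_flags - build the DLM_LKF_* flags for a request from the glock
 * layer's LM_FLAG_* hints.  A non-zero lkid means the DLM lock already
 * exists, so the request becomes a conversion (DLM_LKF_CONVERT).
 * DLM_LKF_VALBLK is always set so the lock value block is used.
 */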
static u32 make_flags(const u32 lkid, const unsigned int gfs_flags,
                      const int req)
{
        u32 lkf = 0;

        if (gfs_flags & LM_FLAG_TRY)
                lkf |= DLM_LKF_NOQUEUE;

        if (gfs_flags & LM_FLAG_TRY_1CB) {
                lkf |= DLM_LKF_NOQUEUE;
                lkf |= DLM_LKF_NOQUEUEBAST;
        }

        if (gfs_flags & LM_FLAG_PRIORITY) {
                lkf |= DLM_LKF_NOORDER;
                lkf |= DLM_LKF_HEADQUE;
        }

        if (gfs_flags & LM_FLAG_ANY) {
                if (req == DLM_LOCK_PR)
                        lkf |= DLM_LKF_ALTCW;
                else if (req == DLM_LOCK_CW)
                        lkf |= DLM_LKF_ALTPR;
                else
                        BUG();
        }

        if (lkid != 0)
                lkf |= DLM_LKF_CONVERT;

        lkf |= DLM_LKF_VALBLK;

        return lkf;
}

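/*
 * gdlm_lock - acquire or convert a DLM lock for a glock
 *
 * Translate the requested glock state and flags into a DLM mode and flag
 * set, then submit an asynchronous dlm_lock() request.  Completion is
 * reported via gdlm_ast(); blocking notifications arrive via gdlm_bast().
 */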
static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
                     unsigned int flags)
{
        struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
        int req;
        u32 lkf;

        req = make_mode(req_state);
        lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req);

        /*
         * Submit the actual lock request.
         */

        return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, gl->gl_strname,
                        GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}

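/*
 * gdlm_put_lock - drop the DLM lock backing a glock that is being freed
 *
 * If the glock never obtained a DLM lock id it can be freed immediately;
 * otherwise issue an asynchronous dlm_unlock() and let gdlm_ast() free the
 * glock when -DLM_EUNLOCK comes back.
 */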
static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        int error;

        if (gl->gl_lksb.sb_lkid == 0) {
                kmem_cache_free(cachep, gl);
                if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                        wake_up(&sdp->sd_glock_wait);
                return;
        }

        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
                           NULL, gl);
        if (error) {
                printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n",
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number, error);
                return;
        }
}

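/*
 * gdlm_cancel - attempt to cancel an in-progress lock request by calling
 * dlm_unlock() with DLM_LKF_CANCEL; if the cancel wins the race, gdlm_ast()
 * sees -DLM_ECANCEL.
 */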
static void gdlm_cancel(struct gfs2_glock *gl)
{
        struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
        dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

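/*
 * gdlm_mount - create the DLM lockspace for this filesystem
 *
 * The lockspace is named after the fsname supplied by the mount code, so
 * every node mounting the same filesystem joins the same lockspace.
 */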
static int gdlm_mount(struct gfs2_sbd *sdp, const char *fsname)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        int error;

        if (fsname == NULL) {
                fs_info(sdp, "no fsname found\n");
                return -EINVAL;
        }

        error = dlm_new_lockspace(fsname, strlen(fsname), &ls->ls_dlm,
                                  DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
                                  (ls->ls_nodir ? DLM_LSFL_NODIR : 0),
                                  GDLM_LVB_SIZE);
        if (error)
                printk(KERN_ERR "dlm_new_lockspace error %d\n", error);

        return error;
}

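/*
 * gdlm_unmount - tear down the lockspace created by gdlm_mount().  A force
 * level of 2 lets dlm_release_lockspace() proceed even if locks remain.
 */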
static void gdlm_unmount(struct gfs2_sbd *sdp)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (ls->ls_dlm) {
                dlm_release_lockspace(ls->ls_dlm, 2);
                ls->ls_dlm = NULL;
        }
}

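/*
 * Hostdata mount options understood by lock_dlm (jid, id, first, nodir),
 * exposed to the GFS2 mount code through the lm_tokens pointer below.
 */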
static const match_table_t dlm_tokens = {
        { Opt_jid, "jid=%d"},
        { Opt_id, "id=%d"},
        { Opt_first, "first=%d"},
        { Opt_nodir, "nodir=%d"},
        { Opt_err, NULL },
};

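/*
 * The lock_dlm interface exported to the rest of GFS2: the set of locking
 * callbacks used when the filesystem is mounted with the dlm lock protocol.
 */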
const struct lm_lockops gfs2_dlm_ops = {
        .lm_proto_name = "lock_dlm",
        .lm_mount = gdlm_mount,
        .lm_unmount = gdlm_unmount,
        .lm_put_lock = gdlm_put_lock,
        .lm_lock = gdlm_lock,
        .lm_cancel = gdlm_cancel,
        .lm_tokens = &dlm_tokens,
};