/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>


#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)
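/*
 * A host entry normally expires two minutes after its last use (five
 * minutes once the table has grown past NLM_HOST_MAX entries), and
 * garbage collection passes are scheduled NLM_HOST_COLLECT jiffies apart.
 */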

static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);


static void			nlm_gc_hosts(void);
static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
					const char *, int, int);

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
			const char *hostname, int hostname_len)
{
	return nlm_lookup_host(0, sin, proto, version,
			       hostname, hostname_len);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
			const char *hostname, int hostname_len)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers,
			       hostname, hostname_len);
}

/*
 * Common host lookup routine for server & client
 */
struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
					int proto, int version,
					const char *hostname,
					int hostname_len)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;
	struct nsm_handle *nsm = NULL;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server? "server" : "client",
			hostname_len,
			hostname? hostname : "<none>");


	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 * (address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	chain = &nlm_hosts[hash];
	hlist_for_each_entry(host, pos, chain, h_hash) {
		if (!nlm_cmp_addr(&host->h_addr, sin))
			continue;

		/* See if we have an NSM handle for this client */
		if (!nsm)
			nsm = host->h_nsmhandle;

		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		/* Move to head of hash chain. */
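		/* (MRU ordering: frequently used hosts migrate toward the
		 * front of the chain, which shortens future lookups.) */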
		hlist_del(&host->h_hash);
		hlist_add_head(&host->h_hash, chain);

		nlm_get_host(host);
		goto out;
	}
	if (nsm)
		atomic_inc(&nsm->sm_count);

	host = NULL;

	/* Sadly, the host isn't in our hash table yet. See if
	 * we have an NSM handle for it. If not, create one.
	 */
	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
		goto out;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		nsm_release(nsm);
		goto out;
	}
	host->h_name	   = nsm->sm_name;
	host->h_addr       = *sin;
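	/* The peer's lockd port is not known at this point; leave it
	 * zero and let the RPC client discover it via the portmapper
	 * (RPC_CLNT_CREATE_AUTOBIND in nlm_bind_host below). */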
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_nsmhandle  = nsm;
	host->h_server	   = server;
	hlist_add_head(&host->h_hash, chain);
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

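	/* Past the soft limit? Zeroing next_gc forces a garbage
	 * collection pass on the very next lookup. */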
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}

/*
 * Destroy a host
 */
static void
nlm_destroy_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	BUG_ON(!list_empty(&host->h_lockowners));
	BUG_ON(atomic_read(&host->h_count));

	/*
	 * Release NSM handle and unmonitor host.
	 */
	nsm_unmonitor(host);

	if ((clnt = host->h_rpcclnt) != NULL) {
		if (atomic_read(&clnt->cl_users)) {
			printk(KERN_WARNING
				"lockd: active RPC handle\n");
			clnt->cl_dead = 1;
		} else {
			rpc_destroy_client(host->h_rpcclnt);
		}
	}
	kfree(host);
}

/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		/* nlmsvc_timeout is already in jiffies (the lockd setup
		 * code computes it as nlm_timeout * HZ), so it must not
		 * be scaled by HZ a second time. */
		unsigned long increment = nlmsvc_timeout;
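		/* Linear backoff: each retransmission waits one increment
		 * longer than the last, capped at six increments, with at
		 * most five retries. */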
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk(KERN_WARNING
				"lockd: couldn't create RPC handle for %s\n",
				host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
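		/* Taking a reference also pushes back the expiry time so
		 * the GC leaves this host alone while it is in use. */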
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}

/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin,
				const char *hostname, int hostname_len,
				u32 new_state)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nsm_handle *nsm;
	struct nlm_host	*host;

	dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
			hostname, NIPQUAD(sin->sin_addr));

	/* Find the NSM handle for this peer */
	if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
		return;

	/* When reclaiming locks on this peer, make sure that
	 * we set up a new notification */
	nsm->sm_monitored = 0;

	/* Mark all hosts tied to this NSM state as having rebooted.
	 * We run the loop repeatedly, because we drop the host table
	 * lock for this.
	 * To avoid processing a host several times, we match the nsmstate.
	 */
again:	mutex_lock(&nlm_host_mutex);
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			if (host->h_nsmhandle == nsm
			 && host->h_nsmstate != new_state) {
				host->h_nsmstate = new_state;
				host->h_state++;

				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);
				if (host->h_server) {
					/* We are the server for this peer;
					 * just ditch all the locks it held. */
					nlmsvc_free_host_resources(host);
				} else {
					/* The peer is the server; initiate
					 * lock recovery. */
					nlmclnt_recovery(host);
				}

				nlm_release_host(host);
				goto again;
			}
		}
	}

	mutex_unlock(&nlm_host_mutex);
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
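			/* Sweep: keep any host that is still referenced,
			 * was marked in use, or has not yet expired. */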
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			hlist_del_init(&host->h_hash);

			nlm_destroy_host(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}


/*
 * Manage NSM handles
 */
static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex);

static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
		const char *hostname, int hostname_len,
		int create)
{
	struct nsm_handle *nsm = NULL;
	struct list_head *pos;

	if (!sin)
		return NULL;

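	/* Reject hostnames containing a slash: statd creates on-disk
	 * state files named after the peer, and a '/' in the name could
	 * be abused to escape the state directory. */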
	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				hostname_len, hostname);
		}
		return NULL;
	}

	mutex_lock(&nsm_mutex);
	list_for_each(pos, &nsm_handles) {
		nsm = list_entry(pos, struct nsm_handle, sm_link);

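		/* With nsm_use_hostnames set, peers are identified by
		 * hostname rather than address, so e.g. a multi-homed
		 * peer keeps a single handle. */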
		if (hostname && nsm_use_hostnames) {
			if (strlen(nsm->sm_name) != hostname_len
			 || memcmp(nsm->sm_name, hostname, hostname_len))
				continue;
		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
			continue;
		atomic_inc(&nsm->sm_count);
		goto out;
	}

	if (!create) {
		nsm = NULL;
		goto out;
	}

	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
	if (nsm != NULL) {
		nsm->sm_addr = *sin;
		nsm->sm_name = (char *) (nsm + 1);
		memcpy(nsm->sm_name, hostname, hostname_len);
		nsm->sm_name[hostname_len] = '\0';
		atomic_set(&nsm->sm_count, 1);

		list_add(&nsm->sm_link, &nsm_handles);
	}

out:
	mutex_unlock(&nsm_mutex);
	return nsm;
}

struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
{
	return __nsm_find(sin, hostname, hostname_len, 1);
}

/*
 * Release an NSM handle
 */
void
nsm_release(struct nsm_handle *nsm)
{
	if (!nsm)
		return;
	if (atomic_dec_and_test(&nsm->sm_count)) {
		mutex_lock(&nsm_mutex);
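		/* Recheck under nsm_mutex: a concurrent __nsm_find may
		 * have taken a new reference between our decrement and
		 * acquisition of the lock. */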
		if (atomic_read(&nsm->sm_count) == 0) {
			list_del(&nsm->sm_link);
			kfree(nsm);
		}
		mutex_unlock(&nsm_mutex);
	}
}