/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 */
21 #define KMSG_COMPONENT "IPVS"
22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/skbuff.h>
29 #include <linux/netfilter.h>
30 #include <linux/slab.h>
31 #include <net/net_namespace.h>
32 #include <net/protocol.h>
34 #include <asm/system.h>
35 #include <linux/stat.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/mutex.h>
40 #include <net/ip_vs.h>
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
/* serializes registration/unregistration and /proc traversal */
static DEFINE_MUTEX(__ip_vs_app_mutex);
52 * Get an ip_vs_app object
54 static inline int ip_vs_app_get(struct ip_vs_app *app)
56 return try_module_get(app->module);
60 static inline void ip_vs_app_put(struct ip_vs_app *app)
62 module_put(app->module);
67 * Allocate/initialize app incarnation and register it in proto apps.
70 ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
72 struct ip_vs_protocol *pp;
73 struct ip_vs_app *inc;
76 if (!(pp = ip_vs_proto_get(proto)))
77 return -EPROTONOSUPPORT;
79 if (!pp->unregister_app)
82 inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
85 INIT_LIST_HEAD(&inc->p_list);
86 INIT_LIST_HEAD(&inc->incs_list);
88 inc->port = htons(port);
89 atomic_set(&inc->usecnt, 0);
93 ip_vs_create_timeout_table(app->timeouts,
95 if (!inc->timeout_table) {
101 ret = pp->register_app(inc);
105 list_add(&inc->a_list, &app->incs_list);
106 IP_VS_DBG(9, "%s App %s:%u registered\n",
107 pp->name, inc->name, ntohs(inc->port));
112 kfree(inc->timeout_table);
119 * Release app incarnation
122 ip_vs_app_inc_release(struct ip_vs_app *inc)
124 struct ip_vs_protocol *pp;
126 if (!(pp = ip_vs_proto_get(inc->protocol)))
129 if (pp->unregister_app)
130 pp->unregister_app(inc);
132 IP_VS_DBG(9, "%s App %s:%u unregistered\n",
133 pp->name, inc->name, ntohs(inc->port));
135 list_del(&inc->a_list);
137 kfree(inc->timeout_table);
143 * Get reference to app inc (only called from softirq)
146 int ip_vs_app_inc_get(struct ip_vs_app *inc)
150 atomic_inc(&inc->usecnt);
151 if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
152 atomic_dec(&inc->usecnt);
158 * Put the app inc (only called from timer or net softirq)
160 void ip_vs_app_inc_put(struct ip_vs_app *inc)
162 ip_vs_app_put(inc->app);
163 atomic_dec(&inc->usecnt);
168 * Register an application incarnation in protocol applications
171 register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
175 mutex_lock(&__ip_vs_app_mutex);
177 result = ip_vs_app_inc_new(app, proto, port);
179 mutex_unlock(&__ip_vs_app_mutex);
186 * ip_vs_app registration routine
188 int register_ip_vs_app(struct ip_vs_app *app)
190 /* increase the module use count */
191 ip_vs_use_count_inc();
193 mutex_lock(&__ip_vs_app_mutex);
195 list_add(&app->a_list, &ip_vs_app_list);
197 mutex_unlock(&__ip_vs_app_mutex);
204 * ip_vs_app unregistration routine
205 * We are sure there are no app incarnations attached to services
207 void unregister_ip_vs_app(struct ip_vs_app *app)
209 struct ip_vs_app *inc, *nxt;
211 mutex_lock(&__ip_vs_app_mutex);
213 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
214 ip_vs_app_inc_release(inc);
217 list_del(&app->a_list);
219 mutex_unlock(&__ip_vs_app_mutex);
221 /* decrease the module use count */
222 ip_vs_use_count_dec();
227 * Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
229 int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
231 return pp->app_conn_bind(cp);
236 * Unbind cp from application incarnation (called by cp destructor)
238 void ip_vs_unbind_app(struct ip_vs_conn *cp)
240 struct ip_vs_app *inc = cp->app;
245 if (inc->unbind_conn)
246 inc->unbind_conn(inc, cp);
248 inc->done_conn(inc, cp);
249 ip_vs_app_inc_put(inc);
255 * Fixes th->seq based on ip_vs_seq info.
257 static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
259 __u32 seq = ntohl(th->seq);
262 * Adjust seq with delta-offset for all packets after
263 * the most recent resized pkt seq and with previous_delta offset
264 * for all packets before most recent resized pkt seq.
266 if (vseq->delta || vseq->previous_delta) {
267 if(after(seq, vseq->init_seq)) {
268 th->seq = htonl(seq + vseq->delta);
269 IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
270 __func__, vseq->delta);
272 th->seq = htonl(seq + vseq->previous_delta);
273 IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
274 __func__, vseq->previous_delta);
281 * Fixes th->ack_seq based on ip_vs_seq info.
284 vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
286 __u32 ack_seq = ntohl(th->ack_seq);
289 * Adjust ack_seq with delta-offset for
290 * the packets AFTER most recent resized pkt has caused a shift
291 * for packets before most recent resized pkt, use previous_delta
293 if (vseq->delta || vseq->previous_delta) {
294 /* since ack_seq is the number of octet that is expected
295 to receive next, so compare it with init_seq+delta */
296 if(after(ack_seq, vseq->init_seq+vseq->delta)) {
297 th->ack_seq = htonl(ack_seq - vseq->delta);
298 IP_VS_DBG(9, "%s(): subtracted delta "
299 "(%d) from ack_seq\n", __func__, vseq->delta);
302 th->ack_seq = htonl(ack_seq - vseq->previous_delta);
303 IP_VS_DBG(9, "%s(): subtracted "
304 "previous_delta (%d) from ack_seq\n",
305 __func__, vseq->previous_delta);
312 * Updates ip_vs_seq if pkt has been resized
313 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
315 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
316 unsigned flag, __u32 seq, int diff)
318 /* spinlock is to keep updating cp->flags atomic */
319 spin_lock(&cp->lock);
320 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
321 vseq->previous_delta = vseq->delta;
323 vseq->init_seq = seq;
326 spin_unlock(&cp->lock);
329 static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
330 struct ip_vs_app *app)
333 const unsigned int tcp_offset = ip_hdrlen(skb);
337 if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
340 th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
343 * Remember seq number in case this pkt gets resized
345 seq = ntohl(th->seq);
348 * Fix seq stuff if flagged as so.
350 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
351 vs_fix_seq(&cp->out_seq, th);
352 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
353 vs_fix_ack_seq(&cp->in_seq, th);
356 * Call private output hook function
358 if (app->pkt_out == NULL)
361 if (!app->pkt_out(app, cp, skb, &diff))
365 * Update ip_vs seq stuff if len has changed.
368 vs_seq_update(cp, &cp->out_seq,
369 IP_VS_CONN_F_OUT_SEQ, seq, diff);
375 * Output pkt hook. Will call bound ip_vs_app specific function
376 * called by ipvs packet handler, assumes previously checked cp!=NULL
377 * returns false if it can't handle packet (oom)
379 int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
381 struct ip_vs_app *app;
384 * check if application module is bound to
387 if ((app = cp->app) == NULL)
390 /* TCP is complicated */
391 if (cp->protocol == IPPROTO_TCP)
392 return app_tcp_pkt_out(cp, skb, app);
395 * Call private output hook function
397 if (app->pkt_out == NULL)
400 return app->pkt_out(app, cp, skb, NULL);
404 static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
405 struct ip_vs_app *app)
408 const unsigned int tcp_offset = ip_hdrlen(skb);
412 if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
415 th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
418 * Remember seq number in case this pkt gets resized
420 seq = ntohl(th->seq);
423 * Fix seq stuff if flagged as so.
425 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
426 vs_fix_seq(&cp->in_seq, th);
427 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
428 vs_fix_ack_seq(&cp->out_seq, th);
431 * Call private input hook function
433 if (app->pkt_in == NULL)
436 if (!app->pkt_in(app, cp, skb, &diff))
440 * Update ip_vs seq stuff if len has changed.
443 vs_seq_update(cp, &cp->in_seq,
444 IP_VS_CONN_F_IN_SEQ, seq, diff);
450 * Input pkt hook. Will call bound ip_vs_app specific function
451 * called by ipvs packet handler, assumes previously checked cp!=NULL.
452 * returns false if can't handle packet (oom).
454 int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
456 struct ip_vs_app *app;
459 * check if application module is bound to
462 if ((app = cp->app) == NULL)
465 /* TCP is complicated */
466 if (cp->protocol == IPPROTO_TCP)
467 return app_tcp_pkt_in(cp, skb, app);
470 * Call private input hook function
472 if (app->pkt_in == NULL)
475 return app->pkt_in(app, cp, skb, NULL);
479 #ifdef CONFIG_PROC_FS
481 * /proc/net/ip_vs_app entry function
484 static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
486 struct ip_vs_app *app, *inc;
488 list_for_each_entry(app, &ip_vs_app_list, a_list) {
489 list_for_each_entry(inc, &app->incs_list, a_list) {
498 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
500 mutex_lock(&__ip_vs_app_mutex);
502 return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
505 static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
507 struct ip_vs_app *inc, *app;
511 if (v == SEQ_START_TOKEN)
512 return ip_vs_app_idx(0);
517 if ((e = inc->a_list.next) != &app->incs_list)
518 return list_entry(e, struct ip_vs_app, a_list);
520 /* go on to next application */
521 for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
522 app = list_entry(e, struct ip_vs_app, a_list);
523 list_for_each_entry(inc, &app->incs_list, a_list) {
530 static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
532 mutex_unlock(&__ip_vs_app_mutex);
535 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
537 if (v == SEQ_START_TOKEN)
538 seq_puts(seq, "prot port usecnt name\n");
540 const struct ip_vs_app *inc = v;
542 seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
543 ip_vs_proto_name(inc->protocol),
545 atomic_read(&inc->usecnt),
551 static const struct seq_operations ip_vs_app_seq_ops = {
552 .start = ip_vs_app_seq_start,
553 .next = ip_vs_app_seq_next,
554 .stop = ip_vs_app_seq_stop,
555 .show = ip_vs_app_seq_show,
558 static int ip_vs_app_open(struct inode *inode, struct file *file)
560 return seq_open(file, &ip_vs_app_seq_ops);
563 static const struct file_operations ip_vs_app_fops = {
564 .owner = THIS_MODULE,
565 .open = ip_vs_app_open,
568 .release = seq_release,
572 static int __net_init __ip_vs_app_init(struct net *net)
574 if (!net_eq(net, &init_net)) /* netns not enabled yet */
577 proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
581 static void __net_exit __ip_vs_app_cleanup(struct net *net)
583 proc_net_remove(net, "ip_vs_app");
586 static struct pernet_operations ip_vs_app_ops = {
587 .init = __ip_vs_app_init,
588 .exit = __ip_vs_app_cleanup,
591 int __init ip_vs_app_init(void)
595 rv = register_pernet_subsys(&ip_vs_app_ops);
600 void ip_vs_app_cleanup(void)
602 unregister_pernet_subsys(&ip_vs_app_ops);