2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
30 #include <linux/module.h>
31 #include <linux/kernel.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
45 #include <linux/netfilter.h>
46 #include <linux/netfilter_ipv4.h>
48 #ifdef CONFIG_IP_VS_IPV6
50 #include <linux/netfilter_ipv6.h>
53 #include <net/ip_vs.h>
/*
 * Symbols exported for use by other IPVS modules (schedulers, protocol
 * handlers, sync daemon). The TCP-listen and debug-level exports are
 * conditional on their respective kernel config options.
 */
56 EXPORT_SYMBOL(register_ip_vs_scheduler);
57 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
58 EXPORT_SYMBOL(ip_vs_proto_name);
59 EXPORT_SYMBOL(ip_vs_conn_new);
60 EXPORT_SYMBOL(ip_vs_conn_in_get);
61 EXPORT_SYMBOL(ip_vs_conn_out_get);
62 #ifdef CONFIG_IP_VS_PROTO_TCP
63 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
65 EXPORT_SYMBOL(ip_vs_conn_put);
66 #ifdef CONFIG_IP_VS_DEBUG
67 EXPORT_SYMBOL(ip_vs_get_debug_level);
/* ID used in ICMP lookups: the echo identifier of an ICMP(v6) header.
 * Both macros fully parenthesize their argument so that a non-trivial
 * pointer expression (e.g. a cast or pointer arithmetic) binds correctly. */
#define icmp_id(icmph)		(((icmph)->un).echo.id)
#define icmpv6_id(icmph)	(((icmph)->icmp6_dataun.u_echo.identifier))
/*
 * Map an IP protocol number to a printable name; protocol numbers with
 * no dedicated case are formatted as "IP_<num>".
 * NOTE(review): the formatted string goes into 'buf', declared in elided
 * lines -- presumably a static buffer, so the result is not reentrant;
 * confirm against the full source.
 */
75 const char *ip_vs_proto_name(unsigned proto)
90 #ifdef CONFIG_IP_VS_IPV6
95 sprintf(buf, "IP_%d", proto);
/*
 * Initialize every row of an IPVS hash table to an empty list head.
 * (The loop over 'rows' is in elided lines; only the per-row init is
 * visible here.)
 */
100 void ip_vs_init_hash_table(struct list_head *table, int rows)
103 INIT_LIST_HEAD(&table[rows]);
/*
 * Account one incoming (client->server) packet: bump inpkts/inbytes on
 * the destination server, its virtual service, and the global IPVS
 * counters, each under its own stats spinlock.
 */
107 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
109 struct ip_vs_dest *dest = cp->dest;
/* Only count against a destination that is still marked available */
110 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
111 spin_lock(&dest->stats.lock);
112 dest->stats.ustats.inpkts++;
113 dest->stats.ustats.inbytes += skb->len;
114 spin_unlock(&dest->stats.lock);
/* per-virtual-service counters */
116 spin_lock(&dest->svc->stats.lock);
117 dest->svc->stats.ustats.inpkts++;
118 dest->svc->stats.ustats.inbytes += skb->len;
119 spin_unlock(&dest->svc->stats.lock);
/* global IPVS counters */
121 spin_lock(&ip_vs_stats.lock);
122 ip_vs_stats.ustats.inpkts++;
123 ip_vs_stats.ustats.inbytes += skb->len;
124 spin_unlock(&ip_vs_stats.lock);
/*
 * Account one outgoing (server->client) packet: mirror of
 * ip_vs_in_stats() for the outpkts/outbytes counters on the
 * destination, its service, and the global stats.
 */
130 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
132 struct ip_vs_dest *dest = cp->dest;
/* Only count against a destination that is still marked available */
133 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
134 spin_lock(&dest->stats.lock);
135 dest->stats.ustats.outpkts++;
136 dest->stats.ustats.outbytes += skb->len;
137 spin_unlock(&dest->stats.lock);
/* per-virtual-service counters */
139 spin_lock(&dest->svc->stats.lock);
140 dest->svc->stats.ustats.outpkts++;
141 dest->svc->stats.ustats.outbytes += skb->len;
142 spin_unlock(&dest->svc->stats.lock);
/* global IPVS counters */
144 spin_lock(&ip_vs_stats.lock);
145 ip_vs_stats.ustats.outpkts++;
146 ip_vs_stats.ustats.outbytes += skb->len;
147 spin_unlock(&ip_vs_stats.lock);
/*
 * Account a newly scheduled connection on the chosen destination
 * server, the virtual service, and the global IPVS statistics.
 * Unlike the packet counters, cp->dest is dereferenced unconditionally
 * here -- callers create cp with a valid dest.
 */
153 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
155 spin_lock(&cp->dest->stats.lock);
156 cp->dest->stats.ustats.conns++;
157 spin_unlock(&cp->dest->stats.lock);
159 spin_lock(&svc->stats.lock);
160 svc->stats.ustats.conns++;
161 spin_unlock(&svc->stats.lock);
163 spin_lock(&ip_vs_stats.lock);
164 ip_vs_stats.ustats.conns++;
165 spin_unlock(&ip_vs_stats.lock);
/*
 * Advance the per-protocol connection state machine for a packet seen
 * in the given direction. Protocols without a state_transition hook
 * are left untouched (the early-return branch is in an elided line).
 */
170 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
171 const struct sk_buff *skb,
172 struct ip_vs_protocol *pp)
174 if (unlikely(!pp->state_transition))
176 return pp->state_transition(cp, direction, skb, pp);
/*
 * Build the connection-template lookup parameter for a persistent
 * service, then let the service's persistence engine (p->pe), if one
 * is attached and provides fill_param, extract its own match data from
 * the packet into the parameter.
 */
180 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
181 struct sk_buff *skb, int protocol,
182 const union nf_inet_addr *caddr, __be16 cport,
183 const union nf_inet_addr *vaddr, __be16 vport,
184 struct ip_vs_conn_param *p)
186 ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
188 if (p->pe && p->pe->fill_param)
189 p->pe->fill_param(p, skb);
193 * IPVS persistent scheduling function
194 * It creates a connection entry according to its template if exists,
195 * or selects a server and creates a connection entry plus a template.
196 * Locking: we are svc user (svc->refcnt), so we hold all dests too
197 * Protocols supported: TCP, UDP
199 static struct ip_vs_conn *
200 ip_vs_sched_persist(struct ip_vs_service *svc,
204 struct ip_vs_conn *cp = NULL;
205 struct ip_vs_iphdr iph;
206 struct ip_vs_dest *dest;
207 struct ip_vs_conn *ct;
208 __be16 dport = 0; /* destination port to forward */
210 struct ip_vs_conn_param param;
211 union nf_inet_addr snet; /* source network of the client,
214 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
216 /* Mask saddr with the netmask to adjust template granularity */
217 #ifdef CONFIG_IP_VS_IPV6
218 if (svc->af == AF_INET6)
219 ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
222 snet.ip = iph.saddr.ip & svc->netmask;
224 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
226 IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
227 IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
228 IP_VS_DBG_ADDR(svc->af, &snet));
231 * As far as we know, FTP is a very complicated network protocol, and
232 * it uses control connection and data connections. For active FTP,
233 * FTP server initialize data connection to the client, its source port
234 * is often 20. For passive FTP, FTP server tells the clients the port
235 * that it passively listens to, and the client issues the data
236 * connection. In the tunneling or direct routing mode, the load
237 * balancer is on the client-to-server half of connection, the port
238 * number is unknown to the load balancer. So, a conn template like
239 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
240 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
241 * is created for other persistent services.
244 int protocol = iph.protocol;
245 const union nf_inet_addr *vaddr = &iph.daddr;
246 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
/* Packet arrived on the service port: choose template granularity */
249 if (ports[1] == svc->port) {
251 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
253 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
255 if (svc->port != FTPPORT)
258 /* Note: persistent fwmark-based services and
259 * persistent port zero service are handled here.
261 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
262 * port zero template:
263 * <protocol,caddr,0,vaddr,0,daddr,0>
266 protocol = IPPROTO_IP;
/* Build the template lookup key from the masked client network.
 * NOTE(review): 'vport' here comes from elided declarations -- confirm */
270 ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
271 vaddr, vport, ¶m);
274 /* Check if a template already exists */
275 ct = ip_vs_ct_in_get(¶m);
276 if (!ct || !ip_vs_check_template(ct)) {
277 /* No template found or the dest of the connection
278 * template is not available.
/* Schedule a fresh destination; pe_data must be freed on failure
 * since no template took ownership of it */
280 dest = svc->scheduler->schedule(svc, skb);
282 IP_VS_DBG(1, "p-schedule: no dest found.\n");
283 kfree(param.pe_data);
287 if (ports[1] == svc->port && svc->port != FTPPORT)
291 * This adds param.pe_data to the template,
292 * and thus param.pe_data will be destroyed
293 * when the template expires */
294 ct = ip_vs_conn_new(¶m, &dest->addr, dport,
295 IP_VS_CONN_F_TEMPLATE, dest);
297 kfree(param.pe_data);
301 ct->timeout = svc->timeout;
303 /* set destination with the found template */
305 kfree(param.pe_data);
309 if (dport == svc->port && dest->port)
/* One-packet scheduling only applies to UDP */
312 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
313 && iph.protocol == IPPROTO_UDP)?
314 IP_VS_CONN_F_ONE_PACKET : 0;
317 * Create a new connection according to the template
319 ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
320 &iph.daddr, ports[1], ¶m);
321 cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest);
/* Bind the new connection to its controlling template */
330 ip_vs_control_add(cp, ct);
333 ip_vs_conn_stats(cp, svc);
339 * IPVS main scheduling function
340 * It selects a server according to the virtual service, and
341 * creates a connection entry.
342 * Protocols supported: TCP, UDP
345 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb)
347 struct ip_vs_conn *cp = NULL;
348 struct ip_vs_iphdr iph;
349 struct ip_vs_dest *dest;
350 __be16 _ports[2], *pptr;
353 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
/* pptr[0] = source port, pptr[1] = destination (service) port */
354 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
/* Persistent services take the template-based path */
361 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
362 return ip_vs_sched_persist(svc, skb, pptr);
365 * Non-persistent service
/* Port-zero services are only legal when fwmark-based or persistent */
367 if (!svc->fwmark && pptr[1] != svc->port) {
369 pr_err("Schedule: port zero only supported "
370 "in persistent services, "
371 "check your ipvs configuration\n");
375 dest = svc->scheduler->schedule(svc, skb);
377 IP_VS_DBG(1, "Schedule: no dest found.\n");
/* One-packet scheduling only applies to UDP */
381 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
382 && iph.protocol == IPPROTO_UDP)?
383 IP_VS_CONN_F_ONE_PACKET : 0;
386 * Create a connection entry.
389 struct ip_vs_conn_param p;
390 ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
391 pptr[0], &iph.daddr, pptr[1], &p);
392 cp = ip_vs_conn_new(&p, &dest->addr,
393 dest->port ? dest->port : pptr[1],
399 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
400 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
402 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
403 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
404 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
405 cp->flags, atomic_read(&cp->refcnt));
407 ip_vs_conn_stats(cp, svc);
413 * Pass or drop the packet.
414 * Called by ip_vs_in, when the virtual service is available but
415 * no destination is available for a new connection.
417 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
418 struct ip_vs_protocol *pp)
420 __be16 _ports[2], *pptr;
421 struct ip_vs_iphdr iph;
423 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
425 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
/* Drop the svc reference taken by the caller before bailing out */
427 ip_vs_service_put(svc);
/* Determine whether the destination is a unicast address */
431 #ifdef CONFIG_IP_VS_IPV6
432 if (svc->af == AF_INET6)
433 unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
436 unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
438 /* if it is fwmark-based service, the cache_bypass sysctl is up
439 and the destination is a non-local unicast, then create
440 a cache_bypass connection entry */
441 if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
443 struct ip_vs_conn *cp;
444 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
445 iph.protocol == IPPROTO_UDP)?
446 IP_VS_CONN_F_ONE_PACKET : 0;
/* all-zero destination: the bypass entry forwards to the original dst */
447 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
449 ip_vs_service_put(svc);
451 /* create a new connection entry */
452 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
454 struct ip_vs_conn_param p;
455 ip_vs_conn_fill_param(svc->af, iph.protocol,
457 &iph.daddr, pptr[1], &p);
458 cp = ip_vs_conn_new(&p, &daddr, 0,
459 IP_VS_CONN_F_BYPASS | flags,
466 ip_vs_in_stats(cp, skb);
469 cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
471 /* transmit the first SYN packet */
472 ret = cp->packet_xmit(skb, cp, pp);
473 /* do not touch skb anymore */
475 atomic_inc(&cp->in_pkts);
481 * When the virtual ftp service is presented, packets destined
482 * for other services on the VIP may get here (except services
483 * listed in the ipvs table), pass the packets, because it is
484 * not ipvs job to decide to drop the packets.
486 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT)) {
487 ip_vs_service_put(svc);
491 ip_vs_service_put(svc);
494 * Notify the client that the destination is unreachable, and
495 * release the socket buffer.
496 * Since it is in IP layer, the TCP socket is not actually
497 * created, the TCP RST packet cannot be sent, instead that
498 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ
500 #ifdef CONFIG_IP_VS_IPV6
501 if (svc->af == AF_INET6)
502 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
505 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
 * Compute and fold the ones-complement checksum over the skb payload
 * from 'offset' to the end; returns 0 when the checksum verifies.
 */
510 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
512 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
/*
 * Reassemble IPv4 fragments via ip_defrag(); on success the IPv4
 * header checksum is recomputed for the reassembled packet. The
 * success test and return are in elided lines.
 */
515 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
517 int err = ip_defrag(skb, user);
520 ip_send_check(ip_hdr(skb));
525 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 counterpart of ip_vs_gather_frags(); fragment reassembly for
 * IPv6 is not implemented yet (see TODO below). */
526 static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
528 /* TODO IPv6: Find out what to do here for IPv6 */
534 * Packet has been made sufficiently writable in caller
535 * - inout: 1=in->out, 0=out->in
537 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
538 struct ip_vs_conn *cp, int inout)
540 struct iphdr *iph = ip_hdr(skb);
541 unsigned int icmp_offset = iph->ihl*4;
542 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
/* ciph: the IP header of the original packet embedded in the ICMP error */
544 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
/* in->out: rewrite to the virtual address (outer src, inner dst) */
547 iph->saddr = cp->vaddr.ip;
549 ciph->daddr = cp->vaddr.ip;
/* out->in: rewrite to the real-server address (outer dst, inner src) */
552 iph->daddr = cp->daddr.ip;
554 ciph->saddr = cp->daddr.ip;
558 /* the TCP/UDP/SCTP port */
559 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
560 IPPROTO_SCTP == ciph->protocol) {
561 __be16 *ports = (void *)ciph + ciph->ihl*4;
564 ports[1] = cp->vport;
566 ports[0] = cp->dport;
569 /* And finally the ICMP checksum */
571 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
572 skb->ip_summed = CHECKSUM_UNNECESSARY;
575 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
576 "Forwarding altered outgoing ICMP");
578 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
579 "Forwarding altered incoming ICMP");
582 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 twin of ip_vs_nat_icmp(): rewrite an ICMPv6 error and the
 * embedded IPv6 header for NAT; inout: 1=in->out, 0=out->in.
 * Caller has already made the packet writable. */
583 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
584 struct ip_vs_conn *cp, int inout)
586 struct ipv6hdr *iph = ipv6_hdr(skb);
587 unsigned int icmp_offset = sizeof(struct ipv6hdr);
588 struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) +
/* ciph: the IPv6 header of the original packet inside the ICMPv6 error */
590 struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1);
/* in->out: substitute the virtual address */
593 iph->saddr = cp->vaddr.in6;
594 ciph->daddr = cp->vaddr.in6;
/* out->in: substitute the real-server address */
596 iph->daddr = cp->daddr.in6;
597 ciph->saddr = cp->daddr.in6;
600 /* the TCP/UDP/SCTP port */
601 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
602 IPPROTO_SCTP == ciph->nexthdr) {
603 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
606 ports[1] = cp->vport;
608 ports[0] = cp->dport;
611 /* And finally the ICMP checksum */
612 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
613 skb->len - icmp_offset,
/* Leave final checksum to the device/stack via CHECKSUM_PARTIAL */
615 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
616 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
617 skb->ip_summed = CHECKSUM_PARTIAL;
620 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
621 "Forwarding altered outgoing ICMPv6");
623 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
624 "Forwarding altered incoming ICMPv6");
628 /* Handle relevant response ICMP messages - forward to the right
629 * destination host. Used for NAT and local client.
/* Returns a netfilter verdict; defaults to NF_DROP unless the packet
 * is successfully NAT-mangled (accept path is in elided lines). */
631 static int handle_response_icmp(int af, struct sk_buff *skb,
632 union nf_inet_addr *snet,
633 __u8 protocol, struct ip_vs_conn *cp,
634 struct ip_vs_protocol *pp,
635 unsigned int offset, unsigned int ihl)
637 unsigned int verdict = NF_DROP;
/* Only masquerading (NAT) connections should reach this path */
639 if (IP_VS_FWD_METHOD(cp) != 0) {
640 pr_err("shouldn't reach here, because the box is on the "
641 "half connection in the tun/dr module.\n");
644 /* Ensure the checksum is correct */
645 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
646 /* Failed checksum! */
647 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
648 IP_VS_DBG_ADDR(af, snet));
/* Make the embedded transport ports writable too */
652 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
653 IPPROTO_SCTP == protocol)
654 offset += 2 * sizeof(__u16);
655 if (!skb_make_writable(skb, offset))
658 #ifdef CONFIG_IP_VS_IPV6
660 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
663 ip_vs_nat_icmp(skb, pp, cp, 1);
665 /* do the statistics and put it back */
666 ip_vs_out_stats(cp, skb);
/* Mark so IPVS hooks skip this skb on subsequent traversals */
668 skb->ipvs_property = 1;
669 if (!(cp->flags & IP_VS_CONN_F_NFCT))
672 ip_vs_update_conntrack(skb, cp, 0);
676 __ip_vs_conn_put(cp);
682 * Handle ICMP messages in the inside-to-outside direction (outgoing).
683 * Find any that might be relevant, check against existing connections.
684 * Currently handles error types - unreachable, quench, ttl exceeded.
686 static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
689 struct icmphdr _icmph, *ic;
690 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
691 struct ip_vs_iphdr ciph;
692 struct ip_vs_conn *cp;
693 struct ip_vs_protocol *pp;
694 unsigned int offset, ihl;
695 union nf_inet_addr snet;
699 /* reassemble IP fragments */
700 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
701 if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
706 offset = ihl = iph->ihl * 4;
707 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
711 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
712 ic->type, ntohs(icmp_id(ic)),
713 &iph->saddr, &iph->daddr);
716 * Work through seeing if this is for us.
717 * These checks are supposed to be in an order that means easy
718 * things are checked first to speed up processing.... however
719 * this means that some packets will manage to get a long way
720 * down this stack and then be rejected, but that's life.
722 if ((ic->type != ICMP_DEST_UNREACH) &&
723 (ic->type != ICMP_SOURCE_QUENCH) &&
724 (ic->type != ICMP_TIME_EXCEEDED)) {
729 /* Now find the contained IP header */
730 offset += sizeof(_icmph);
731 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
733 return NF_ACCEPT; /* The packet looks wrong, ignore */
735 pp = ip_vs_proto_get(cih->protocol);
739 /* Is the embedded protocol header present? */
740 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
744 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMP for");
746 offset += cih->ihl * 4;
748 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
749 /* The embedded headers contain source and dest in reverse order */
750 cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
754 snet.ip = iph->saddr;
755 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
759 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 twin of ip_vs_out_icmp(): classify outgoing ICMPv6 errors and
 * hand relevant ones to handle_response_icmp(). Sets *related for the
 * caller (assignment in elided lines). */
760 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
763 struct icmp6hdr _icmph, *ic;
764 struct ipv6hdr _ciph, *cih; /* The ip header contained
766 struct ip_vs_iphdr ciph;
767 struct ip_vs_conn *cp;
768 struct ip_vs_protocol *pp;
770 union nf_inet_addr snet;
774 /* reassemble IP fragments */
775 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
776 if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT))
781 offset = sizeof(struct ipv6hdr);
782 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
786 IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n",
787 ic->icmp6_type, ntohs(icmpv6_id(ic)),
788 &iph->saddr, &iph->daddr);
791 * Work through seeing if this is for us.
792 * These checks are supposed to be in an order that means easy
793 * things are checked first to speed up processing.... however
794 * this means that some packets will manage to get a long way
795 * down this stack and then be rejected, but that's life.
797 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
798 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
799 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
804 /* Now find the contained IP header */
805 offset += sizeof(_icmph);
806 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
808 return NF_ACCEPT; /* The packet looks wrong, ignore */
810 pp = ip_vs_proto_get(cih->nexthdr);
814 /* Is the embedded protocol header present? */
815 /* TODO: we don't support fragmentation at the moment anyways */
816 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
819 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for");
821 offset += sizeof(struct ipv6hdr);
823 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
824 /* The embedded headers contain source and dest in reverse order */
825 cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
829 ipv6_addr_copy(&snet.in6, &iph->saddr);
830 return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr,
831 pp, offset, sizeof(struct ipv6hdr));
836 * Check if sctp chunk is ABORT chunk
/* Peeks at the first chunk header following the SCTP common header at
 * network-header offset nh_len; returns nonzero for SCTP_CID_ABORT. */
838 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
840 sctp_chunkhdr_t *sch, schunk;
841 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
842 sizeof(schunk), &schunk);
845 if (sch->type == SCTP_CID_ABORT)
/* Test whether the TCP header at network-header offset nh_len has the
 * RST flag set (the actual flag check and return are in elided lines --
 * NOTE(review): confirm it returns th->rst in the full source). */
850 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
852 struct tcphdr _tcph, *th;
854 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
860 /* Handle response packets: rewrite addresses and send away...
861 * Used for NAT and local client.
/* Returns a netfilter verdict (accept/drop paths in elided lines). */
864 handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
865 struct ip_vs_conn *cp, int ihl)
867 IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet");
869 if (!skb_make_writable(skb, ihl))
872 /* mangle the packet */
873 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
/* Rewrite the source address to the virtual address (SNAT) */
876 #ifdef CONFIG_IP_VS_IPV6
878 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
882 ip_hdr(skb)->saddr = cp->vaddr.ip;
883 ip_send_check(ip_hdr(skb));
887 * nf_iterate does not expect change in the skb->dst->dev.
888 * It looks like it is not fatal to enable this code for hooks
889 * where our handlers are at the end of the chain list and
890 * when all next handlers use skb->dst->dev and not outdev.
891 * It will definitely route properly the inout NAT traffic
892 * when multiple paths are used.
895 /* For policy routing, packets originating from this
896 * machine itself may be routed differently to packets
897 * passing through. We want this packet to be routed as
898 * if it came from this machine itself. So re-compute
899 * the routing information.
901 if (sysctl_ip_vs_snat_reroute) {
902 #ifdef CONFIG_IP_VS_IPV6
903 if (af == AF_INET6) {
904 if (ip6_route_me_harder(skb) != 0)
908 if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
912 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
914 ip_vs_out_stats(cp, skb);
915 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
/* Mark so IPVS hooks skip this skb on subsequent traversals */
916 skb->ipvs_property = 1;
917 if (!(cp->flags & IP_VS_CONN_F_NFCT))
920 ip_vs_update_conntrack(skb, cp, 0);
934 * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT.
935 * Check if outgoing packet belongs to the established ip_vs_conn.
/* Netfilter hook: standard nf_hookfn signature; returns a verdict. */
938 ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
939 const struct net_device *in, const struct net_device *out,
940 int (*okfn)(struct sk_buff *))
942 struct ip_vs_iphdr iph;
943 struct ip_vs_protocol *pp;
944 struct ip_vs_conn *cp;
/* Address family inferred from the L2 ethertype */
949 af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
/* Already processed by IPVS on an earlier hook traversal */
951 if (skb->ipvs_property)
954 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
/* ICMP(v6) errors about NATed connections are handled specially;
 * the header must be re-read afterwards as the skb may change */
955 #ifdef CONFIG_IP_VS_IPV6
956 if (af == AF_INET6) {
957 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
958 int related, verdict = ip_vs_out_icmp_v6(skb, &related);
961 if (sysctl_ip_vs_snat_reroute &&
962 NF_ACCEPT == verdict &&
963 ip6_route_me_harder(skb))
967 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
971 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
972 int related, verdict = ip_vs_out_icmp(skb, &related);
975 if (sysctl_ip_vs_snat_reroute &&
976 NF_ACCEPT == verdict &&
977 ip_route_me_harder(skb, RTN_LOCAL))
981 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
984 pp = ip_vs_proto_get(iph.protocol);
988 /* reassemble IP fragments */
989 #ifdef CONFIG_IP_VS_IPV6
990 if (af == AF_INET6) {
991 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
992 int related, verdict = ip_vs_out_icmp_v6(skb, &related);
997 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1001 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) &&
1002 !pp->dont_defrag)) {
1003 if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
1006 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1010 * Check if the packet belongs to an existing entry
1012 cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
1014 if (unlikely(!cp)) {
/* No connection: possibly a reply from a removed real server --
 * tell the sender with ICMP port-unreachable (unless RST/ABORT) */
1015 if (sysctl_ip_vs_nat_icmp_send &&
1016 (pp->protocol == IPPROTO_TCP ||
1017 pp->protocol == IPPROTO_UDP ||
1018 pp->protocol == IPPROTO_SCTP)) {
1019 __be16 _ports[2], *pptr;
1021 pptr = skb_header_pointer(skb, iph.len,
1022 sizeof(_ports), _ports);
1024 return NF_ACCEPT; /* Not for me */
1025 if (ip_vs_lookup_real_service(af, iph.protocol,
1029 * Notify the real server: there is no
1030 * existing entry if it is not RST
1031 * packet or not TCP packet.
1033 if ((iph.protocol != IPPROTO_TCP &&
1034 iph.protocol != IPPROTO_SCTP)
1035 || ((iph.protocol == IPPROTO_TCP
1036 && !is_tcp_reset(skb, iph.len))
1037 || (iph.protocol == IPPROTO_SCTP
1038 && !is_sctp_abort(skb,
1040 #ifdef CONFIG_IP_VS_IPV6
1043 ICMPV6_DEST_UNREACH,
1044 ICMPV6_PORT_UNREACH,
1050 ICMP_PORT_UNREACH, 0);
1055 IP_VS_DBG_PKT(12, pp, skb, 0,
1056 "packet continues traversal as normal")
1060 return handle_response(af, skb, pp, cp, iph.len);
1065 * Handle ICMP messages in the outside-to-inside direction (incoming).
1066 * Find any that might be relevant, check against existing connections,
1067 * forward to the right destination host if relevant.
1068 * Currently handles error types - unreachable, quench, ttl exceeded.
1071 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1074 struct icmphdr _icmph, *ic;
1075 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1076 struct ip_vs_iphdr ciph;
1077 struct ip_vs_conn *cp;
1078 struct ip_vs_protocol *pp;
1079 unsigned int offset, ihl, verdict;
1080 union nf_inet_addr snet;
1084 /* reassemble IP fragments */
1085 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
1086 if (ip_vs_gather_frags(skb, hooknum == NF_INET_LOCAL_IN ?
1087 IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD))
1092 offset = ihl = iph->ihl * 4;
1093 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1097 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1098 ic->type, ntohs(icmp_id(ic)),
1099 &iph->saddr, &iph->daddr);
1102 * Work through seeing if this is for us.
1103 * These checks are supposed to be in an order that means easy
1104 * things are checked first to speed up processing.... however
1105 * this means that some packets will manage to get a long way
1106 * down this stack and then be rejected, but that's life.
1108 if ((ic->type != ICMP_DEST_UNREACH) &&
1109 (ic->type != ICMP_SOURCE_QUENCH) &&
1110 (ic->type != ICMP_TIME_EXCEEDED)) {
1115 /* Now find the contained IP header */
1116 offset += sizeof(_icmph);
1117 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1119 return NF_ACCEPT; /* The packet looks wrong, ignore */
1121 pp = ip_vs_proto_get(cih->protocol);
1125 /* Is the embedded protocol header present? */
1126 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1130 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMP for");
1132 offset += cih->ihl * 4;
1134 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
1135 /* The embedded headers contain source and dest in reverse order */
1136 cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
1138 /* The packet could also belong to a local client */
1139 cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
1141 snet.ip = iph->saddr;
1142 return handle_response_icmp(AF_INET, skb, &snet,
1143 cih->protocol, cp, pp,
1151 /* Ensure the checksum is correct */
1152 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1153 /* Failed checksum! */
1154 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1159 /* do the statistics and put it back */
1160 ip_vs_in_stats(cp, skb);
/* NOTE(review): unlike the IPv6 path below (and handle_response_icmp),
 * SCTP is not included in this port-offset adjustment -- possibly an
 * oversight; confirm against upstream. */
1161 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
1162 offset += 2 * sizeof(__u16);
1163 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
1164 /* do not touch skb anymore */
1167 __ip_vs_conn_put(cp);
1172 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 twin of ip_vs_in_icmp(): classify incoming ICMPv6 errors,
 * match them to an existing connection (or a local client's), and
 * forward via ip_vs_icmp_xmit_v6(). */
1174 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1176 struct ipv6hdr *iph;
1177 struct icmp6hdr _icmph, *ic;
1178 struct ipv6hdr _ciph, *cih; /* The ip header contained
1180 struct ip_vs_iphdr ciph;
1181 struct ip_vs_conn *cp;
1182 struct ip_vs_protocol *pp;
1183 unsigned int offset, verdict;
1184 union nf_inet_addr snet;
1188 /* reassemble IP fragments */
1189 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1190 if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ?
1196 iph = ipv6_hdr(skb);
1197 offset = sizeof(struct ipv6hdr);
1198 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1202 IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n",
1203 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1204 &iph->saddr, &iph->daddr);
1207 * Work through seeing if this is for us.
1208 * These checks are supposed to be in an order that means easy
1209 * things are checked first to speed up processing.... however
1210 * this means that some packets will manage to get a long way
1211 * down this stack and then be rejected, but that's life.
1213 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
1214 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
1215 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
1220 /* Now find the contained IP header */
1221 offset += sizeof(_icmph);
1222 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1224 return NF_ACCEPT; /* The packet looks wrong, ignore */
1226 pp = ip_vs_proto_get(cih->nexthdr);
1230 /* Is the embedded protocol header present? */
1231 /* TODO: we don't support fragmentation at the moment anyways */
1232 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
1235 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for");
1237 offset += sizeof(struct ipv6hdr);
1239 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1240 /* The embedded headers contain source and dest in reverse order */
1241 cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
1243 /* The packet could also belong to a local client */
1244 cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
1246 ipv6_addr_copy(&snet.in6, &iph->saddr);
1247 return handle_response_icmp(AF_INET6, skb, &snet,
1250 sizeof(struct ipv6hdr));
1257 /* do the statistics and put it back */
1258 ip_vs_in_stats(cp, skb);
/* Skip past the embedded transport ports for TCP/UDP/SCTP */
1259 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1260 IPPROTO_SCTP == cih->nexthdr)
1261 offset += 2 * sizeof(__u16);
1262 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1263 /* do not touch skb anymore */
1265 __ip_vs_conn_put(cp);
1273 * Check if it's for virtual services, look it up,
1274 * and send it on its way...
/*
 * ip_vs_in(): netfilter hook entry for packets coming from clients.
 * Looks up (or schedules) an IPVS connection for the packet, updates
 * per-connection state/statistics, transmits via cp->packet_xmit, and
 * finally decides whether to sync the connection to backup servers.
 *
 * NOTE(review): this excerpt has lines elided (return type, braces,
 * returns and some statements are missing); the comments below describe
 * only what the visible code shows.
 */
1277 ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1278 const struct net_device *in, const struct net_device *out,
1279 int (*okfn)(struct sk_buff *))
1281 struct ip_vs_iphdr iph;
1282 struct ip_vs_protocol *pp;
1283 struct ip_vs_conn *cp;
1284 int ret, restart, af, pkts;
/* Derive the address family from the skb's L2 protocol field. */
1286 af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
1288 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1291 * Big tappo: only PACKET_HOST, including loopback for local client
1292 * Don't handle local packets on IPv6 for now
/* Anything not addressed to this host (broadcast, otherhost, ...) is ignored. */
1294 if (unlikely(skb->pkt_type != PACKET_HOST)) {
1295 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n",
1298 IP_VS_DBG_ADDR(af, &iph.daddr));
/*
 * ICMP/ICMPv6 is handled separately: it may be an error related to an
 * existing IPVS connection.  The helpers return a verdict via 'verdict'
 * and set 'related' accordingly (handling elided in this excerpt).
 */
1302 #ifdef CONFIG_IP_VS_IPV6
1303 if (af == AF_INET6) {
1304 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1305 int related, verdict = ip_vs_in_icmp_v6(skb, &related, hooknum);
/* Re-read the IP header: the ICMP handler may have touched the skb. */
1309 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1313 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1314 int related, verdict = ip_vs_in_icmp(skb, &related, hooknum);
1318 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1321 /* Protocol supported? */
1322 pp = ip_vs_proto_get(iph.protocol);
1327 * Check if the packet belongs to an existing connection entry
1329 cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
/* No inbound entry: either an outbound response or a new connection. */
1331 if (unlikely(!cp)) {
1334 /* For local client packets, it could be a response */
1335 cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
1337 return handle_response(af, skb, pp, cp, iph.len);
/* Try to schedule a new connection; 'v' (elided) carries the verdict. */
1339 if (!pp->conn_schedule(af, skb, pp, &v, &cp))
1343 if (unlikely(!cp)) {
1344 /* sorry, all this trouble for a no-hit :) */
1345 IP_VS_DBG_PKT(12, pp, skb, 0,
1346 "packet continues traversal as normal");
1350 IP_VS_DBG_PKT(11, pp, skb, 0, "Incoming packet");
1352 /* Check the server status */
1353 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1354 /* the destination server is not available */
1356 if (sysctl_ip_vs_expire_nodest_conn) {
1357 /* try to expire the connection immediately */
1358 ip_vs_conn_expire_now(cp);
1360 /* don't restart its timer, and silently
1362 __ip_vs_conn_put(cp);
/* Account the packet, advance protocol state, then transmit. */
1366 ip_vs_in_stats(cp, skb);
1367 restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
1368 if (cp->packet_xmit)
1369 ret = cp->packet_xmit(skb, cp, pp);
1370 /* do not touch skb anymore */
1372 IP_VS_DBG_RL("warning: packet_xmit is null");
1376 /* Increase its packet counter and check if it is needed
1377 * to be synchronized
1379 * Sync connection if it is about to close to
1380 * encourage the standby servers to update the connections timeout
/*
 * Sync policy: for IPv4 masters, sync established connections every
 * sync_threshold[1] packets (offset sync_threshold[0]), and immediately
 * on transitions into closing states.  SCTP uses its own state set;
 * the TCP/other branch below must stay last (see comment at 1397).
 */
1382 pkts = atomic_add_return(1, &cp->in_pkts);
1383 if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1384 cp->protocol == IPPROTO_SCTP) {
1385 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1386 (pkts % sysctl_ip_vs_sync_threshold[1]
1387 == sysctl_ip_vs_sync_threshold[0])) ||
1388 (cp->old_state != cp->state &&
1389 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1390 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1391 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1392 ip_vs_sync_conn(cp);
1397 /* Keep this block last: TCP and others with pp->num_states <= 1 */
1398 else if (af == AF_INET &&
1399 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1400 (((cp->protocol != IPPROTO_TCP ||
1401 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
1402 (pkts % sysctl_ip_vs_sync_threshold[1]
1403 == sysctl_ip_vs_sync_threshold[0])) ||
1404 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1405 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1406 (cp->state == IP_VS_TCP_S_CLOSE) ||
1407 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1408 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1409 ip_vs_sync_conn(cp);
/* Remember the state so the next packet can detect transitions. */
1411 cp->old_state = cp->state;
1419 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1420 * related packets destined for 0.0.0.0/0.
1421 * When fwmark-based virtual service is used, such as transparent
1422 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1423 but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1424 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1425 * and send them to ip_vs_in_icmp.
/*
 * ip_vs_forward_icmp(): NF_INET_FORWARD hook that diverts IPv4 ICMP
 * packets into ip_vs_in_icmp() (see the block comment above); every
 * non-ICMP packet is passed through untouched.
 *
 * NOTE(review): lines elided in this excerpt — the non-ICMP return
 * (presumably NF_ACCEPT) and the declaration of 'r' are not visible.
 */
1428 ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1429 const struct net_device *in, const struct net_device *out,
1430 int (*okfn)(struct sk_buff *))
1434 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
/* 'r' receives the "related to an IPVS connection" flag; unused here. */
1437 return ip_vs_in_icmp(skb, &r, hooknum);
1440 #ifdef CONFIG_IP_VS_IPV6
/*
 * ip_vs_forward_icmp_v6(): IPv6 twin of ip_vs_forward_icmp() — diverts
 * ICMPv6 packets on the FORWARD chain into ip_vs_in_icmp_v6().
 *
 * NOTE(review): lines elided in this excerpt — the non-ICMPv6 return
 * and the declaration of 'r' are not visible.
 */
1442 ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1443 const struct net_device *in, const struct net_device *out,
1444 int (*okfn)(struct sk_buff *))
1448 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
/* 'r' receives the "related" flag; the caller here discards it. */
1451 return ip_vs_in_icmp_v6(skb, &r, hooknum);
/*
 * Netfilter hook registrations for IPVS: three hooks per address family
 * (LOCAL_IN for inbound scheduling, FORWARD for outbound NAT rewriting,
 * FORWARD again for catching related ICMP), IPv6 entries guarded by
 * CONFIG_IP_VS_IPV6.  Registered in ip_vs_init() via nf_register_hooks().
 *
 * NOTE(review): most '.hook', '.pf' and '.priority' initializers and the
 * per-entry braces were elided from this excerpt; only '.owner' and
 * '.hooknum' survive for several entries.  Confirm against the full file.
 */
1456 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1457 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1458 * or VS/NAT(change destination), so that filtering rules can be
1459 * applied to IPVS. */
1462 .owner = THIS_MODULE,
1464 .hooknum = NF_INET_LOCAL_IN,
1467 /* After packet filtering, change source only for VS/NAT */
1470 .owner = THIS_MODULE,
1472 .hooknum = NF_INET_FORWARD,
1475 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1476 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1478 .hook = ip_vs_forward_icmp,
1479 .owner = THIS_MODULE,
1481 .hooknum = NF_INET_FORWARD,
1484 #ifdef CONFIG_IP_VS_IPV6
1485 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1486 * or VS/NAT(change destination), so that filtering rules can be
1487 * applied to IPVS. */
1490 .owner = THIS_MODULE,
1492 .hooknum = NF_INET_LOCAL_IN,
1495 /* After packet filtering, change source only for VS/NAT */
1498 .owner = THIS_MODULE,
1500 .hooknum = NF_INET_FORWARD,
1503 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1504 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1506 .hook = ip_vs_forward_icmp_v6,
1507 .owner = THIS_MODULE,
1509 .hooknum = NF_INET_FORWARD,
1517 * Initialize IP Virtual Server
/*
 * ip_vs_init(): module entry point.  Brings up the IPVS subsystems in
 * dependency order (estimator -> control -> protocol -> app -> conn),
 * then registers the netfilter hooks.  On failure each stage unwinds the
 * earlier ones via goto labels, in reverse order of initialization.
 *
 * NOTE(review): 'ret' checks, goto labels and the final returns were
 * elided from this excerpt; the cleanup calls at the bottom are the
 * error-unwind path, not normal flow.
 */
1519 static int __init ip_vs_init(void)
1523 ip_vs_estimator_init();
1525 ret = ip_vs_control_init();
1527 pr_err("can't setup control.\n");
1528 goto cleanup_estimator;
/* ip_vs_protocol_init() has no failure path — no ret check needed. */
1531 ip_vs_protocol_init();
1533 ret = ip_vs_app_init();
1535 pr_err("can't setup application helper.\n");
1536 goto cleanup_protocol;
1539 ret = ip_vs_conn_init();
1541 pr_err("can't setup connection table.\n");
/* Hooks go in last, once every subsystem they dispatch into is ready. */
1545 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
1547 pr_err("can't register hooks.\n");
1551 pr_info("ipvs loaded.\n");
/* Error-unwind ladder: mirror of the init sequence, reversed. */
1555 ip_vs_conn_cleanup();
1557 ip_vs_app_cleanup();
1559 ip_vs_protocol_cleanup();
1560 ip_vs_control_cleanup();
1562 ip_vs_estimator_cleanup();
/*
 * ip_vs_cleanup(): module exit point.  Unregisters the netfilter hooks
 * first so no new packets enter IPVS, then tears down the subsystems in
 * the reverse of ip_vs_init()'s bring-up order.
 */
1566 static void __exit ip_vs_cleanup(void)
1568 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
1569 ip_vs_conn_cleanup();
1570 ip_vs_app_cleanup();
1571 ip_vs_protocol_cleanup();
1572 ip_vs_control_cleanup();
1573 ip_vs_estimator_cleanup();
1574 pr_info("ipvs unloaded.\n");
/* Module registration: entry/exit points and GPL license tag. */
1577 module_init(ip_vs_init);
1578 module_exit(ip_vs_cleanup);
1579 MODULE_LICENSE("GPL");