3 * Linux ethernet bridge
6 * Lennert Buytenhek <buytenh@gnu.org>
7 * Bart De Schuymer <bdschuym@pandora.be>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
14 * Lennert dedicates this file to Kerstin Wurdinger.
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
21 #include <linux/netdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <linux/if_pppox.h>
27 #include <linux/ppp_defs.h>
28 #include <linux/netfilter_bridge.h>
29 #include <linux/netfilter_ipv4.h>
30 #include <linux/netfilter_ipv6.h>
31 #include <linux/netfilter_arp.h>
32 #include <linux/in_route.h>
33 #include <linux/inetdevice.h>
37 #include <net/route.h>
39 #include <asm/uaccess.h>
40 #include "br_private.h"
42 #include <linux/sysctl.h>
/* The pre-DNAT IPv4 destination address is stashed in the nf_bridge
 * control block at bridge PRE_ROUTING time so that DNAT can be
 * detected once the IP PRE_ROUTING hook has run. */
#define skb_origaddr(skb) (((struct bridge_skb_cb *) \
(skb->nf_bridge->data))->daddr.ipv4)
/* Save the current IP header destination as the "original" address. */
#define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
/* True if the destination changed since store_orig_dstaddr() (DNAT). */
#define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
/* Runtime tunables. When sysctl support is compiled in, these are
 * adjustable via /proc/sys/net/bridge/...; otherwise they collapse to
 * compile-time constants. NOTE: without the CONFIG_SYSCTL guards the
 * variable definitions and the fallback #defines would clash (the
 * macros would rewrite the variable names), so the conditional is
 * required here.
 */
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif
65 static inline __be16 vlan_proto(const struct sk_buff *skb)
67 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
/* Frame is a VLAN-tagged IPv4 packet and VLAN filtering is enabled. */
#define IS_VLAN_IP(skb) \
	(skb->protocol == htons(ETH_P_8021Q) && \
	 vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

/* Frame is a VLAN-tagged IPv6 packet and VLAN filtering is enabled. */
#define IS_VLAN_IPV6(skb) \
	(skb->protocol == htons(ETH_P_8021Q) && \
	 vlan_proto(skb) == htons(ETH_P_IPV6) &&\
	 brnf_filter_vlan_tagged)

/* Frame is a VLAN-tagged ARP packet and VLAN filtering is enabled. */
#define IS_VLAN_ARP(skb) \
	(skb->protocol == htons(ETH_P_8021Q) && \
	 vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)
85 static inline __be16 pppoe_proto(const struct sk_buff *skb)
87 return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
88 sizeof(struct pppoe_hdr)));
/* Frame is PPPoE-encapsulated IPv4 and PPPoE filtering is enabled. */
#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

/* Frame is PPPoE-encapsulated IPv6 and PPPoE filtering is enabled. */
#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)
101 static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
105 static struct dst_ops fake_dst_ops = {
107 .protocol = cpu_to_be16(ETH_P_IP),
108 .update_pmtu = fake_update_pmtu,
112 * Initialize bogus route table used to keep netfilter happy.
113 * Currently, we fill in the PMTU entry because netfilter
114 * refragmentation needs it, and the rt_flags entry because
115 * ipt_REJECT needs it. Future netfilter modules might
116 * require us to fill additional fields.
118 void br_netfilter_rtable_init(struct net_bridge *br)
120 struct rtable *rt = &br->fake_rtable;
122 atomic_set(&rt->dst.__refcnt, 1);
123 rt->dst.dev = br->dev;
124 rt->dst.path = &rt->dst;
125 rt->dst.metrics[RTAX_MTU - 1] = 1500;
126 rt->dst.flags = DST_NOXFRM;
127 rt->dst.ops = &fake_dst_ops;
130 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
132 if (!br_port_exists(dev))
134 return &br_port_get_rcu(dev)->br->fake_rtable;
137 static inline struct net_device *bridge_parent(const struct net_device *dev)
139 if (!br_port_exists(dev))
142 return br_port_get_rcu(dev)->br->dev;
145 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
147 skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
148 if (likely(skb->nf_bridge))
149 atomic_set(&(skb->nf_bridge->use), 1);
151 return skb->nf_bridge;
154 static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
156 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
158 if (atomic_read(&nf_bridge->use) > 1) {
159 struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
162 memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
163 atomic_set(&tmp->use, 1);
165 nf_bridge_put(nf_bridge);
171 static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
173 unsigned int len = nf_bridge_encap_header_len(skb);
176 skb->network_header -= len;
179 static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
181 unsigned int len = nf_bridge_encap_header_len(skb);
184 skb->network_header += len;
187 static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
189 unsigned int len = nf_bridge_encap_header_len(skb);
191 skb_pull_rcsum(skb, len);
192 skb->network_header += len;
195 static inline void nf_bridge_save_header(struct sk_buff *skb)
197 int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
199 skb_copy_from_linear_data_offset(skb, -header_size,
200 skb->nf_bridge->data, header_size);
203 static inline void nf_bridge_update_protocol(struct sk_buff *skb)
205 if (skb->nf_bridge->mask & BRNF_8021Q)
206 skb->protocol = htons(ETH_P_8021Q);
207 else if (skb->nf_bridge->mask & BRNF_PPPoE)
208 skb->protocol = htons(ETH_P_PPP_SES);
/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format.
 */
216 int br_parse_ip_options(struct sk_buff *skb)
218 struct ip_options *opt;
220 struct net_device *dev = skb->dev;
224 opt = &(IPCB(skb)->opt);
226 /* Basic sanity checks */
227 if (iph->ihl < 5 || iph->version != 4)
230 if (!pskb_may_pull(skb, iph->ihl*4))
234 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
237 len = ntohs(iph->tot_len);
238 if (skb->len < len) {
239 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
241 } else if (len < (iph->ihl*4))
244 if (pskb_trim_rcsum(skb, len)) {
245 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
249 /* Zero out the CB buffer if no options present */
251 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
255 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
256 if (ip_options_compile(dev_net(dev), opt, skb))
259 /* Check correct handling of SRR option */
260 if (unlikely(opt->srr)) {
261 struct in_device *in_dev = __in_dev_get_rcu(dev);
262 if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
265 if (ip_options_rcv_srr(skb))
272 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
277 /* Fill in the header for fragmented IP packets handled by
278 * the IPv4 connection tracking code.
280 int nf_bridge_copy_header(struct sk_buff *skb)
283 unsigned int header_size;
285 nf_bridge_update_protocol(skb);
286 header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
287 err = skb_cow_head(skb, header_size);
291 skb_copy_to_linear_data_offset(skb, -header_size,
292 skb->nf_bridge->data, header_size);
293 __skb_push(skb, nf_bridge_encap_header_len(skb));
297 /* PF_BRIDGE/PRE_ROUTING *********************************************/
298 /* Undo the changes made for ip6tables PREROUTING and continue the
299 * bridge PRE_ROUTING hook. */
300 static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
302 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
305 if (nf_bridge->mask & BRNF_PKT_TYPE) {
306 skb->pkt_type = PACKET_OTHERHOST;
307 nf_bridge->mask ^= BRNF_PKT_TYPE;
309 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
311 rt = bridge_parent_rtable(nf_bridge->physindev);
316 skb_dst_set_noref(skb, &rt->dst);
318 skb->dev = nf_bridge->physindev;
319 nf_bridge_update_protocol(skb);
320 nf_bridge_push_encap_header(skb);
321 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
322 br_handle_frame_finish, 1);
327 /* Obtain the correct destination MAC address, while preserving the original
328 * source MAC address. If we already know this address, we just copy it. If we
329 * don't, we use the neighbour framework to find out. In both cases, we make
330 * sure that br_handle_frame_finish() is called afterwards.
332 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
334 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
335 struct dst_entry *dst;
337 skb->dev = bridge_parent(skb->dev);
342 neigh_hh_bridge(dst->hh, skb);
343 skb->dev = nf_bridge->physindev;
344 return br_handle_frame_finish(skb);
345 } else if (dst->neighbour) {
346 /* the neighbour function below overwrites the complete
347 * MAC header, so we save the Ethernet source address and
348 * protocol number. */
349 skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
350 /* tell br_dev_xmit to continue with forwarding */
351 nf_bridge->mask |= BRNF_BRIDGED_DNAT;
352 return dst->neighbour->output(skb);
359 /* This requires some explaining. If DNAT has taken place,
360 * we will need to fix up the destination Ethernet address.
362 * There are two cases to consider:
363 * 1. The packet was DNAT'ed to a device in the same bridge
364 * port group as it was received on. We can still bridge
366 * 2. The packet was DNAT'ed to a different device, either
367 * a non-bridged device or another bridge port group.
368 * The packet will need to be routed.
370 * The correct way of distinguishing between these two cases is to
371 * call ip_route_input() and to look at skb->dst->dev, which is
372 * changed to the destination device if ip_route_input() succeeds.
374 * Let's first consider the case that ip_route_input() succeeds:
376 * If the output device equals the logical bridge device the packet
377 * came in on, we can consider this bridging. The corresponding MAC
378 * address will be obtained in br_nf_pre_routing_finish_bridge.
379 * Otherwise, the packet is considered to be routed and we just
380 * change the destination MAC address so that the packet will
381 * later be passed up to the IP stack to be routed. For a redirected
382 * packet, ip_route_input() will give back the localhost as output device,
383 * which differs from the bridge device.
385 * Let's now consider the case that ip_route_input() fails:
387 * This can be because the destination address is martian, in which case
388 * the packet will be dropped.
389 * If IP forwarding is disabled, ip_route_input() will fail, while
390 * ip_route_output_key() can return success. The source
391 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
392 * thinks we're handling a locally generated packet and won't care
393 * if IP forwarding is enabled. If the output device equals the logical bridge
394 * device, we proceed as if ip_route_input() succeeded. If it differs from the
395 * logical bridge port or if ip_route_output_key() fails we drop the packet.
/* Finish IPv4 PRE_ROUTING: decide whether a (possibly DNAT'ed) packet
 * is still bridged or must be routed (see the long comment above),
 * then re-enter the bridge PRE_ROUTING hook below our priority so
 * bridge processing resumes.
 * NOTE(review): this copy of the function has lost several lines —
 * the local declarations of 'rt' and 'err', most of the 'struct flowi
 * fl' initializer, the goto statements and the free_skb/bridged_dnat
 * labels, several closing braces and the final return. Compare with
 * upstream br_netfilter.c before building.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	/* Restore the packet type saved by setup_pre_routing(). */
	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	/* PRE_ROUTING handling for this skb is complete. */
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		/* Route the DNAT'ed packet to find out whether it stays
		 * on this bridge or must be handed to the IP stack. */
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			/* NOTE(review): 'fl' initializer truncated here. */
						.tos = RT_TOS(iph->tos) },
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))

			if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (((struct dst_entry *)rt)->dev == dev) {
					skb_dst_set(skb, (struct dst_entry *)rt);
				/* Output device differs: not our bridge. */
				dst_release((struct dst_entry *)rt);
		/* Routed back out the same bridge: keep bridging and
		 * resolve the new destination MAC. */
		if (skb_dst(skb)->dev == dev) {
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       br_nf_pre_routing_finish_bridge,
			/* Otherwise hand up to the IP stack as a local packet. */
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		/* No DNAT: reattach the bridge's fake dst. */
		rt = bridge_parent_rtable(nf_bridge->physindev);
		skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);
479 /* Some common code for IPv4/IPv6 */
480 static struct net_device *setup_pre_routing(struct sk_buff *skb)
482 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
484 if (skb->pkt_type == PACKET_OTHERHOST) {
485 skb->pkt_type = PACKET_HOST;
486 nf_bridge->mask |= BRNF_PKT_TYPE;
489 nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
490 nf_bridge->physindev = skb->dev;
491 skb->dev = bridge_parent(skb->dev);
492 if (skb->protocol == htons(ETH_P_8021Q))
493 nf_bridge->mask |= BRNF_8021Q;
494 else if (skb->protocol == htons(ETH_P_PPP_SES))
495 nf_bridge->mask |= BRNF_PPPoE;
500 /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
/* Walk the IPv6 hop-by-hop options header, validating TLV lengths and
 * handling the jumbogram option (trims the skb to the jumbo payload
 * length). Returns 0 if well-formed, nonzero to drop.
 * NOTE(review): this copy has lost the 'off'/'pkt_len' declarations,
 * the while/switch skeleton over the TLV options, the goto/bad label
 * lines and the returns — compare with upstream br_netfilter.c.
 */
static int check_hbh_len(struct sk_buff *skb)
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	const unsigned char *nh = skb_network_header(skb);
	/* Header length field counts 8-octet units, excluding the first. */
	int len = (raw[1] + 1) << 3;

	/* Whole options header must be in the linear area. */
	if ((raw + len) - skb->data > skb_headlen(skb))
		/* TLV length byte plus the two option header octets. */
		int optlen = nh[off + 1] + 2;
			/* Jumbo option: must be 4 bytes, aligned at offset 2. */
			if (nh[off + 1] != 4 || (off & 3) != 2)
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			/* Jumbo length must exceed IPV6_MAXPLEN and the base
			 * header's payload_len must be zero. */
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
			/* Trim may have reallocated the header. */
			nh = skb_network_header(skb);
555 /* Replicate the checks that IPv6 does on packet reception and pass the packet
556 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
/* Replicate the IPv6 reception checks (length, version, payload_len,
 * hop-by-hop header), then attach a fresh nf_bridge_info and divert
 * the packet into the IPv6 PRE_ROUTING netfilter hook.
 * NOTE(review): this copy has lost the 'struct sk_buff *skb' parameter
 * line, the 'hdr'/'pkt_len' declarations, the NF_DROP returns and the
 * final NF_STOLEN return — compare with upstream br_netfilter.c.
 */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
	/* Too short to hold an IPv6 header at all. */
	if (skb->len < sizeof(struct ipv6hdr))

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

	if (hdr->version != 6)

	pkt_len = ntohs(hdr->payload_len);

	/* Trim trailing padding; jumbograms (payload_len == 0 with a
	 * hop-by-hop header) are validated by check_hbh_len() below. */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))

	/* Replace any stale nf_bridge state with a fresh one. */
	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
	if (!setup_pre_routing(skb))

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);
604 /* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
605 * Replicate the checks that IPv4 does on packet reception.
606 * Set skb->dev to the bridge device (i.e. parent of the
607 * receiving device) to make netfilter happy, the REDIRECT
608 * target in particular. Save the original destination IP
609 * address to be able to detect DNAT afterwards. */
/* Bridge PRE_ROUTING hook: direct IPv6 traffic to
 * br_nf_pre_routing_ipv6(), replicate IPv4 reception checks, save the
 * original destination address for later DNAT detection, and divert
 * IPv4 packets into the NF_INET_PRE_ROUTING hook with skb->dev set to
 * the bridge device (keeps REDIRECT and friends happy).
 * NOTE(review): this copy has lost the NF_DROP/NF_ACCEPT returns, the
 * 'if (p == NULL)' guard with 'br = p->br;', closing braces and the
 * final NF_STOLEN return — compare with upstream br_netfilter.c.
 */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	/* Make sure any VLAN/PPPoE encapsulation header is linear. */
	if (unlikely(!pskb_may_pull(skb, len)))

	p = br_port_get_rcu(in);

	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
	    IS_PPPOE_IPV6(skb)) {
		/* Honour both the global and the per-bridge toggle. */
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);

	if (!brnf_call_iptables && !br->nf_call_iptables)

	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		/* Drop invalid packet */

	/* Replace any stale nf_bridge state with a fresh one. */
	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
	if (!setup_pre_routing(skb))
	/* Remember pre-NAT daddr so dnat_took_place() works later. */
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);
667 /* PF_BRIDGE/LOCAL_IN ************************************************/
668 /* The packet is locally destined, which requires a real
669 * dst_entry, so detach the fake one. On the way up, the
670 * packet would pass through PRE_ROUTING again (which already
671 * took place when the packet entered the bridge), but we
672 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
673 * prevent this from happening. */
674 static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
675 const struct net_device *in,
676 const struct net_device *out,
677 int (*okfn)(struct sk_buff *))
679 struct rtable *rt = skb_rtable(skb);
681 if (rt && rt == bridge_parent_rtable(in))
687 /* PF_BRIDGE/FORWARD *************************************************/
688 static int br_nf_forward_finish(struct sk_buff *skb)
690 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
691 struct net_device *in;
693 if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
694 in = nf_bridge->physindev;
695 if (nf_bridge->mask & BRNF_PKT_TYPE) {
696 skb->pkt_type = PACKET_OTHERHOST;
697 nf_bridge->mask ^= BRNF_PKT_TYPE;
699 nf_bridge_update_protocol(skb);
701 in = *((struct net_device **)(skb->cb));
703 nf_bridge_push_encap_header(skb);
705 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
706 skb->dev, br_forward_finish, 1);
710 /* This is the 'purely bridged' case. For IP, we pass the packet to
711 * netfilter with indev and outdev set to the bridge device,
712 * but we are still able to filter on the 'real' indev/outdev
713 * because of the physdev module. For ARP, indev and outdev are the
/* Bridge FORWARD hook for IP traffic: present the packet to the
 * IPv4/IPv6 NF_INET_FORWARD hook with indev/outdev set to the bridge
 * devices (physdev match still sees the real ports).
 * NOTE(review): this copy has lost the 'u_int8_t pf' declaration, the
 * NF_DROP/NF_ACCEPT returns, the protocol-family selection braces and
 * the final NF_STOLEN return — compare with upstream br_netfilter.c.
 */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))

	parent = bridge_parent(out);

	/* Choose the IPv4 or IPv6 hook family for this packet. */
	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
		skb->protocol = htons(ETH_P_IP);
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
		br_nf_forward_finish);
/* Bridge FORWARD hook for ARP traffic: hand the packet to the
 * NF_ARP_FORWARD hook with the real in/out port devices; the input
 * device is stashed in skb->cb for br_nf_forward_finish().
 * NOTE(review): this copy has lost the 'br = p->br;' assignment, the
 * NF_ACCEPT returns, closing braces and the final NF_STOLEN return —
 * compare with upstream br_netfilter.c.
 */
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);

	/* Honour both the global and the per-bridge toggle. */
	if (!brnf_call_arptables && !br->nf_call_arptables)

	if (skb->protocol != htons(ETH_P_ARP)) {
		if (!IS_VLAN_ARP(skb))
		nf_bridge_pull_encap_header(skb);

	/* Only IPv4 ARP (4-byte protocol addresses) is filtered. */
	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);

	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
/* Transmit after POST_ROUTING. If IPv4 conntrack reassembled the
 * packet and it now exceeds the outgoing device MTU (and is not GSO),
 * refragment it; otherwise queue it directly. Restores the missing
 * 'int ret;', the !skb_is_gso() clause, both returns and the
 * #else/#endif that separate the conntrack and non-conntrack variants. */
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
/* Without IPv4 conntrack there is nothing to refragment. */
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif
826 /* PF_BRIDGE/POST_ROUTING ********************************************/
/* Bridge POST_ROUTING hook: for packets that went through the bridged
 * IP path (BRNF_BRIDGED set), save the MAC header and run the
 * IPv4/IPv6 NF_INET_POST_ROUTING hook with the real output device,
 * finishing in br_nf_dev_queue_xmit().
 * NOTE(review): this copy has lost the 'u_int8_t pf' declaration, the
 * NF_ACCEPT/NF_DROP returns, the family-selection braces and the
 * final NF_STOLEN return — compare with upstream br_netfilter.c.
 */
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);

	/* Only packets that traversed the bridged IP FORWARD path. */
	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))

	/* Choose the IPv4 or IPv6 hook family for this packet. */
	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;

	nf_bridge_pull_encap_header(skb);
	/* Stash the MAC header; nf_bridge_copy_header() restores it. */
	nf_bridge_save_header(skb);
		skb->protocol = htons(ETH_P_IP);
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);
871 /* IP/SABOTAGE *****************************************************/
872 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
873 * for the second time. */
874 static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
875 const struct net_device *in,
876 const struct net_device *out,
877 int (*okfn)(struct sk_buff *))
879 if (skb->nf_bridge &&
880 !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
887 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
888 * br_dev_queue_push_xmit is called afterwards */
889 static struct nf_hook_ops br_nf_ops[] __read_mostly = {
891 .hook = br_nf_pre_routing,
892 .owner = THIS_MODULE,
894 .hooknum = NF_BR_PRE_ROUTING,
895 .priority = NF_BR_PRI_BRNF,
898 .hook = br_nf_local_in,
899 .owner = THIS_MODULE,
901 .hooknum = NF_BR_LOCAL_IN,
902 .priority = NF_BR_PRI_BRNF,
905 .hook = br_nf_forward_ip,
906 .owner = THIS_MODULE,
908 .hooknum = NF_BR_FORWARD,
909 .priority = NF_BR_PRI_BRNF - 1,
912 .hook = br_nf_forward_arp,
913 .owner = THIS_MODULE,
915 .hooknum = NF_BR_FORWARD,
916 .priority = NF_BR_PRI_BRNF,
919 .hook = br_nf_post_routing,
920 .owner = THIS_MODULE,
922 .hooknum = NF_BR_POST_ROUTING,
923 .priority = NF_BR_PRI_LAST,
926 .hook = ip_sabotage_in,
927 .owner = THIS_MODULE,
929 .hooknum = NF_INET_PRE_ROUTING,
930 .priority = NF_IP_PRI_FIRST,
933 .hook = ip_sabotage_in,
934 .owner = THIS_MODULE,
936 .hooknum = NF_INET_PRE_ROUTING,
937 .priority = NF_IP6_PRI_FIRST,
943 int brnf_sysctl_call_tables(ctl_table * ctl, int write,
944 void __user * buffer, size_t * lenp, loff_t * ppos)
948 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
950 if (write && *(int *)(ctl->data))
951 *(int *)(ctl->data) = 1;
955 static ctl_table brnf_table[] = {
957 .procname = "bridge-nf-call-arptables",
958 .data = &brnf_call_arptables,
959 .maxlen = sizeof(int),
961 .proc_handler = brnf_sysctl_call_tables,
964 .procname = "bridge-nf-call-iptables",
965 .data = &brnf_call_iptables,
966 .maxlen = sizeof(int),
968 .proc_handler = brnf_sysctl_call_tables,
971 .procname = "bridge-nf-call-ip6tables",
972 .data = &brnf_call_ip6tables,
973 .maxlen = sizeof(int),
975 .proc_handler = brnf_sysctl_call_tables,
978 .procname = "bridge-nf-filter-vlan-tagged",
979 .data = &brnf_filter_vlan_tagged,
980 .maxlen = sizeof(int),
982 .proc_handler = brnf_sysctl_call_tables,
985 .procname = "bridge-nf-filter-pppoe-tagged",
986 .data = &brnf_filter_pppoe_tagged,
987 .maxlen = sizeof(int),
989 .proc_handler = brnf_sysctl_call_tables,
994 static struct ctl_path brnf_path[] = {
995 { .procname = "net", },
996 { .procname = "bridge", },
1001 int __init br_netfilter_init(void)
1005 ret = dst_entries_init(&fake_dst_ops);
1009 ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1011 dst_entries_destroy(&fake_dst_ops);
1014 #ifdef CONFIG_SYSCTL
1015 brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
1016 if (brnf_sysctl_header == NULL) {
1018 "br_netfilter: can't register to sysctl.\n");
1019 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1020 dst_entries_destroy(&fake_dst_ops);
1024 printk(KERN_NOTICE "Bridge firewalling registered\n");
1028 void br_netfilter_fini(void)
1030 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1031 #ifdef CONFIG_SYSCTL
1032 unregister_sysctl_table(brnf_sysctl_header);
1034 dst_entries_destroy(&fake_dst_ops);