Merge branch 'master' of /repos/git/net-next-2.6
author     Patrick McHardy <kaber@trash.net>
           Tue, 20 Apr 2010 14:02:01 +0000 (16:02 +0200)
committer  Patrick McHardy <kaber@trash.net>
           Tue, 20 Apr 2010 14:02:01 +0000 (16:02 +0200)
Conflicts:
Documentation/feature-removal-schedule.txt
net/ipv6/netfilter/ip6t_REJECT.c
net/netfilter/xt_limit.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
56 files changed:
Documentation/feature-removal-schedule.txt
include/linux/kernel.h
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_stp_bpdu.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtables.c
net/decnet/dn_neigh.c
net/decnet/dn_nsp_in.c
net/decnet/dn_route.c
net/ipv4/ip_forward.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/nf_nat_rule.c
net/ipv4/netfilter/nf_nat_standalone.c
net/ipv4/raw.c
net/ipv4/xfrm4_input.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_hbh.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/raw.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_proto.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/netfilter/xt_CT.c
net/netfilter/xt_LED.c
net/netfilter/xt_RATEEST.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_dccp.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_limit.c
net/netfilter/xt_quota.c
net/netfilter/xt_recent.c
net/netfilter/xt_statistic.c
net/netfilter/xt_string.c
net/sched/act_ipt.c

@@@ -241,6 -241,16 +241,6 @@@ Who:      Thomas Gleixner <tglx@linutronix.d
  
  ---------------------------
  
 -What (Why):
 -      - xt_recent: the old ipt_recent proc dir
 -        (superseded by /proc/net/xt_recent)
 -
 -When: January 2009 or Linux 2.7.0, whichever comes first
 -Why:  Superseded by newer revisions or modules
 -Who:  Jan Engelhardt <jengelh@computergmbh.de>
 -
 ----------------------------
 -
  What: GPIO autorequest on gpio_direction_{input,output}() in gpiolib
  When: February 2010
  Why:  All callers should use explicit gpio_request()/gpio_free().
@@@ -533,6 -543,24 +533,24 @@@ Who:     Eric Miao <eric.y.miao@gmail.com
  
  ----------------------------
  
+ What: sysfs-class-rfkill state file
+ When: Feb 2014
+ Files:        net/rfkill/core.c
+ Why:  Documented as obsolete since Feb 2010. This file is limited to 3
+       states while the rfkill drivers can have 4 states.
+ Who:  anybody or Florian Mickler <florian@mickler.org>
+ ----------------------------
+ What:         sysfs-class-rfkill claim file
+ When: Feb 2012
+ Files:        net/rfkill/core.c
+ Why:  It has not been possible to claim an rfkill driver since 2007. This
+       is documented as obsolete since Feb 2010.
+ Who:  anybody or Florian Mickler <florian@mickler.org>
+ ----------------------------
  What: capifs
  When: February 2011
  Files:        drivers/isdn/capi/capifs.*
@@@ -580,10 -608,23 +598,31 @@@ Why:    Useful in 2003, implementation is 
        Seen as doing more harm than good.
  Who:  Len Brown <len.brown@intel.com>
  
+ ----------------------------
+ What: iwlwifi 50XX module parameters
+ When: 2.6.40
+ Why:  The "..50" module parameters were used to configure 5000 series and
+       newer devices; a different set of module parameters with the same
+       functionality is also available for 4965. Consolidate both sets into
+       a single place in drivers/net/wireless/iwlwifi/iwl-agn.c
+ Who:  Wey-Yi Guy <wey-yi.w.guy@intel.com>
+ ----------------------------
+ What: iwl4965 alias support
+ When: 2.6.40
+ Why:  Internal alias support has been present in module-init-tools for some
+       time, so the MODULE_ALIAS("iwl4965") boilerplate aliases can be removed
+       with no impact.
+ Who:  Wey-Yi Guy <wey-yi.w.guy@intel.com>
++
 +---------------------------
 +
 +What: xt_NOTRACK
 +Files:        net/netfilter/xt_NOTRACK.c
 +When: April 2011
 +Why:  Superseded by xt_CT
 +Who:  Netfilter developer team <netfilter-devel@vger.kernel.org>
diff --combined include/linux/kernel.h
@@@ -4,8 -4,6 +4,8 @@@
  /*
   * 'kernel.h' contains some often-used function prototypes etc
   */
 +#define __ALIGN_KERNEL(x, a)          __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
 +#define __ALIGN_KERNEL_MASK(x, mask)  (((x) + (mask)) & ~(mask))
  
  #ifdef __KERNEL__
  
@@@ -39,8 -37,8 +39,8 @@@ extern const char linux_proc_banner[]
  
  #define STACK_MAGIC   0xdeadbeef
  
 -#define ALIGN(x,a)            __ALIGN_MASK(x,(typeof(x))(a)-1)
 -#define __ALIGN_MASK(x,mask)  (((x)+(mask))&~(mask))
 +#define ALIGN(x, a)           __ALIGN_KERNEL((x), (a))
 +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
  #define PTR_ALIGN(p, a)               ((typeof(p))ALIGN((unsigned long)(p), (a)))
  #define IS_ALIGNED(x, a)              (((x) & ((typeof(x))(a) - 1)) == 0)
  
@@@ -428,7 -426,7 +428,7 @@@ static inline char *pack_hex_byte(char 
                .burst = DEFAULT_RATELIMIT_BURST,       \
        };                                              \
                                                        \
-       if (!__ratelimit(&_rs))                         \
+       if (__ratelimit(&_rs))                          \
                printk(fmt, ##__VA_ARGS__);             \
  })
  #else
diff --combined net/bridge/br_forward.c
@@@ -12,6 -12,7 +12,7 @@@
   */
  
  #include <linux/err.h>
+ #include <linux/slab.h>
  #include <linux/kernel.h>
  #include <linux/netdevice.h>
  #include <linux/skbuff.h>
@@@ -43,7 -44,7 +44,7 @@@ int br_dev_queue_push_xmit(struct sk_bu
        if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
                kfree_skb(skb);
        else {
 -              /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
 +              /* ip_fragment doesn't copy the MAC header */
                if (nf_bridge_maybe_copy_header(skb))
                        kfree_skb(skb);
                else {
@@@ -58,7 -59,7 +59,7 @@@
  
  int br_forward_finish(struct sk_buff *skb)
  {
 -      return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
 +      return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
                       br_dev_queue_push_xmit);
  
  }
@@@ -66,8 -67,8 +67,8 @@@
  static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
  {
        skb->dev = to->dev;
 -      NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 -                      br_forward_finish);
 +      NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 +              br_forward_finish);
  }
  
  static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@@ -83,8 -84,8 +84,8 @@@
        skb->dev = to->dev;
        skb_forward_csum(skb);
  
 -      NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
 -                      br_forward_finish);
 +      NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
 +              br_forward_finish);
  }
  
  /* called with rcu_read_lock */
diff --combined net/bridge/br_input.c
@@@ -11,6 -11,7 +11,7 @@@
   *    2 of the License, or (at your option) any later version.
   */
  
+ #include <linux/slab.h>
  #include <linux/kernel.h>
  #include <linux/netdevice.h>
  #include <linux/etherdevice.h>
@@@ -32,7 -33,7 +33,7 @@@ static int br_pass_frame_up(struct sk_b
        indev = skb->dev;
        skb->dev = brdev;
  
 -      return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
 +      return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
                       netif_receive_skb);
  }
  
@@@ -155,7 -156,7 +156,7 @@@ struct sk_buff *br_handle_frame(struct 
                if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
                        goto forward;
  
 -              if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
 +              if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
                            NULL, br_handle_local_finish))
                        return NULL;    /* frame consumed by filter */
                else
@@@ -176,7 -177,7 +177,7 @@@ forward
                if (!compare_ether_addr(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;
  
 -              NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
 +              NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                        br_handle_frame_finish);
                break;
        default:
diff --combined net/bridge/br_multicast.c
@@@ -608,7 -608,7 +608,7 @@@ static void br_multicast_send_query(str
        if (port) {
                __skb_push(skb, sizeof(struct ethhdr));
                skb->dev = port->dev;
 -              NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 +              NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
                        dev_queue_xmit);
        } else
                netif_rx(skb);
@@@ -723,7 -723,7 +723,7 @@@ static int br_multicast_igmp3_report(st
                if (!pskb_may_pull(skb, len))
                        return -EINVAL;
  
-               grec = (void *)(skb->data + len);
+               grec = (void *)(skb->data + len - sizeof(*grec));
                group = grec->grec_mca;
                type = grec->grec_type;
  
@@@ -1003,8 -1003,6 +1003,6 @@@ static int br_multicast_ipv4_rcv(struc
        if (!pskb_may_pull(skb2, sizeof(*ih)))
                goto out;
  
-       iph = ip_hdr(skb2);
        switch (skb2->ip_summed) {
        case CHECKSUM_COMPLETE:
                if (!csum_fold(skb2->csum))
diff --combined net/bridge/br_netfilter.c
@@@ -3,8 -3,15 +3,8 @@@
   *    Linux ethernet bridge
   *
   *    Authors:
 - *    Lennert Buytenhek               <buytenh@gnu.org>
 - *    Bart De Schuymer (maintainer)   <bdschuym@pandora.be>
 - *
 - *    Changes:
 - *    Apr 29 2003: physdev module support (bdschuym)
 - *    Jun 19 2003: let arptables see bridged ARP traffic (bdschuym)
 - *    Oct 06 2003: filter encapsulated IP/ARP VLAN traffic on untagged bridge
 - *                 (bdschuym)
 - *    Sep 01 2004: add IPv6 filtering (bdschuym)
 + *    Lennert Buytenhek               <buytenh@gnu.org>
 + *    Bart De Schuymer                <bdschuym@pandora.be>
   *
   *    This program is free software; you can redistribute it and/or
   *    modify it under the terms of the GNU General Public License
@@@ -16,6 -23,7 +16,7 @@@
  
  #include <linux/module.h>
  #include <linux/kernel.h>
+ #include <linux/slab.h>
  #include <linux/ip.h>
  #include <linux/netdevice.h>
  #include <linux/skbuff.h>
@@@ -196,24 -204,15 +197,24 @@@ static inline void nf_bridge_save_heade
                                         skb->nf_bridge->data, header_size);
  }
  
 -/*
 - * When forwarding bridge frames, we save a copy of the original
 - * header before processing.
 +static inline void nf_bridge_update_protocol(struct sk_buff *skb)
 +{
 +      if (skb->nf_bridge->mask & BRNF_8021Q)
 +              skb->protocol = htons(ETH_P_8021Q);
 +      else if (skb->nf_bridge->mask & BRNF_PPPoE)
 +              skb->protocol = htons(ETH_P_PPP_SES);
 +}
 +
 +/* Fill in the header for fragmented IP packets handled by
 + * the IPv4 connection tracking code.
   */
  int nf_bridge_copy_header(struct sk_buff *skb)
  {
        int err;
 -      int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
 +      unsigned int header_size;
  
 +      nf_bridge_update_protocol(skb);
 +      header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
        err = skb_cow_head(skb, header_size);
        if (err)
                return err;
@@@ -247,48 -246,27 +248,48 @@@ static int br_nf_pre_routing_finish_ipv
        skb_dst_set(skb, &rt->u.dst);
  
        skb->dev = nf_bridge->physindev;
 +      nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
 -      NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
 +      NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);
  
        return 0;
  }
  
 -static void __br_dnat_complain(void)
 +/* Obtain the correct destination MAC address, while preserving the original
 + * source MAC address. If we already know this address, we just copy it. If we
 + * don't, we use the neighbour framework to find out. In both cases, we make
 + * sure that br_handle_frame_finish() is called afterwards.
 + */
 +static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
  {
 -      static unsigned long last_complaint;
 +      struct nf_bridge_info *nf_bridge = skb->nf_bridge;
 +      struct dst_entry *dst;
  
 -      if (jiffies - last_complaint >= 5 * HZ) {
 -              printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
 -                     "forwarding to be enabled\n");
 -              last_complaint = jiffies;
 +      skb->dev = bridge_parent(skb->dev);
 +      if (!skb->dev)
 +              goto free_skb;
 +      dst = skb_dst(skb);
 +      if (dst->hh) {
 +              neigh_hh_bridge(dst->hh, skb);
 +              skb->dev = nf_bridge->physindev;
 +              return br_handle_frame_finish(skb);
 +      } else if (dst->neighbour) {
 +              /* the neighbour function below overwrites the complete
 +               * MAC header, so we save the Ethernet source address and
 +               * protocol number. */
 +              skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
 +              /* tell br_dev_xmit to continue with forwarding */
 +              nf_bridge->mask |= BRNF_BRIDGED_DNAT;
 +              return dst->neighbour->output(skb);
        }
 +free_skb:
 +      kfree_skb(skb);
 +      return 0;
  }
  
  /* This requires some explaining. If DNAT has taken place,
 - * we will need to fix up the destination Ethernet address,
 - * and this is a tricky process.
 + * we will need to fix up the destination Ethernet address.
   *
   * There are two cases to consider:
   * 1. The packet was DNAT'ed to a device in the same bridge
   * call ip_route_input() and to look at skb->dst->dev, which is
   * changed to the destination device if ip_route_input() succeeds.
   *
 - * Let us first consider the case that ip_route_input() succeeds:
 - *
 - * If skb->dst->dev equals the logical bridge device the packet
 - * came in on, we can consider this bridging. The packet is passed
 - * through the neighbour output function to build a new destination
 - * MAC address, which will make the packet enter br_nf_local_out()
 - * not much later. In that function it is assured that the iptables
 - * FORWARD chain is traversed for the packet.
 + * Let's first consider the case that ip_route_input() succeeds:
   *
 + * If the output device equals the logical bridge device the packet
 + * came in on, we can consider this bridging. The corresponding MAC
 + * address will be obtained in br_nf_pre_routing_finish_bridge.
   * Otherwise, the packet is considered to be routed and we just
   * change the destination MAC address so that the packet will
   * later be passed up to the IP stack to be routed. For a redirected
   * packet, ip_route_input() will give back the localhost as output device,
   * which differs from the bridge device.
   *
 - * Let us now consider the case that ip_route_input() fails:
 + * Let's now consider the case that ip_route_input() fails:
   *
   * This can be because the destination address is martian, in which case
   * the packet will be dropped.
 - * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
 - * will fail, while __ip_route_output_key() will return success. The source
 - * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
 + * If IP forwarding is disabled, ip_route_input() will fail, while
 + * ip_route_output_key() can return success. The source
 + * address for ip_route_output_key() is set to zero, so ip_route_output_key()
   * thinks we're handling a locally generated packet and won't care
 - * if IP forwarding is allowed. We send a warning message to the users's
 - * log telling her to put IP forwarding on.
 - *
 - * ip_route_input() will also fail if there is no route available.
 - * In that case we just drop the packet.
 - *
 - * --Lennert, 20020411
 - * --Bart, 20020416 (updated)
 - * --Bart, 20021007 (updated)
 - * --Bart, 20062711 (updated) */
 -static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 -{
 -      if (skb->pkt_type == PACKET_OTHERHOST) {
 -              skb->pkt_type = PACKET_HOST;
 -              skb->nf_bridge->mask |= BRNF_PKT_TYPE;
 -      }
 -      skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 -
 -      skb->dev = bridge_parent(skb->dev);
 -      if (skb->dev) {
 -              struct dst_entry *dst = skb_dst(skb);
 -
 -              nf_bridge_pull_encap_header(skb);
 -
 -              if (dst->hh)
 -                      return neigh_hh_output(dst->hh, skb);
 -              else if (dst->neighbour)
 -                      return dst->neighbour->output(skb);
 -      }
 -      kfree_skb(skb);
 -      return 0;
 -}
 -
 + * if IP forwarding is enabled. If the output device equals the logical bridge
 + * device, we proceed as if ip_route_input() succeeded. If it differs from the
 + * logical bridge port or if ip_route_output_key() fails we drop the packet.
 + */
  static int br_nf_pre_routing_finish(struct sk_buff *skb)
  {
        struct net_device *dev = skb->dev;
                                        skb_dst_set(skb, (struct dst_entry *)rt);
                                        goto bridged_dnat;
                                }
 -                              /* we are sure that forwarding is disabled, so printing
 -                               * this message is no problem. Note that the packet could
 -                               * still have a martian destination address, in which case
 -                               * the packet could be dropped even if forwarding were enabled */
 -                              __br_dnat_complain();
                                dst_release((struct dst_entry *)rt);
                        }
  free_skb:
                } else {
                        if (skb_dst(skb)->dev == dev) {
  bridged_dnat:
 -                              /* Tell br_nf_local_out this is a
 -                               * bridged frame */
 -                              nf_bridge->mask |= BRNF_BRIDGED_DNAT;
                                skb->dev = nf_bridge->physindev;
 +                              nf_bridge_update_protocol(skb);
                                nf_bridge_push_encap_header(skb);
 -                              NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
 +                              NF_HOOK_THRESH(NFPROTO_BRIDGE,
 +                                             NF_BR_PRE_ROUTING,
                                               skb, skb->dev, NULL,
                                               br_nf_pre_routing_finish_bridge,
                                               1);
        }
  
        skb->dev = nf_bridge->physindev;
 +      nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
 -      NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
 +      NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);
  
        return 0;
@@@ -421,10 -437,6 +422,10 @@@ static struct net_device *setup_pre_rou
        nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
        nf_bridge->physindev = skb->dev;
        skb->dev = bridge_parent(skb->dev);
 +      if (skb->protocol == htons(ETH_P_8021Q))
 +              nf_bridge->mask |= BRNF_8021Q;
 +      else if (skb->protocol == htons(ETH_P_PPP_SES))
 +              nf_bridge->mask |= BRNF_PPPoE;
  
        return skb->dev;
  }
@@@ -523,8 -535,7 +524,8 @@@ static unsigned int br_nf_pre_routing_i
        if (!setup_pre_routing(skb))
                return NF_DROP;
  
 -      NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
 +      skb->protocol = htons(ETH_P_IPV6);
 +      NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);
  
        return NF_STOLEN;
@@@ -596,9 -607,8 +597,9 @@@ static unsigned int br_nf_pre_routing(u
        if (!setup_pre_routing(skb))
                return NF_DROP;
        store_orig_dstaddr(skb);
 +      skb->protocol = htons(ETH_P_IP);
  
 -      NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
 +      NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                br_nf_pre_routing_finish);
  
        return NF_STOLEN;
@@@ -645,10 -655,8 +646,10 @@@ static int br_nf_forward_finish(struct 
        } else {
                in = *((struct net_device **)(skb->cb));
        }
 +      nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
 -      NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
 +
 +      NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
                       skb->dev, br_forward_finish, 1);
        return 0;
  }
@@@ -699,10 -707,6 +700,10 @@@ static unsigned int br_nf_forward_ip(un
        /* The physdev module checks on this */
        nf_bridge->mask |= BRNF_BRIDGED;
        nf_bridge->physoutdev = skb->dev;
 +      if (pf == PF_INET)
 +              skb->protocol = htons(ETH_P_IP);
 +      else
 +              skb->protocol = htons(ETH_P_IPV6);
  
        NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
                br_nf_forward_finish);
@@@ -740,10 -744,59 +741,10 @@@ static unsigned int br_nf_forward_arp(u
        return NF_STOLEN;
  }
  
 -/* PF_BRIDGE/LOCAL_OUT ***********************************************
 - *
 - * This function sees both locally originated IP packets and forwarded
 - * IP packets (in both cases the destination device is a bridge
 - * device). It also sees bridged-and-DNAT'ed packets.
 - *
 - * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged
 - * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward()
 - * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority
 - * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor
 - * will be executed.
 - */
 -static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb,
 -                                  const struct net_device *in,
 -                                  const struct net_device *out,
 -                                  int (*okfn)(struct sk_buff *))
 -{
 -      struct net_device *realindev;
 -      struct nf_bridge_info *nf_bridge;
 -
 -      if (!skb->nf_bridge)
 -              return NF_ACCEPT;
 -
 -      /* Need exclusive nf_bridge_info since we might have multiple
 -       * different physoutdevs. */
 -      if (!nf_bridge_unshare(skb))
 -              return NF_DROP;
 -
 -      nf_bridge = skb->nf_bridge;
 -      if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT))
 -              return NF_ACCEPT;
 -
 -      /* Bridged, take PF_BRIDGE/FORWARD.
 -       * (see big note in front of br_nf_pre_routing_finish) */
 -      nf_bridge->physoutdev = skb->dev;
 -      realindev = nf_bridge->physindev;
 -
 -      if (nf_bridge->mask & BRNF_PKT_TYPE) {
 -              skb->pkt_type = PACKET_OTHERHOST;
 -              nf_bridge->mask ^= BRNF_PKT_TYPE;
 -      }
 -      nf_bridge_push_encap_header(skb);
 -
 -      NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev,
 -              br_forward_finish);
 -      return NF_STOLEN;
 -}
 -
  #if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
  static int br_nf_dev_queue_xmit(struct sk_buff *skb)
  {
 -      if (skb->nfct != NULL &&
 -          (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
 +      if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
            skb->len > skb->dev->mtu &&
            !skb_is_gso(skb))
                return ip_fragment(skb, br_dev_queue_push_xmit);
@@@ -767,7 -820,21 +768,7 @@@ static unsigned int br_nf_post_routing(
        struct net_device *realoutdev = bridge_parent(skb->dev);
        u_int8_t pf;
  
 -#ifdef CONFIG_NETFILTER_DEBUG
 -      /* Be very paranoid. This probably won't happen anymore, but let's
 -       * keep the check just to be sure... */
 -      if (skb_mac_header(skb) < skb->head ||
 -          skb_mac_header(skb) + ETH_HLEN > skb->data) {
 -              printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: "
 -                     "bad mac.raw pointer.\n");
 -              goto print_error;
 -      }
 -#endif
 -
 -      if (!nf_bridge)
 -              return NF_ACCEPT;
 -
 -      if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
 +      if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
                return NF_ACCEPT;
  
        if (!realoutdev)
        else
                return NF_ACCEPT;
  
 -#ifdef CONFIG_NETFILTER_DEBUG
 -      if (skb_dst(skb) == NULL) {
 -              printk(KERN_INFO "br_netfilter post_routing: skb->dst == NULL\n");
 -              goto print_error;
 -      }
 -#endif
 -
        /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
         * about the value of skb->pkt_type. */
        if (skb->pkt_type == PACKET_OTHERHOST) {
  
        nf_bridge_pull_encap_header(skb);
        nf_bridge_save_header(skb);
 +      if (pf == PF_INET)
 +              skb->protocol = htons(ETH_P_IP);
 +      else
 +              skb->protocol = htons(ETH_P_IPV6);
  
        NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
                br_nf_dev_queue_xmit);
  
        return NF_STOLEN;
 -
 -#ifdef CONFIG_NETFILTER_DEBUG
 -print_error:
 -      if (skb->dev != NULL) {
 -              printk("[%s]", skb->dev->name);
 -              if (realoutdev)
 -                      printk("[%s]", realoutdev->name);
 -      }
 -      printk(" head:%p, raw:%p, data:%p\n", skb->head, skb_mac_header(skb),
 -             skb->data);
 -      dump_stack();
 -      return NF_ACCEPT;
 -#endif
  }
  
  /* IP/SABOTAGE *****************************************************/
@@@ -818,8 -901,10 +819,8 @@@ static unsigned int ip_sabotage_in(unsi
        return NF_ACCEPT;
  }
  
 -/* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to insure that innocent
 - * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input.
 - * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 - * ip_refrag() can return NF_STOLEN. */
 +/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 + * br_dev_queue_push_xmit is called afterwards */
  static struct nf_hook_ops br_nf_ops[] __read_mostly = {
        {
                .hook = br_nf_pre_routing,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF,
        },
 -      {
 -              .hook = br_nf_local_out,
 -              .owner = THIS_MODULE,
 -              .pf = PF_BRIDGE,
 -              .hooknum = NF_BR_LOCAL_OUT,
 -              .priority = NF_BR_PRI_FIRST,
 -      },
        {
                .hook = br_nf_post_routing,
                .owner = THIS_MODULE,
diff --combined net/bridge/br_stp_bpdu.c
@@@ -15,6 -15,7 +15,7 @@@
  #include <linux/netfilter_bridge.h>
  #include <linux/etherdevice.h>
  #include <linux/llc.h>
+ #include <linux/slab.h>
  #include <net/net_namespace.h>
  #include <net/llc.h>
  #include <net/llc_pdu.h>
@@@ -49,7 -50,7 +50,7 @@@ static void br_send_bpdu(struct net_bri
  
        llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
  
 -      NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 +      NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
                dev_queue_xmit);
  }
  
diff --combined net/bridge/netfilter/ebt_ulog.c
@@@ -27,8 -27,9 +27,9 @@@
   *   flushed even if it is not full yet.
   *
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
+ #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/socket.h>
  #include <linux/skbuff.h>
@@@ -43,6 -44,9 +44,6 @@@
  #include <net/sock.h>
  #include "../br_private.h"
  
 -#define PRINTR(format, args...) do { if (net_ratelimit()) \
 -                              printk(format , ## args); } while (0)
 -
  static unsigned int nlbufsiz = NLMSG_GOODSIZE;
  module_param(nlbufsiz, uint, 0600);
  MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
@@@ -103,14 -107,15 +104,14 @@@ static struct sk_buff *ulog_alloc_skb(u
        n = max(size, nlbufsiz);
        skb = alloc_skb(n, GFP_ATOMIC);
        if (!skb) {
 -              PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
 -                     "of size %ub!\n", n);
 +              pr_debug("cannot alloc whole buffer of size %ub!\n", n);
                if (n > size) {
                        /* try to allocate only as much as we need for
                         * current packet */
                        skb = alloc_skb(size, GFP_ATOMIC);
                        if (!skb)
 -                              PRINTR(KERN_ERR "ebt_ulog: can't even allocate "
 -                                     "buffer of size %ub\n", size);
 +                              pr_debug("cannot even allocate "
 +                                       "buffer of size %ub\n", size);
                }
        }
  
@@@ -137,7 -142,8 +138,7 @@@ static void ebt_ulog_packet(unsigned in
  
        size = NLMSG_SPACE(sizeof(*pm) + copy_len);
        if (size > nlbufsiz) {
 -              PRINTR("ebt_ulog: Size %Zd needed, but nlbufsiz=%d\n",
 -                     size, nlbufsiz);
 +              pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
                return;
        }
  
@@@ -211,8 -217,8 +212,8 @@@ unlock
        return;
  
  nlmsg_failure:
 -      printk(KERN_CRIT "ebt_ulog: error during NLMSG_PUT. This should "
 -             "not happen, please report to author.\n");
 +      pr_debug("error during NLMSG_PUT. This should "
 +               "not happen, please report to author.\n");
        goto unlock;
  alloc_failure:
        goto unlock;
@@@ -249,19 -255,19 +250,19 @@@ ebt_ulog_tg(struct sk_buff *skb, const 
        return EBT_CONTINUE;
  }
  
 -static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par)
 +static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
  {
        struct ebt_ulog_info *uloginfo = par->targinfo;
  
        if (uloginfo->nlgroup > 31)
 -              return false;
 +              return -EINVAL;
  
        uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
  
        if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
                uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
  
 -      return true;
 +      return 0;
  }
  
  static struct xt_target ebt_ulog_tg_reg __read_mostly = {
@@@ -286,8 -292,8 +287,8 @@@ static int __init ebt_ulog_init(void
        int i;
  
        if (nlbufsiz >= 128*1024) {
 -              printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB,"
 -                     " please try a smaller nlbufsiz parameter.\n");
 +              pr_warning("Netlink buffer has to be <= 128kB,"
 +                         " please try a smaller nlbufsiz parameter.\n");
                return -EINVAL;
        }
  
        ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
                                          EBT_ULOG_MAXNLGROUPS, NULL, NULL,
                                          THIS_MODULE);
 -      if (!ebtulognl) {
 -              printk(KERN_WARNING KBUILD_MODNAME ": out of memory trying to "
 -                     "call netlink_kernel_create\n");
 +      if (!ebtulognl)
                ret = -ENOMEM;
 -      } else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) {
 +      else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
                netlink_kernel_release(ebtulognl);
 -      }
  
        if (ret == 0)
                nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
diff --combined net/bridge/netfilter/ebtables.c
@@@ -14,7 -14,8 +14,7 @@@
   *  as published by the Free Software Foundation; either version
   *  2 of the License, or (at your option) any later version.
   */
 -
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/kmod.h>
  #include <linux/module.h>
  #include <linux/vmalloc.h>
@@@ -22,6 -23,7 +22,7 @@@
  #include <linux/netfilter_bridge/ebtables.h>
  #include <linux/spinlock.h>
  #include <linux/mutex.h>
+ #include <linux/slab.h>
  #include <asm/uaccess.h>
  #include <linux/smp.h>
  #include <linux/cpumask.h>
@@@ -361,9 -363,12 +362,9 @@@ ebt_check_match(struct ebt_entry_match 
            left - sizeof(struct ebt_entry_match) < m->match_size)
                return -EINVAL;
  
 -      match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
 -              m->u.name, 0), "ebt_%s", m->u.name);
 +      match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        if (IS_ERR(match))
                return PTR_ERR(match);
 -      if (match == NULL)
 -              return -ENOENT;
        m->u.match = match;
  
        par->match     = match;
@@@ -392,9 -397,13 +393,9 @@@ ebt_check_watcher(struct ebt_entry_watc
           left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
                return -EINVAL;
  
 -      watcher = try_then_request_module(
 -                xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
 -                "ebt_%s", w->u.name);
 +      watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
        if (IS_ERR(watcher))
                return PTR_ERR(watcher);
 -      if (watcher == NULL)
 -              return -ENOENT;
        w->u.watcher = watcher;
  
        par->target   = watcher;
@@@ -707,10 -716,15 +708,10 @@@ ebt_check_entry(struct ebt_entry *e, st
        t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
        gap = e->next_offset - e->target_offset;
  
 -      target = try_then_request_module(
 -               xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
 -               "ebt_%s", t->u.name);
 +      target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
        if (IS_ERR(target)) {
                ret = PTR_ERR(target);
                goto cleanup_watchers;
 -      } else if (target == NULL) {
 -              ret = -ENOENT;
 -              goto cleanup_watchers;
        }
  
        t->u.target = target;
@@@ -2114,7 -2128,7 +2115,7 @@@ static int size_entry_mwt(struct ebt_en
                        return ret;
                new_offset += ret;
                if (offsets_update && new_offset) {
 -                      pr_debug("ebtables: change offset %d to %d\n",
 +                      pr_debug("change offset %d to %d\n",
                                offsets_update[i], offsets[j] + new_offset);
                        offsets_update[i] = offsets[j] + new_offset;
                }
diff --combined net/decnet/dn_neigh.c
@@@ -28,6 -28,7 +28,7 @@@
  #include <linux/module.h>
  #include <linux/socket.h>
  #include <linux/if_arp.h>
+ #include <linux/slab.h>
  #include <linux/if_ether.h>
  #include <linux/init.h>
  #include <linux/proc_fs.h>
@@@ -265,8 -266,7 +266,8 @@@ static int dn_long_output(struct sk_buf
  
        skb_reset_network_header(skb);
  
 -      return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
 +                     neigh->dev, dn_neigh_output_packet);
  }
  
  static int dn_short_output(struct sk_buff *skb)
  
        skb_reset_network_header(skb);
  
 -      return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
 +                     neigh->dev, dn_neigh_output_packet);
  }
  
  /*
@@@ -348,8 -347,7 +349,8 @@@ static int dn_phase3_output(struct sk_b
  
        skb_reset_network_header(skb);
  
 -      return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
 +                     neigh->dev, dn_neigh_output_packet);
  }
  
  /*
diff --combined net/decnet/dn_nsp_in.c
@@@ -57,6 -57,7 +57,7 @@@
  #include <linux/netdevice.h>
  #include <linux/inet.h>
  #include <linux/route.h>
+ #include <linux/slab.h>
  #include <net/sock.h>
  #include <net/tcp_states.h>
  #include <asm/system.h>
@@@ -809,8 -810,7 +810,8 @@@ free_out
  
  int dn_nsp_rx(struct sk_buff *skb)
  {
 -      return NF_HOOK(PF_DECnet, NF_DN_LOCAL_IN, skb, skb->dev, NULL, dn_nsp_rx_packet);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL,
 +                     dn_nsp_rx_packet);
  }
  
  /*
diff --combined net/decnet/dn_route.c
@@@ -66,6 -66,7 +66,7 @@@
  #include <linux/inet.h>
  #include <linux/route.h>
  #include <linux/in_route.h>
+ #include <linux/slab.h>
  #include <net/sock.h>
  #include <linux/mm.h>
  #include <linux/proc_fs.h>
@@@ -517,8 -518,7 +518,8 @@@ static int dn_route_rx_long(struct sk_b
        ptr++;
        cb->hops = *ptr++; /* Visit Count */
  
 -      return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
 +                     dn_route_rx_packet);
  
  drop_it:
        kfree_skb(skb);
@@@ -544,8 -544,7 +545,8 @@@ static int dn_route_rx_short(struct sk_
        ptr += 2;
        cb->hops = *ptr & 0x3f;
  
 -      return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
 +                     dn_route_rx_packet);
  
  drop_it:
        kfree_skb(skb);
@@@ -647,24 -646,16 +648,24 @@@ int dn_route_rcv(struct sk_buff *skb, s
  
                switch(flags & DN_RT_CNTL_MSK) {
                        case DN_RT_PKT_HELO:
 -                              return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
 +                              return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
 +                                             skb, skb->dev, NULL,
 +                                             dn_route_ptp_hello);
  
                        case DN_RT_PKT_L1RT:
                        case DN_RT_PKT_L2RT:
 -                              return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
 +                              return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
 +                                             skb, skb->dev, NULL,
 +                                             dn_route_discard);
                        case DN_RT_PKT_ERTH:
 -                              return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
 +                              return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
 +                                             skb, skb->dev, NULL,
 +                                             dn_neigh_router_hello);
  
                        case DN_RT_PKT_EEDH:
 -                              return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
 +                              return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
 +                                             skb, skb->dev, NULL,
 +                                             dn_neigh_endnode_hello);
                }
        } else {
                if (dn->parms.state != DN_DEV_S_RU)
@@@ -713,8 -704,7 +714,8 @@@ static int dn_output(struct sk_buff *sk
        cb->rt_flags |= DN_RT_F_IE;
        cb->hops = 0;
  
 -      return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
 +                     neigh->output);
  
  error:
        if (net_ratelimit())
@@@ -763,8 -753,7 +764,8 @@@ static int dn_forward(struct sk_buff *s
        if (rt->rt_flags & RTCF_DOREDIRECT)
                cb->rt_flags |= DN_RT_F_IE;
  
 -      return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);
 +      return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
 +                     neigh->output);
  
  drop:
        kfree_skb(skb);
diff --combined net/ipv4/ip_forward.c
@@@ -25,6 -25,7 +25,7 @@@
  #include <linux/ip.h>
  #include <linux/icmp.h>
  #include <linux/netdevice.h>
+ #include <linux/slab.h>
  #include <net/sock.h>
  #include <net/ip.h>
  #include <net/tcp.h>
@@@ -111,8 -112,8 +112,8 @@@ int ip_forward(struct sk_buff *skb
  
        skb->priority = rt_tos2priority(iph->tos);
  
 -      return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, rt->u.dst.dev,
 -                     ip_forward_finish);
 +      return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
 +                     rt->u.dst.dev, ip_forward_finish);
  
  sr_failed:
        /*
diff --combined net/ipv4/ip_input.c
  #include <linux/kernel.h>
  #include <linux/string.h>
  #include <linux/errno.h>
+ #include <linux/slab.h>
  
  #include <linux/net.h>
  #include <linux/socket.h>
@@@ -265,7 -266,7 +266,7 @@@ int ip_local_deliver(struct sk_buff *sk
                        return 0;
        }
  
 -      return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
 +      return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
                       ip_local_deliver_finish);
  }
  
@@@ -443,7 -444,7 +444,7 @@@ int ip_rcv(struct sk_buff *skb, struct 
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
  
 -      return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
 +      return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
                       ip_rcv_finish);
  
  inhdr_error:
diff --combined net/ipv4/ip_output.c
@@@ -51,6 -51,7 +51,7 @@@
  #include <linux/string.h>
  #include <linux/errno.h>
  #include <linux/highmem.h>
+ #include <linux/slab.h>
  
  #include <linux/socket.h>
  #include <linux/sockios.h>
@@@ -95,8 -96,8 +96,8 @@@ int __ip_local_out(struct sk_buff *skb
  
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
 -      return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
 -                     dst_output);
 +      return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
 +                     skb_dst(skb)->dev, dst_output);
  }
  
  int ip_local_out(struct sk_buff *skb)
@@@ -271,8 -272,8 +272,8 @@@ int ip_mc_output(struct sk_buff *skb
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
 -                              NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
 -                                      NULL, newskb->dev,
 +                              NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 +                                      newskb, NULL, newskb->dev,
                                        ip_dev_loopback_xmit);
                }
  
        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
 -                      NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL,
 -                              newskb->dev, ip_dev_loopback_xmit);
 +                      NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
 +                              NULL, newskb->dev, ip_dev_loopback_xmit);
        }
  
 -      return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
 -                          ip_finish_output,
 +      return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
 +                          skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
  }
  
@@@ -305,12 -306,12 +306,12 @@@ int ip_output(struct sk_buff *skb
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
  
 -      return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev,
 +      return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
  }
  
- int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
+ int ip_queue_xmit(struct sk_buff *skb)
  {
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
@@@ -369,7 -370,7 +370,7 @@@ packet_routed
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-       if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
+       if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
diff --combined net/ipv4/ipmr.c
@@@ -47,6 -47,7 +47,7 @@@
  #include <linux/mroute.h>
  #include <linux/init.h>
  #include <linux/if_ether.h>
+ #include <linux/slab.h>
  #include <net/net_namespace.h>
  #include <net/ip.h>
  #include <net/protocol.h>
  #include <net/ipip.h>
  #include <net/checksum.h>
  #include <net/netlink.h>
+ #include <net/fib_rules.h>
  
  #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
  #define CONFIG_IP_PIMSM       1
  #endif
  
+ struct mr_table {
+       struct list_head        list;
+ #ifdef CONFIG_NET_NS
+       struct net              *net;
+ #endif
+       u32                     id;
+       struct sock             *mroute_sk;
+       struct timer_list       ipmr_expire_timer;
+       struct list_head        mfc_unres_queue;
+       struct list_head        mfc_cache_array[MFC_LINES];
+       struct vif_device       vif_table[MAXVIFS];
+       int                     maxvif;
+       atomic_t                cache_resolve_queue_len;
+       int                     mroute_do_assert;
+       int                     mroute_do_pim;
+ #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
+       int                     mroute_reg_vif_num;
+ #endif
+ };
+ struct ipmr_rule {
+       struct fib_rule         common;
+ };
+ struct ipmr_result {
+       struct mr_table         *mrt;
+ };
  /* Big lock, protecting vif table, mrt cache and mroute socket state.
     Note that the changes are semaphored via rtnl_lock.
   */
@@@ -77,9 -107,7 +107,7 @@@ static DEFINE_RWLOCK(mrt_lock)
   *    Multicast router control variables
   */
  
- #define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)
- static struct mfc_cache *mfc_unres_queue;             /* Queue of unresolved entries */
+ #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
  
  /* Special spinlock for queue of unresolved entries */
  static DEFINE_SPINLOCK(mfc_unres_lock);
  
  static struct kmem_cache *mrt_cachep __read_mostly;
  
- static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
- static int ipmr_cache_report(struct net *net,
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+                        struct sk_buff *skb, struct mfc_cache *cache,
+                        int local);
+ static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
- static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
+ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+                           struct mfc_cache *c, struct rtmsg *rtm);
+ static void ipmr_expire_process(unsigned long arg);
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ #define ipmr_for_each_table(mrt, net) \
+       list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
+ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ {
+       struct mr_table *mrt;
+       ipmr_for_each_table(mrt, net) {
+               if (mrt->id == id)
+                       return mrt;
+       }
+       return NULL;
+ }
+ static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+                          struct mr_table **mrt)
+ {
+       struct ipmr_result res;
+       struct fib_lookup_arg arg = { .result = &res, };
+       int err;
+       err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
+       if (err < 0)
+               return err;
+       *mrt = res.mrt;
+       return 0;
+ }
+ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
+                           int flags, struct fib_lookup_arg *arg)
+ {
+       struct ipmr_result *res = arg->result;
+       struct mr_table *mrt;
+       switch (rule->action) {
+       case FR_ACT_TO_TBL:
+               break;
+       case FR_ACT_UNREACHABLE:
+               return -ENETUNREACH;
+       case FR_ACT_PROHIBIT:
+               return -EACCES;
+       case FR_ACT_BLACKHOLE:
+       default:
+               return -EINVAL;
+       }
+       mrt = ipmr_get_table(rule->fr_net, rule->table);
+       if (mrt == NULL)
+               return -EAGAIN;
+       res->mrt = mrt;
+       return 0;
+ }
+ static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
+ {
+       return 1;
+ }
+ static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
+       FRA_GENERIC_POLICY,
+ };
+ static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+                              struct fib_rule_hdr *frh, struct nlattr **tb)
+ {
+       return 0;
+ }
+ static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
+                            struct nlattr **tb)
+ {
+       return 1;
+ }
+ static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
+                         struct fib_rule_hdr *frh)
+ {
+       frh->dst_len = 0;
+       frh->src_len = 0;
+       frh->tos     = 0;
+       return 0;
+ }
+ static struct fib_rules_ops ipmr_rules_ops_template = {
+       .family         = FIB_RULES_IPMR,
+       .rule_size      = sizeof(struct ipmr_rule),
+       .addr_size      = sizeof(u32),
+       .action         = ipmr_rule_action,
+       .match          = ipmr_rule_match,
+       .configure      = ipmr_rule_configure,
+       .compare        = ipmr_rule_compare,
+       .default_pref   = fib_default_rule_pref,
+       .fill           = ipmr_rule_fill,
+       .nlgroup        = RTNLGRP_IPV4_RULE,
+       .policy         = ipmr_rule_policy,
+       .owner          = THIS_MODULE,
+ };
+ static int __net_init ipmr_rules_init(struct net *net)
+ {
+       struct fib_rules_ops *ops;
+       struct mr_table *mrt;
+       int err;
+       ops = fib_rules_register(&ipmr_rules_ops_template, net);
+       if (IS_ERR(ops))
+               return PTR_ERR(ops);
+       INIT_LIST_HEAD(&net->ipv4.mr_tables);
+       mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+       if (mrt == NULL) {
+               err = -ENOMEM;
+               goto err1;
+       }
+       err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
+       if (err < 0)
+               goto err2;
+       net->ipv4.mr_rules_ops = ops;
+       return 0;
+ err2:
+       kfree(mrt);
+ err1:
+       fib_rules_unregister(ops);
+       return err;
+ }
+ static void __net_exit ipmr_rules_exit(struct net *net)
+ {
+       struct mr_table *mrt, *next;
+       list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+               kfree(mrt);
+       fib_rules_unregister(net->ipv4.mr_rules_ops);
+ }
+ #else
+ #define ipmr_for_each_table(mrt, net) \
+       for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
+ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ {
+       return net->ipv4.mrt;
+ }
+ static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+                          struct mr_table **mrt)
+ {
+       *mrt = net->ipv4.mrt;
+       return 0;
+ }
+ static int __net_init ipmr_rules_init(struct net *net)
+ {
+       net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+       return net->ipv4.mrt ? 0 : -ENOMEM;
+ }
+ static void __net_exit ipmr_rules_exit(struct net *net)
+ {
+       kfree(net->ipv4.mrt);
+ }
+ #endif
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ {
+       struct mr_table *mrt;
+       unsigned int i;
  
- static struct timer_list ipmr_expire_timer;
+       mrt = ipmr_get_table(net, id);
+       if (mrt != NULL)
+               return mrt;
+       mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+       if (mrt == NULL)
+               return NULL;
+       write_pnet(&mrt->net, net);
+       mrt->id = id;
+       /* Forwarding cache */
+       for (i = 0; i < MFC_LINES; i++)
+               INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
+       INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+       setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+                   (unsigned long)mrt);
+ #ifdef CONFIG_IP_PIMSM
+       mrt->mroute_reg_vif_num = -1;
+ #endif
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+       list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
+ #endif
+       return mrt;
+ }
  
  /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
  
@@@ -200,12 -431,22 +431,22 @@@ failure
  static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
  {
        struct net *net = dev_net(dev);
+       struct mr_table *mrt;
+       struct flowi fl = {
+               .oif            = dev->ifindex,
+               .iif            = skb->skb_iif,
+               .mark           = skb->mark,
+       };
+       int err;
+       err = ipmr_fib_lookup(net, &fl, &mrt);
+       if (err < 0)
+               return err;
  
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
-       ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
-                         IGMPMSG_WHOLEPKT);
+       ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
@@@ -225,12 -466,18 +466,18 @@@ static void reg_vif_setup(struct net_de
        dev->features           |= NETIF_F_NETNS_LOCAL;
  }
  
- static struct net_device *ipmr_reg_vif(struct net *net)
+ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
  {
        struct net_device *dev;
        struct in_device *in_dev;
+       char name[IFNAMSIZ];
  
-       dev = alloc_netdev(0, "pimreg", reg_vif_setup);
+       if (mrt->id == RT_TABLE_DEFAULT)
+               sprintf(name, "pimreg");
+       else
+               sprintf(name, "pimreg%u", mrt->id);
+       dev = alloc_netdev(0, name, reg_vif_setup);
  
        if (dev == NULL)
                return NULL;
@@@ -275,17 -522,17 +522,17 @@@ failure
   *    @notify: Set to 1, if the caller is a notifier_call
   */
  
- static int vif_delete(struct net *net, int vifi, int notify,
+ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
  {
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;
  
-       if (vifi < 0 || vifi >= net->ipv4.maxvif)
+       if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;
  
-       v = &net->ipv4.vif_table[vifi];
+       v = &mrt->vif_table[vifi];
  
        write_lock_bh(&mrt_lock);
        dev = v->dev;
        }
  
  #ifdef CONFIG_IP_PIMSM
-       if (vifi == net->ipv4.mroute_reg_vif_num)
-               net->ipv4.mroute_reg_vif_num = -1;
+       if (vifi == mrt->mroute_reg_vif_num)
+               mrt->mroute_reg_vif_num = -1;
  #endif
  
-       if (vifi+1 == net->ipv4.maxvif) {
+       if (vifi+1 == mrt->maxvif) {
                int tmp;
                for (tmp=vifi-1; tmp>=0; tmp--) {
-                       if (VIF_EXISTS(net, tmp))
+                       if (VIF_EXISTS(mrt, tmp))
                                break;
                }
-               net->ipv4.maxvif = tmp+1;
+               mrt->maxvif = tmp+1;
        }
  
        write_unlock_bh(&mrt_lock);
  
  static inline void ipmr_cache_free(struct mfc_cache *c)
  {
-       release_net(mfc_net(c));
        kmem_cache_free(mrt_cachep, c);
  }
  
     and reporting error to netlink readers.
   */
  
- static void ipmr_destroy_unres(struct mfc_cache *c)
+ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
  {
+       struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        struct nlmsgerr *e;
-       struct net *net = mfc_net(c);
  
-       atomic_dec(&net->ipv4.cache_resolve_queue_len);
+       atomic_dec(&mrt->cache_resolve_queue_len);
  
        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
  }
  
  
- /* Single timer process for all the unresolved queue. */
+ /* Timer process for the unresolved queue. */
  
- static void ipmr_expire_process(unsigned long dummy)
+ static void ipmr_expire_process(unsigned long arg)
  {
+       struct mr_table *mrt = (struct mr_table *)arg;
        unsigned long now;
        unsigned long expires;
-       struct mfc_cache *c, **cp;
+       struct mfc_cache *c, *next;
  
        if (!spin_trylock(&mfc_unres_lock)) {
-               mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
+               mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
                return;
        }
  
-       if (mfc_unres_queue == NULL)
+       if (list_empty(&mrt->mfc_unres_queue))
                goto out;
  
        now = jiffies;
        expires = 10*HZ;
-       cp = &mfc_unres_queue;
  
-       while ((c=*cp) != NULL) {
+       list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)
                                expires = interval;
-                       cp = &c->next;
                        continue;
                }
  
-               *cp = c->next;
-               ipmr_destroy_unres(c);
+               list_del(&c->list);
+               ipmr_destroy_unres(mrt, c);
        }
  
-       if (mfc_unres_queue != NULL)
-               mod_timer(&ipmr_expire_timer, jiffies + expires);
+       if (!list_empty(&mrt->mfc_unres_queue))
+               mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
  
  out:
        spin_unlock(&mfc_unres_lock);
  
  /* Fill oifs list. It is called under write locked mrt_lock. */
  
- static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
+ static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
+                                  unsigned char *ttls)
  {
        int vifi;
-       struct net *net = mfc_net(cache);
  
        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
  
-       for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
-               if (VIF_EXISTS(net, vifi) &&
+       for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+               if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
        }
  }
  
- static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
+ static int vif_add(struct net *net, struct mr_table *mrt,
+                  struct vifctl *vifc, int mrtsock)
  {
        int vifi = vifc->vifc_vifi;
-       struct vif_device *v = &net->ipv4.vif_table[vifi];
+       struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;
  
        /* Is vif busy ? */
-       if (VIF_EXISTS(net, vifi))
+       if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;
  
        switch (vifc->vifc_flags) {
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
-               if (net->ipv4.mroute_reg_vif_num >= 0)
+               if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
-               dev = ipmr_reg_vif(net);
+               dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
        v->dev = dev;
  #ifdef CONFIG_IP_PIMSM
        if (v->flags&VIFF_REGISTER)
-               net->ipv4.mroute_reg_vif_num = vifi;
+               mrt->mroute_reg_vif_num = vifi;
  #endif
-       if (vifi+1 > net->ipv4.maxvif)
-               net->ipv4.maxvif = vifi+1;
+       if (vifi+1 > mrt->maxvif)
+               mrt->maxvif = vifi+1;
        write_unlock_bh(&mrt_lock);
        return 0;
  }
  
- static struct mfc_cache *ipmr_cache_find(struct net *net,
+ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
  {
        int line = MFC_HASH(mcastgrp, origin);
        struct mfc_cache *c;
  
-       for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
-               if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
-                       break;
+       list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+               if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
+                       return c;
        }
-       return c;
+       return NULL;
  }
  
  /*
   *    Allocate a multicast cache entry
   */
- static struct mfc_cache *ipmr_cache_alloc(struct net *net)
+ static struct mfc_cache *ipmr_cache_alloc(void)
  {
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (c == NULL)
                return NULL;
        c->mfc_un.res.minvif = MAXVIFS;
-       mfc_net_set(c, net);
        return c;
  }
  
- static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
+ static struct mfc_cache *ipmr_cache_alloc_unres(void)
  {
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        if (c == NULL)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
-       mfc_net_set(c, net);
        return c;
  }
  
   *    A cache entry has gone into a resolved state from queued
   */
  
- static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
+ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
+                              struct mfc_cache *uc, struct mfc_cache *c)
  {
        struct sk_buff *skb;
        struct nlmsgerr *e;
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
  
-                       if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+                       if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
                                nlh->nlmsg_len = (skb_tail_pointer(skb) -
                                                  (u8 *)nlh);
                        } else {
                                memset(&e->msg, 0, sizeof(e->msg));
                        }
  
-                       rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
-                       ip_mr_forward(skb, c, 0);
+                       ip_mr_forward(net, mrt, skb, c, 0);
        }
  }
  
   *    Called under mrt_lock.
   */
  
- static int ipmr_cache_report(struct net *net,
+ static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
  {
        struct sk_buff *skb;
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
-               msg->im_vif = net->ipv4.mroute_reg_vif_num;
+               msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        }
  
-       if (net->ipv4.mroute_sk == NULL) {
+       if (mrt->mroute_sk == NULL) {
                kfree_skb(skb);
                return -EINVAL;
        }
        /*
         *      Deliver to mrouted
         */
-       ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
+       ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
        if (ret < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
   */
  
  static int
- ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
+ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
  {
+       bool found = false;
        int err;
        struct mfc_cache *c;
        const struct iphdr *iph = ip_hdr(skb);
  
        spin_lock_bh(&mfc_unres_lock);
-       for (c=mfc_unres_queue; c; c=c->next) {
-               if (net_eq(mfc_net(c), net) &&
-                   c->mfc_mcastgrp == iph->daddr &&
-                   c->mfc_origin == iph->saddr)
+       list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
+               if (c->mfc_mcastgrp == iph->daddr &&
+                   c->mfc_origin == iph->saddr) {
+                       found = true;
                        break;
+               }
        }
  
-       if (c == NULL) {
+       if (!found) {
                /*
                 *      Create a new entry if allowable
                 */
  
-               if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
-                   (c = ipmr_cache_alloc_unres(net)) == NULL) {
+               if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
+                   (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);
  
                        kfree_skb(skb);
                /*
                 *      Reflect first query at mrouted.
                 */
-               err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+               err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                           out - Brad Parker
                        return err;
                }
  
-               atomic_inc(&net->ipv4.cache_resolve_queue_len);
-               c->next = mfc_unres_queue;
-               mfc_unres_queue = c;
+               atomic_inc(&mrt->cache_resolve_queue_len);
+               list_add(&c->list, &mrt->mfc_unres_queue);
  
-               mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
+               mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
        }
  
        /*
   *    MFC cache manipulation by user space mroute daemon
   */
  
- static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
+ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
  {
        int line;
-       struct mfc_cache *c, **cp;
+       struct mfc_cache *c, *next;
  
        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
  
-       for (cp = &net->ipv4.mfc_cache_array[line];
-            (c = *cp) != NULL; cp = &c->next) {
+       list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        write_lock_bh(&mrt_lock);
-                       *cp = c->next;
+                       list_del(&c->list);
                        write_unlock_bh(&mrt_lock);
  
                        ipmr_cache_free(c);
        return -ENOENT;
  }
  
- static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
+ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+                       struct mfcctl *mfc, int mrtsock)
  {
+       bool found = false;
        int line;
-       struct mfc_cache *uc, *c, **cp;
+       struct mfc_cache *uc, *c;
+       if (mfc->mfcc_parent >= MAXVIFS)
+               return -ENFILE;
  
        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
  
-       for (cp = &net->ipv4.mfc_cache_array[line];
-            (c = *cp) != NULL; cp = &c->next) {
+       list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
-                   c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
+                   c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+                       found = true;
                        break;
+               }
        }
  
-       if (c != NULL) {
+       if (found) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
-               ipmr_update_thresholds(c, mfc->mfcc_ttls);
+               ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
        if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;
  
-       c = ipmr_cache_alloc(net);
+       c = ipmr_cache_alloc();
        if (c == NULL)
                return -ENOMEM;
  
        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
-       ipmr_update_thresholds(c, mfc->mfcc_ttls);
+       ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;
  
        write_lock_bh(&mrt_lock);
-       c->next = net->ipv4.mfc_cache_array[line];
-       net->ipv4.mfc_cache_array[line] = c;
+       list_add(&c->list, &mrt->mfc_cache_array[line]);
        write_unlock_bh(&mrt_lock);
  
        /*
         *      Check to see if we resolved a queued list. If so we
         *      need to send on the frames and tidy up.
         */
+       found = false;
        spin_lock_bh(&mfc_unres_lock);
-       for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
-            cp = &uc->next) {
-               if (net_eq(mfc_net(uc), net) &&
-                   uc->mfc_origin == c->mfc_origin &&
+       list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
+               if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
-                       *cp = uc->next;
-                       atomic_dec(&net->ipv4.cache_resolve_queue_len);
+                       list_del(&uc->list);
+                       atomic_dec(&mrt->cache_resolve_queue_len);
+                       found = true;
                        break;
                }
        }
-       if (mfc_unres_queue == NULL)
-               del_timer(&ipmr_expire_timer);
+       if (list_empty(&mrt->mfc_unres_queue))
+               del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);
  
-       if (uc) {
-               ipmr_cache_resolve(uc, c);
+       if (found) {
+               ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        return 0;
   *    Close the multicast socket, and clear the vif tables etc
   */
  
- static void mroute_clean_tables(struct net *net)
+ static void mroute_clean_tables(struct mr_table *mrt)
  {
        int i;
        LIST_HEAD(list);
+       struct mfc_cache *c, *next;
  
        /*
         *      Shut down all active vif entries
         */
-       for (i = 0; i < net->ipv4.maxvif; i++) {
-               if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
-                       vif_delete(net, i, 0, &list);
+       for (i = 0; i < mrt->maxvif; i++) {
+               if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+                       vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);
  
        /*
         *      Wipe the cache
         */
-       for (i=0; i<MFC_LINES; i++) {
-               struct mfc_cache *c, **cp;
-               cp = &net->ipv4.mfc_cache_array[i];
-               while ((c = *cp) != NULL) {
-                       if (c->mfc_flags&MFC_STATIC) {
-                               cp = &c->next;
+       for (i = 0; i < MFC_LINES; i++) {
+               list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+                       if (c->mfc_flags&MFC_STATIC)
                                continue;
-                       }
                        write_lock_bh(&mrt_lock);
-                       *cp = c->next;
+                       list_del(&c->list);
                        write_unlock_bh(&mrt_lock);
  
                        ipmr_cache_free(c);
                }
        }
  
-       if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
-               struct mfc_cache *c, **cp;
+       if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
-               cp = &mfc_unres_queue;
-               while ((c = *cp) != NULL) {
-                       if (!net_eq(mfc_net(c), net)) {
-                               cp = &c->next;
-                               continue;
-                       }
-                       *cp = c->next;
-                       ipmr_destroy_unres(c);
+               list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
+                       list_del(&c->list);
+                       ipmr_destroy_unres(mrt, c);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
  static void mrtsock_destruct(struct sock *sk)
  {
        struct net *net = sock_net(sk);
+       struct mr_table *mrt;
  
        rtnl_lock();
-       if (sk == net->ipv4.mroute_sk) {
-               IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
+       ipmr_for_each_table(mrt, net) {
+               if (sk == mrt->mroute_sk) {
+                       IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
  
-               write_lock_bh(&mrt_lock);
-               net->ipv4.mroute_sk = NULL;
-               write_unlock_bh(&mrt_lock);
+                       write_lock_bh(&mrt_lock);
+                       mrt->mroute_sk = NULL;
+                       write_unlock_bh(&mrt_lock);
  
-               mroute_clean_tables(net);
+                       mroute_clean_tables(mrt);
+               }
        }
        rtnl_unlock();
  }
@@@ -952,9 -1192,14 +1192,14 @@@ int ip_mroute_setsockopt(struct sock *s
        struct vifctl vif;
        struct mfcctl mfc;
        struct net *net = sock_net(sk);
+       struct mr_table *mrt;
+       mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return -ENOENT;
  
        if (optname != MRT_INIT) {
-               if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
+               if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
                        return -EACCES;
        }
  
                        return -ENOPROTOOPT;
  
                rtnl_lock();
-               if (net->ipv4.mroute_sk) {
+               if (mrt->mroute_sk) {
                        rtnl_unlock();
                        return -EADDRINUSE;
                }
                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        write_lock_bh(&mrt_lock);
-                       net->ipv4.mroute_sk = sk;
+                       mrt->mroute_sk = sk;
                        write_unlock_bh(&mrt_lock);
  
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                rtnl_unlock();
                return ret;
        case MRT_DONE:
-               if (sk != net->ipv4.mroute_sk)
+               if (sk != mrt->mroute_sk)
                        return -EACCES;
                return ip_ra_control(sk, 0, NULL);
        case MRT_ADD_VIF:
                        return -ENFILE;
                rtnl_lock();
                if (optname == MRT_ADD_VIF) {
-                       ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
+                       ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
                } else {
-                       ret = vif_delete(net, vif.vifc_vifi, 0, NULL);
+                       ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                }
                rtnl_unlock();
                return ret;
                        return -EFAULT;
                rtnl_lock();
                if (optname == MRT_DEL_MFC)
-                       ret = ipmr_mfc_delete(net, &mfc);
+                       ret = ipmr_mfc_delete(mrt, &mfc);
                else
-                       ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
+                       ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
                rtnl_unlock();
                return ret;
                /*
                int v;
                if (get_user(v,(int __user *)optval))
                        return -EFAULT;
-               net->ipv4.mroute_do_assert = (v) ? 1 : 0;
+               mrt->mroute_do_assert = (v) ? 1 : 0;
                return 0;
        }
  #ifdef CONFIG_IP_PIMSM
  
                rtnl_lock();
                ret = 0;
-               if (v != net->ipv4.mroute_do_pim) {
-                       net->ipv4.mroute_do_pim = v;
-                       net->ipv4.mroute_do_assert = v;
+               if (v != mrt->mroute_do_pim) {
+                       mrt->mroute_do_pim = v;
+                       mrt->mroute_do_assert = v;
                }
                rtnl_unlock();
                return ret;
        }
+ #endif
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+       case MRT_TABLE:
+       {
+               u32 v;
+               if (optlen != sizeof(u32))
+                       return -EINVAL;
+               if (get_user(v, (u32 __user *)optval))
+                       return -EFAULT;
+               if (sk == mrt->mroute_sk)
+                       return -EBUSY;
+               rtnl_lock();
+               ret = 0;
+               if (!ipmr_new_table(net, v))
+                       ret = -ENOMEM;
+               raw_sk(sk)->ipmr_table = v;
+               rtnl_unlock();
+               return ret;
+       }
  #endif
        /*
         *      Spurious command, or MRT_VERSION which you cannot
@@@ -1068,6 -1334,11 +1334,11 @@@ int ip_mroute_getsockopt(struct sock *s
        int olr;
        int val;
        struct net *net = sock_net(sk);
+       struct mr_table *mrt;
+       mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return -ENOENT;
  
        if (optname != MRT_VERSION &&
  #ifdef CONFIG_IP_PIMSM
                val = 0x0305;
  #ifdef CONFIG_IP_PIMSM
        else if (optname == MRT_PIM)
-               val = net->ipv4.mroute_do_pim;
+               val = mrt->mroute_do_pim;
  #endif
        else
-               val = net->ipv4.mroute_do_assert;
+               val = mrt->mroute_do_assert;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
@@@ -1109,16 -1380,21 +1380,21 @@@ int ipmr_ioctl(struct sock *sk, int cmd
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
+       struct mr_table *mrt;
+       mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return -ENOENT;
  
        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
-               if (vr.vifi >= net->ipv4.maxvif)
+               if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
-               vif = &net->ipv4.vif_table[vr.vifi];
-               if (VIF_EXISTS(net, vr.vifi)) {
+               vif = &mrt->vif_table[vr.vifi];
+               if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        return -EFAULT;
  
                read_lock(&mrt_lock);
-               c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
+               c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
@@@ -1159,16 -1435,20 +1435,20 @@@ static int ipmr_device_event(struct not
  {
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
+       struct mr_table *mrt;
        struct vif_device *v;
        int ct;
        LIST_HEAD(list);
  
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
-       v = &net->ipv4.vif_table[0];
-       for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
-               if (v->dev == dev)
-                       vif_delete(net, ct, 1, &list);
+       ipmr_for_each_table(mrt, net) {
+               v = &mrt->vif_table[0];
+               for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+                       if (v->dev == dev)
+                               vif_delete(mrt, ct, 1, &list);
+               }
        }
        unregister_netdevice_many(&list);
        return NOTIFY_DONE;
@@@ -1227,11 -1507,11 +1507,11 @@@ static inline int ipmr_forward_finish(s
   *    Processing handlers for ipmr_forward
   */
  
- static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
+ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+                           struct sk_buff *skb, struct mfc_cache *c, int vifi)
  {
-       struct net *net = mfc_net(c);
        const struct iphdr *iph = ip_hdr(skb);
-       struct vif_device *vif = &net->ipv4.vif_table[vifi];
+       struct vif_device *vif = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct rtable *rt;
        int    encap = 0;
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
-               ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
+               ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
                goto out_free;
        }
  #endif
         * not mrouter) cannot join to more than one interface - it will
         * result in receiving multiple packets.
         */
 -      NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
 +      NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
                ipmr_forward_finish);
        return;
  
@@@ -1328,12 -1608,12 +1608,12 @@@ out_free
        return;
  }
  
- static int ipmr_find_vif(struct net_device *dev)
+ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
  {
-       struct net *net = dev_net(dev);
        int ct;
-       for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
-               if (net->ipv4.vif_table[ct].dev == dev)
+       for (ct = mrt->maxvif-1; ct >= 0; ct--) {
+               if (mrt->vif_table[ct].dev == dev)
                        break;
        }
        return ct;
  
  /* "local" means that we should preserve one skb (for local delivery) */
  
- static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
+ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+                        struct sk_buff *skb, struct mfc_cache *cache,
+                        int local)
  {
        int psend = -1;
        int vif, ct;
-       struct net *net = mfc_net(cache);
  
        vif = cache->mfc_parent;
        cache->mfc_un.res.pkt++;
        /*
         * Wrong interface: drop packet and (maybe) send PIM assert.
         */
-       if (net->ipv4.vif_table[vif].dev != skb->dev) {
+       if (mrt->vif_table[vif].dev != skb->dev) {
                int true_vifi;
  
                if (skb_rtable(skb)->fl.iif == 0) {
                }
  
                cache->mfc_un.res.wrong_if++;
-               true_vifi = ipmr_find_vif(skb->dev);
+               true_vifi = ipmr_find_vif(mrt, skb->dev);
  
-               if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
+               if (true_vifi >= 0 && mrt->mroute_do_assert &&
                    /* pimsm uses asserts, when switching from RPT to SPT,
                       so that we cannot check that packet arrived on an oif.
                       It is bad, but otherwise we would need to move pretty
                       large chunk of pimd to kernel. Ough... --ANK
                     */
-                   (net->ipv4.mroute_do_pim ||
+                   (mrt->mroute_do_pim ||
                     cache->mfc_un.res.ttls[true_vifi] < 255) &&
                    time_after(jiffies,
                               cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
                        cache->mfc_un.res.last_assert = jiffies;
-                       ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
+                       ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
                }
                goto dont_forward;
        }
  
-       net->ipv4.vif_table[vif].pkt_in++;
-       net->ipv4.vif_table[vif].bytes_in += skb->len;
+       mrt->vif_table[vif].pkt_in++;
+       mrt->vif_table[vif].bytes_in += skb->len;
  
        /*
         *      Forward the frame
                        if (psend != -1) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
-                                       ipmr_queue_xmit(skb2, cache, psend);
+                                       ipmr_queue_xmit(net, mrt, skb2, cache,
+                                                       psend);
                        }
                        psend = ct;
                }
                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (skb2)
-                               ipmr_queue_xmit(skb2, cache, psend);
+                               ipmr_queue_xmit(net, mrt, skb2, cache, psend);
                } else {
-                       ipmr_queue_xmit(skb, cache, psend);
+                       ipmr_queue_xmit(net, mrt, skb, cache, psend);
                        return 0;
                }
        }
@@@ -1434,6 -1716,8 +1716,8 @@@ int ip_mr_input(struct sk_buff *skb
        struct mfc_cache *cache;
        struct net *net = dev_net(skb->dev);
        int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
+       struct mr_table *mrt;
+       int err;
  
        /* Packet is looped back after forward, it should not be
           forwarded second time, but still can be delivered locally.
        if (IPCB(skb)->flags&IPSKB_FORWARDED)
                goto dont_forward;
  
+       err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
+       if (err < 0)
+               return err;
        if (!local) {
                    if (IPCB(skb)->opt.router_alert) {
                            if (ip_call_ra_chain(skb))
                               that we can forward NO IGMP messages.
                             */
                            read_lock(&mrt_lock);
-                           if (net->ipv4.mroute_sk) {
+                           if (mrt->mroute_sk) {
                                    nf_reset(skb);
-                                   raw_rcv(net->ipv4.mroute_sk, skb);
+                                   raw_rcv(mrt->mroute_sk, skb);
                                    read_unlock(&mrt_lock);
                                    return 0;
                            }
        }
  
        read_lock(&mrt_lock);
-       cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+       cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
  
        /*
         *      No usable cache entry
                        skb = skb2;
                }
  
-               vif = ipmr_find_vif(skb->dev);
+               vif = ipmr_find_vif(mrt, skb->dev);
                if (vif >= 0) {
-                       int err = ipmr_cache_unresolved(net, vif, skb);
+                       int err = ipmr_cache_unresolved(mrt, vif, skb);
                        read_unlock(&mrt_lock);
  
                        return err;
                return -ENODEV;
        }
  
-       ip_mr_forward(skb, cache, local);
+       ip_mr_forward(net, mrt, skb, cache, local);
  
        read_unlock(&mrt_lock);
  
@@@ -1511,11 -1799,11 +1799,11 @@@ dont_forward
  }
  
  #ifdef CONFIG_IP_PIMSM
- static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
+ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
+                    unsigned int pimlen)
  {
        struct net_device *reg_dev = NULL;
        struct iphdr *encap;
-       struct net *net = dev_net(skb->dev);
  
        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
        /*
                return 1;
  
        read_lock(&mrt_lock);
-       if (net->ipv4.mroute_reg_vif_num >= 0)
-               reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
+       if (mrt->mroute_reg_vif_num >= 0)
+               reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
        if (reg_dev)
                dev_hold(reg_dev);
        read_unlock(&mrt_lock);
@@@ -1566,17 -1854,21 +1854,21 @@@ int pim_rcv_v1(struct sk_buff * skb
  {
        struct igmphdr *pim;
        struct net *net = dev_net(skb->dev);
+       struct mr_table *mrt;
  
        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
                goto drop;
  
        pim = igmp_hdr(skb);
  
-       if (!net->ipv4.mroute_do_pim ||
+       if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+               goto drop;
+       if (!mrt->mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
                goto drop;
  
-       if (__pim_rcv(skb, sizeof(*pim))) {
+       if (__pim_rcv(mrt, skb, sizeof(*pim))) {
  drop:
                kfree_skb(skb);
        }
  static int pim_rcv(struct sk_buff * skb)
  {
        struct pimreghdr *pim;
+       struct net *net = dev_net(skb->dev);
+       struct mr_table *mrt;
  
        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
                goto drop;
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;
  
-       if (__pim_rcv(skb, sizeof(*pim))) {
+       if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+               goto drop;
+       if (__pim_rcv(mrt, skb, sizeof(*pim))) {
  drop:
                kfree_skb(skb);
        }
  #endif
  
  static int
- ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+ ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
+                struct rtmsg *rtm)
  {
        int ct;
        struct rtnexthop *nhp;
-       struct net *net = mfc_net(c);
-       struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
        u8 *b = skb_tail_pointer(skb);
        struct rtattr *mp_head;
  
-       if (dev)
-               RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+       /* If cache is unresolved, don't try to parse IIF and OIF */
+       if (c->mfc_parent > MAXVIFS)
+               return -ENOENT;
+       if (VIF_EXISTS(mrt, c->mfc_parent))
+               RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
  
        mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
  
        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-               if (c->mfc_un.res.ttls[ct] < 255) {
+               if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
                        if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                                goto rtattr_failure;
                        nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
                        nhp->rtnh_flags = 0;
                        nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-                       nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
+                       nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
                        nhp->rtnh_len = sizeof(*nhp);
                }
        }
@@@ -1647,11 -1947,16 +1947,16 @@@ int ipmr_get_route(struct net *net
                   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
  {
        int err;
+       struct mr_table *mrt;
        struct mfc_cache *cache;
        struct rtable *rt = skb_rtable(skb);
  
+       mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return -ENOENT;
        read_lock(&mrt_lock);
-       cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
+       cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
  
        if (cache == NULL) {
                struct sk_buff *skb2;
                }
  
                dev = skb->dev;
-               if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
+               if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
                        read_unlock(&mrt_lock);
                        return -ENODEV;
                }
                iph->saddr = rt->rt_src;
                iph->daddr = rt->rt_dst;
                iph->version = 0;
-               err = ipmr_cache_unresolved(net, vif, skb2);
+               err = ipmr_cache_unresolved(mrt, vif, skb2);
                read_unlock(&mrt_lock);
                return err;
        }
  
        if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
                cache->mfc_flags |= MFC_NOTIFY;
-       err = ipmr_fill_mroute(skb, cache, rtm);
+       err = ipmr_fill_mroute(mrt, skb, cache, rtm);
        read_unlock(&mrt_lock);
        return err;
  }
   */
  struct ipmr_vif_iter {
        struct seq_net_private p;
+       struct mr_table *mrt;
        int ct;
  };
  
@@@ -1707,11 -2013,13 +2013,13 @@@ static struct vif_device *ipmr_vif_seq_
                                           struct ipmr_vif_iter *iter,
                                           loff_t pos)
  {
-       for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
-               if (!VIF_EXISTS(net, iter->ct))
+       struct mr_table *mrt = iter->mrt;
+       for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+               if (!VIF_EXISTS(mrt, iter->ct))
                        continue;
                if (pos-- == 0)
-                       return &net->ipv4.vif_table[iter->ct];
+                       return &mrt->vif_table[iter->ct];
        }
        return NULL;
  }
  static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(mrt_lock)
  {
+       struct ipmr_vif_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
+       struct mr_table *mrt;
+       mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return ERR_PTR(-ENOENT);
+       iter->mrt = mrt;
  
        read_lock(&mrt_lock);
        return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
@@@ -1730,15 -2046,16 +2046,16 @@@ static void *ipmr_vif_seq_next(struct s
  {
        struct ipmr_vif_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
+       struct mr_table *mrt = iter->mrt;
  
        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ipmr_vif_seq_idx(net, iter, 0);
  
-       while (++iter->ct < net->ipv4.maxvif) {
-               if (!VIF_EXISTS(net, iter->ct))
+       while (++iter->ct < mrt->maxvif) {
+               if (!VIF_EXISTS(mrt, iter->ct))
                        continue;
-               return &net->ipv4.vif_table[iter->ct];
+               return &mrt->vif_table[iter->ct];
        }
        return NULL;
  }
@@@ -1751,7 -2068,8 +2068,8 @@@ static void ipmr_vif_seq_stop(struct se
  
  static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
  {
-       struct net *net = seq_file_net(seq);
+       struct ipmr_vif_iter *iter = seq->private;
+       struct mr_table *mrt = iter->mrt;
  
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
  
                seq_printf(seq,
                           "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
-                          vif - net->ipv4.vif_table,
+                          vif - mrt->vif_table,
                           name, vif->bytes_in, vif->pkt_in,
                           vif->bytes_out, vif->pkt_out,
                           vif->flags, vif->local, vif->remote);
@@@ -1793,7 -2111,8 +2111,8 @@@ static const struct file_operations ipm
  
  struct ipmr_mfc_iter {
        struct seq_net_private p;
-       struct mfc_cache **cache;
+       struct mr_table *mrt;
+       struct list_head *cache;
        int ct;
  };
  
  static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
                                          struct ipmr_mfc_iter *it, loff_t pos)
  {
+       struct mr_table *mrt = it->mrt;
        struct mfc_cache *mfc;
  
-       it->cache = net->ipv4.mfc_cache_array;
        read_lock(&mrt_lock);
-       for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
-               for (mfc = net->ipv4.mfc_cache_array[it->ct];
-                    mfc; mfc = mfc->next)
+       for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
+               it->cache = &mrt->mfc_cache_array[it->ct];
+               list_for_each_entry(mfc, it->cache, list)
                        if (pos-- == 0)
                                return mfc;
+       }
        read_unlock(&mrt_lock);
  
-       it->cache = &mfc_unres_queue;
        spin_lock_bh(&mfc_unres_lock);
-       for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
-               if (net_eq(mfc_net(mfc), net) &&
-                   pos-- == 0)
+       it->cache = &mrt->mfc_unres_queue;
+       list_for_each_entry(mfc, it->cache, list)
+               if (pos-- == 0)
                        return mfc;
        spin_unlock_bh(&mfc_unres_lock);
  
@@@ -1829,7 -2148,13 +2148,13 @@@ static void *ipmr_mfc_seq_start(struct 
  {
        struct ipmr_mfc_iter *it = seq->private;
        struct net *net = seq_file_net(seq);
+       struct mr_table *mrt;
+       mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return ERR_PTR(-ENOENT);
  
+       it->mrt = mrt;
        it->cache = NULL;
        it->ct = 0;
        return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
@@@ -1841,37 -2166,36 +2166,36 @@@ static void *ipmr_mfc_seq_next(struct s
        struct mfc_cache *mfc = v;
        struct ipmr_mfc_iter *it = seq->private;
        struct net *net = seq_file_net(seq);
+       struct mr_table *mrt = it->mrt;
  
        ++*pos;
  
        if (v == SEQ_START_TOKEN)
                return ipmr_mfc_seq_idx(net, seq->private, 0);
  
-       if (mfc->next)
-               return mfc->next;
+       if (mfc->list.next != it->cache)
+               return list_entry(mfc->list.next, struct mfc_cache, list);
  
-       if (it->cache == &mfc_unres_queue)
+       if (it->cache == &mrt->mfc_unres_queue)
                goto end_of_list;
  
-       BUG_ON(it->cache != net->ipv4.mfc_cache_array);
+       BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
  
        while (++it->ct < MFC_LINES) {
-               mfc = net->ipv4.mfc_cache_array[it->ct];
-               if (mfc)
-                       return mfc;
+               it->cache = &mrt->mfc_cache_array[it->ct];
+               if (list_empty(it->cache))
+                       continue;
+               return list_first_entry(it->cache, struct mfc_cache, list);
        }
  
        /* exhausted cache_array, show unresolved */
        read_unlock(&mrt_lock);
-       it->cache = &mfc_unres_queue;
+       it->cache = &mrt->mfc_unres_queue;
        it->ct = 0;
  
        spin_lock_bh(&mfc_unres_lock);
-       mfc = mfc_unres_queue;
-       while (mfc && !net_eq(mfc_net(mfc), net))
-               mfc = mfc->next;
-       if (mfc)
-               return mfc;
+       if (!list_empty(it->cache))
+               return list_first_entry(it->cache, struct mfc_cache, list);
  
   end_of_list:
        spin_unlock_bh(&mfc_unres_lock);
  static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
  {
        struct ipmr_mfc_iter *it = seq->private;
-       struct net *net = seq_file_net(seq);
+       struct mr_table *mrt = it->mrt;
  
-       if (it->cache == &mfc_unres_queue)
+       if (it->cache == &mrt->mfc_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
-       else if (it->cache == net->ipv4.mfc_cache_array)
+       else if (it->cache == &mrt->mfc_cache_array[it->ct])
                read_unlock(&mrt_lock);
  }
  
  static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
  {
        int n;
-       struct net *net = seq_file_net(seq);
  
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
        } else {
                const struct mfc_cache *mfc = v;
                const struct ipmr_mfc_iter *it = seq->private;
+               const struct mr_table *mrt = it->mrt;
  
                seq_printf(seq, "%08lX %08lX %-3hd",
                           (unsigned long) mfc->mfc_mcastgrp,
                           (unsigned long) mfc->mfc_origin,
                           mfc->mfc_parent);
  
-               if (it->cache != &mfc_unres_queue) {
+               if (it->cache != &mrt->mfc_unres_queue) {
                        seq_printf(seq, " %8lu %8lu %8lu",
                                   mfc->mfc_un.res.pkt,
                                   mfc->mfc_un.res.bytes,
                                   mfc->mfc_un.res.wrong_if);
                        for (n = mfc->mfc_un.res.minvif;
                             n < mfc->mfc_un.res.maxvif; n++ ) {
-                               if (VIF_EXISTS(net, n) &&
+                               if (VIF_EXISTS(mrt, n) &&
                                    mfc->mfc_un.res.ttls[n] < 255)
                                        seq_printf(seq,
                                           " %2d:%-3d",
@@@ -1967,27 -2291,11 +2291,11 @@@ static const struct net_protocol pim_pr
   */
  static int __net_init ipmr_net_init(struct net *net)
  {
-       int err = 0;
+       int err;
  
-       net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
-                                     GFP_KERNEL);
-       if (!net->ipv4.vif_table) {
-               err = -ENOMEM;
+       err = ipmr_rules_init(net);
+       if (err < 0)
                goto fail;
-       }
-       /* Forwarding cache */
-       net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
-                                           sizeof(struct mfc_cache *),
-                                           GFP_KERNEL);
-       if (!net->ipv4.mfc_cache_array) {
-               err = -ENOMEM;
-               goto fail_mfc_cache;
-       }
- #ifdef CONFIG_IP_PIMSM
-       net->ipv4.mroute_reg_vif_num = -1;
- #endif
  
  #ifdef CONFIG_PROC_FS
        err = -ENOMEM;
  proc_cache_fail:
        proc_net_remove(net, "ip_mr_vif");
  proc_vif_fail:
-       kfree(net->ipv4.mfc_cache_array);
+       ipmr_rules_exit(net);
  #endif
- fail_mfc_cache:
-       kfree(net->ipv4.vif_table);
  fail:
        return err;
  }
@@@ -2016,8 -2322,7 +2322,7 @@@ static void __net_exit ipmr_net_exit(st
        proc_net_remove(net, "ip_mr_cache");
        proc_net_remove(net, "ip_mr_vif");
  #endif
-       kfree(net->ipv4.mfc_cache_array);
-       kfree(net->ipv4.vif_table);
+       ipmr_rules_exit(net);
  }
  
  static struct pernet_operations ipmr_net_ops = {
@@@ -2040,7 -2345,6 +2345,6 @@@ int __init ip_mr_init(void
        if (err)
                goto reg_pernet_fail;
  
-       setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
        err = register_netdevice_notifier(&ip_mr_notifier);
        if (err)
                goto reg_notif_fail;
@@@ -2058,7 -2362,6 +2362,6 @@@ add_proto_fail
        unregister_netdevice_notifier(&ip_mr_notifier);
  #endif
  reg_notif_fail:
-       del_timer(&ipmr_expire_timer);
        unregister_pernet_subsys(&ipmr_net_ops);
  reg_pernet_fail:
        kmem_cache_destroy(mrt_cachep);
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/security.h>
  #include <linux/net.h>
  #include <linux/mutex.h>
+ #include <linux/slab.h>
  #include <net/net_namespace.h>
  #include <net/sock.h>
  #include <net/route.h>
@@@ -160,7 -161,8 +161,7 @@@ ipq_build_packet_message(struct nf_queu
                break;
  
        case IPQ_COPY_PACKET:
 -              if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
 -                   entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
 +              if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
                    (*errp = skb_checksum_help(entry->skb))) {
                        read_unlock_bh(&queue_lock);
                        return NULL;
@@@ -9,12 -9,12 +9,13 @@@
   * published by the Free Software Foundation.
   *
   */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/proc_fs.h>
  #include <linux/jhash.h>
  #include <linux/bitops.h>
  #include <linux/skbuff.h>
+ #include <linux/slab.h>
  #include <linux/ip.h>
  #include <linux/tcp.h>
  #include <linux/udp.h>
@@@ -88,7 -88,7 +89,7 @@@ clusterip_config_entry_put(struct clust
                list_del(&c->list);
                write_unlock_bh(&clusterip_lock);
  
-               dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0);
+               dev_mc_del(c->dev, c->clustermac);
                dev_put(c->dev);
  
                /* In case anyone still accesses the file, the open/close
@@@ -239,7 -239,8 +240,7 @@@ clusterip_hashfn(const struct sk_buff *
                break;
        default:
                if (net_ratelimit())
 -                      printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n",
 -                              iph->protocol);
 +                      pr_info("unknown protocol %u\n", iph->protocol);
                sport = dport = 0;
        }
  
                hashval = 0;
                /* This cannot happen, unless the check function wasn't called
                 * at rule load time */
 -              printk("CLUSTERIP: unknown mode `%u'\n", config->hash_mode);
 +              pr_info("unknown mode %u\n", config->hash_mode);
                BUG();
                break;
        }
@@@ -294,7 -295,7 +295,7 @@@ clusterip_tg(struct sk_buff *skb, cons
  
        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL) {
 -              printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
 +              pr_info("no conntrack!\n");
                        /* FIXME: need to drop invalid ones, since replies
                         * to outgoing connections of other nodes will be
                         * marked as INVALID */
        return XT_CONTINUE;
  }
  
 -static bool clusterip_tg_check(const struct xt_tgchk_param *par)
 +static int clusterip_tg_check(const struct xt_tgchk_param *par)
  {
        struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
 -
        struct clusterip_config *config;
 +      int ret;
  
        if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
            cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
            cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
 -              printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n",
 -                      cipinfo->hash_mode);
 -              return false;
 +              pr_info("unknown mode %u\n", cipinfo->hash_mode);
 +              return -EINVAL;
  
        }
        if (e->ip.dmsk.s_addr != htonl(0xffffffff) ||
            e->ip.dst.s_addr == 0) {
 -              printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n");
 -              return false;
 +              pr_info("Please specify destination IP\n");
 +              return -EINVAL;
        }
  
        /* FIXME: further sanity checks */
        config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
        if (!config) {
                if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
 -                      printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr);
 -                      return false;
 +                      pr_info("no config found for %pI4, need 'new'\n",
 +                              &e->ip.dst.s_addr);
 +                      return -EINVAL;
                } else {
                        struct net_device *dev;
  
                        if (e->ip.iniface[0] == '\0') {
 -                              printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n");
 -                              return false;
 +                              pr_info("Please specify an interface name\n");
 +                              return -EINVAL;
                        }
  
                        dev = dev_get_by_name(&init_net, e->ip.iniface);
                        if (!dev) {
 -                              printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface);
 -                              return false;
 +                              pr_info("no such interface %s\n",
 +                                      e->ip.iniface);
 +                              return -ENOENT;
                        }
  
                        config = clusterip_config_init(cipinfo,
                                                        e->ip.dst.s_addr, dev);
                        if (!config) {
 -                              printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n");
 +                              pr_info("cannot allocate config\n");
                                dev_put(dev);
 -                              return false;
 +                              return -ENOMEM;
                        }
-                       dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0);
+                       dev_mc_add(config->dev, config->clustermac);
                }
        }
        cipinfo->config = config;
  
 -      if (nf_ct_l3proto_try_module_get(par->target->family) < 0) {
 -              printk(KERN_WARNING "can't load conntrack support for "
 -                                  "proto=%u\n", par->target->family);
 -              return false;
 -      }
 -
 -      return true;
 +      ret = nf_ct_l3proto_try_module_get(par->family);
 +      if (ret < 0)
 +              pr_info("cannot load conntrack support for proto=%u\n",
 +                      par->family);
 +      return ret;
  }
  
  /* drop reference count of cluster config when rule is deleted */
@@@ -420,7 -422,7 +421,7 @@@ static void clusterip_tg_destroy(const 
  
        clusterip_config_put(cipinfo->config);
  
 -      nf_ct_l3proto_module_put(par->target->family);
 +      nf_ct_l3proto_module_put(par->family);
  }
  
  #ifdef CONFIG_COMPAT
@@@ -477,8 -479,8 +478,8 @@@ static void arp_print(struct arp_payloa
        }
        hbuffer[--k]='\0';
  
 -      printk("src %pI4@%s, dst %pI4\n",
 -              &payload->src_ip, hbuffer, &payload->dst_ip);
 +      pr_debug("src %pI4@%s, dst %pI4\n",
 +               &payload->src_ip, hbuffer, &payload->dst_ip);
  }
  #endif
  
@@@ -517,7 -519,7 +518,7 @@@ arp_mangle(unsigned int hook
         * this wouldn't work, since we didn't subscribe the mcast group on
         * other interfaces */
        if (c->dev != out) {
 -              pr_debug("CLUSTERIP: not mangling arp reply on different "
 +              pr_debug("not mangling arp reply on different "
                         "interface: cip'%s'-skb'%s'\n",
                         c->dev->name, out->name);
                clusterip_config_put(c);
        memcpy(payload->src_hw, c->clustermac, arp->ar_hln);
  
  #ifdef DEBUG
 -      pr_debug(KERN_DEBUG "CLUSTERIP mangled arp reply: ");
 +      pr_debug("mangled arp reply: ");
        arp_print(payload);
  #endif
  
@@@ -599,8 -601,7 +600,8 @@@ static void *clusterip_seq_next(struct 
  
  static void clusterip_seq_stop(struct seq_file *s, void *v)
  {
 -      kfree(v);
 +      if (!IS_ERR(v))
 +              kfree(v);
  }
  
  static int clusterip_seq_show(struct seq_file *s, void *v)
@@@ -705,13 -706,13 +706,13 @@@ static int __init clusterip_tg_init(voi
  #ifdef CONFIG_PROC_FS
        clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net);
        if (!clusterip_procdir) {
 -              printk(KERN_ERR "CLUSTERIP: Unable to proc dir entry\n");
 +              pr_err("Unable to proc dir entry\n");
                ret = -ENOMEM;
                goto cleanup_hook;
        }
  #endif /* CONFIG_PROC_FS */
  
 -      printk(KERN_NOTICE "ClusterIP Version %s loaded successfully\n",
 +      pr_info("ClusterIP Version %s loaded successfully\n",
                CLUSTERIP_VERSION);
        return 0;
  
@@@ -726,7 -727,8 +727,7 @@@ cleanup_target
  
  static void __exit clusterip_tg_exit(void)
  {
 -      printk(KERN_NOTICE "ClusterIP Version %s unloading\n",
 -              CLUSTERIP_VERSION);
 +      pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
  #ifdef CONFIG_PROC_FS
        remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
  #endif
@@@ -9,9 -9,10 +9,10 @@@
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/skbuff.h>
+ #include <linux/slab.h>
  #include <linux/ip.h>
  #include <linux/udp.h>
  #include <linux/icmp.h>
@@@ -139,6 -140,9 +140,6 @@@ reject_tg(struct sk_buff *skb, const st
  {
        const struct ipt_reject_info *reject = par->targinfo;
  
 -      /* WARNING: This code causes reentry within iptables.
 -         This means that the iptables jump stack is now crap.  We
 -         must return an absolute verdict. --RR */
        switch (reject->with) {
        case IPT_ICMP_NET_UNREACHABLE:
                send_unreach(skb, ICMP_NET_UNREACH);
        return NF_DROP;
  }
  
 -static bool reject_tg_check(const struct xt_tgchk_param *par)
 +static int reject_tg_check(const struct xt_tgchk_param *par)
  {
        const struct ipt_reject_info *rejinfo = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
  
        if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
 -              printk("ipt_REJECT: ECHOREPLY no longer supported.\n");
 -              return false;
 +              pr_info("ECHOREPLY no longer supported.\n");
 +              return -EINVAL;
        } else if (rejinfo->with == IPT_TCP_RESET) {
                /* Must specify that it's a TCP packet */
                if (e->ip.proto != IPPROTO_TCP ||
                    (e->ip.invflags & XT_INV_PROTO)) {
 -                      printk("ipt_REJECT: TCP_RESET invalid for non-tcp\n");
 -                      return false;
 +                      pr_info("TCP_RESET invalid for non-tcp\n");
 +                      return -EINVAL;
                }
        }
 -      return true;
 +      return 0;
  }
  
  static struct xt_target reject_tg_reg __read_mostly = {
   *   Specify, after how many hundredths of a second the queue should be
   *   flushed even if it is not full yet.
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/socket.h>
+ #include <linux/slab.h>
  #include <linux/skbuff.h>
  #include <linux/kernel.h>
  #include <linux/timer.h>
@@@ -56,6 -57,8 +57,6 @@@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, N
  #define ULOG_NL_EVENT         111             /* Harald's favorite number */
  #define ULOG_MAXNLGROUPS      32              /* number of nlgroups */
  
 -#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
 -
  static unsigned int nlbufsiz = NLMSG_GOODSIZE;
  module_param(nlbufsiz, uint, 0400);
  MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
@@@ -88,12 -91,12 +89,12 @@@ static void ulog_send(unsigned int nlgr
        ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
  
        if (timer_pending(&ub->timer)) {
 -              pr_debug("ipt_ULOG: ulog_send: timer was pending, deleting\n");
 +              pr_debug("ulog_send: timer was pending, deleting\n");
                del_timer(&ub->timer);
        }
  
        if (!ub->skb) {
 -              pr_debug("ipt_ULOG: ulog_send: nothing to send\n");
 +              pr_debug("ulog_send: nothing to send\n");
                return;
        }
  
                ub->lastnlh->nlmsg_type = NLMSG_DONE;
  
        NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
 -      pr_debug("ipt_ULOG: throwing %d packets to netlink group %u\n",
 +      pr_debug("throwing %d packets to netlink group %u\n",
                 ub->qlen, nlgroupnum + 1);
        netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC);
  
  /* timer function to flush queue in flushtimeout time */
  static void ulog_timer(unsigned long data)
  {
 -      pr_debug("ipt_ULOG: timer function called, calling ulog_send\n");
 +      pr_debug("timer function called, calling ulog_send\n");
  
        /* lock to protect against somebody modifying our structure
         * from ipt_ulog_target at the same time */
@@@ -136,7 -139,7 +137,7 @@@ static struct sk_buff *ulog_alloc_skb(u
        n = max(size, nlbufsiz);
        skb = alloc_skb(n, GFP_ATOMIC);
        if (!skb) {
 -              PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
 +              pr_debug("cannot alloc whole buffer %ub!\n", n);
  
                if (n > size) {
                        /* try to allocate only as much as we need for
  
                        skb = alloc_skb(size, GFP_ATOMIC);
                        if (!skb)
 -                              PRINTR("ipt_ULOG: can't even allocate %ub\n",
 -                                     size);
 +                              pr_debug("cannot even allocate %ub\n", size);
                }
        }
  
@@@ -195,7 -199,8 +196,7 @@@ static void ipt_ulog_packet(unsigned in
                        goto alloc_failure;
        }
  
 -      pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen,
 -               loginfo->qthreshold);
 +      pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
  
        /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
        nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
        return;
  
  nlmsg_failure:
 -      PRINTR("ipt_ULOG: error during NLMSG_PUT\n");
 -
 +      pr_debug("error during NLMSG_PUT\n");
  alloc_failure:
 -      PRINTR("ipt_ULOG: Error building netlink message\n");
 -
 +      pr_debug("Error building netlink message\n");
        spin_unlock_bh(&ulog_lock);
  }
  
@@@ -307,20 -314,21 +308,20 @@@ static void ipt_logfn(u_int8_t pf
        ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
  }
  
 -static bool ulog_tg_check(const struct xt_tgchk_param *par)
 +static int ulog_tg_check(const struct xt_tgchk_param *par)
  {
        const struct ipt_ulog_info *loginfo = par->targinfo;
  
        if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
 -              pr_debug("ipt_ULOG: prefix term %i\n",
 -                       loginfo->prefix[sizeof(loginfo->prefix) - 1]);
 -              return false;
 +              pr_debug("prefix not null-terminated\n");
 +              return -EINVAL;
        }
        if (loginfo->qthreshold > ULOG_MAX_QLEN) {
 -              pr_debug("ipt_ULOG: queue threshold %Zu > MAX_QLEN\n",
 +              pr_debug("queue threshold %Zu > MAX_QLEN\n",
                         loginfo->qthreshold);
 -              return false;
 +              return -EINVAL;
        }
 -      return true;
 +      return 0;
  }
  
  #ifdef CONFIG_COMPAT
@@@ -382,10 -390,10 +383,10 @@@ static int __init ulog_tg_init(void
  {
        int ret, i;
  
 -      pr_debug("ipt_ULOG: init module\n");
 +      pr_debug("init module\n");
  
        if (nlbufsiz > 128*1024) {
 -              printk("Netlink buffer has to be <= 128kB\n");
 +              pr_warning("Netlink buffer has to be <= 128kB\n");
                return -EINVAL;
        }
  
@@@ -415,7 -423,7 +416,7 @@@ static void __exit ulog_tg_exit(void
        ulog_buff_t *ub;
        int i;
  
 -      pr_debug("ipt_ULOG: cleanup_module\n");
 +      pr_debug("cleanup_module\n");
  
        if (nflog)
                nf_log_unregister(&ipt_ulog_logger);
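
Several files in this merge (ipt_CLUSTERIP, ipt_ULOG, nf_nat_rule, ip6t_REJECT, x_tables, xt_LED) gain a pr_fmt() definition so the pr_* helpers prepend the module name once, replacing hand-written "ipt_ULOG: " prefixes and the old PRINTR macro. A compact user-space sketch of how such a prefix macro composes; the module name is made up, and the kernel's own pr_info()/KBUILD_MODNAME are only mimicked here.

#include <stdio.h>

/* Illustrative stand-ins: in the kernel, pr_fmt() must be defined before
 * the headers that expand pr_info()/pr_debug(), and KBUILD_MODNAME
 * supplies the module name automatically. */
#define pr_fmt(fmt) "ipt_EXAMPLE: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: ipt_EXAMPLE: queue threshold 42 > MAX_QLEN */
	pr_info("queue threshold %u > MAX_QLEN\n", 42u);
	return 0;
}
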
@@@ -7,7 -7,6 +7,7 @@@
   */
  
  /* Everything about the rules for NAT. */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/types.h>
  #include <linux/ip.h>
  #include <linux/netfilter.h>
@@@ -16,6 -15,7 +16,7 @@@
  #include <linux/kmod.h>
  #include <linux/skbuff.h>
  #include <linux/proc_fs.h>
+ #include <linux/slab.h>
  #include <net/checksum.h>
  #include <net/route.h>
  #include <linux/bitops.h>
@@@ -74,28 -74,28 +75,28 @@@ ipt_dnat_target(struct sk_buff *skb, co
        return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
  }
  
 -static bool ipt_snat_checkentry(const struct xt_tgchk_param *par)
 +static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
  {
        const struct nf_nat_multi_range_compat *mr = par->targinfo;
  
        /* Must be a valid range */
        if (mr->rangesize != 1) {
 -              printk("SNAT: multiple ranges no longer supported\n");
 -              return false;
 +              pr_info("SNAT: multiple ranges no longer supported\n");
 +              return -EINVAL;
        }
 -      return true;
 +      return 0;
  }
  
 -static bool ipt_dnat_checkentry(const struct xt_tgchk_param *par)
 +static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
  {
        const struct nf_nat_multi_range_compat *mr = par->targinfo;
  
        /* Must be a valid range */
        if (mr->rangesize != 1) {
 -              printk("DNAT: multiple ranges no longer supported\n");
 -              return false;
 +              pr_info("DNAT: multiple ranges no longer supported\n");
 +              return -EINVAL;
        }
 -      return true;
 +      return 0;
  }
  
  unsigned int
@@@ -7,6 -7,7 +7,7 @@@
   */
  #include <linux/types.h>
  #include <linux/icmp.h>
+ #include <linux/gfp.h>
  #include <linux/ip.h>
  #include <linux/netfilter.h>
  #include <linux/netfilter_ipv4.h>
@@@ -137,8 -138,9 +138,8 @@@ nf_nat_fn(unsigned int hooknum
                                ret = nf_nat_rule_find(skb, hooknum, in, out,
                                                       ct);
  
 -                      if (ret != NF_ACCEPT) {
 +                      if (ret != NF_ACCEPT)
                                return ret;
 -                      }
                } else
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
diff --combined net/ipv4/raw.c
@@@ -60,7 -60,6 +60,6 @@@
  #include <net/net_namespace.h>
  #include <net/dst.h>
  #include <net/sock.h>
- #include <linux/gfp.h>
  #include <linux/ip.h>
  #include <linux/net.h>
  #include <net/ip.h>
@@@ -382,8 -381,8 +381,8 @@@ static int raw_send_hdrinc(struct sock 
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);
  
 -      err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
 -                    dst_output);
 +      err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
 +                    rt->u.dst.dev, dst_output);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
diff --combined net/ipv4/xfrm4_input.c
@@@ -9,6 -9,7 +9,7 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include <linux/module.h>
  #include <linux/string.h>
  #include <linux/netfilter.h>
@@@ -60,7 -61,7 +61,7 @@@ int xfrm4_transport_finish(struct sk_bu
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
  
 -      NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
 +      NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                xfrm4_rcv_encap_finish);
        return 0;
  }
diff --combined net/ipv6/ip6_input.c
@@@ -28,6 -28,7 +28,7 @@@
  #include <linux/in6.h>
  #include <linux/icmpv6.h>
  #include <linux/mroute6.h>
+ #include <linux/slab.h>
  
  #include <linux/netfilter.h>
  #include <linux/netfilter_ipv6.h>
@@@ -142,7 -143,7 +143,7 @@@ int ipv6_rcv(struct sk_buff *skb, struc
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
  
 -      return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
 +      return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL,
                       ip6_rcv_finish);
  err:
        IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@@ -235,7 -236,7 +236,7 @@@ discard
  
  int ip6_input(struct sk_buff *skb)
  {
 -      return NF_HOOK(PF_INET6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
 +      return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
                       ip6_input_finish);
  }
  
diff --combined net/ipv6/ip6_output.c
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/tcp.h>
  #include <linux/route.h>
  #include <linux/module.h>
+ #include <linux/slab.h>
  
  #include <linux/netfilter.h>
  #include <linux/netfilter_ipv6.h>
@@@ -66,8 -67,8 +67,8 @@@ int __ip6_local_out(struct sk_buff *skb
                len = 0;
        ipv6_hdr(skb)->payload_len = htons(len);
  
 -      return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
 -                     dst_output);
 +      return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 +                     skb_dst(skb)->dev, dst_output);
  }
  
  int ip6_local_out(struct sk_buff *skb)
  }
  EXPORT_SYMBOL_GPL(ip6_local_out);
  
 -static int ip6_output_finish(struct sk_buff *skb)
 -{
 -      struct dst_entry *dst = skb_dst(skb);
 -
 -      if (dst->hh)
 -              return neigh_hh_output(dst->hh, skb);
 -      else if (dst->neighbour)
 -              return dst->neighbour->output(skb);
 -
 -      IP6_INC_STATS_BH(dev_net(dst->dev),
 -                       ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 -      kfree_skb(skb);
 -      return -EINVAL;
 -
 -}
 -
  /* dev_loopback_xmit for use with netfilter. */
  static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
  {
        return 0;
  }
  
 -
 -static int ip6_output2(struct sk_buff *skb)
 +static int ip6_finish_output2(struct sk_buff *skb)
  {
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
                           is not supported in any case.
                         */
                        if (newskb)
 -                              NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, newskb,
 -                                      NULL, newskb->dev,
 +                              NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 +                                      newskb, NULL, newskb->dev,
                                        ip6_dev_loopback_xmit);
  
                        if (ipv6_hdr(skb)->hop_limit == 0) {
                                skb->len);
        }
  
 -      return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
 -                     ip6_output_finish);
 +      if (dst->hh)
 +              return neigh_hh_output(dst->hh, skb);
 +      else if (dst->neighbour)
 +              return dst->neighbour->output(skb);
 +
 +      IP6_INC_STATS_BH(dev_net(dst->dev),
 +                       ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 +      kfree_skb(skb);
 +      return -EINVAL;
  }
  
  static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
  }
  
 +static int ip6_finish_output(struct sk_buff *skb)
 +{
 +      if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 +          dst_allfrag(skb_dst(skb)))
 +              return ip6_fragment(skb, ip6_finish_output2);
 +      else
 +              return ip6_finish_output2(skb);
 +}
 +
  int ip6_output(struct sk_buff *skb)
  {
 +      struct net_device *dev = skb_dst(skb)->dev;
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
        if (unlikely(idev->cnf.disable_ipv6)) {
 -              IP6_INC_STATS(dev_net(skb_dst(skb)->dev), idev,
 +              IP6_INC_STATS(dev_net(dev), idev,
                              IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
                return 0;
        }
  
 -      if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 -                              dst_allfrag(skb_dst(skb)))
 -              return ip6_fragment(skb, ip6_output2);
 -      else
 -              return ip6_output2(skb);
 +      return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
 +                          ip6_finish_output,
 +                          !(IP6CB(skb)->flags & IP6SKB_REROUTED));
  }
  
  /*
-  *    xmit an sk_buff (used by TCP)
+  *    xmit an sk_buff (used by TCP, SCTP and DCCP)
   */
  
  int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
-            struct ipv6_txoptions *opt, int ipfragok)
+            struct ipv6_txoptions *opt)
  {
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);
  
-       /* Allow local fragmentation. */
-       if (ipfragok)
-               skb->local_df = 1;
        /*
         *      Fill in the IPv6 header
         */
        if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
 -              return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
 -                              dst_output);
 +              return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 +                             dst->dev, dst_output);
        }
  
        if (net_ratelimit())
@@@ -535,7 -534,7 +532,7 @@@ int ip6_forward(struct sk_buff *skb
        hdr->hop_limit--;
  
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 -      return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 +      return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
  
  error:
diff --combined net/ipv6/ip6mr.c
@@@ -33,6 -33,7 +33,7 @@@
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/init.h>
+ #include <linux/slab.h>
  #include <net/protocol.h>
  #include <linux/skbuff.h>
  #include <net/sock.h>
@@@ -1113,6 -1114,9 +1114,9 @@@ static int ip6mr_mfc_add(struct net *ne
        unsigned char ttls[MAXMIFS];
        int i;
  
+       if (mfc->mf6cc_parent >= MAXMIFS)
+               return -ENFILE;
        memset(ttls, 255, MAXMIFS);
        for (i = 0; i < MAXMIFS; i++) {
                if (IF_ISSET(i, &mfc->mf6cc_ifset))
@@@ -1566,7 -1570,7 +1570,7 @@@ static int ip6mr_forward2(struct sk_buf
  
        IP6CB(skb)->flags |= IP6SKB_FORWARDED;
  
 -      return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
 +      return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
                       ip6mr_forward2_finish);
  
  out_free:
@@@ -1692,17 -1696,20 +1696,20 @@@ ip6mr_fill_mroute(struct sk_buff *skb, 
        int ct;
        struct rtnexthop *nhp;
        struct net *net = mfc6_net(c);
-       struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
        u8 *b = skb_tail_pointer(skb);
        struct rtattr *mp_head;
  
-       if (dev)
-               RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+       /* If cache is unresolved, don't try to parse IIF and OIF */
+       if (c->mf6c_parent > MAXMIFS)
+               return -ENOENT;
+       if (MIF_EXISTS(net, c->mf6c_parent))
+               RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
  
        mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
  
        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-               if (c->mfc_un.res.ttls[ct] < 255) {
+               if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
                        if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                                goto rtattr_failure;
                        nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
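
The ip6mr hunks above validate mf6cc_parent/mf6c_parent against MAXMIFS (and MIF_EXISTS) before indexing vif6_table, rather than trusting a value coming from userspace or from an unresolved cache entry. A small stand-alone sketch of the validate-before-index pattern; the table, its size and the return value are illustrative.

#include <stdio.h>

#define MAXMIFS 32

static int vif_table[MAXMIFS];          /* stand-in for net->ipv6.vif6_table */

/* Reject an out-of-range interface index up front instead of indexing
 * the table with an unvalidated value. */
static int lookup_vif(int parent)
{
	if (parent < 0 || parent >= MAXMIFS)
		return -1;
	return vif_table[parent];
}

int main(void)
{
	printf("%d\n", lookup_vif(5));      /* valid index */
	printf("%d\n", lookup_vif(200));    /* rejected: -1 */
	return 0;
}
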
diff --combined net/ipv6/mcast.c
@@@ -43,6 -43,7 +43,7 @@@
  #include <linux/init.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
+ #include <linux/slab.h>
  
  #include <linux/netfilter.h>
  #include <linux/netfilter_ipv6.h>
@@@ -714,7 -715,7 +715,7 @@@ static void igmp6_group_added(struct if
        if (!(mc->mca_flags&MAF_LOADED)) {
                mc->mca_flags |= MAF_LOADED;
                if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
-                       dev_mc_add(dev, buf, dev->addr_len, 0);
+                       dev_mc_add(dev, buf);
        }
        spin_unlock_bh(&mc->mca_lock);
  
@@@ -740,7 -741,7 +741,7 @@@ static void igmp6_group_dropped(struct 
        if (mc->mca_flags&MAF_LOADED) {
                mc->mca_flags &= ~MAF_LOADED;
                if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
-                       dev_mc_delete(dev, buf, dev->addr_len, 0);
+                       dev_mc_del(dev, buf);
        }
  
        if (mc->mca_flags & MAF_NOREPORT)
@@@ -1479,7 -1480,7 +1480,7 @@@ static void mld_sendpack(struct sk_buf
  
        payload_len = skb->len;
  
 -      err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
 +      err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
                      dst_output);
  out:
        if (!err) {
@@@ -1847,7 -1848,7 +1848,7 @@@ static void igmp6_send(struct in6_addr 
                goto err_out;
  
        skb_dst_set(skb, dst);
 -      err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
 +      err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
                      dst_output);
  out:
        if (!err) {
diff --combined net/ipv6/ndisc.c
@@@ -59,6 -59,7 +59,7 @@@
  #include <linux/route.h>
  #include <linux/init.h>
  #include <linux/rcupdate.h>
+ #include <linux/slab.h>
  #ifdef CONFIG_SYSCTL
  #include <linux/sysctl.h>
  #endif
@@@ -535,7 -536,7 +536,7 @@@ void ndisc_send_skb(struct sk_buff *skb
        idev = in6_dev_get(dst->dev);
        IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
  
 -      err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
 +      err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
                      dst_output);
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, type);
@@@ -1617,7 -1618,7 +1618,7 @@@ void ndisc_send_redirect(struct sk_buf
        skb_dst_set(buff, dst);
        idev = in6_dev_get(dst->dev);
        IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 -      err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
 +      err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
                      dst_output);
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT);
@@@ -25,6 -25,7 +25,7 @@@
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/mutex.h>
+ #include <linux/slab.h>
  #include <net/net_namespace.h>
  #include <net/sock.h>
  #include <net/ipv6.h>
@@@ -161,7 -162,8 +162,7 @@@ ipq_build_packet_message(struct nf_queu
                break;
  
        case IPQ_COPY_PACKET:
 -              if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
 -                   entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
 +              if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
                    (*errp = skb_checksum_help(entry->skb))) {
                        read_unlock_bh(&queue_lock);
                        return NULL;
@@@ -14,7 -14,8 +14,9 @@@
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/gfp.h>
  #include <linux/module.h>
  #include <linux/skbuff.h>
  #include <linux/icmpv6.h>
@@@ -49,7 -50,7 +51,7 @@@ static void send_reset(struct net *net
  
        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
            (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
 -              pr_debug("ip6t_REJECT: addr is not unicast.\n");
 +              pr_debug("addr is not unicast.\n");
                return;
        }
  
@@@ -57,7 -58,7 +59,7 @@@
        tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
  
        if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
 -              pr_debug("ip6t_REJECT: Can't get TCP header.\n");
 +              pr_debug("Cannot get TCP header.\n");
                return;
        }
  
@@@ -65,7 -66,7 +67,7 @@@
  
        /* IP header checks: fragment, too short. */
        if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
 -              pr_debug("ip6t_REJECT: proto(%d) != IPPROTO_TCP, "
 +              pr_debug("proto(%d) != IPPROTO_TCP, "
                         "or too short. otcplen = %d\n",
                         proto, otcplen);
                return;
  
        /* No RST for RST. */
        if (otcph.rst) {
 -              pr_debug("ip6t_REJECT: RST is set\n");
 +              pr_debug("RST is set\n");
                return;
        }
  
        /* Check checksum. */
        if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
                            skb_checksum(oldskb, tcphoff, otcplen, 0))) {
 -              pr_debug("ip6t_REJECT: TCP checksum is invalid\n");
 +              pr_debug("TCP checksum is invalid\n");
                return;
        }
  
  
        if (!nskb) {
                if (net_ratelimit())
 -                      printk("ip6t_REJECT: Can't alloc skb\n");
 +                      pr_debug("cannot alloc skb\n");
                dst_release(dst);
                return;
        }
@@@ -179,6 -180,9 +181,6 @@@ reject_tg6(struct sk_buff *skb, const s
        struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
  
        pr_debug("%s: medium point\n", __func__);
 -      /* WARNING: This code causes reentry within ip6tables.
 -         This means that the ip6tables jump stack is now crap.  We
 -         must return an absolute verdict. --RR */
        switch (reject->with) {
        case IP6T_ICMP6_NO_ROUTE:
                send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum);
                break;
        default:
                if (net_ratelimit())
 -                      printk(KERN_WARNING "ip6t_REJECT: case %u not handled yet\n", reject->with);
 +                      pr_info("case %u not handled yet\n", reject->with);
                break;
        }
  
        return NF_DROP;
  }
  
 -static bool reject_tg6_check(const struct xt_tgchk_param *par)
 +static int reject_tg6_check(const struct xt_tgchk_param *par)
  {
        const struct ip6t_reject_info *rejinfo = par->targinfo;
        const struct ip6t_entry *e = par->entryinfo;
  
        if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
 -              printk("ip6t_REJECT: ECHOREPLY is not supported.\n");
 -              return false;
 +              pr_info("ECHOREPLY is not supported.\n");
 +              return -EINVAL;
        } else if (rejinfo->with == IP6T_TCP_RESET) {
                /* Must specify that it's a TCP packet */
                if (e->ipv6.proto != IPPROTO_TCP ||
                    (e->ipv6.invflags & XT_INV_PROTO)) {
 -                      printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
 -                      return false;
 +                      pr_info("TCP_RESET illegal for non-tcp\n");
 +                      return -EINVAL;
                }
        }
 -      return true;
 +      return 0;
  }
  
  static struct xt_target reject_tg6_reg __read_mostly = {
@@@ -6,7 -6,7 +6,7 @@@
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/skbuff.h>
  #include <linux/ipv6.h>
@@@ -41,8 -41,6 +41,8 @@@ MODULE_ALIAS("ip6t_dst")
   *    5       -> RTALERT 2 x x
   */
  
 +static struct xt_match hbh_mt6_reg[] __read_mostly;
 +
  static bool
  hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
  {
@@@ -60,9 -58,7 +60,9 @@@
        unsigned int optlen;
        int err;
  
 -      err = ipv6_find_hdr(skb, &ptr, par->match->data, NULL);
 +      err = ipv6_find_hdr(skb, &ptr,
 +                          (par->match == &hbh_mt6_reg[0]) ?
 +                          NEXTHDR_HOP : NEXTHDR_DEST, NULL);
        if (err < 0) {
                if (err != -ENOENT)
                        *par->hotdrop = true;
                        }
  
                        /* Step to the next */
-                       pr_debug("len%04X \n", optlen);
+                       pr_debug("len%04X\n", optlen);
  
                        if ((ptr > skb->len - optlen || hdrlen < optlen) &&
                            temp < optinfo->optsnr - 1) {
-                               pr_debug("new pointer is too large! \n");
+                               pr_debug("new pointer is too large!\n");
                                break;
                        }
                        ptr += optlen;
        return false;
  }
  
 -static bool hbh_mt6_check(const struct xt_mtchk_param *par)
 +static int hbh_mt6_check(const struct xt_mtchk_param *par)
  {
        const struct ip6t_opts *optsinfo = par->matchinfo;
  
        if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
 -              pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
 -              return false;
 +              pr_debug("unknown flags %X\n", optsinfo->invflags);
 +              return -EINVAL;
        }
  
        if (optsinfo->flags & IP6T_OPTS_NSTRICT) {
 -              pr_debug("ip6t_opts: Not strict - not implemented");
 -              return false;
 +              pr_debug("Not strict - not implemented");
 +              return -EINVAL;
        }
  
 -      return true;
 +      return 0;
  }
  
  static struct xt_match hbh_mt6_reg[] __read_mostly = {
        {
 +              /* Note, hbh_mt6 relies on the order of hbh_mt6_reg */
                .name           = "hbh",
                .family         = NFPROTO_IPV6,
                .match          = hbh_mt6,
                .matchsize      = sizeof(struct ip6t_opts),
                .checkentry     = hbh_mt6_check,
                .me             = THIS_MODULE,
 -              .data           = NEXTHDR_HOP,
        },
        {
                .name           = "dst",
                .matchsize      = sizeof(struct ip6t_opts),
                .checkentry     = hbh_mt6_check,
                .me             = THIS_MODULE,
 -              .data           = NEXTHDR_DEST,
        },
  };
  
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/ipv6.h>
  #include <linux/icmpv6.h>
  #include <linux/random.h>
+ #include <linux/slab.h>
  
  #include <net/sock.h>
  #include <net/snmp.h>
@@@ -643,7 -644,7 +644,7 @@@ void nf_ct_frag6_output(unsigned int ho
                s2 = s->next;
                s->next = NULL;
  
 -              NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
 +              NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn,
                               NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
                s = s2;
        }
diff --combined net/ipv6/raw.c
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/errno.h>
  #include <linux/types.h>
  #include <linux/socket.h>
+ #include <linux/slab.h>
  #include <linux/sockios.h>
  #include <linux/net.h>
  #include <linux/in6.h>
@@@ -636,8 -637,8 +637,8 @@@ static int rawv6_send_hdrinc(struct soc
                goto error_fault;
  
        IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 -      err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
 -                    dst_output);
 +      err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 +                    rt->u.dst.dev, dst_output);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
@@@ -32,6 -32,7 +32,7 @@@
  #include <linux/in.h>
  #include <linux/ip.h>
  #include <linux/netfilter.h>
+ #include <linux/gfp.h>
  #include <net/protocol.h>
  #include <net/tcp.h>
  #include <asm/unaligned.h>
@@@ -208,14 -209,8 +209,14 @@@ static int ip_vs_ftp_out(struct ip_vs_a
                 */
                from.ip = n_cp->vaddr.ip;
                port = n_cp->vport;
 -              sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
 -                      (ntohs(port)>>8)&255, ntohs(port)&255);
 +              snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
 +                       ((unsigned char *)&from.ip)[0],
 +                       ((unsigned char *)&from.ip)[1],
 +                       ((unsigned char *)&from.ip)[2],
 +                       ((unsigned char *)&from.ip)[3],
 +                       ntohs(port) >> 8,
 +                       ntohs(port) & 0xFF);
 +
                buf_len = strlen(buf);
  
                /*
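
The ip_vs_ftp hunk above replaces the NIPQUAD-based sprintf() with a bounded snprintf() that emits the address bytes and the port's high and low bytes individually, i.e. the classic FTP "h1,h2,h3,h4,p1,p2" argument. A self-contained sketch of that encoding, assuming ordinary socket headers; the helper name and sample address are invented.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Build the FTP "h1,h2,h3,h4,p1,p2" address argument from a
 * network-order IPv4 address and port, the same byte-by-byte layout the
 * bounded snprintf() above produces. */
static void ftp_port_arg(char *buf, size_t len, uint32_t ip_be, uint16_t port_be)
{
	const unsigned char *a = (const unsigned char *)&ip_be;
	unsigned int port = ntohs(port_be);

	snprintf(buf, len, "%u,%u,%u,%u,%u,%u",
		 a[0], a[1], a[2], a[3], port >> 8, port & 0xFF);
}

int main(void)
{
	char buf[64];

	ftp_port_arg(buf, sizeof(buf), inet_addr("192.0.2.10"), htons(20021));
	puts(buf);              /* 192,0,2,10,78,53 */
	return 0;
}
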
@@@ -19,6 -19,7 +19,7 @@@
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/skbuff.h>
+ #include <linux/gfp.h>
  #include <linux/in.h>
  #include <linux/ip.h>
  #include <net/protocol.h>
@@@ -166,24 -167,26 +167,24 @@@ ip_vs_tcpudp_debug_packet_v4(struct ip_
  
        ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
        if (ih == NULL)
 -              sprintf(buf, "%s TRUNCATED", pp->name);
 +              sprintf(buf, "TRUNCATED");
        else if (ih->frag_off & htons(IP_OFFSET))
 -              sprintf(buf, "%s %pI4->%pI4 frag",
 -                      pp->name, &ih->saddr, &ih->daddr);
 +              sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr);
        else {
               __be16 _ports[2], *pptr;
                pptr = skb_header_pointer(skb, offset + ih->ihl*4,
                                          sizeof(_ports), _ports);
                if (pptr == NULL)
 -                      sprintf(buf, "%s TRUNCATED %pI4->%pI4",
 -                              pp->name, &ih->saddr, &ih->daddr);
 +                      sprintf(buf, "TRUNCATED %pI4->%pI4",
 +                              &ih->saddr, &ih->daddr);
                else
 -                      sprintf(buf, "%s %pI4:%u->%pI4:%u",
 -                              pp->name,
 +                      sprintf(buf, "%pI4:%u->%pI4:%u",
                                &ih->saddr, ntohs(pptr[0]),
                                &ih->daddr, ntohs(pptr[1]));
        }
  
 -      pr_debug("%s: %s\n", msg, buf);
 +      pr_debug("%s: %s %s\n", msg, pp->name, buf);
  }
  
  #ifdef CONFIG_IP_VS_IPV6
@@@ -198,24 -201,26 +199,24 @@@ ip_vs_tcpudp_debug_packet_v6(struct ip_
  
        ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
        if (ih == NULL)
 -              sprintf(buf, "%s TRUNCATED", pp->name);
 +              sprintf(buf, "TRUNCATED");
        else if (ih->nexthdr == IPPROTO_FRAGMENT)
 -              sprintf(buf, "%s %pI6->%pI6 frag",
 -                      pp->name, &ih->saddr, &ih->daddr);
 +              sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr);
        else {
                __be16 _ports[2], *pptr;
  
                pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
                                          sizeof(_ports), _ports);
                if (pptr == NULL)
 -                      sprintf(buf, "%s TRUNCATED %pI6->%pI6",
 -                              pp->name, &ih->saddr, &ih->daddr);
 +                      sprintf(buf, "TRUNCATED %pI6->%pI6",
 +                              &ih->saddr, &ih->daddr);
                else
 -                      sprintf(buf, "%s %pI6:%u->%pI6:%u",
 -                              pp->name,
 +                      sprintf(buf, "%pI6:%u->%pI6:%u",
                                &ih->saddr, ntohs(pptr[0]),
                                &ih->daddr, ntohs(pptr[1]));
        }
  
 -      pr_debug("%s: %s\n", msg, buf);
 +      pr_debug("%s: %s %s\n", msg, pp->name, buf);
  }
  #endif
  
@@@ -17,6 -17,7 +17,7 @@@
  #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  
  #include <linux/kernel.h>
+ #include <linux/slab.h>
  #include <linux/tcp.h>                  /* for tcphdr */
  #include <net/ip.h>
  #include <net/tcp.h>                    /* for csum_tcpudp_magic */
@@@ -269,7 -270,7 +270,7 @@@ ip_vs_bypass_xmit(struct sk_buff *skb, 
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
  
        LeaveFunction(10);
        return NF_STOLEN;
@@@ -333,7 -334,7 +334,7 @@@ ip_vs_bypass_xmit_v6(struct sk_buff *sk
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET6, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
  
        LeaveFunction(10);
        return NF_STOLEN;
@@@ -409,7 -410,7 +410,7 @@@ ip_vs_nat_xmit(struct sk_buff *skb, str
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
  
        LeaveFunction(10);
        return NF_STOLEN;
@@@ -485,7 -486,7 +486,7 @@@ ip_vs_nat_xmit_v6(struct sk_buff *skb, 
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET6, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
  
        LeaveFunction(10);
        return NF_STOLEN;
@@@ -784,7 -785,7 +785,7 @@@ ip_vs_dr_xmit(struct sk_buff *skb, stru
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
  
        LeaveFunction(10);
        return NF_STOLEN;
@@@ -837,7 -838,7 +838,7 @@@ ip_vs_dr_xmit_v6(struct sk_buff *skb, s
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET6, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
  
        LeaveFunction(10);
        return NF_STOLEN;
@@@ -911,7 -912,7 +912,7 @@@ ip_vs_icmp_xmit(struct sk_buff *skb, st
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
  
        rc = NF_STOLEN;
        goto out;
@@@ -986,7 -987,7 +987,7 @@@ ip_vs_icmp_xmit_v6(struct sk_buff *skb
        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;
  
 -      IP_VS_XMIT(PF_INET6, skb, rt);
 +      IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
  
        rc = NF_STOLEN;
        goto out;
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/percpu.h>
  #include <linux/kernel.h>
  #include <linux/netdevice.h>
+ #include <linux/slab.h>
  
  #include <net/netfilter/nf_conntrack.h>
  #include <net/netfilter/nf_conntrack_core.h>
@@@ -81,9 -82,11 +82,9 @@@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_
  int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
  {
        int ret = 0;
 -      struct nf_ct_event_notifier *notify;
  
        mutex_lock(&nf_ct_ecache_mutex);
 -      notify = rcu_dereference(nf_conntrack_event_cb);
 -      if (notify != NULL) {
 +      if (nf_conntrack_event_cb != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@@ -99,8 -102,11 +100,8 @@@ EXPORT_SYMBOL_GPL(nf_conntrack_register
  
  void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
  {
 -      struct nf_ct_event_notifier *notify;
 -
        mutex_lock(&nf_ct_ecache_mutex);
 -      notify = rcu_dereference(nf_conntrack_event_cb);
 -      BUG_ON(notify != new);
 +      BUG_ON(nf_conntrack_event_cb != new);
        rcu_assign_pointer(nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
  }
@@@ -109,9 -115,11 +110,9 @@@ EXPORT_SYMBOL_GPL(nf_conntrack_unregist
  int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
  {
        int ret = 0;
 -      struct nf_exp_event_notifier *notify;
  
        mutex_lock(&nf_ct_ecache_mutex);
 -      notify = rcu_dereference(nf_expect_event_cb);
 -      if (notify != NULL) {
 +      if (nf_expect_event_cb != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@@ -127,8 -135,11 +128,8 @@@ EXPORT_SYMBOL_GPL(nf_ct_expect_register
  
  void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
  {
 -      struct nf_exp_event_notifier *notify;
 -
        mutex_lock(&nf_ct_ecache_mutex);
 -      notify = rcu_dereference(nf_expect_event_cb);
 -      BUG_ON(notify != new);
 +      BUG_ON(nf_expect_event_cb != new);
        rcu_assign_pointer(nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
  }
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/netlink.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
+ #include <linux/slab.h>
  
  #include <linux/netfilter.h>
  #include <net/netlink.h>
@@@ -425,17 -426,6 +426,17 @@@ ctnetlink_proto_size(const struct nf_co
        return len;
  }
  
 +static inline size_t
 +ctnetlink_counters_size(const struct nf_conn *ct)
 +{
 +      if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
 +              return 0;
 +      return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
 +             + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
 +             + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
 +             ;
 +}
 +
  static inline size_t
  ctnetlink_nlmsg_size(const struct nf_conn *ct)
  {
               + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
               + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
               + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
 -#ifdef CONFIG_NF_CT_ACCT
 -             + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
 -             + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
 -             + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
 -#endif
 +             + ctnetlink_counters_size(ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
               + nla_total_size(0) /* CTA_PROTOINFO */
               + nla_total_size(0) /* CTA_HELP */
@@@ -589,7 -583,9 +590,9 @@@ nla_put_failure
  nlmsg_failure:
        kfree_skb(skb);
  errout:
-       nfnetlink_set_err(net, 0, group, -ENOBUFS);
+       if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
+               return -ENOBUFS;
        return 0;
  }
  #endif /* CONFIG_NF_CONNTRACK_EVENTS */
  #include <linux/types.h>
  #include <linux/netfilter.h>
  #include <linux/module.h>
+ #include <linux/slab.h>
  #include <linux/mutex.h>
 -#include <linux/skbuff.h>
  #include <linux/vmalloc.h>
  #include <linux/stddef.h>
  #include <linux/err.h>
  #include <linux/percpu.h>
 -#include <linux/moduleparam.h>
  #include <linux/notifier.h>
  #include <linux/kernel.h>
  #include <linux/netdevice.h>
  #include <linux/types.h>
  #include <linux/socket.h>
  #include <linux/kernel.h>
 -#include <linux/major.h>
 -#include <linux/timer.h>
  #include <linux/string.h>
  #include <linux/sockios.h>
  #include <linux/net.h>
 -#include <linux/fcntl.h>
  #include <linux/skbuff.h>
  #include <asm/uaccess.h>
  #include <asm/system.h>
@@@ -110,9 -113,9 +110,9 @@@ int nfnetlink_send(struct sk_buff *skb
  }
  EXPORT_SYMBOL_GPL(nfnetlink_send);
  
- void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
+ int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
  {
-       netlink_set_err(net->nfnl, pid, group, error);
+       return netlink_set_err(net->nfnl, pid, group, error);
  }
  EXPORT_SYMBOL_GPL(nfnetlink_set_err);
  
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/skbuff.h>
  #include <linux/init.h>
  #include <linux/spinlock.h>
+ #include <linux/slab.h>
  #include <linux/notifier.h>
  #include <linux/netdevice.h>
  #include <linux/netfilter.h>
@@@ -245,7 -246,8 +246,7 @@@ nfqnl_build_packet_message(struct nfqnl
                break;
  
        case NFQNL_COPY_PACKET:
 -              if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
 -                   entskb->ip_summed == CHECKSUM_COMPLETE) &&
 +              if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb)) {
                        spin_unlock_bh(&queue->lock);
                        return NULL;
diff --combined net/netfilter/x_tables.c
@@@ -12,7 -12,7 +12,7 @@@
   * published by the Free Software Foundation.
   *
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/kernel.h>
  #include <linux/socket.h>
  #include <linux/net.h>
@@@ -22,6 -22,7 +22,7 @@@
  #include <linux/vmalloc.h>
  #include <linux/mutex.h>
  #include <linux/mm.h>
+ #include <linux/slab.h>
  #include <net/net_namespace.h>
  
  #include <linux/netfilter/x_tables.h>
@@@ -54,6 -55,12 +55,6 @@@ struct xt_af 
  
  static struct xt_af *xt;
  
 -#ifdef DEBUG_IP_FIREWALL_USER
 -#define duprintf(format, args...) printk(format , ## args)
 -#else
 -#define duprintf(format, args...)
 -#endif
 -
  static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
        [NFPROTO_UNSPEC] = "x",
        [NFPROTO_IPV4]   = "ip",
@@@ -62,9 -69,6 +63,9 @@@
        [NFPROTO_IPV6]   = "ip6",
  };
  
 +/* Allow this many total (re)entries. */
 +static const unsigned int xt_jumpstack_multiplier = 2;
 +
  /* Registration hooks for targets. */
  int
  xt_register_target(struct xt_target *target)
@@@ -217,17 -221,6 +218,17 @@@ struct xt_match *xt_find_match(u8 af, c
  }
  EXPORT_SYMBOL(xt_find_match);
  
 +struct xt_match *
 +xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 +{
 +      struct xt_match *match;
 +
 +      match = try_then_request_module(xt_find_match(nfproto, name, revision),
 +                                      "%st_%s", xt_prefix[nfproto], name);
 +      return (match != NULL) ? match : ERR_PTR(-ENOENT);
 +}
 +EXPORT_SYMBOL_GPL(xt_request_find_match);
 +
  /* Find target, grabs ref.  Returns ERR_PTR() on error. */
  struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
  {
@@@ -264,7 -257,9 +265,7 @@@ struct xt_target *xt_request_find_targe
  
        target = try_then_request_module(xt_find_target(af, name, revision),
                                         "%st_%s", xt_prefix[af], name);
 -      if (IS_ERR(target) || !target)
 -              return NULL;
 -      return target;
 +      return (target != NULL) ? target : ERR_PTR(-ENOENT);
  }
  EXPORT_SYMBOL_GPL(xt_request_find_target);
  
@@@ -366,8 -361,6 +367,8 @@@ static char *textify_hooks(char *buf, s
  int xt_check_match(struct xt_mtchk_param *par,
                   unsigned int size, u_int8_t proto, bool inv_proto)
  {
 +      int ret;
 +
        if (XT_ALIGN(par->match->matchsize) != size &&
            par->match->matchsize != -1) {
                /*
                       par->match->proto);
                return -EINVAL;
        }
 -      if (par->match->checkentry != NULL && !par->match->checkentry(par))
 -              return -EINVAL;
 +      if (par->match->checkentry != NULL) {
 +              ret = par->match->checkentry(par);
 +              if (ret < 0)
 +                      return ret;
 +              else if (ret > 0)
 +                      /* Flag up potential errors. */
 +                      return -EIO;
 +      }
        return 0;
  }
  EXPORT_SYMBOL_GPL(xt_check_match);
@@@ -531,8 -518,6 +532,8 @@@ EXPORT_SYMBOL_GPL(xt_compat_match_to_us
  int xt_check_target(struct xt_tgchk_param *par,
                    unsigned int size, u_int8_t proto, bool inv_proto)
  {
 +      int ret;
 +
        if (XT_ALIGN(par->target->targetsize) != size) {
                pr_err("%s_tables: %s.%u target: invalid size "
                       "%u (kernel) != (user) %u\n",
                       par->target->proto);
                return -EINVAL;
        }
 -      if (par->target->checkentry != NULL && !par->target->checkentry(par))
 -              return -EINVAL;
 +      if (par->target->checkentry != NULL) {
 +              ret = par->target->checkentry(par);
 +              if (ret < 0)
 +                      return ret;
 +              else if (ret > 0)
 +                      /* Flag up potential errors. */
 +                      return -EIO;
 +      }
        return 0;
  }
  EXPORT_SYMBOL_GPL(xt_check_target);
@@@ -683,26 -662,6 +684,26 @@@ void xt_free_table_info(struct xt_table
                else
                        vfree(info->entries[cpu]);
        }
 +
 +      if (info->jumpstack != NULL) {
 +              if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
 +                      for_each_possible_cpu(cpu)
 +                              vfree(info->jumpstack[cpu]);
 +              } else {
 +                      for_each_possible_cpu(cpu)
 +                              kfree(info->jumpstack[cpu]);
 +              }
 +      }
 +
 +      if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
 +              vfree(info->jumpstack);
 +      else
 +              kfree(info->jumpstack);
 +      if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
 +              vfree(info->stackptr);
 +      else
 +              kfree(info->stackptr);
 +
        kfree(info);
  }
  EXPORT_SYMBOL(xt_free_table_info);
@@@ -747,49 -706,6 +748,49 @@@ EXPORT_SYMBOL_GPL(xt_compat_unlock)
  DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
  EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
  
 +static int xt_jumpstack_alloc(struct xt_table_info *i)
 +{
 +      unsigned int size;
 +      int cpu;
 +
 +      size = sizeof(unsigned int) * nr_cpu_ids;
 +      if (size > PAGE_SIZE)
 +              i->stackptr = vmalloc(size);
 +      else
 +              i->stackptr = kmalloc(size, GFP_KERNEL);
 +      if (i->stackptr == NULL)
 +              return -ENOMEM;
 +      memset(i->stackptr, 0, size);
 +
 +      size = sizeof(void **) * nr_cpu_ids;
 +      if (size > PAGE_SIZE)
 +              i->jumpstack = vmalloc(size);
 +      else
 +              i->jumpstack = kmalloc(size, GFP_KERNEL);
 +      if (i->jumpstack == NULL)
 +              return -ENOMEM;
 +      memset(i->jumpstack, 0, size);
 +
 +      i->stacksize *= xt_jumpstack_multiplier;
 +      size = sizeof(void *) * i->stacksize;
 +      for_each_possible_cpu(cpu) {
 +              if (size > PAGE_SIZE)
 +                      i->jumpstack[cpu] = vmalloc_node(size,
 +                              cpu_to_node(cpu));
 +              else
 +                      i->jumpstack[cpu] = kmalloc_node(size,
 +                              GFP_KERNEL, cpu_to_node(cpu));
 +              if (i->jumpstack[cpu] == NULL)
 +                      /*
 +                       * Freeing will be done later on by the callers. The
 +                       * chain is: xt_replace_table -> __do_replace ->
 +                       * do_replace -> xt_free_table_info.
 +                       */
 +                      return -ENOMEM;
 +      }
 +
 +      return 0;
 +}
  
  struct xt_table_info *
  xt_replace_table(struct xt_table *table,
              int *error)
  {
        struct xt_table_info *private;
 +      int ret;
  
        /* Do the substitution. */
        local_bh_disable();
  
        /* Check inside lock: is the old number correct? */
        if (num_counters != private->number) {
 -              duprintf("num_counters != table->private->number (%u/%u)\n",
 +              pr_debug("num_counters != table->private->number (%u/%u)\n",
                         num_counters, private->number);
                local_bh_enable();
                *error = -EAGAIN;
                return NULL;
        }
  
 +      ret = xt_jumpstack_alloc(newinfo);
 +      if (ret < 0) {
 +              *error = ret;
 +              return NULL;
 +      }
 +
        table->private = newinfo;
        newinfo->initial_entries = private->initial_entries;
  
@@@ -843,10 -752,6 +844,10 @@@ struct xt_table *xt_register_table(stru
        struct xt_table_info *private;
        struct xt_table *t, *table;
  
 +      ret = xt_jumpstack_alloc(newinfo);
 +      if (ret < 0)
 +              return ERR_PTR(ret);
 +
        /* Don't add one object to multiple lists. */
        table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
        if (!table) {
                goto unlock;
  
        private = table->private;
 -      duprintf("table->private->number = %u\n", private->number);
 +      pr_debug("table->private->number = %u\n", private->number);
  
        /* save number of initial entries */
        private->initial_entries = private->number;
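
The x_tables hunks above add xt_request_find_match() and make xt_request_find_target() return ERR_PTR(-ENOENT) instead of NULL, so table loaders can report why a match or target was not found. A simplified user-space sketch of the ERR_PTR/IS_ERR/PTR_ERR convention those callers rely on; the helpers below are stand-ins for the kernel macros, and find_match() is invented.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static int match_registered;            /* flip to 1 to simulate success */

static void *find_match(const char *name)
{
	/* Return the object on success, or an encoded errno on failure,
	 * so the caller gets -ENOENT rather than a bare NULL. */
	(void)name;
	return match_registered ? (void *)&match_registered : ERR_PTR(-ENOENT);
}

int main(void)
{
	void *m = find_match("hashlimit");

	if (IS_ERR(m)) {
		fprintf(stderr, "match not found: error %ld\n", PTR_ERR(m));
		return 1;
	}
	return 0;
}
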
diff --combined net/netfilter/xt_CT.c
@@@ -7,6 -7,7 +7,7 @@@
   */
  
  #include <linux/module.h>
+ #include <linux/gfp.h>
  #include <linux/skbuff.h>
  #include <linux/selinux.h>
  #include <linux/netfilter_ipv4/ip_tables.h>
@@@ -37,13 -38,13 +38,13 @@@ static unsigned int xt_ct_target(struc
  
  static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
  {
 -      if (par->family == AF_INET) {
 +      if (par->family == NFPROTO_IPV4) {
                const struct ipt_entry *e = par->entryinfo;
  
                if (e->ip.invflags & IPT_INV_PROTO)
                        return 0;
                return e->ip.proto;
 -      } else if (par->family == AF_INET6) {
 +      } else if (par->family == NFPROTO_IPV6) {
                const struct ip6t_entry *e = par->entryinfo;
  
                if (e->ipv6.invflags & IP6T_INV_PROTO)
                return 0;
  }
  
 -static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
 +static int xt_ct_tg_check(const struct xt_tgchk_param *par)
  {
        struct xt_ct_target_info *info = par->targinfo;
        struct nf_conntrack_tuple t;
        struct nf_conn_help *help;
        struct nf_conn *ct;
 +      int ret = 0;
        u8 proto;
  
        if (info->flags & ~XT_CT_NOTRACK)
 -              return false;
 +              return -EINVAL;
  
        if (info->flags & XT_CT_NOTRACK) {
                ct = &nf_conntrack_untracked;
                goto err1;
  #endif
  
 -      if (nf_ct_l3proto_try_module_get(par->family) < 0)
 +      ret = nf_ct_l3proto_try_module_get(par->family);
 +      if (ret < 0)
                goto err1;
  
        memset(&t, 0, sizeof(t));
        ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
 +      ret = PTR_ERR(ct);
        if (IS_ERR(ct))
                goto err2;
  
 +      ret = 0;
        if ((info->ct_events || info->exp_events) &&
            !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
                                  GFP_KERNEL))
                goto err3;
  
        if (info->helper[0]) {
 +              ret = -ENOENT;
                proto = xt_ct_find_proto(par);
                if (!proto)
                        goto err3;
  
 +              ret = -ENOMEM;
                help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
                if (help == NULL)
                        goto err3;
  
 +              ret = -ENOENT;
                help->helper = nf_conntrack_helper_try_module_get(info->helper,
                                                                  par->family,
                                                                  proto);
        __set_bit(IPS_CONFIRMED_BIT, &ct->status);
  out:
        info->ct = ct;
 -      return true;
 +      return 0;
  
  err3:
        nf_conntrack_free(ct);
  err2:
        nf_ct_l3proto_module_put(par->family);
  err1:
 -      return false;
 +      return ret;
  }
  
  static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
  static struct xt_target xt_ct_tg __read_mostly = {
        .name           = "CT",
        .family         = NFPROTO_UNSPEC,
 -      .targetsize     = XT_ALIGN(sizeof(struct xt_ct_target_info)),
 +      .targetsize     = sizeof(struct xt_ct_target_info),
        .checkentry     = xt_ct_tg_check,
        .destroy        = xt_ct_tg_destroy,
        .target         = xt_ct_target,
diff --combined net/netfilter/xt_LED.c
   * 02110-1301 USA.
   *
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/skbuff.h>
  #include <linux/netfilter/x_tables.h>
+ #include <linux/slab.h>
  #include <linux/leds.h>
  #include <linux/mutex.h>
  
@@@ -31,18 -32,12 +32,18 @@@ MODULE_LICENSE("GPL")
  MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
  MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
  
 +static LIST_HEAD(xt_led_triggers);
 +static DEFINE_MUTEX(xt_led_mutex);
 +
  /*
   * This is declared in here (the kernel module) only, to avoid having these
   * dependencies in userspace code.  This is what xt_led_info.internal_data
   * points to.
   */
  struct xt_led_info_internal {
 +      struct list_head list;
 +      int refcnt;
 +      char *trigger_id;
        struct led_trigger netfilter_led_trigger;
        struct timer_list timer;
  };
@@@ -59,7 -54,7 +60,7 @@@ led_tg(struct sk_buff *skb, const struc
         */
        if ((ledinfo->delay > 0) && ledinfo->always_blink &&
            timer_pending(&ledinternal->timer))
 -              led_trigger_event(&ledinternal->netfilter_led_trigger,LED_OFF);
 +              led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
  
        led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
  
  
  static void led_timeout_callback(unsigned long data)
  {
 -      struct xt_led_info *ledinfo = (struct xt_led_info *)data;
 -      struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
 +      struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
  
        led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
  }
  
 -static bool led_tg_check(const struct xt_tgchk_param *par)
 +static struct xt_led_info_internal *led_trigger_lookup(const char *name)
 +{
 +      struct xt_led_info_internal *ledinternal;
 +
 +      list_for_each_entry(ledinternal, &xt_led_triggers, list) {
 +              if (!strcmp(name, ledinternal->netfilter_led_trigger.name)) {
 +                      return ledinternal;
 +              }
 +      }
 +      return NULL;
 +}
 +
 +static int led_tg_check(const struct xt_tgchk_param *par)
  {
        struct xt_led_info *ledinfo = par->targinfo;
        struct xt_led_info_internal *ledinternal;
        int err;
  
        if (ledinfo->id[0] == '\0') {
 -              printk(KERN_ERR KBUILD_MODNAME ": No 'id' parameter given.\n");
 -              return false;
 +              pr_info("No 'id' parameter given.\n");
 +              return -EINVAL;
        }
  
 -      ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
 -      if (!ledinternal) {
 -              printk(KERN_CRIT KBUILD_MODNAME ": out of memory\n");
 -              return false;
 +      mutex_lock(&xt_led_mutex);
 +
 +      ledinternal = led_trigger_lookup(ledinfo->id);
 +      if (ledinternal) {
 +              ledinternal->refcnt++;
 +              goto out;
        }
  
 -      ledinternal->netfilter_led_trigger.name = ledinfo->id;
 +      err = -ENOMEM;
 +      ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
 +      if (!ledinternal)
 +              goto exit_mutex_only;
 +
 +      ledinternal->trigger_id = kstrdup(ledinfo->id, GFP_KERNEL);
 +      if (!ledinternal->trigger_id)
 +              goto exit_internal_alloc;
 +
 +      ledinternal->refcnt = 1;
 +      ledinternal->netfilter_led_trigger.name = ledinternal->trigger_id;
  
        err = led_trigger_register(&ledinternal->netfilter_led_trigger);
        if (err) {
 -              printk(KERN_CRIT KBUILD_MODNAME
 -                      ": led_trigger_register() failed\n");
 +              pr_warning("led_trigger_register() failed\n");
                if (err == -EEXIST)
 -                      printk(KERN_ERR KBUILD_MODNAME
 -                              ": Trigger name is already in use.\n");
 +                      pr_warning("Trigger name is already in use.\n");
                goto exit_alloc;
        }
  
        /* See if we need to set up a timer */
        if (ledinfo->delay > 0)
                setup_timer(&ledinternal->timer, led_timeout_callback,
 -                          (unsigned long)ledinfo);
 +                          (unsigned long)ledinternal);
 +
 +      list_add_tail(&ledinternal->list, &xt_led_triggers);
 +
 +out:
 +      mutex_unlock(&xt_led_mutex);
  
        ledinfo->internal_data = ledinternal;
  
 -      return true;
 +      return 0;
  
  exit_alloc:
 +      kfree(ledinternal->trigger_id);
 +
 +exit_internal_alloc:
        kfree(ledinternal);
  
 -      return false;
 +exit_mutex_only:
 +      mutex_unlock(&xt_led_mutex);
 +
 +      return err;
  }
  
  static void led_tg_destroy(const struct xt_tgdtor_param *par)
        const struct xt_led_info *ledinfo = par->targinfo;
        struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
  
 +      mutex_lock(&xt_led_mutex);
 +
 +      if (--ledinternal->refcnt) {
 +              mutex_unlock(&xt_led_mutex);
 +              return;
 +      }
 +
 +      list_del(&ledinternal->list);
 +
        if (ledinfo->delay > 0)
                del_timer_sync(&ledinternal->timer);
  
        led_trigger_unregister(&ledinternal->netfilter_led_trigger);
 +
 +      mutex_unlock(&xt_led_mutex);
 +
 +      kfree(ledinternal->trigger_id);
        kfree(ledinternal);
  }
  
@@@ -192,7 -142,7 +193,7 @@@ static struct xt_target led_tg_reg __re
        .revision       = 0,
        .family         = NFPROTO_UNSPEC,
        .target         = led_tg,
 -      .targetsize     = XT_ALIGN(sizeof(struct xt_led_info)),
 +      .targetsize     = sizeof(struct xt_led_info),
        .checkentry     = led_tg_check,
        .destroy        = led_tg_destroy,
        .me             = THIS_MODULE,
@@@ -11,6 -11,7 +11,7 @@@
  #include <linux/jhash.h>
  #include <linux/rtnetlink.h>
  #include <linux/random.h>
+ #include <linux/slab.h>
  #include <net/gen_stats.h>
  #include <net/netlink.h>
  
@@@ -85,7 -86,7 +86,7 @@@ xt_rateest_tg(struct sk_buff *skb, cons
        return XT_CONTINUE;
  }
  
 -static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
 +static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
  {
        struct xt_rateest_target_info *info = par->targinfo;
        struct xt_rateest *est;
@@@ -93,7 -94,6 +94,7 @@@
                struct nlattr           opt;
                struct gnet_estimator   est;
        } cfg;
 +      int ret;
  
        if (unlikely(!rnd_inited)) {
                get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
                    (info->interval != est->params.interval ||
                     info->ewma_log != est->params.ewma_log)) {
                        xt_rateest_put(est);
 -                      return false;
 +                      return -EINVAL;
                }
                info->est = est;
 -              return true;
 +              return 0;
        }
  
 +      ret = -ENOMEM;
        est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (!est)
                goto err1;
        cfg.est.interval        = info->interval;
        cfg.est.ewma_log        = info->ewma_log;
  
 -      if (gen_new_estimator(&est->bstats, &est->rstats, &est->lock,
 -                            &cfg.opt) < 0)
 +      ret = gen_new_estimator(&est->bstats, &est->rstats,
 +                              &est->lock, &cfg.opt);
 +      if (ret < 0)
                goto err2;
  
        info->est = est;
        xt_rateest_hash_insert(est);
 -
 -      return true;
 +      return 0;
  
  err2:
        kfree(est);
  err1:
 -      return false;
 +      return ret;
  }
  
  static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
@@@ -7,10 -7,11 +7,11 @@@
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
 -
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/skbuff.h>
  #include <linux/ip.h>
+ #include <linux/gfp.h>
  #include <linux/ipv6.h>
  #include <linux/tcp.h>
  #include <net/dst.h>
@@@ -67,14 -68,15 +68,14 @@@ tcpmss_mangle_packet(struct sk_buff *sk
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
                if (dst_mtu(skb_dst(skb)) <= minlen) {
                        if (net_ratelimit())
 -                              printk(KERN_ERR "xt_TCPMSS: "
 -                                     "unknown or invalid path-MTU (%u)\n",
 +                              pr_err("unknown or invalid path-MTU (%u)\n",
                                       dst_mtu(skb_dst(skb)));
                        return -1;
                }
                if (in_mtu <= minlen) {
                        if (net_ratelimit())
 -                              printk(KERN_ERR "xt_TCPMSS: unknown or "
 -                                     "invalid path-MTU (%u)\n", in_mtu);
 +                              pr_err("unknown or invalid path-MTU (%u)\n",
 +                                     in_mtu);
                        return -1;
                }
                newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
@@@ -234,7 -236,7 +235,7 @@@ static inline bool find_syn_match(cons
        return false;
  }
  
 -static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
 +static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
  {
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                           (1 << NF_INET_LOCAL_OUT) |
                           (1 << NF_INET_POST_ROUTING))) != 0) {
 -              printk("xt_TCPMSS: path-MTU clamping only supported in "
 -                     "FORWARD, OUTPUT and POSTROUTING hooks\n");
 -              return false;
 +              pr_info("path-MTU clamping only supported in "
 +                      "FORWARD, OUTPUT and POSTROUTING hooks\n");
 +              return -EINVAL;
        }
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
 -                      return true;
 -      printk("xt_TCPMSS: Only works on TCP SYN packets\n");
 -      return false;
 +                      return 0;
 +      pr_info("Only works on TCP SYN packets\n");
 +      return -EINVAL;
  }
  
  #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 -static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
 +static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
  {
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ip6t_entry *e = par->entryinfo;
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                           (1 << NF_INET_LOCAL_OUT) |
                           (1 << NF_INET_POST_ROUTING))) != 0) {
 -              printk("xt_TCPMSS: path-MTU clamping only supported in "
 -                     "FORWARD, OUTPUT and POSTROUTING hooks\n");
 -              return false;
 +              pr_info("path-MTU clamping only supported in "
 +                      "FORWARD, OUTPUT and POSTROUTING hooks\n");
 +              return -EINVAL;
        }
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
 -                      return true;
 -      printk("xt_TCPMSS: Only works on TCP SYN packets\n");
 -      return false;
 +                      return 0;
 +      pr_info("Only works on TCP SYN packets\n");
 +      return -EINVAL;
  }
  #endif
  
@@@ -5,18 -5,19 +5,19 @@@
   *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
   *            only ignore TIME_WAIT or gone connections
   *   (C) CC Computer Consultants GmbH, 2007
 - *   Contact: <jengelh@computergmbh.de>
   *
   * based on ...
   *
   * Kernel module to match connection tracking information.
   * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
   */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/in.h>
  #include <linux/in6.h>
  #include <linux/ip.h>
  #include <linux/ipv6.h>
  #include <linux/jhash.h>
+ #include <linux/slab.h>
  #include <linux/list.h>
  #include <linux/module.h>
  #include <linux/random.h>
@@@ -216,35 -217,33 +217,35 @@@ connlimit_mt(const struct sk_buff *skb
        return false;
  }
  
 -static bool connlimit_mt_check(const struct xt_mtchk_param *par)
 +static int connlimit_mt_check(const struct xt_mtchk_param *par)
  {
        struct xt_connlimit_info *info = par->matchinfo;
        unsigned int i;
 +      int ret;
  
        if (unlikely(!connlimit_rnd_inited)) {
                get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
                connlimit_rnd_inited = true;
        }
 -      if (nf_ct_l3proto_try_module_get(par->family) < 0) {
 -              printk(KERN_WARNING "cannot load conntrack support for "
 -                     "address family %u\n", par->family);
 -              return false;
 +      ret = nf_ct_l3proto_try_module_get(par->family);
 +      if (ret < 0) {
 +              pr_info("cannot load conntrack support for "
 +                      "address family %u\n", par->family);
 +              return ret;
        }
  
        /* init private data */
        info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
        if (info->data == NULL) {
                nf_ct_l3proto_module_put(par->family);
 -              return false;
 +              return -ENOMEM;
        }
  
        spin_lock_init(&info->data->lock);
        for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
                INIT_LIST_HEAD(&info->data->iphash[i]);
  
 -      return true;
 +      return 0;
  }
  
  static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
diff --combined net/netfilter/xt_dccp.c
@@@ -10,6 -10,7 +10,7 @@@
  
  #include <linux/module.h>
  #include <linux/skbuff.h>
+ #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <net/ip.h>
  #include <linux/dccp.h>
@@@ -123,17 -124,13 +124,17 @@@ dccp_mt(const struct sk_buff *skb, cons
                           XT_DCCP_OPTION, info->flags, info->invflags);
  }
  
 -static bool dccp_mt_check(const struct xt_mtchk_param *par)
 +static int dccp_mt_check(const struct xt_mtchk_param *par)
  {
        const struct xt_dccp_info *info = par->matchinfo;
  
 -      return !(info->flags & ~XT_DCCP_VALID_FLAGS)
 -              && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
 -              && !(info->invflags & ~info->flags);
 +      if (info->flags & ~XT_DCCP_VALID_FLAGS)
 +              return -EINVAL;
 +      if (info->invflags & ~XT_DCCP_VALID_FLAGS)
 +              return -EINVAL;
 +      if (info->invflags & ~info->flags)
 +              return -EINVAL;
 +      return 0;
  }
  
  static struct xt_match dccp_mt_reg[] __read_mostly = {
@@@ -7,7 -7,6 +7,7 @@@
   *
   * Development of this code was funded by Astaro AG, http://www.astaro.com/
   */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/random.h>
@@@ -37,7 -36,7 +37,7 @@@
  
  MODULE_LICENSE("GPL");
  MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 -MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
 +MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
  MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
  MODULE_ALIAS("ipt_hashlimit");
  MODULE_ALIAS("ip6t_hashlimit");
@@@ -81,14 -80,12 +81,14 @@@ struct dsthash_ent 
        struct dsthash_dst dst;
  
        /* modified structure members in the end */
 +      spinlock_t lock;
        unsigned long expires;          /* precalculated expiry time */
        struct {
                unsigned long prev;     /* last modification */
                u_int32_t credit;
                u_int32_t credit_cap, cost;
        } rateinfo;
 +      struct rcu_head rcu;
  };
  
  struct xt_hashlimit_htable {
@@@ -145,11 -142,9 +145,11 @@@ dsthash_find(const struct xt_hashlimit_
        u_int32_t hash = hash_dst(ht, dst);
  
        if (!hlist_empty(&ht->hash[hash])) {
 -              hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
 -                      if (dst_cmp(ent, dst))
 +              hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
 +                      if (dst_cmp(ent, dst)) {
 +                              spin_lock(&ent->lock);
                                return ent;
 +                      }
        }
        return NULL;
  }
@@@ -161,10 -156,9 +161,10 @@@ dsthash_alloc_init(struct xt_hashlimit_
  {
        struct dsthash_ent *ent;
  
 +      spin_lock(&ht->lock);
        /* initialize hash with random val at the time we allocate
         * the first hashtable entry */
 -      if (!ht->rnd_initialized) {
 +      if (unlikely(!ht->rnd_initialized)) {
                get_random_bytes(&ht->rnd, sizeof(ht->rnd));
                ht->rnd_initialized = true;
        }
        if (ht->cfg.max && ht->count >= ht->cfg.max) {
                /* FIXME: do something. question is what.. */
                if (net_ratelimit())
 -                      printk(KERN_WARNING
 -                              "xt_hashlimit: max count of %u reached\n",
 -                              ht->cfg.max);
 -              return NULL;
 -      }
 -
 -      ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
 +                      pr_err("max count of %u reached\n", ht->cfg.max);
 +              ent = NULL;
 +      } else
 +              ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
        if (!ent) {
                if (net_ratelimit())
 -                      printk(KERN_ERR
 -                              "xt_hashlimit: can't allocate dsthash_ent\n");
 -              return NULL;
 -      }
 -      memcpy(&ent->dst, dst, sizeof(ent->dst));
 +                      pr_err("cannot allocate dsthash_ent\n");
 +      } else {
 +              memcpy(&ent->dst, dst, sizeof(ent->dst));
 +              spin_lock_init(&ent->lock);
  
 -      hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
 -      ht->count++;
 +              spin_lock(&ent->lock);
 +              hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
 +              ht->count++;
 +      }
 +      spin_unlock(&ht->lock);
        return ent;
  }
  
 -static inline void
 -dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
 +static void dsthash_free_rcu(struct rcu_head *head)
  {
 -      hlist_del(&ent->node);
 +      struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);
 +
        kmem_cache_free(hashlimit_cachep, ent);
 -      ht->count--;
  }
 -static void htable_gc(unsigned long htlong);
  
 -static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
 +static inline void
 +dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
  {
 -      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 -      struct xt_hashlimit_htable *hinfo;
 -      unsigned int size;
 -      unsigned int i;
 -
 -      if (minfo->cfg.size)
 -              size = minfo->cfg.size;
 -      else {
 -              size = ((totalram_pages << PAGE_SHIFT) / 16384) /
 -                     sizeof(struct list_head);
 -              if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 -                      size = 8192;
 -              if (size < 16)
 -                      size = 16;
 -      }
 -      /* FIXME: don't use vmalloc() here or anywhere else -HW */
 -      hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
 -                      sizeof(struct list_head) * size);
 -      if (!hinfo) {
 -              printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
 -              return -1;
 -      }
 -      minfo->hinfo = hinfo;
 -
 -      /* copy match config into hashtable config */
 -      hinfo->cfg.mode        = minfo->cfg.mode;
 -      hinfo->cfg.avg         = minfo->cfg.avg;
 -      hinfo->cfg.burst       = minfo->cfg.burst;
 -      hinfo->cfg.max         = minfo->cfg.max;
 -      hinfo->cfg.gc_interval = minfo->cfg.gc_interval;
 -      hinfo->cfg.expire      = minfo->cfg.expire;
 -
 -      if (family == NFPROTO_IPV4)
 -              hinfo->cfg.srcmask = hinfo->cfg.dstmask = 32;
 -      else
 -              hinfo->cfg.srcmask = hinfo->cfg.dstmask = 128;
 -
 -      hinfo->cfg.size = size;
 -      if (!hinfo->cfg.max)
 -              hinfo->cfg.max = 8 * hinfo->cfg.size;
 -      else if (hinfo->cfg.max < hinfo->cfg.size)
 -              hinfo->cfg.max = hinfo->cfg.size;
 -
 -      for (i = 0; i < hinfo->cfg.size; i++)
 -              INIT_HLIST_HEAD(&hinfo->hash[i]);
 -
 -      hinfo->use = 1;
 -      hinfo->count = 0;
 -      hinfo->family = family;
 -      hinfo->rnd_initialized = false;
 -      spin_lock_init(&hinfo->lock);
 -      hinfo->pde = proc_create_data(minfo->name, 0,
 -              (family == NFPROTO_IPV4) ?
 -              hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
 -              &dl_file_ops, hinfo);
 -      if (!hinfo->pde) {
 -              vfree(hinfo);
 -              return -1;
 -      }
 -      hinfo->net = net;
 -
 -      setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
 -      hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
 -      add_timer(&hinfo->timer);
 -
 -      hlist_add_head(&hinfo->node, &hashlimit_net->htables);
 -
 -      return 0;
 +      hlist_del_rcu(&ent->node);
 +      call_rcu_bh(&ent->rcu, dsthash_free_rcu);
 +      ht->count--;
  }
 +static void htable_gc(unsigned long htlong);
  
  static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
                         u_int8_t family)
        /* FIXME: don't use vmalloc() here or anywhere else -HW */
        hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
                        sizeof(struct list_head) * size);
 -      if (hinfo == NULL) {
 -              printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
 -              return -1;
 -      }
 +      if (hinfo == NULL)
 +              return -ENOMEM;
        minfo->hinfo = hinfo;
  
        /* copy match config into hashtable config */
                &dl_file_ops, hinfo);
        if (hinfo->pde == NULL) {
                vfree(hinfo);
 -              return -1;
 +              return -ENOMEM;
        }
        hinfo->net = net;
  
@@@ -431,6 -493,7 +431,7 @@@ static void hashlimit_ipv6_mask(__be32 
        case 64 ... 95:
                i[2] = maskl(i[2], p - 64);
                i[3] = 0;
+               break;
        case 96 ... 127:
                i[3] = maskl(i[3], p - 96);
                break;
@@@ -514,6 -577,57 +515,6 @@@ hashlimit_init_dst(const struct xt_hash
        return 0;
  }
  
 -static bool
 -hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
 -{
 -      const struct xt_hashlimit_info *r = par->matchinfo;
 -      struct xt_hashlimit_htable *hinfo = r->hinfo;
 -      unsigned long now = jiffies;
 -      struct dsthash_ent *dh;
 -      struct dsthash_dst dst;
 -
 -      if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
 -              goto hotdrop;
 -
 -      spin_lock_bh(&hinfo->lock);
 -      dh = dsthash_find(hinfo, &dst);
 -      if (!dh) {
 -              dh = dsthash_alloc_init(hinfo, &dst);
 -              if (!dh) {
 -                      spin_unlock_bh(&hinfo->lock);
 -                      goto hotdrop;
 -              }
 -
 -              dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
 -              dh->rateinfo.prev = jiffies;
 -              dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
 -                                                 hinfo->cfg.burst);
 -              dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
 -                                                     hinfo->cfg.burst);
 -              dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
 -      } else {
 -              /* update expiration timeout */
 -              dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
 -              rateinfo_recalc(dh, now);
 -      }
 -
 -      if (dh->rateinfo.credit >= dh->rateinfo.cost) {
 -              /* We're underlimit. */
 -              dh->rateinfo.credit -= dh->rateinfo.cost;
 -              spin_unlock_bh(&hinfo->lock);
 -              return true;
 -      }
 -
 -      spin_unlock_bh(&hinfo->lock);
 -
 -      /* default case: we're overlimit, thus don't match */
 -      return false;
 -
 -hotdrop:
 -      *par->hotdrop = true;
 -      return false;
 -}
 -
  static bool
  hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
  {
        if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
                goto hotdrop;
  
 -      spin_lock_bh(&hinfo->lock);
 +      rcu_read_lock_bh();
        dh = dsthash_find(hinfo, &dst);
        if (dh == NULL) {
                dh = dsthash_alloc_init(hinfo, &dst);
                if (dh == NULL) {
 -                      spin_unlock_bh(&hinfo->lock);
 +                      rcu_read_unlock_bh();
                        goto hotdrop;
                }
 -
                dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
                dh->rateinfo.prev = jiffies;
                dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
        if (dh->rateinfo.credit >= dh->rateinfo.cost) {
                /* below the limit */
                dh->rateinfo.credit -= dh->rateinfo.cost;
 -              spin_unlock_bh(&hinfo->lock);
 +              spin_unlock(&dh->lock);
 +              rcu_read_unlock_bh();
                return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
        }
  
 -      spin_unlock_bh(&hinfo->lock);
 +      spin_unlock(&dh->lock);
 +      rcu_read_unlock_bh();
        /* default match is underlimit - so over the limit, we need to invert */
        return info->cfg.mode & XT_HASHLIMIT_INVERT;
  
        return false;
  }
  
 -static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
 -{
 -      struct net *net = par->net;
 -      struct xt_hashlimit_info *r = par->matchinfo;
 -
 -      /* Check for overflow. */
 -      if (r->cfg.burst == 0 ||
 -          user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
 -              printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
 -                     r->cfg.avg, r->cfg.burst);
 -              return false;
 -      }
 -      if (r->cfg.mode == 0 ||
 -          r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
 -                         XT_HASHLIMIT_HASH_DIP |
 -                         XT_HASHLIMIT_HASH_SIP |
 -                         XT_HASHLIMIT_HASH_SPT))
 -              return false;
 -      if (!r->cfg.gc_interval)
 -              return false;
 -      if (!r->cfg.expire)
 -              return false;
 -      if (r->name[sizeof(r->name) - 1] != '\0')
 -              return false;
 -
 -      mutex_lock(&hashlimit_mutex);
 -      r->hinfo = htable_find_get(net, r->name, par->match->family);
 -      if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
 -              mutex_unlock(&hashlimit_mutex);
 -              return false;
 -      }
 -      mutex_unlock(&hashlimit_mutex);
 -
 -      return true;
 -}
 -
 -static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
 +static int hashlimit_mt_check(const struct xt_mtchk_param *par)
  {
        struct net *net = par->net;
        struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
 +      int ret;
  
        /* Check for overflow. */
        if (info->cfg.burst == 0 ||
            user2credits(info->cfg.avg * info->cfg.burst) <
            user2credits(info->cfg.avg)) {
 -              printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
 -                     info->cfg.avg, info->cfg.burst);
 -              return false;
 +              pr_info("overflow, try lower: %u/%u\n",
 +                      info->cfg.avg, info->cfg.burst);
 +              return -ERANGE;
        }
        if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
 -              return false;
 +              return -EINVAL;
        if (info->name[sizeof(info->name)-1] != '\0')
 -              return false;
 -      if (par->match->family == NFPROTO_IPV4) {
 +              return -EINVAL;
 +      if (par->family == NFPROTO_IPV4) {
                if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
 -                      return false;
 +                      return -EINVAL;
        } else {
                if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
 -                      return false;
 +                      return -EINVAL;
        }
  
        mutex_lock(&hashlimit_mutex);
 -      info->hinfo = htable_find_get(net, info->name, par->match->family);
 -      if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
 -              mutex_unlock(&hashlimit_mutex);
 -              return false;
 +      info->hinfo = htable_find_get(net, info->name, par->family);
 +      if (info->hinfo == NULL) {
 +              ret = htable_create(net, info, par->family);
 +              if (ret < 0) {
 +                      mutex_unlock(&hashlimit_mutex);
 +                      return ret;
 +              }
        }
        mutex_unlock(&hashlimit_mutex);
 -      return true;
 -}
 -
 -static void
 -hashlimit_mt_destroy_v0(const struct xt_mtdtor_param *par)
 -{
 -      const struct xt_hashlimit_info *r = par->matchinfo;
 -
 -      htable_put(r->hinfo);
 +      return 0;
  }
  
  static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
        htable_put(info->hinfo);
  }
  
 -#ifdef CONFIG_COMPAT
 -struct compat_xt_hashlimit_info {
 -      char name[IFNAMSIZ];
 -      struct hashlimit_cfg cfg;
 -      compat_uptr_t hinfo;
 -      compat_uptr_t master;
 -};
 -
 -static void hashlimit_mt_compat_from_user(void *dst, const void *src)
 -{
 -      int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
 -
 -      memcpy(dst, src, off);
 -      memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
 -}
 -
 -static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
 -{
 -      int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
 -
 -      return copy_to_user(dst, src, off) ? -EFAULT : 0;
 -}
 -#endif
 -
  static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 -      {
 -              .name           = "hashlimit",
 -              .revision       = 0,
 -              .family         = NFPROTO_IPV4,
 -              .match          = hashlimit_mt_v0,
 -              .matchsize      = sizeof(struct xt_hashlimit_info),
 -#ifdef CONFIG_COMPAT
 -              .compatsize     = sizeof(struct compat_xt_hashlimit_info),
 -              .compat_from_user = hashlimit_mt_compat_from_user,
 -              .compat_to_user = hashlimit_mt_compat_to_user,
 -#endif
 -              .checkentry     = hashlimit_mt_check_v0,
 -              .destroy        = hashlimit_mt_destroy_v0,
 -              .me             = THIS_MODULE
 -      },
        {
                .name           = "hashlimit",
                .revision       = 1,
                .me             = THIS_MODULE,
        },
  #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 -      {
 -              .name           = "hashlimit",
 -              .family         = NFPROTO_IPV6,
 -              .match          = hashlimit_mt_v0,
 -              .matchsize      = sizeof(struct xt_hashlimit_info),
 -#ifdef CONFIG_COMPAT
 -              .compatsize     = sizeof(struct compat_xt_hashlimit_info),
 -              .compat_from_user = hashlimit_mt_compat_from_user,
 -              .compat_to_user = hashlimit_mt_compat_to_user,
 -#endif
 -              .checkentry     = hashlimit_mt_check_v0,
 -              .destroy        = hashlimit_mt_destroy_v0,
 -              .me             = THIS_MODULE
 -      },
        {
                .name           = "hashlimit",
                .revision       = 1,
@@@ -674,22 -880,20 +675,23 @@@ static void dl_seq_stop(struct seq_fil
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
  
-       kfree(bucket);
+       if (!IS_ERR(bucket))
+               kfree(bucket);
        spin_unlock_bh(&htable->lock);
  }
  
  static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                                   struct seq_file *s)
  {
 +      int res;
 +
 +      spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
        rateinfo_recalc(ent, jiffies);
  
        switch (family) {
        case NFPROTO_IPV4:
 -              return seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
 +              res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
                                 (long)(ent->expires - jiffies)/HZ,
                                 &ent->dst.ip.src,
                                 ntohs(ent->dst.src_port),
                                 ntohs(ent->dst.dst_port),
                                 ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                 ent->rateinfo.cost);
 +              break;
  #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
        case NFPROTO_IPV6:
 -              return seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
 +              res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
                                 (long)(ent->expires - jiffies)/HZ,
                                 &ent->dst.ip6.src,
                                 ntohs(ent->dst.src_port),
                                 ntohs(ent->dst.dst_port),
                                 ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                 ent->rateinfo.cost);
 +              break;
  #endif
        default:
                BUG();
 -              return 0;
 +              res = 0;
        }
 +      spin_unlock(&ent->lock);
 +      return res;
  }
  
  static int dl_seq_show(struct seq_file *s, void *v)
@@@ -824,7 -1024,7 +826,7 @@@ static int __init hashlimit_mt_init(voi
                                            sizeof(struct dsthash_ent), 0, 0,
                                            NULL);
        if (!hashlimit_cachep) {
 -              printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
 +              pr_warning("unable to create slab cache\n");
                goto err2;
        }
        return 0;
@@@ -839,11 -1039,9 +841,11 @@@ err1
  
  static void __exit hashlimit_mt_exit(void)
  {
 -      kmem_cache_destroy(hashlimit_cachep);
        xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
        unregister_pernet_subsys(&hashlimit_net_ops);
 +
 +      rcu_barrier_bh();
 +      kmem_cache_destroy(hashlimit_cachep);
  }
  
  module_init(hashlimit_mt_init);
diff --combined net/netfilter/xt_limit.c
@@@ -5,7 -5,8 +5,9 @@@
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/slab.h>
  #include <linux/module.h>
  #include <linux/skbuff.h>
  #include <linux/spinlock.h>
@@@ -97,7 -98,7 +99,7 @@@ user2credits(u_int32_t user
        return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
  }
  
 -static bool limit_mt_check(const struct xt_mtchk_param *par)
 +static int limit_mt_check(const struct xt_mtchk_param *par)
  {
        struct xt_rateinfo *r = par->matchinfo;
        struct xt_limit_priv *priv;
        /* Check for overflow. */
        if (r->burst == 0
            || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
 -              printk("Overflow in xt_limit, try lower: %u/%u\n",
 -                     r->avg, r->burst);
 -              return false;
 +              pr_info("Overflow, try lower: %u/%u\n",
 +                      r->avg, r->burst);
 +              return -ERANGE;
        }
  
        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
 -              return false;
 +              return -ENOMEM;
  
        /* For SMP, we only want to use one set of state. */
        r->master = priv;
                r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
                r->cost = user2credits(r->avg);
        }
 -      return true;
 +      return 0;
  }
  
  static void limit_mt_destroy(const struct xt_mtdtor_param *par)
diff --combined net/netfilter/xt_quota.c
@@@ -4,6 -4,7 +4,7 @@@
   * Sam Johnston <samj@samj.net>
   */
  #include <linux/skbuff.h>
+ #include <linux/slab.h>
  #include <linux/spinlock.h>
  
  #include <linux/netfilter/x_tables.h>
@@@ -43,19 -44,19 +44,19 @@@ quota_mt(const struct sk_buff *skb, con
        return ret;
  }
  
 -static bool quota_mt_check(const struct xt_mtchk_param *par)
 +static int quota_mt_check(const struct xt_mtchk_param *par)
  {
        struct xt_quota_info *q = par->matchinfo;
  
        if (q->flags & ~XT_QUOTA_MASK)
 -              return false;
 +              return -EINVAL;
  
        q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
        if (q->master == NULL)
 -              return false;
 +              return -ENOMEM;
  
        q->master->quota = q->quota;
 -      return true;
 +      return 0;
  }
  
  static void quota_mt_destroy(const struct xt_mtdtor_param *par)
@@@ -12,7 -12,6 +12,7 @@@
   * Author: Stephen Frost <sfrost@snowman.net>
   * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org
   */
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/init.h>
  #include <linux/ip.h>
  #include <linux/ipv6.h>
@@@ -28,6 -27,7 +28,7 @@@
  #include <linux/bitops.h>
  #include <linux/skbuff.h>
  #include <linux/inet.h>
+ #include <linux/slab.h>
  #include <net/net_namespace.h>
  #include <net/netns/generic.h>
  
@@@ -35,8 -35,8 +36,8 @@@
  #include <linux/netfilter/xt_recent.h>
  
  MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 -MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
 -MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching for IPv4");
 +MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
 +MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching");
  MODULE_LICENSE("GPL");
  MODULE_ALIAS("ipt_recent");
  MODULE_ALIAS("ip6t_recent");
@@@ -51,14 -51,14 +52,14 @@@ module_param(ip_list_tot, uint, 0400)
  module_param(ip_pkt_list_tot, uint, 0400);
  module_param(ip_list_hash_size, uint, 0400);
  module_param(ip_list_perms, uint, 0400);
 -module_param(ip_list_uid, uint, 0400);
 -module_param(ip_list_gid, uint, 0400);
 +module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
 +module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
  MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
  MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
  MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
 -MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
 -MODULE_PARM_DESC(ip_list_gid,"owning group of /proc/net/xt_recent/* files");
 +MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
 +MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
  
  struct recent_entry {
        struct list_head        list;
@@@ -84,6 -84,9 +85,6 @@@ struct recent_net 
        struct list_head        tables;
  #ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *xt_recent;
 -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
 -      struct proc_dir_entry   *ipt_recent;
 -#endif
  #endif
  };
  
@@@ -144,25 -147,6 +145,25 @@@ static void recent_entry_remove(struct 
        t->entries--;
  }
  
 +/*
 + * Drop entries with timestamps older than 'time'.
 + */
 +static void recent_entry_reap(struct recent_table *t, unsigned long time)
 +{
 +      struct recent_entry *e;
 +
 +      /*
 +       * The head of the LRU list is always the oldest entry.
 +       */
 +      e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
 +
 +      /*
 +       * The last time stamp is the most recent.
 +       */
 +      if (time_after(time, e->stamps[e->index-1]))
 +              recent_entry_remove(t, e);
 +}
 +
  static struct recent_entry *
  recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
                  u_int16_t family, u_int8_t ttl)
@@@ -234,7 -218,7 +235,7 @@@ recent_mt(const struct sk_buff *skb, co
        u_int8_t ttl;
        bool ret = info->invert;
  
 -      if (par->match->family == NFPROTO_IPV4) {
 +      if (par->family == NFPROTO_IPV4) {
                const struct iphdr *iph = ip_hdr(skb);
  
                if (info->side == XT_RECENT_DEST)
  
        spin_lock_bh(&recent_lock);
        t = recent_table_lookup(recent_net, info->name);
 -      e = recent_entry_lookup(t, &addr, par->match->family,
 +      e = recent_entry_lookup(t, &addr, par->family,
                                (info->check_set & XT_RECENT_TTL) ? ttl : 0);
        if (e == NULL) {
                if (!(info->check_set & XT_RECENT_SET))
                        goto out;
 -              e = recent_entry_init(t, &addr, par->match->family, ttl);
 +              e = recent_entry_init(t, &addr, par->family, ttl);
                if (e == NULL)
                        *par->hotdrop = true;
                ret = !ret;
                for (i = 0; i < e->nstamps; i++) {
                        if (info->seconds && time_after(time, e->stamps[i]))
                                continue;
-                       if (info->hit_count && ++hits >= info->hit_count) {
+                       if (!info->hit_count || ++hits >= info->hit_count) {
                                ret = !ret;
                                break;
                        }
                }
 +
 +              /* info->seconds must be non-zero */
 +              if (info->check_set & XT_RECENT_REAP)
 +                      recent_entry_reap(t, time);
        }
  
        if (info->check_set & XT_RECENT_SET ||
@@@ -305,7 -285,7 +306,7 @@@ out
        return ret;
  }
  
 -static bool recent_mt_check(const struct xt_mtchk_param *par)
 +static int recent_mt_check(const struct xt_mtchk_param *par)
  {
        struct recent_net *recent_net = recent_pernet(par->net);
        const struct xt_recent_mtinfo *info = par->matchinfo;
        struct proc_dir_entry *pde;
  #endif
        unsigned i;
 -      bool ret = false;
 +      int ret = -EINVAL;
  
        if (unlikely(!hash_rnd_inited)) {
                get_random_bytes(&hash_rnd, sizeof(hash_rnd));
                hash_rnd_inited = true;
        }
 +      if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
 +              pr_info("Unsupported user space flags (%08x)\n",
 +                      info->check_set);
 +              return -EINVAL;
 +      }
        if (hweight8(info->check_set &
                     (XT_RECENT_SET | XT_RECENT_REMOVE |
                      XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
 -              return false;
 +              return -EINVAL;
        if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
 -          (info->seconds || info->hit_count))
 -              return false;
 +          (info->seconds || info->hit_count ||
 +          (info->check_set & XT_RECENT_MODIFIERS)))
 +              return -EINVAL;
 +      if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
 +              return -EINVAL;
        if (info->hit_count > ip_pkt_list_tot) {
 -              pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
 +              pr_info("hitcount (%u) is larger than "
                        "packets to be remembered (%u)\n",
                        info->hit_count, ip_pkt_list_tot);
 -              return false;
 +              return -EINVAL;
        }
        if (info->name[0] == '\0' ||
            strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
 -              return false;
 +              return -EINVAL;
  
        mutex_lock(&recent_mutex);
        t = recent_table_lookup(recent_net, info->name);
        if (t != NULL) {
                t->refcnt++;
 -              ret = true;
 +              ret = 0;
                goto out;
        }
  
        t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size,
                    GFP_KERNEL);
 -      if (t == NULL)
 +      if (t == NULL) {
 +              ret = -ENOMEM;
                goto out;
 +      }
        t->refcnt = 1;
        strcpy(t->name, info->name);
        INIT_LIST_HEAD(&t->lru_list);
                  &recent_mt_fops, t);
        if (pde == NULL) {
                kfree(t);
 -              goto out;
 -      }
 -      pde->uid = ip_list_uid;
 -      pde->gid = ip_list_gid;
 -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
 -      pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
 -                    &recent_old_fops, t);
 -      if (pde == NULL) {
 -              remove_proc_entry(t->name, recent_net->xt_recent);
 -              kfree(t);
 +              ret = -ENOMEM;
                goto out;
        }
        pde->uid = ip_list_uid;
        pde->gid = ip_list_gid;
 -#endif
  #endif
        spin_lock_bh(&recent_lock);
        list_add_tail(&t->list, &recent_net->tables);
        spin_unlock_bh(&recent_lock);
 -      ret = true;
 +      ret = 0;
  out:
        mutex_unlock(&recent_mutex);
        return ret;
@@@ -397,6 -377,9 +398,6 @@@ static void recent_mt_destroy(const str
                list_del(&t->list);
                spin_unlock_bh(&recent_lock);
  #ifdef CONFIG_PROC_FS
 -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
 -              remove_proc_entry(t->name, recent_net->ipt_recent);
 -#endif
                remove_proc_entry(t->name, recent_net->xt_recent);
  #endif
                recent_table_flush(t);
@@@ -488,6 -471,84 +489,6 @@@ static int recent_seq_open(struct inod
        return 0;
  }
  
 -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
 -static int recent_old_seq_open(struct inode *inode, struct file *filp)
 -{
 -      static bool warned_of_old;
 -
 -      if (unlikely(!warned_of_old)) {
 -              printk(KERN_INFO KBUILD_MODNAME ": Use of /proc/net/ipt_recent"
 -                     " is deprecated; use /proc/net/xt_recent.\n");
 -              warned_of_old = true;
 -      }
 -      return recent_seq_open(inode, filp);
 -}
 -
 -static ssize_t recent_old_proc_write(struct file *file,
 -                                   const char __user *input,
 -                                   size_t size, loff_t *loff)
 -{
 -      const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
 -      struct recent_table *t = pde->data;
 -      struct recent_entry *e;
 -      char buf[sizeof("+255.255.255.255")], *c = buf;
 -      union nf_inet_addr addr = {};
 -      int add;
 -
 -      if (size > sizeof(buf))
 -              size = sizeof(buf);
 -      if (copy_from_user(buf, input, size))
 -              return -EFAULT;
 -
 -      c = skip_spaces(c);
 -
 -      if (size - (c - buf) < 5)
 -              return c - buf;
 -      if (!strncmp(c, "clear", 5)) {
 -              c += 5;
 -              spin_lock_bh(&recent_lock);
 -              recent_table_flush(t);
 -              spin_unlock_bh(&recent_lock);
 -              return c - buf;
 -      }
 -
 -      switch (*c) {
 -      case '-':
 -              add = 0;
 -              c++;
 -              break;
 -      case '+':
 -              c++;
 -      default:
 -              add = 1;
 -              break;
 -      }
 -      addr.ip = in_aton(c);
 -
 -      spin_lock_bh(&recent_lock);
 -      e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0);
 -      if (e == NULL) {
 -              if (add)
 -                      recent_entry_init(t, &addr, NFPROTO_IPV4, 0);
 -      } else {
 -              if (add)
 -                      recent_entry_update(t, e);
 -              else
 -                      recent_entry_remove(t, e);
 -      }
 -      spin_unlock_bh(&recent_lock);
 -      return size;
 -}
 -
 -static const struct file_operations recent_old_fops = {
 -      .open           = recent_old_seq_open,
 -      .read           = seq_read,
 -      .write          = recent_old_proc_write,
 -      .release        = seq_release_private,
 -      .owner          = THIS_MODULE,
 -};
 -#endif
 -
  static ssize_t
  recent_mt_proc_write(struct file *file, const char __user *input,
                     size_t size, loff_t *loff)
                add = true;
                break;
        default:
 -              printk(KERN_INFO KBUILD_MODNAME ": Need +ip, -ip or /\n");
 +              pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
                return -EINVAL;
        }
  
        }
  
        if (!succ) {
 -              printk(KERN_INFO KBUILD_MODNAME ": illegal address written "
 -                     "to procfs\n");
 +              pr_info("illegal address written to procfs\n");
                return -EINVAL;
        }
  
@@@ -575,11 -637,21 +576,11 @@@ static int __net_init recent_proc_net_i
        recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
        if (!recent_net->xt_recent)
                return -ENOMEM;
 -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
 -      recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
 -      if (!recent_net->ipt_recent) {
 -              proc_net_remove(net, "xt_recent");
 -              return -ENOMEM;
 -      }
 -#endif
        return 0;
  }
  
  static void __net_exit recent_proc_net_exit(struct net *net)
  {
 -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
 -      proc_net_remove(net, "ipt_recent");
 -#endif
        proc_net_remove(net, "xt_recent");
  }
  #else
@@@ -12,6 -12,7 +12,7 @@@
  #include <linux/spinlock.h>
  #include <linux/skbuff.h>
  #include <linux/net.h>
+ #include <linux/slab.h>
  
  #include <linux/netfilter/xt_statistic.h>
  #include <linux/netfilter/x_tables.h>
@@@ -52,20 -53,22 +53,20 @@@ statistic_mt(const struct sk_buff *skb
        return ret;
  }
  
 -static bool statistic_mt_check(const struct xt_mtchk_param *par)
 +static int statistic_mt_check(const struct xt_mtchk_param *par)
  {
        struct xt_statistic_info *info = par->matchinfo;
  
        if (info->mode > XT_STATISTIC_MODE_MAX ||
            info->flags & ~XT_STATISTIC_MASK)
 -              return false;
 +              return -EINVAL;
  
        info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
 -      if (info->master == NULL) {
 -              printk(KERN_ERR KBUILD_MODNAME ": Out of memory\n");
 -              return false;
 -      }
 +      if (info->master == NULL)
 +              return -ENOMEM;
        info->master->count = info->u.nth.count;
  
 -      return true;
 +      return 0;
  }
  
  static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
@@@ -7,6 -7,7 +7,7 @@@
   * published by the Free Software Foundation.
   */
  
+ #include <linux/gfp.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/kernel.h>
@@@ -26,10 -27,12 +27,10 @@@ string_mt(const struct sk_buff *skb, co
  {
        const struct xt_string_info *conf = par->matchinfo;
        struct ts_state state;
 -      int invert;
 +      bool invert;
  
        memset(&state, 0, sizeof(struct ts_state));
 -
 -      invert = (par->match->revision == 0 ? conf->u.v0.invert :
 -                                  conf->u.v1.flags & XT_STRING_FLAG_INVERT);
 +      invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
  
        return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
                             conf->to_offset, conf->config, &state)
@@@ -38,7 -41,7 +39,7 @@@
  
  #define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m))
  
 -static bool string_mt_check(const struct xt_mtchk_param *par)
 +static int string_mt_check(const struct xt_mtchk_param *par)
  {
        struct xt_string_info *conf = par->matchinfo;
        struct ts_config *ts_conf;
  
        /* Damn, can't handle this case properly with iptables... */
        if (conf->from_offset > conf->to_offset)
 -              return false;
 +              return -EINVAL;
        if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
 -              return false;
 +              return -EINVAL;
        if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
 -              return false;
 -      if (par->match->revision == 1) {
 -              if (conf->u.v1.flags &
 -                  ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
 -                      return false;
 -              if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
 -                      flags |= TS_IGNORECASE;
 -      }
 +              return -EINVAL;
 +      if (conf->u.v1.flags &
 +          ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
 +              return -EINVAL;
 +      if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
 +              flags |= TS_IGNORECASE;
        ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
                                     GFP_KERNEL, flags);
        if (IS_ERR(ts_conf))
 -              return false;
 +              return PTR_ERR(ts_conf);
  
        conf->config = ts_conf;
 -
 -      return true;
 +      return 0;
  }
  
  static void string_mt_destroy(const struct xt_mtdtor_param *par)
        textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config);
  }
  
 -static struct xt_match xt_string_mt_reg[] __read_mostly = {
 -      {
 -              .name           = "string",
 -              .revision       = 0,
 -              .family         = NFPROTO_UNSPEC,
 -              .checkentry     = string_mt_check,
 -              .match          = string_mt,
 -              .destroy        = string_mt_destroy,
 -              .matchsize      = sizeof(struct xt_string_info),
 -              .me             = THIS_MODULE
 -      },
 -      {
 -              .name           = "string",
 -              .revision       = 1,
 -              .family         = NFPROTO_UNSPEC,
 -              .checkentry     = string_mt_check,
 -              .match          = string_mt,
 -              .destroy        = string_mt_destroy,
 -              .matchsize      = sizeof(struct xt_string_info),
 -              .me             = THIS_MODULE
 -      },
 +static struct xt_match xt_string_mt_reg __read_mostly = {
 +      .name       = "string",
 +      .revision   = 1,
 +      .family     = NFPROTO_UNSPEC,
 +      .checkentry = string_mt_check,
 +      .match      = string_mt,
 +      .destroy    = string_mt_destroy,
 +      .matchsize  = sizeof(struct xt_string_info),
 +      .me         = THIS_MODULE,
  };
  
  static int __init string_mt_init(void)
  {
 -      return xt_register_matches(xt_string_mt_reg,
 -                                 ARRAY_SIZE(xt_string_mt_reg));
 +      return xt_register_match(&xt_string_mt_reg);
  }
  
  static void __exit string_mt_exit(void)
  {
 -      xt_unregister_matches(xt_string_mt_reg, ARRAY_SIZE(xt_string_mt_reg));
 +      xt_unregister_match(&xt_string_mt_reg);
  }
  
  module_init(string_mt_init);
diff --combined net/sched/act_ipt.c
@@@ -19,6 -19,7 +19,7 @@@
  #include <linux/rtnetlink.h>
  #include <linux/module.h>
  #include <linux/init.h>
+ #include <linux/slab.h>
  #include <net/netlink.h>
  #include <net/pkt_sched.h>
  #include <linux/tc_act/tc_ipt.h>
@@@ -46,8 -47,8 +47,8 @@@ static int ipt_init_target(struct ipt_e
  
        target = xt_request_find_target(AF_INET, t->u.user.name,
                                        t->u.user.revision);
 -      if (!target)
 -              return -ENOENT;
 +      if (IS_ERR(target))
 +              return PTR_ERR(target);
  
        t->u.kernel.target = target;
        par.table     = table;