Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author David S. Miller <davem@davemloft.net>
Thu, 5 Mar 2009 10:06:47 +0000 (02:06 -0800)
committer David S. Miller <davem@davemloft.net>
Thu, 5 Mar 2009 10:06:47 +0000 (02:06 -0800)
Conflicts:
drivers/net/tokenring/tmspci.c
drivers/net/ucc_geth_mii.c

16 files changed:
MAINTAINERS
drivers/net/arm/ks8695net.c
drivers/net/bonding/bond_main.c
drivers/net/fsl_pq_mdio.c
drivers/net/jme.c
drivers/net/sungem.c
drivers/net/tokenring/tmspci.c
include/linux/netdevice.h
net/802/tr.c
net/core/dev.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/netlink/af_netlink.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c

diff --combined MAINTAINERS
@@@ -1011,8 -1011,6 +1011,8 @@@ L:      netdev@vger.kernel.or
  S:    Supported
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
 +P:    Matt Carlson
 +M:    mcarlson@broadcom.com
  P:    Michael Chan
  M:    mchan@broadcom.com
  L:    netdev@vger.kernel.org
@@@ -2466,7 -2464,7 +2466,7 @@@ S:      Maintaine
  
  ISDN SUBSYSTEM
  P:    Karsten Keil
- M:    kkeil@suse.de
+ M:    isdn@linux-pingi.de
  L:    isdn4linux@listserv.isdn4linux.de (subscribers-only)
  W:    http://www.isdn4linux.de
  T:    git kernel.org:/pub/scm/linux/kernel/kkeil/isdn-2.6.git
@@@ -3641,12 -3639,6 +3641,12 @@@ M:    florian.fainelli@telecomint.e
  L:    netdev@vger.kernel.org
  S:    Maintained
  
 +RDS - RELIABLE DATAGRAM SOCKETS
 +P:    Andy Grover
 +M:    andy.grover@oracle.com
 +L:    rds-devel@oss.oracle.com
 +S:    Supported
 +
  READ-COPY UPDATE (RCU)
  P:    Dipankar Sarma
  M:    dipankar@in.ibm.com
diff --combined drivers/net/arm/ks8695net.c
@@@ -560,7 -560,7 +560,7 @@@ ks8695_reset(struct ks8695_priv *ksp
                msleep(1);
        }
  
-       if (reset_timeout == 0) {
+       if (reset_timeout < 0) {
                dev_crit(ksp->dev,
                         "Timeout waiting for DMA engines to reset\n");
                /* And blithely carry on */
@@@ -1059,7 -1059,7 +1059,7 @@@ ks8695_get_drvinfo(struct net_device *n
  {
        strlcpy(info->driver, MODULENAME, sizeof(info->driver));
        strlcpy(info->version, MODULEVERSION, sizeof(info->version));
 -      strlcpy(info->bus_info, ndev->dev.parent->bus_id,
 +      strlcpy(info->bus_info, dev_name(ndev->dev.parent),
                sizeof(info->bus_info));
  }
  
diff --combined drivers/net/bonding/bond_main.c
@@@ -1002,10 -1002,6 +1002,10 @@@ static void bond_mc_swap(struct bondin
  static void bond_do_fail_over_mac(struct bonding *bond,
                                  struct slave *new_active,
                                  struct slave *old_active)
 +      __releases(&bond->curr_slave_lock)
 +      __releases(&bond->lock)
 +      __acquires(&bond->lock)
 +      __acquires(&bond->curr_slave_lock)
  {
        u8 tmp_mac[ETH_ALEN];
        struct sockaddr saddr;
@@@ -3197,8 -3193,6 +3197,8 @@@ out
  #ifdef CONFIG_PROC_FS
  
  static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 +      __acquires(&dev_base_lock)
 +      __acquires(&bond->lock)
  {
        struct bonding *bond = seq->private;
        loff_t off = 0;
@@@ -3238,8 -3232,6 +3238,8 @@@ static void *bond_info_seq_next(struct 
  }
  
  static void bond_info_seq_stop(struct seq_file *seq, void *v)
 +      __releases(&bond->lock)
 +      __releases(&dev_base_lock)
  {
        struct bonding *bond = seq->private;
  
@@@ -3377,7 -3369,7 +3377,7 @@@ static int bond_info_seq_show(struct se
        return 0;
  }
  
 -static struct seq_operations bond_info_seq_ops = {
 +static const struct seq_operations bond_info_seq_ops = {
        .start = bond_info_seq_start,
        .next  = bond_info_seq_next,
        .stop  = bond_info_seq_stop,
@@@ -4121,7 -4113,7 +4121,7 @@@ static int bond_neigh_setup(struct net_
                const struct net_device_ops *slave_ops
                        = slave->dev->netdev_ops;
                if (slave_ops->ndo_neigh_setup)
-                       return slave_ops->ndo_neigh_setup(dev, parms);
+                       return slave_ops->ndo_neigh_setup(slave->dev, parms);
        }
        return 0;
  }
@@@ -4732,7 -4724,7 +4732,7 @@@ static void bond_free_all(void
   */
  int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
  {
 -      int mode = -1, i, rv;
 +      int modeint = -1, i, rv;
        char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
  
        for (p = (char *)buf; *p; p++)
        if (*p)
                rv = sscanf(buf, "%20s", modestr);
        else
 -              rv = sscanf(buf, "%d", &mode);
 +              rv = sscanf(buf, "%d", &modeint);
  
        if (!rv)
                return -1;
  
        for (i = 0; tbl[i].modename; i++) {
 -              if (mode == tbl[i].mode)
 +              if (modeint == tbl[i].mode)
                        return tbl[i].mode;
                if (strcmp(modestr, tbl[i].modename) == 0)
                        return tbl[i].mode;
diff --combined drivers/net/fsl_pq_mdio.c
index c434a15,0000000..b0ce144
mode 100644,000000..100644
--- /dev/null
@@@ -1,463 -1,0 +1,463 @@@
-       unsigned int timeout = PHY_INIT_TIMEOUT;
 +/*
 + * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
 + * Provides Bus interface for MIIM regs
 + *
 + * Author: Andy Fleming <afleming@freescale.com>
 + *
 + * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
 + *
 + * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
 + *
 + * This program is free software; you can redistribute  it and/or modify it
 + * under  the terms of  the GNU General  Public License as published by the
 + * Free Software Foundation;  either version 2 of the  License, or (at your
 + * option) any later version.
 + *
 + */
 +
 +#include <linux/kernel.h>
 +#include <linux/string.h>
 +#include <linux/errno.h>
 +#include <linux/unistd.h>
 +#include <linux/slab.h>
 +#include <linux/interrupt.h>
 +#include <linux/init.h>
 +#include <linux/delay.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/skbuff.h>
 +#include <linux/spinlock.h>
 +#include <linux/mm.h>
 +#include <linux/module.h>
 +#include <linux/platform_device.h>
 +#include <linux/crc32.h>
 +#include <linux/mii.h>
 +#include <linux/phy.h>
 +#include <linux/of.h>
 +#include <linux/of_platform.h>
 +
 +#include <asm/io.h>
 +#include <asm/irq.h>
 +#include <asm/uaccess.h>
 +#include <asm/ucc.h>
 +
 +#include "gianfar.h"
 +#include "fsl_pq_mdio.h"
 +
 +/*
 + * Write value to the PHY at mii_id at register regnum,
 + * on the bus attached to the local interface, which may be different from the
 + * generic mdio bus (tied to a single interface), waiting until the write is
 + * done before returning. This is helpful in programming interfaces like
 + * the TBI which control interfaces like onchip SERDES and are always tied to
 + * the local mdio pins, which may not be the same as system mdio bus, used for
 + * controlling the external PHYs, for example.
 + */
 +int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
 +              int regnum, u16 value)
 +{
 +      /* Set the PHY address and the register address we want to write */
 +      out_be32(&regs->miimadd, (mii_id << 8) | regnum);
 +
 +      /* Write out the value we want */
 +      out_be32(&regs->miimcon, value);
 +
 +      /* Wait for the transaction to finish */
 +      while (in_be32(&regs->miimind) & MIIMIND_BUSY)
 +              cpu_relax();
 +
 +      return 0;
 +}
 +
 +/*
 + * Read the bus for PHY at addr mii_id, register regnum, and
 + * return the value.  Clears miimcom first.  All PHY operation
 + * done on the bus attached to the local interface,
 + * which may be different from the generic mdio bus
 + * This is helpful in programming interfaces like
 + * the TBI which, in turn, control interfaces like onchip SERDES
 + * and are always tied to the local mdio pins, which may not be the
 + * same as system mdio bus, used for controlling the external PHYs, for eg.
 + */
 +int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
 +              int mii_id, int regnum)
 +{
 +      u16 value;
 +
 +      /* Set the PHY address and the register address we want to read */
 +      out_be32(&regs->miimadd, (mii_id << 8) | regnum);
 +
 +      /* Clear miimcom, and then initiate a read */
 +      out_be32(&regs->miimcom, 0);
 +      out_be32(&regs->miimcom, MII_READ_COMMAND);
 +
 +      /* Wait for the transaction to finish */
 +      while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
 +              cpu_relax();
 +
 +      /* Grab the value of the register from miimstat */
 +      value = in_be32(&regs->miimstat);
 +
 +      return value;
 +}
 +
 +/*
 + * Write value to the PHY at mii_id at register regnum,
 + * on the bus, waiting until the write is done before returning.
 + */
 +int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
 +{
 +      struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
 +
 +      /* Write to the local MII regs */
 +      return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
 +}
 +
 +/*
 + * Read the bus for PHY at addr mii_id, register regnum, and
 + * return the value.  Clears miimcom first.
 + */
 +int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 +{
 +      struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
 +
 +      /* Read the local MII regs */
 +      return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
 +}
 +
 +/* Reset the MIIM registers, and wait for the bus to free */
 +static int fsl_pq_mdio_reset(struct mii_bus *bus)
 +{
 +      struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
-       if(timeout == 0) {
++      int timeout = PHY_INIT_TIMEOUT;
 +
 +      mutex_lock(&bus->mdio_lock);
 +
 +      /* Reset the management interface */
 +      out_be32(&regs->miimcfg, MIIMCFG_RESET);
 +
 +      /* Setup the MII Mgmt clock speed */
 +      out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
 +
 +      /* Wait until the bus is free */
 +      while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
 +              cpu_relax();
 +
 +      mutex_unlock(&bus->mdio_lock);
 +
++      if (timeout < 0) {
 +              printk(KERN_ERR "%s: The MII Bus is stuck!\n",
 +                              bus->name);
 +              return -EBUSY;
 +      }
 +
 +      return 0;
 +}
 +
 +/* Allocate an array which provides irq #s for each PHY on the given bus */
 +static int *create_irq_map(struct device_node *np)
 +{
 +      int *irqs;
 +      int i;
 +      struct device_node *child = NULL;
 +
 +      irqs = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 +
 +      if (!irqs)
 +              return NULL;
 +
 +      for (i = 0; i < PHY_MAX_ADDR; i++)
 +              irqs[i] = PHY_POLL;
 +
 +      while ((child = of_get_next_child(np, child)) != NULL) {
 +              int irq = irq_of_parse_and_map(child, 0);
 +              const u32 *id;
 +
 +              if (irq == NO_IRQ)
 +                      continue;
 +
 +              id = of_get_property(child, "reg", NULL);
 +
 +              if (!id)
 +                      continue;
 +
 +              if (*id < PHY_MAX_ADDR && *id >= 0)
 +                      irqs[*id] = irq;
 +              else
 +                      printk(KERN_WARNING "%s: "
 +                                      "%d is not a valid PHY address\n",
 +                                      np->full_name, *id);
 +      }
 +
 +      return irqs;
 +}
 +
 +void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
 +{
 +      const u32 *reg;
 +
 +      reg = of_get_property(np, "reg", NULL);
 +
 +      snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
 +}
 +
 +/* Scan the bus in reverse, looking for an empty spot */
 +static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
 +{
 +      int i;
 +
 +      for (i = PHY_MAX_ADDR; i > 0; i--) {
 +              u32 phy_id;
 +
 +              if (get_phy_id(new_bus, i, &phy_id))
 +                      return -1;
 +
 +              if (phy_id == 0xffffffff)
 +                      break;
 +      }
 +
 +      return i;
 +}
 +
 +
 +#ifdef CONFIG_GIANFAR
 +static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
 +{
 +      struct gfar __iomem *enet_regs;
 +
 +      /*
 +       * This is mildly evil, but so is our hardware for doing this.
 +       * Also, we have to cast back to struct gfar because of
 +       * definition weirdness done in gianfar.h.
 +       */
 +      enet_regs = (struct gfar __iomem *)
 +              ((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs));
 +
 +      return &enet_regs->tbipa;
 +}
 +#endif
 +
 +
 +#ifdef CONFIG_UCC_GETH
 +static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 +{
 +      struct device_node *np = NULL;
 +      int err = 0;
 +
 +      for_each_compatible_node(np, NULL, "ucc_geth") {
 +              struct resource tempres;
 +
 +              err = of_address_to_resource(np, 0, &tempres);
 +              if (err)
 +                      continue;
 +
 +              /* if our mdio regs fall within this UCC regs range */
 +              if ((start >= tempres.start) && (end <= tempres.end)) {
 +                      /* Find the id of the UCC */
 +                      const u32 *id;
 +
 +                      id = of_get_property(np, "cell-index", NULL);
 +                      if (!id) {
 +                              id = of_get_property(np, "device-id", NULL);
 +                              if (!id)
 +                                      continue;
 +                      }
 +
 +                      *ucc_id = *id;
 +
 +                      return 0;
 +              }
 +      }
 +
 +      if (err)
 +              return err;
 +      else
 +              return -EINVAL;
 +}
 +#endif
 +
 +
 +static int fsl_pq_mdio_probe(struct of_device *ofdev,
 +              const struct of_device_id *match)
 +{
 +      struct device_node *np = ofdev->node;
 +      struct device_node *tbi;
 +      struct fsl_pq_mdio __iomem *regs;
 +      u32 __iomem *tbipa;
 +      struct mii_bus *new_bus;
 +      int tbiaddr = -1;
 +      u64 addr, size;
 +      int err = 0;
 +
 +      new_bus = mdiobus_alloc();
 +      if (NULL == new_bus)
 +              return -ENOMEM;
 +
 +      new_bus->name = "Freescale PowerQUICC MII Bus",
 +      new_bus->read = &fsl_pq_mdio_read,
 +      new_bus->write = &fsl_pq_mdio_write,
 +      new_bus->reset = &fsl_pq_mdio_reset,
 +      fsl_pq_mdio_bus_name(new_bus->id, np);
 +
 +      /* Set the PHY base address */
 +      addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
 +      regs = ioremap(addr, size);
 +
 +      if (NULL == regs) {
 +              err = -ENOMEM;
 +              goto err_free_bus;
 +      }
 +
 +      new_bus->priv = (void __force *)regs;
 +
 +      new_bus->irq = create_irq_map(np);
 +
 +      if (NULL == new_bus->irq) {
 +              err = -ENOMEM;
 +              goto err_unmap_regs;
 +      }
 +
 +      new_bus->parent = &ofdev->dev;
 +      dev_set_drvdata(&ofdev->dev, new_bus);
 +
 +      if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
 +                      of_device_is_compatible(np, "gianfar")) {
 +#ifdef CONFIG_GIANFAR
 +              tbipa = get_gfar_tbipa(regs);
 +#else
 +              err = -ENODEV;
 +              goto err_free_irqs;
 +#endif
 +      } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
 +                      of_device_is_compatible(np, "ucc_geth_phy")) {
 +#ifdef CONFIG_UCC_GETH
 +              u32 id;
 +
 +              tbipa = &regs->utbipar;
 +
 +              if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
 +                      goto err_free_irqs;
 +
 +              ucc_set_qe_mux_mii_mng(id - 1);
 +#else
 +              err = -ENODEV;
 +              goto err_free_irqs;
 +#endif
 +      } else {
 +              err = -ENODEV;
 +              goto err_free_irqs;
 +      }
 +
 +      for_each_child_of_node(np, tbi) {
 +              if (!strncmp(tbi->type, "tbi-phy", 8))
 +                      break;
 +      }
 +
 +      if (tbi) {
 +              const u32 *prop = of_get_property(tbi, "reg", NULL);
 +
 +              if (prop)
 +                      tbiaddr = *prop;
 +      }
 +
 +      if (tbiaddr == -1) {
 +              out_be32(tbipa, 0);
 +
 +              tbiaddr = fsl_pq_mdio_find_free(new_bus);
 +      }
 +
 +      /*
 +       * We define TBIPA at 0 to be illegal, opting to fail for boards that
 +       * have PHYs at 1-31, rather than change tbipa and rescan.
 +       */
 +      if (tbiaddr == 0) {
 +              err = -EBUSY;
 +
 +              goto err_free_irqs;
 +      }
 +
 +      out_be32(tbipa, tbiaddr);
 +
 +      /*
 +       * The TBIPHY-only buses will find PHYs at every address,
 +       * so we mask them all but the TBI
 +       */
 +      if (!of_device_is_compatible(np, "fsl,gianfar-mdio"))
 +              new_bus->phy_mask = ~(1 << tbiaddr);
 +
 +      err = mdiobus_register(new_bus);
 +
 +      if (err) {
 +              printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
 +                              new_bus->name);
 +              goto err_free_irqs;
 +      }
 +
 +      return 0;
 +
 +err_free_irqs:
 +      kfree(new_bus->irq);
 +err_unmap_regs:
 +      iounmap(regs);
 +err_free_bus:
 +      kfree(new_bus);
 +
 +      return err;
 +}
 +
 +
 +static int fsl_pq_mdio_remove(struct of_device *ofdev)
 +{
 +      struct device *device = &ofdev->dev;
 +      struct mii_bus *bus = dev_get_drvdata(device);
 +
 +      mdiobus_unregister(bus);
 +
 +      dev_set_drvdata(device, NULL);
 +
 +      iounmap((void __iomem *)bus->priv);
 +      bus->priv = NULL;
 +      mdiobus_free(bus);
 +
 +      return 0;
 +}
 +
 +static struct of_device_id fsl_pq_mdio_match[] = {
 +      {
 +              .type = "mdio",
 +              .compatible = "ucc_geth_phy",
 +      },
 +      {
 +              .type = "mdio",
 +              .compatible = "gianfar",
 +      },
 +      {
 +              .compatible = "fsl,ucc-mdio",
 +      },
 +      {
 +              .compatible = "fsl,gianfar-tbi",
 +      },
 +      {
 +              .compatible = "fsl,gianfar-mdio",
 +      },
 +      {},
 +};
 +
 +static struct of_platform_driver fsl_pq_mdio_driver = {
 +      .name = "fsl-pq_mdio",
 +      .probe = fsl_pq_mdio_probe,
 +      .remove = fsl_pq_mdio_remove,
 +      .match_table = fsl_pq_mdio_match,
 +};
 +
 +int __init fsl_pq_mdio_init(void)
 +{
 +      return of_register_platform_driver(&fsl_pq_mdio_driver);
 +}
 +
 +void fsl_pq_mdio_exit(void)
 +{
 +      of_unregister_platform_driver(&fsl_pq_mdio_driver);
 +}
 +subsys_initcall_sync(fsl_pq_mdio_init);
 +module_exit(fsl_pq_mdio_exit);
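
For reference, a minimal caller sketch (not part of this patch) of the local MDIO helpers added above; it assumes the caller already has a mapped fsl_pq_mdio register block and a TBI address, and uses only MII_BMCR/BMCR_* from <linux/mii.h>:

	/* Hypothetical example: restart auto-negotiation on a TBI PHY through
	 * the local MII management registers, as the comments above describe.
	 * The register block and address come from the caller; nothing here is
	 * defined by this patch beyond fsl_pq_local_mdio_read/write.
	 */
	static void example_tbi_restart_aneg(struct fsl_pq_mdio __iomem *regs,
					     int tbi_addr)
	{
		u16 bmcr = fsl_pq_local_mdio_read(regs, tbi_addr, MII_BMCR);

		bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
		fsl_pq_local_mdio_write(regs, tbi_addr, MII_BMCR, bmcr);
	}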
diff --combined drivers/net/jme.c
@@@ -429,9 -429,10 +429,9 @@@ jme_check_link(struct net_device *netde
  
                jme->phylink = phylink;
  
 -              ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
 -                                      GHC_SPEED_100M |
 -                                      GHC_SPEED_1000M |
 -                                      GHC_DPX);
 +              ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
 +                              GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
 +                              GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
                        ghc |= GHC_SPEED_10M |
@@@ -956,13 -957,14 +956,14 @@@ jme_process_receive(struct jme_adapter 
                goto out_inc;
  
        i = atomic_read(&rxring->next_to_clean);
-       while (limit-- > 0) {
+       while (limit > 0) {
                rxdesc = rxring->desc;
                rxdesc += i;
  
                if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;
+               --limit;
  
                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
  
@@@ -1832,7 -1834,7 +1833,7 @@@ jme_tx_vlan(struct sk_buff *skb, __le1
  }
  
  static int
 -jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 +jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
  {
        struct jme_ring *txring = jme->txring;
        struct txdesc *txdesc;
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
 +      jme_map_tx_skb(jme, skb, idx);
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@@ -1932,7 -1933,8 +1933,7 @@@ jme_start_xmit(struct sk_buff *skb, str
                return NETDEV_TX_BUSY;
        }
  
 -      jme_map_tx_skb(jme, skb, idx);
 -      jme_fill_first_tx_desc(jme, skb, idx);
 +      jme_fill_tx_desc(jme, skb, idx);
  
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
@@@ -2589,16 -2591,6 +2590,16 @@@ static const struct ethtool_ops jme_eth
  static int
  jme_pci_dma64(struct pci_dev *pdev)
  {
 +      if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
 +          !pci_set_dma_mask(pdev, DMA_64BIT_MASK))
 +              if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
 +                      return 1;
 +
 +      if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
 +          !pci_set_dma_mask(pdev, DMA_40BIT_MASK))
 +              if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
 +                      return 1;
 +
        if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
                if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
                        return 0;
@@@ -2865,11 -2857,7 +2866,11 @@@ jme_init_one(struct pci_dev *pdev
                goto err_out_free_shadow;
        }
  
 -      msg_probe(jme, "JMC250 gigabit%s ver:%x rev:%x macaddr:%pM\n",
 +      msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
 +              (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
 +                      "JMC250 Gigabit Ethernet" :
 +                      (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
 +                              "JMC260 Fast Ethernet" : "Unknown",
                (jme->fpgaver != 0) ? " (FPGA)" : "",
                (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
                jme->rev, netdev->dev_addr);
@@@ -3015,7 -3003,7 +3016,7 @@@ static struct pci_driver jme_driver = 
  static int __init
  jme_init_module(void)
  {
 -      printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
 +      printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
               "driver version %s\n", DRV_VERSION);
        return pci_register_driver(&jme_driver);
  }
diff --combined drivers/net/sungem.c
@@@ -921,7 -921,7 +921,7 @@@ static int gem_poll(struct napi_struct 
                gp->status = readl(gp->regs + GREG_STAT);
        } while (gp->status & GREG_STAT_NAPI);
  
 -      __netif_rx_complete(napi);
 +      __napi_complete(napi);
        gem_enable_ints(gp);
  
        spin_unlock_irqrestore(&gp->lock, flags);
@@@ -944,7 -944,7 +944,7 @@@ static irqreturn_t gem_interrupt(int ir
  
        spin_lock_irqsave(&gp->lock, flags);
  
 -      if (netif_rx_schedule_prep(&gp->napi)) {
 +      if (napi_schedule_prep(&gp->napi)) {
                u32 gem_status = readl(gp->regs + GREG_STAT);
  
                if (gem_status == 0) {
                }
                gp->status = gem_status;
                gem_disable_ints(gp);
 -              __netif_rx_schedule(&gp->napi);
 +              __napi_schedule(&gp->napi);
        }
  
        spin_unlock_irqrestore(&gp->lock, flags);
@@@ -1229,7 -1229,7 +1229,7 @@@ static void gem_reset(struct gem *gp
                        break;
        } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
  
-       if (limit <= 0)
+       if (limit < 0)
                printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
  
        if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
diff --combined drivers/net/tokenring/tmspci.c
@@@ -121,11 -121,6 +121,6 @@@ static int __devinit tms_pci_attach(str
                goto err_out_trdev;
        }
  
-       ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
-                         dev->name, dev);
-       if (ret)
-               goto err_out_region;
        dev->base_addr  = pci_ioaddr;
        dev->irq        = pci_irq_line;
        dev->dma        = 0;
        ret = tmsdev_init(dev, &pdev->dev);
        if (ret) {
                printk("%s: unable to get memory for dev->priv.\n", dev->name);
-               goto err_out_irq;
+               goto err_out_region;
        }
  
        tp = netdev_priv(dev);
  
        tp->tmspriv = cardinfo;
  
 -      dev->open = tms380tr_open;
 -      dev->stop = tms380tr_close;
 +      dev->netdev_ops = &tms380tr_netdev_ops;
 +
+       ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
+                         dev->name, dev);
+       if (ret)
+               goto err_out_tmsdev;
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
  
        ret = register_netdev(dev);
        if (ret)
-               goto err_out_tmsdev;
+               goto err_out_irq;
        
        return 0;
  
+ err_out_irq:
+       free_irq(pdev->irq, dev);
  err_out_tmsdev:
        pci_set_drvdata(pdev, NULL);
        tmsdev_term(dev);
- err_out_irq:
-       free_irq(pdev->irq, dev);
  err_out_region:
        release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
  err_out_trdev:
diff --combined include/linux/netdevice.h
@@@ -96,7 -96,7 +96,7 @@@ struct wireless_dev
   *    Compute the worst case header length according to the protocols
   *    used.
   */
 - 
 +
  #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
  # if defined(CONFIG_MAC80211_MESH)
  #  define LL_MAX_HEADER 128
   *    Network device statistics. Akin to the 2.0 ether stats but
   *    with byte counters.
   */
 - 
 +
  struct net_device_stats
  {
        unsigned long   rx_packets;             /* total packets received       */
@@@ -285,7 -285,7 +285,7 @@@ enum netdev_state_
  
  /*
   * This structure holds at boot time configured netdevice settings. They
 - * are then used in the device probing. 
 + * are then used in the device probing.
   */
  struct netdev_boot_setup {
        char name[IFNAMSIZ];
@@@ -314,9 -314,6 +314,9 @@@ struct napi_struct 
        spinlock_t              poll_lock;
        int                     poll_owner;
  #endif
 +
 +      unsigned int            gro_count;
 +
        struct net_device       *dev;
        struct list_head        dev_list;
        struct sk_buff          *gro_list;
@@@ -743,7 -740,7 +743,7 @@@ struct net_devic
        void                    *dsa_ptr;       /* dsa specific data */
  #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
 -      void                    *ip_ptr;        /* IPv4 specific data   */  
 +      void                    *ip_ptr;        /* IPv4 specific data   */
        void                    *dn_ptr;        /* DECnet specific data */
        void                    *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
   */
        unsigned long           last_rx;        /* Time of last Rx      */
        /* Interface address info used in eth_type_trans() */
 -      unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast 
 +      unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
                                                           because most packets are unicast) */
  
        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
@@@ -987,9 -984,6 +987,9 @@@ void netif_napi_add(struct net_device *
  void netif_napi_del(struct napi_struct *napi);
  
  struct napi_gro_cb {
 +      /* This indicates where we are processing relative to skb->data. */
 +      int data_offset;
 +
        /* This is non-zero if the packet may be of the same flow. */
        int same_flow;
  
@@@ -1085,6 -1079,7 +1085,7 @@@ extern void             synchronize_net(void)
  extern int            register_netdevice_notifier(struct notifier_block *nb);
  extern int            unregister_netdevice_notifier(struct notifier_block *nb);
  extern int            init_dummy_netdev(struct net_device *dev);
+ extern void           netdev_resync_ops(struct net_device *dev);
  
  extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  extern struct net_device      *dev_get_by_index(struct net *net, int ifindex);
@@@ -1093,36 -1088,6 +1094,36 @@@ extern int            dev_restart(struct net_devi
  #ifdef CONFIG_NETPOLL_TRAP
  extern int            netpoll_trap(void);
  #endif
 +extern void         *skb_gro_header(struct sk_buff *skb, unsigned int hlen);
 +extern int           skb_gro_receive(struct sk_buff **head,
 +                                     struct sk_buff *skb);
 +
 +static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 +{
 +      return NAPI_GRO_CB(skb)->data_offset;
 +}
 +
 +static inline unsigned int skb_gro_len(const struct sk_buff *skb)
 +{
 +      return skb->len - NAPI_GRO_CB(skb)->data_offset;
 +}
 +
 +static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
 +{
 +      NAPI_GRO_CB(skb)->data_offset += len;
 +}
 +
 +static inline void skb_gro_reset_offset(struct sk_buff *skb)
 +{
 +      NAPI_GRO_CB(skb)->data_offset = 0;
 +}
 +
 +static inline void *skb_gro_mac_header(struct sk_buff *skb)
 +{
 +      return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) :
 +             page_address(skb_shinfo(skb)->frags[0].page) +
 +             skb_shinfo(skb)->frags[0].page_offset;
 +}
  
  static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
@@@ -1411,15 -1376,12 +1412,15 @@@ extern int           netif_receive_skb(struct sk
  extern void           napi_gro_flush(struct napi_struct *napi);
  extern int            dev_gro_receive(struct napi_struct *napi,
                                        struct sk_buff *skb);
 +extern int            napi_skb_finish(int ret, struct sk_buff *skb);
  extern int            napi_gro_receive(struct napi_struct *napi,
                                         struct sk_buff *skb);
  extern void           napi_reuse_skb(struct napi_struct *napi,
                                       struct sk_buff *skb);
  extern struct sk_buff *       napi_fraginfo_skb(struct napi_struct *napi,
                                          struct napi_gro_fraginfo *info);
 +extern int            napi_frags_finish(struct napi_struct *napi,
 +                                        struct sk_buff *skb, int ret);
  extern int            napi_gro_frags(struct napi_struct *napi,
                                       struct napi_gro_fraginfo *info);
  extern void           netif_nit_deliver(struct sk_buff *skb);
@@@ -1613,6 -1575,56 +1614,6 @@@ static inline u32 netif_msg_init(int de
        return (1 << debug_value) - 1;
  }
  
 -/* Test if receive needs to be scheduled but only if up */
 -static inline int netif_rx_schedule_prep(struct napi_struct *napi)
 -{
 -      return napi_schedule_prep(napi);
 -}
 -
 -/* Add interface to tail of rx poll list. This assumes that _prep has
 - * already been called and returned 1.
 - */
 -static inline void __netif_rx_schedule(struct napi_struct *napi)
 -{
 -      __napi_schedule(napi);
 -}
 -
 -/* Try to reschedule poll. Called by irq handler. */
 -
 -static inline void netif_rx_schedule(struct napi_struct *napi)
 -{
 -      if (netif_rx_schedule_prep(napi))
 -              __netif_rx_schedule(napi);
 -}
 -
 -/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().  */
 -static inline int netif_rx_reschedule(struct napi_struct *napi)
 -{
 -      if (napi_schedule_prep(napi)) {
 -              __netif_rx_schedule(napi);
 -              return 1;
 -      }
 -      return 0;
 -}
 -
 -/* same as netif_rx_complete, except that local_irq_save(flags)
 - * has already been issued
 - */
 -static inline void __netif_rx_complete(struct napi_struct *napi)
 -{
 -      __napi_complete(napi);
 -}
 -
 -/* Remove interface from poll list: it must be in the poll list
 - * on current cpu. This primitive is called by dev->poll(), when
 - * it completes the work. The device cannot be out of poll list at this
 - * moment, it is BUG().
 - */
 -static inline void netif_rx_complete(struct napi_struct *napi)
 -{
 -      napi_complete(napi);
 -}
 -
  static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
  {
        spin_lock(&txq->_xmit_lock);
@@@ -1863,7 -1875,7 +1864,7 @@@ static inline int skb_bond_should_drop(
  
                if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
                        if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
 -                          skb->protocol == __constant_htons(ETH_P_ARP))
 +                          skb->protocol == __cpu_to_be16(ETH_P_ARP))
                                return 0;
  
                        if (master->priv_flags & IFF_MASTER_ALB) {
                                        return 0;
                        }
                        if (master->priv_flags & IFF_MASTER_8023AD &&
 -                          skb->protocol == __constant_htons(ETH_P_SLOW))
 +                          skb->protocol == __cpu_to_be16(ETH_P_SLOW))
                                return 0;
  
                        return 1;
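
The skb_gro_* helpers declared above are intended for a protocol's gro_receive callback; a minimal sketch of the pattern (my_hdr is a hypothetical protocol header, not anything defined in this tree), which the ipv6_gro_receive() hunk in net/ipv6/af_inet6.c below follows as well:

	/* Peek at our header via skb_gro_header(), which also handles data that
	 * is still sitting in the first page fragment, then advance the GRO
	 * offset past it with skb_gro_pull().
	 */
	static struct sk_buff **my_gro_receive(struct sk_buff **head,
					       struct sk_buff *skb)
	{
		struct my_hdr *h = skb_gro_header(skb, sizeof(*h));

		if (unlikely(!h)) {
			/* Cannot reach the header: do not aggregate this skb */
			NAPI_GRO_CB(skb)->flush = 1;
			return NULL;
		}

		skb_gro_pull(skb, sizeof(*h));
		/* ... compare h against the packets queued on *head ... */
		return NULL;
	}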
diff --combined net/802/tr.c
@@@ -486,7 -486,6 +486,7 @@@ static struct rif_cache *rif_get_idx(lo
  }
  
  static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 +      __acquires(&rif_lock)
  {
        spin_lock_irq(&rif_lock);
  
@@@ -518,7 -517,6 +518,7 @@@ static void *rif_seq_next(struct seq_fi
  }
  
  static void rif_seq_stop(struct seq_file *seq, void *v)
 +      __releases(&rif_lock)
  {
        spin_unlock_irq(&rif_lock);
  }
@@@ -670,3 -668,5 +670,5 @@@ module_init(rif_init)
  
  EXPORT_SYMBOL(tr_type_trans);
  EXPORT_SYMBOL(alloc_trdev);
+ MODULE_LICENSE("GPL");
diff --combined net/core/dev.c
  /* This should be increased if a protocol with a bigger head is added. */
  #define GRO_MAX_HEAD (MAX_HEADER + 128)
  
 +enum {
 +      GRO_MERGED,
 +      GRO_MERGED_FREE,
 +      GRO_HELD,
 +      GRO_NORMAL,
 +      GRO_DROP,
 +};
 +
  /*
   *    The list of packet types we will receive (as opposed to discard)
   *    and the routines to invoke.
@@@ -1676,7 -1668,6 +1676,7 @@@ int dev_hard_start_xmit(struct sk_buff 
                        struct netdev_queue *txq)
  {
        const struct net_device_ops *ops = dev->netdev_ops;
 +      int rc;
  
        prefetch(&dev->netdev_ops->ndo_start_xmit);
        if (likely(!skb->next)) {
                                goto gso;
                }
  
 -              return ops->ndo_start_xmit(skb, dev);
 +              rc = ops->ndo_start_xmit(skb, dev);
 +              /*
 +               * TODO: if skb_orphan() was called by
 +               * dev->hard_start_xmit() (for example, the unmodified
 +               * igb driver does that; bnx2 doesn't), then
 +               * skb_tx_software_timestamp() will be unable to send
 +               * back the time stamp.
 +               *
 +               * How can this be prevented? Always create another
 +               * reference to the socket before calling
 +               * dev->hard_start_xmit()? Prevent that skb_orphan()
 +               * does anything in dev->hard_start_xmit() by clearing
 +               * the skb destructor before the call and restoring it
 +               * afterwards, then doing the skb_orphan() ourselves?
 +               */
 +              return rc;
        }
  
  gso:
        do {
                struct sk_buff *nskb = skb->next;
 -              int rc;
  
                skb->next = nskb->next;
                nskb->next = NULL;
@@@ -1731,20 -1708,56 +1731,20 @@@ out_kfree_skb
        return 0;
  }
  
 -static u32 simple_tx_hashrnd;
 -static int simple_tx_hashrnd_initialized = 0;
 +static u32 skb_tx_hashrnd;
  
 -static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 +static u16 skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
  {
 -      u32 addr1, addr2, ports;
 -      u32 hash, ihl;
 -      u8 ip_proto = 0;
 -
 -      if (unlikely(!simple_tx_hashrnd_initialized)) {
 -              get_random_bytes(&simple_tx_hashrnd, 4);
 -              simple_tx_hashrnd_initialized = 1;
 -      }
 -
 -      switch (skb->protocol) {
 -      case htons(ETH_P_IP):
 -              if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
 -                      ip_proto = ip_hdr(skb)->protocol;
 -              addr1 = ip_hdr(skb)->saddr;
 -              addr2 = ip_hdr(skb)->daddr;
 -              ihl = ip_hdr(skb)->ihl;
 -              break;
 -      case htons(ETH_P_IPV6):
 -              ip_proto = ipv6_hdr(skb)->nexthdr;
 -              addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
 -              addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
 -              ihl = (40 >> 2);
 -              break;
 -      default:
 -              return 0;
 -      }
 +      u32 hash;
  
 +      if (skb_rx_queue_recorded(skb)) {
 +              hash = skb_get_rx_queue(skb);
 +      } else if (skb->sk && skb->sk->sk_hash) {
 +              hash = skb->sk->sk_hash;
 +      } else
 +              hash = skb->protocol;
  
 -      switch (ip_proto) {
 -      case IPPROTO_TCP:
 -      case IPPROTO_UDP:
 -      case IPPROTO_DCCP:
 -      case IPPROTO_ESP:
 -      case IPPROTO_AH:
 -      case IPPROTO_SCTP:
 -      case IPPROTO_UDPLITE:
 -              ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
 -              break;
 -
 -      default:
 -              ports = 0;
 -              break;
 -      }
 -
 -      hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
 +      hash = jhash_1word(hash, skb_tx_hashrnd);
  
        return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
  }
@@@ -1758,7 -1771,7 +1758,7 @@@ static struct netdev_queue *dev_pick_tx
        if (ops->ndo_select_queue)
                queue_index = ops->ndo_select_queue(dev, skb);
        else if (dev->real_num_tx_queues > 1)
 -              queue_index = simple_tx_hash(dev, skb);
 +              queue_index = skb_tx_hash(dev, skb);
  
        skb_set_queue_mapping(skb, queue_index);
        return netdev_get_tx_queue(dev, queue_index);
@@@ -2284,8 -2297,6 +2284,8 @@@ ncls
        if (!skb)
                goto out;
  
 +      skb_orphan(skb);
 +
        type = skb->protocol;
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
@@@ -2355,6 -2366,7 +2355,6 @@@ static int napi_gro_complete(struct sk_
  
  out:
        skb_shinfo(skb)->gso_size = 0;
 -      __skb_push(skb, -skb_network_offset(skb));
        return netif_receive_skb(skb);
  }
  
@@@ -2368,40 -2380,20 +2368,40 @@@ void napi_gro_flush(struct napi_struct 
                napi_gro_complete(skb);
        }
  
 +      napi->gro_count = 0;
        napi->gro_list = NULL;
  }
  EXPORT_SYMBOL(napi_gro_flush);
  
 +void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
 +{
 +      unsigned int offset = skb_gro_offset(skb);
 +
 +      hlen += offset;
 +      if (hlen <= skb_headlen(skb))
 +              return skb->data + offset;
 +
 +      if (unlikely(!skb_shinfo(skb)->nr_frags ||
 +                   skb_shinfo(skb)->frags[0].size <=
 +                   hlen - skb_headlen(skb) ||
 +                   PageHighMem(skb_shinfo(skb)->frags[0].page)))
 +              return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
 +
 +      return page_address(skb_shinfo(skb)->frags[0].page) +
 +             skb_shinfo(skb)->frags[0].page_offset +
 +             offset - skb_headlen(skb);
 +}
 +EXPORT_SYMBOL(skb_gro_header);
 +
  int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
        struct sk_buff **pp = NULL;
        struct packet_type *ptype;
        __be16 type = skb->protocol;
        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 -      int count = 0;
        int same_flow;
        int mac_len;
 -      int free;
 +      int ret;
  
        if (!(skb->dev->features & NETIF_F_GRO))
                goto normal;
  
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
 -              struct sk_buff *p;
 -
                if (ptype->type != type || ptype->dev || !ptype->gro_receive)
                        continue;
  
 -              skb_reset_network_header(skb);
 +              skb_set_network_header(skb, skb_gro_offset(skb));
                mac_len = skb->network_header - skb->mac_header;
                skb->mac_len = mac_len;
                NAPI_GRO_CB(skb)->same_flow = 0;
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
  
 -              for (p = napi->gro_list; p; p = p->next) {
 -                      count++;
 -
 -                      if (!NAPI_GRO_CB(p)->same_flow)
 -                              continue;
 -
 -                      if (p->mac_len != mac_len ||
 -                          memcmp(skb_mac_header(p), skb_mac_header(skb),
 -                                 mac_len))
 -                              NAPI_GRO_CB(p)->same_flow = 0;
 -              }
 -
                pp = ptype->gro_receive(&napi->gro_list, skb);
                break;
        }
                goto normal;
  
        same_flow = NAPI_GRO_CB(skb)->same_flow;
 -      free = NAPI_GRO_CB(skb)->free;
 +      ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
  
        if (pp) {
                struct sk_buff *nskb = *pp;
                *pp = nskb->next;
                nskb->next = NULL;
                napi_gro_complete(nskb);
 -              count--;
 +              napi->gro_count--;
        }
  
        if (same_flow)
                goto ok;
  
 -      if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
 -              __skb_push(skb, -skb_network_offset(skb));
 +      if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
                goto normal;
 -      }
  
 +      napi->gro_count++;
        NAPI_GRO_CB(skb)->count = 1;
 -      skb_shinfo(skb)->gso_size = skb->len;
 +      skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
 +      ret = GRO_HELD;
 +
 +pull:
 +      if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
 +              if (napi->gro_list == skb)
 +                      napi->gro_list = skb->next;
 +              ret = GRO_DROP;
 +      }
  
  ok:
 -      return free;
 +      return ret;
  
  normal:
 -      return -1;
 +      ret = GRO_NORMAL;
 +      goto pull;
  }
  EXPORT_SYMBOL(dev_gro_receive);
  
@@@ -2475,43 -2473,28 +2475,43 @@@ static int __napi_gro_receive(struct na
        struct sk_buff *p;
  
        for (p = napi->gro_list; p; p = p->next) {
 -              NAPI_GRO_CB(p)->same_flow = 1;
 +              NAPI_GRO_CB(p)->same_flow = !compare_ether_header(
 +                      skb_mac_header(p), skb_gro_mac_header(skb));
                NAPI_GRO_CB(p)->flush = 0;
        }
  
        return dev_gro_receive(napi, skb);
  }
  
 -int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 +int napi_skb_finish(int ret, struct sk_buff *skb)
  {
 +      int err = NET_RX_SUCCESS;
 +
        if (netpoll_receive_skb(skb))
                return NET_RX_DROP;
  
 -      switch (__napi_gro_receive(napi, skb)) {
 -      case -1:
 +      switch (ret) {
 +      case GRO_NORMAL:
                return netif_receive_skb(skb);
  
 -      case 1:
 +      case GRO_DROP:
 +              err = NET_RX_DROP;
 +              /* fall through */
 +
 +      case GRO_MERGED_FREE:
                kfree_skb(skb);
                break;
        }
  
 -      return NET_RX_SUCCESS;
 +      return err;
 +}
 +EXPORT_SYMBOL(napi_skb_finish);
 +
 +int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 +{
 +      skb_gro_reset_offset(skb);
 +
 +      return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
  }
  EXPORT_SYMBOL(napi_gro_receive);
  
@@@ -2529,9 -2512,6 +2529,9 @@@ struct sk_buff *napi_fraginfo_skb(struc
  {
        struct net_device *dev = napi->dev;
        struct sk_buff *skb = napi->skb;
 +      struct ethhdr *eth;
 +      skb_frag_t *frag;
 +      int i;
  
        napi->skb = NULL;
  
        }
  
        BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
 +      frag = &info->frags[info->nr_frags - 1];
 +
 +      for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
 +              skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
 +                                 frag->size);
 +              frag++;
 +      }
        skb_shinfo(skb)->nr_frags = info->nr_frags;
 -      memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
  
        skb->data_len = info->len;
        skb->len += info->len;
        skb->truesize += info->len;
  
 -      if (!pskb_may_pull(skb, ETH_HLEN)) {
 +      skb_reset_mac_header(skb);
 +      skb_gro_reset_offset(skb);
 +
 +      eth = skb_gro_header(skb, sizeof(*eth));
 +      if (!eth) {
                napi_reuse_skb(napi, skb);
                skb = NULL;
                goto out;
        }
  
 -      skb->protocol = eth_type_trans(skb, dev);
 +      skb_gro_pull(skb, sizeof(*eth));
 +
 +      /*
 +       * This works because the only protocols we care about don't require
 +       * special handling.  We'll fix it up properly at the end.
 +       */
 +      skb->protocol = eth->h_proto;
  
        skb->ip_summed = info->ip_summed;
        skb->csum = info->csum;
  }
  EXPORT_SYMBOL(napi_fraginfo_skb);
  
 -int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 +int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
  {
 -      struct sk_buff *skb = napi_fraginfo_skb(napi, info);
 -      int err = NET_RX_DROP;
 -
 -      if (!skb)
 -              goto out;
 +      int err = NET_RX_SUCCESS;
  
        if (netpoll_receive_skb(skb))
 -              goto out;
 +              return NET_RX_DROP;
  
 -      err = NET_RX_SUCCESS;
 +      switch (ret) {
 +      case GRO_NORMAL:
 +      case GRO_HELD:
 +              skb->protocol = eth_type_trans(skb, napi->dev);
  
 -      switch (__napi_gro_receive(napi, skb)) {
 -      case -1:
 -              return netif_receive_skb(skb);
 +              if (ret == GRO_NORMAL)
 +                      return netif_receive_skb(skb);
  
 -      case 0:
 -              goto out;
 -      }
 +              skb_gro_pull(skb, -ETH_HLEN);
 +              break;
  
 -      napi_reuse_skb(napi, skb);
 +      case GRO_DROP:
 +              err = NET_RX_DROP;
 +              /* fall through */
 +
 +      case GRO_MERGED_FREE:
 +              napi_reuse_skb(napi, skb);
 +              break;
 +      }
  
 -out:
        return err;
  }
 +EXPORT_SYMBOL(napi_frags_finish);
 +
 +int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 +{
 +      struct sk_buff *skb = napi_fraginfo_skb(napi, info);
 +
 +      if (!skb)
 +              return NET_RX_DROP;
 +
 +      return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
 +}
  EXPORT_SYMBOL(napi_gro_frags);
  
  static int process_backlog(struct napi_struct *napi, int quota)
@@@ -2702,7 -2652,6 +2702,7 @@@ void netif_napi_add(struct net_device *
                    int (*poll)(struct napi_struct *, int), int weight)
  {
        INIT_LIST_HEAD(&napi->poll_list);
 +      napi->gro_count = 0;
        napi->gro_list = NULL;
        napi->skb = NULL;
        napi->poll = poll;
@@@ -2731,7 -2680,6 +2731,7 @@@ void netif_napi_del(struct napi_struct 
        }
  
        napi->gro_list = NULL;
 +      napi->gro_count = 0;
  }
  EXPORT_SYMBOL(netif_napi_del);
  
@@@ -4000,7 -3948,6 +4000,7 @@@ static int dev_ifsioc(struct net *net, 
                            cmd == SIOCSMIIREG ||
                            cmd == SIOCBRADDIF ||
                            cmd == SIOCBRDELIF ||
 +                          cmd == SIOCSHWTSTAMP ||
                            cmd == SIOCWANDEV) {
                                err = -EOPNOTSUPP;
                                if (ops->ndo_do_ioctl) {
@@@ -4155,7 -4102,6 +4155,7 @@@ int dev_ioctl(struct net *net, unsigne
                case SIOCBONDCHANGEACTIVE:
                case SIOCBRADDIF:
                case SIOCBRDELIF:
 +              case SIOCSHWTSTAMP:
                        if (!capable(CAP_NET_ADMIN))
                                return -EPERM;
                        /* fall through */
@@@ -4336,6 -4282,39 +4336,39 @@@ unsigned long netdev_fix_features(unsig
  }
  EXPORT_SYMBOL(netdev_fix_features);
  
+ /* Some devices need to (re-)set their netdev_ops inside
+  * ->init() or similar.  If that happens, we have to setup
+  * the compat pointers again.
+  */
+ void netdev_resync_ops(struct net_device *dev)
+ {
+ #ifdef CONFIG_COMPAT_NET_DEV_OPS
+       const struct net_device_ops *ops = dev->netdev_ops;
+       dev->init = ops->ndo_init;
+       dev->uninit = ops->ndo_uninit;
+       dev->open = ops->ndo_open;
+       dev->change_rx_flags = ops->ndo_change_rx_flags;
+       dev->set_rx_mode = ops->ndo_set_rx_mode;
+       dev->set_multicast_list = ops->ndo_set_multicast_list;
+       dev->set_mac_address = ops->ndo_set_mac_address;
+       dev->validate_addr = ops->ndo_validate_addr;
+       dev->do_ioctl = ops->ndo_do_ioctl;
+       dev->set_config = ops->ndo_set_config;
+       dev->change_mtu = ops->ndo_change_mtu;
+       dev->neigh_setup = ops->ndo_neigh_setup;
+       dev->tx_timeout = ops->ndo_tx_timeout;
+       dev->get_stats = ops->ndo_get_stats;
+       dev->vlan_rx_register = ops->ndo_vlan_rx_register;
+       dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
+       dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       dev->poll_controller = ops->ndo_poll_controller;
+ #endif
+ #endif
+ }
+ EXPORT_SYMBOL(netdev_resync_ops);
  /**
   *    register_netdevice      - register a network device
   *    @dev: device to register
@@@ -4380,27 -4359,7 +4413,7 @@@ int register_netdevice(struct net_devic
         * This is temporary until all network devices are converted.
         */
        if (dev->netdev_ops) {
-               const struct net_device_ops *ops = dev->netdev_ops;
-               dev->init = ops->ndo_init;
-               dev->uninit = ops->ndo_uninit;
-               dev->open = ops->ndo_open;
-               dev->change_rx_flags = ops->ndo_change_rx_flags;
-               dev->set_rx_mode = ops->ndo_set_rx_mode;
-               dev->set_multicast_list = ops->ndo_set_multicast_list;
-               dev->set_mac_address = ops->ndo_set_mac_address;
-               dev->validate_addr = ops->ndo_validate_addr;
-               dev->do_ioctl = ops->ndo_do_ioctl;
-               dev->set_config = ops->ndo_set_config;
-               dev->change_mtu = ops->ndo_change_mtu;
-               dev->tx_timeout = ops->ndo_tx_timeout;
-               dev->get_stats = ops->ndo_get_stats;
-               dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-               dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-               dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
- #ifdef CONFIG_NET_POLL_CONTROLLER
-               dev->poll_controller = ops->ndo_poll_controller;
- #endif
+               netdev_resync_ops(dev);
        } else {
                char drivername[64];
                pr_info("%s (%s): not using net_device_ops yet\n",
@@@ -5239,7 -5198,6 +5252,7 @@@ static int __init net_dev_init(void
                queue->backlog.poll = process_backlog;
                queue->backlog.weight = weight_p;
                queue->backlog.gro_list = NULL;
 +              queue->backlog.gro_count = 0;
        }
  
        dev_boot_phase = 0;
  
  subsys_initcall(net_dev_init);
  
 +static int __init initialize_hashrnd(void)
 +{
 +      get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
 +      return 0;
 +}
 +
 +late_initcall_sync(initialize_hashrnd);
 +
  EXPORT_SYMBOL(__dev_get_by_index);
  EXPORT_SYMBOL(__dev_get_by_name);
  EXPORT_SYMBOL(__dev_remove_pack);
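
On the driver side, the GRO entry points above slot into an ordinary NAPI poll loop; a hedged sketch, where my_hw_rx() and my_hw_enable_irqs() stand in for hypothetical driver internals:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct sk_buff *skb;
		int work = 0;

		while (work < budget && (skb = my_hw_rx(napi->dev)) != NULL) {
			skb->protocol = eth_type_trans(skb, napi->dev);
			/* GRO may merge, hold or hand the skb to the stack */
			napi_gro_receive(napi, skb);
			work++;
		}

		if (work < budget) {
			napi_complete(napi);
			my_hw_enable_irqs(napi->dev);
		}

		return work;
	}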
diff --combined net/ipv6/addrconf.c
@@@ -493,15 -493,17 +493,17 @@@ static void addrconf_forward_change(str
        read_unlock(&dev_base_lock);
  }
  
- static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
  {
        struct net *net;
  
        net = (struct net *)table->extra2;
        if (p == &net->ipv6.devconf_dflt->forwarding)
-               return;
+               return 0;
+       if (!rtnl_trylock())
+               return -ERESTARTSYS;
  
-       rtnl_lock();
        if (p == &net->ipv6.devconf_all->forwarding) {
                __s32 newf = net->ipv6.devconf_all->forwarding;
                net->ipv6.devconf_dflt->forwarding = newf;
  
        if (*p)
                rt6_purge_dflt_routers(net);
+       return 1;
  }
  #endif
  
@@@ -2224,24 -2227,10 +2227,24 @@@ int addrconf_del_ifaddr(struct net *net
        return err;
  }
  
 +static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 +                   int plen, int scope)
 +{
 +      struct inet6_ifaddr *ifp;
 +
 +      ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
 +      if (!IS_ERR(ifp)) {
 +              spin_lock_bh(&ifp->lock);
 +              ifp->flags &= ~IFA_F_TENTATIVE;
 +              spin_unlock_bh(&ifp->lock);
 +              ipv6_ifa_notify(RTM_NEWADDR, ifp);
 +              in6_ifa_put(ifp);
 +      }
 +}
 +
  #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
  static void sit_add_v4_addrs(struct inet6_dev *idev)
  {
 -      struct inet6_ifaddr * ifp;
        struct in6_addr addr;
        struct net_device *dev;
        struct net *net = dev_net(idev->dev);
        }
  
        if (addr.s6_addr32[3]) {
 -              ifp = ipv6_add_addr(idev, &addr, 128, scope, IFA_F_PERMANENT);
 -              if (!IS_ERR(ifp)) {
 -                      spin_lock_bh(&ifp->lock);
 -                      ifp->flags &= ~IFA_F_TENTATIVE;
 -                      spin_unlock_bh(&ifp->lock);
 -                      ipv6_ifa_notify(RTM_NEWADDR, ifp);
 -                      in6_ifa_put(ifp);
 -              }
 +              add_addr(idev, &addr, 128, scope);
                return;
        }
  
                                else
                                        plen = 96;
  
 -                              ifp = ipv6_add_addr(idev, &addr, plen, flag,
 -                                                  IFA_F_PERMANENT);
 -                              if (!IS_ERR(ifp)) {
 -                                      spin_lock_bh(&ifp->lock);
 -                                      ifp->flags &= ~IFA_F_TENTATIVE;
 -                                      spin_unlock_bh(&ifp->lock);
 -                                      ipv6_ifa_notify(RTM_NEWADDR, ifp);
 -                                      in6_ifa_put(ifp);
 -                              }
 +                              add_addr(idev, &addr, plen, flag);
                        }
                }
        }
  static void init_loopback(struct net_device *dev)
  {
        struct inet6_dev  *idev;
 -      struct inet6_ifaddr * ifp;
  
        /* ::1 */
  
                return;
        }
  
 -      ifp = ipv6_add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFA_F_PERMANENT);
 -      if (!IS_ERR(ifp)) {
 -              spin_lock_bh(&ifp->lock);
 -              ifp->flags &= ~IFA_F_TENTATIVE;
 -              spin_unlock_bh(&ifp->lock);
 -              ipv6_ifa_notify(RTM_NEWADDR, ifp);
 -              in6_ifa_put(ifp);
 -      }
 +      add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
  }
  
  static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr)
@@@ -2599,9 -2611,6 +2602,6 @@@ static int addrconf_ifdown(struct net_d
  
        ASSERT_RTNL();
  
-       if ((dev->flags & IFF_LOOPBACK) && how == 1)
-               how = 0;
        rt6_ifdown(net, dev);
        neigh_ifdown(&nd_tbl, dev);
  
@@@ -3638,8 -3647,7 +3638,8 @@@ static void inet6_ifa_notify(int event
                kfree_skb(skb);
                goto errout;
        }
 -      err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
 +      rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
 +      return;
  errout:
        if (err < 0)
                rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
@@@ -3850,8 -3858,7 +3850,8 @@@ void inet6_ifinfo_notify(int event, str
                kfree_skb(skb);
                goto errout;
        }
 -      err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
 +      rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
 +      return;
  errout:
        if (err < 0)
                rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
@@@ -3921,8 -3928,7 +3921,8 @@@ static void inet6_prefix_notify(int eve
                kfree_skb(skb);
                goto errout;
        }
 -      err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
 +      rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
 +      return;
  errout:
        if (err < 0)
                rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
@@@ -3977,7 -3983,7 +3977,7 @@@ int addrconf_sysctl_forward(ctl_table *
        ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
  
        if (write)
-               addrconf_fixup_forwarding(ctl, valp, val);
+               ret = addrconf_fixup_forwarding(ctl, valp, val);
        return ret;
  }
  
@@@ -4013,8 -4019,7 +4013,7 @@@ static int addrconf_sysctl_forward_stra
        }
  
        *valp = new;
-       addrconf_fixup_forwarding(table, valp, val);
-       return 1;
+       return addrconf_fixup_forwarding(table, valp, val);
  }
  
  static struct addrconf_sysctl_table
@@@ -4440,25 -4445,6 +4439,6 @@@ int unregister_inet6addr_notifier(struc
  
  EXPORT_SYMBOL(unregister_inet6addr_notifier);
  
- static void addrconf_net_exit(struct net *net)
- {
-       struct net_device *dev;
-       rtnl_lock();
-       /* clean dev list */
-       for_each_netdev(net, dev) {
-               if (__in6_dev_get(dev) == NULL)
-                       continue;
-               addrconf_ifdown(dev, 1);
-       }
-       addrconf_ifdown(net->loopback_dev, 2);
-       rtnl_unlock();
- }
- static struct pernet_operations addrconf_net_ops = {
-       .exit = addrconf_net_exit,
- };
  /*
   *    Init / cleanup code
   */
@@@ -4500,10 -4486,6 +4480,6 @@@ int __init addrconf_init(void
        if (err)
                goto errlo;
  
-       err = register_pernet_device(&addrconf_net_ops);
-       if (err)
-               return err;
        register_netdevice_notifier(&ipv6_dev_notf);
  
        addrconf_verify(0);
@@@ -4533,15 -4515,22 +4509,22 @@@ errlo
  void addrconf_cleanup(void)
  {
        struct inet6_ifaddr *ifa;
+       struct net_device *dev;
        int i;
  
        unregister_netdevice_notifier(&ipv6_dev_notf);
-       unregister_pernet_device(&addrconf_net_ops);
        unregister_pernet_subsys(&addrconf_ops);
  
        rtnl_lock();
  
+       /* clean dev list */
+       for_each_netdev(&init_net, dev) {
+               if (__in6_dev_get(dev) == NULL)
+                       continue;
+               addrconf_ifdown(dev, 1);
+       }
+       addrconf_ifdown(init_net.loopback_dev, 2);
        /*
         *      Check hash table.
         */
  
        del_timer(&addr_chk_timer);
        rtnl_unlock();
-       unregister_pernet_subsys(&addrconf_net_ops);
  }
diff --combined net/ipv6/af_inet6.c
@@@ -72,6 -72,10 +72,10 @@@ MODULE_LICENSE("GPL")
  static struct list_head inetsw6[SOCK_MAX];
  static DEFINE_SPINLOCK(inetsw6_lock);
  
+ static int disable_ipv6 = 0;
+ module_param_named(disable, disable_ipv6, int, 0);
+ MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional");
  static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
  {
        const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
@@@ -799,34 -803,24 +803,34 @@@ static struct sk_buff **ipv6_gro_receiv
        int proto;
        __wsum csum;
  
 -      if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
 +      iph = skb_gro_header(skb, sizeof(*iph));
 +      if (unlikely(!iph))
                goto out;
  
 -      iph = ipv6_hdr(skb);
 -      __skb_pull(skb, sizeof(*iph));
 +      skb_gro_pull(skb, sizeof(*iph));
 +      skb_set_transport_header(skb, skb_gro_offset(skb));
  
 -      flush += ntohs(iph->payload_len) != skb->len;
 +      flush += ntohs(iph->payload_len) != skb_gro_len(skb);
  
        rcu_read_lock();
 -      proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
 -      iph = ipv6_hdr(skb);
 -      IPV6_GRO_CB(skb)->proto = proto;
 +      proto = iph->nexthdr;
        ops = rcu_dereference(inet6_protos[proto]);
 -      if (!ops || !ops->gro_receive)
 -              goto out_unlock;
 +      if (!ops || !ops->gro_receive) {
 +              __pskb_pull(skb, skb_gro_offset(skb));
 +              proto = ipv6_gso_pull_exthdrs(skb, proto);
 +              skb_gro_pull(skb, -skb_transport_offset(skb));
 +              skb_reset_transport_header(skb);
 +              __skb_push(skb, skb_gro_offset(skb));
 +
 +              if (!ops || !ops->gro_receive)
 +                      goto out_unlock;
 +
 +              iph = ipv6_hdr(skb);
 +      }
 +
 +      IPV6_GRO_CB(skb)->proto = proto;
  
        flush--;
 -      skb_reset_transport_header(skb);
        nlen = skb_network_header_len(skb);
  
        for (p = *head; p; p = p->next) {
@@@ -890,7 -884,7 +894,7 @@@ out_unlock
  }
  
  static struct packet_type ipv6_packet_type = {
 -      .type = __constant_htons(ETH_P_IPV6),
 +      .type = cpu_to_be16(ETH_P_IPV6),
        .func = ipv6_rcv,
        .gso_send_check = ipv6_gso_send_check,
        .gso_segment = ipv6_gso_segment,
@@@ -1001,10 -995,21 +1005,21 @@@ static int __init inet6_init(void
  {
        struct sk_buff *dummy_skb;
        struct list_head *r;
-       int err;
+       int err = 0;
  
        BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
  
+       /* Register the socket-side information for inet6_create.  */
+       for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+               INIT_LIST_HEAD(r);
+       if (disable_ipv6) {
+               printk(KERN_INFO
+                      "IPv6: Loaded, but administratively disabled, "
+                      "reboot required to enable\n");
+               goto out;
+       }
        err = proto_register(&tcpv6_prot, 1);
        if (err)
                goto out;
                goto out_unregister_udplite_proto;
  
  
-       /* Register the socket-side information for inet6_create.  */
-       for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
-               INIT_LIST_HEAD(r);
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
         */
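
The disable_ipv6 change above follows the usual "load but do nothing" module-parameter pattern: the parameter is read once at init time, and when set the init routine skips every registration step. A minimal self-contained sketch of that pattern (illustrative names only, not part of the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_disable;
module_param_named(disable, example_disable, int, 0);
MODULE_PARM_DESC(disable, "Load the module but leave it non-functional");

static int __init example_init(void)
{
	if (example_disable) {
		/* Stay loaded so the setting is visible, but register
		 * nothing; a reload (or reboot for built-in code) is
		 * needed to enable. */
		printk(KERN_INFO "example: administratively disabled\n");
		return 0;
	}
	/* ... normal protocol/driver registration would go here ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* Nothing was registered when disabled, so nothing to undo. */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
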
diff --combined net/netlink/af_netlink.c
@@@ -85,7 -85,6 +85,7 @@@ struct netlink_sock 
  
  #define NETLINK_KERNEL_SOCKET 0x1
  #define NETLINK_RECV_PKTINFO  0x2
 +#define NETLINK_BROADCAST_SEND_ERROR  0x4
  
  static inline struct netlink_sock *nlk_sk(struct sock *sk)
  {
@@@ -951,7 -950,6 +951,7 @@@ struct netlink_broadcast_data 
        u32 pid;
        u32 group;
        int failure;
 +      int delivery_failure;
        int congested;
        int delivered;
        gfp_t allocation;
@@@ -996,15 -994,11 +996,15 @@@ static inline int do_one_broadcast(stru
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
 +              if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
 +                      p->delivery_failure = 1;
        } else if (sk_filter(sk, p->skb2)) {
                kfree_skb(p->skb2);
                p->skb2 = NULL;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
 +              if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
 +                      p->delivery_failure = 1;
        } else {
                p->congested |= val;
                p->delivered = 1;
@@@ -1031,7 -1025,6 +1031,7 @@@ int netlink_broadcast(struct sock *ssk
        info.pid = pid;
        info.group = group;
        info.failure = 0;
 +      info.delivery_failure = 0;
        info.congested = 0;
        info.delivered = 0;
        info.allocation = allocation;
  
        netlink_unlock_table();
  
 -      if (info.skb2)
 -              kfree_skb(info.skb2);
 +      kfree_skb(info.skb2);
 +
 +      if (info.delivery_failure)
 +              return -ENOBUFS;
  
        if (info.delivered) {
                if (info.congested && (allocation & __GFP_WAIT))
                        yield();
                return 0;
        }
 -      if (info.failure)
 -              return -ENOBUFS;
        return -ESRCH;
  }
  EXPORT_SYMBOL(netlink_broadcast);
        return 0;
  }
  
+ /**
+  * netlink_set_err - report error to broadcast listeners
+  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
+  * @pid: the PID of a process that we want to skip (if any)
+  * @group: the broadcast group that will notice the error
+  * @code: error code, must be negative (as usual in kernelspace)
+  */
  void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
  {
        struct netlink_set_err_data info;
        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
-       info.code = code;
+       /* sk->sk_err wants a positive error value */
+       info.code = -code;
  
        read_lock(&nl_table_lock);
  
@@@ -1166,13 -1167,6 +1174,13 @@@ static int netlink_setsockopt(struct so
                err = 0;
                break;
        }
 +      case NETLINK_BROADCAST_ERROR:
 +              if (val)
 +                      nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
 +              else
 +                      nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
 +              err = 0;
 +              break;
        default:
                err = -ENOPROTOOPT;
        }
@@@ -1205,16 -1199,6 +1213,16 @@@ static int netlink_getsockopt(struct so
                        return -EFAULT;
                err = 0;
                break;
 +      case NETLINK_BROADCAST_ERROR:
 +              if (len < sizeof(int))
 +                      return -EINVAL;
 +              len = sizeof(int);
 +              val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
 +              if (put_user(len, optlen) ||
 +                  put_user(val, optval))
 +                      return -EFAULT;
 +              err = 0;
 +              break;
        default:
                err = -ENOPROTOOPT;
        }
@@@ -1541,7 -1525,8 +1549,7 @@@ EXPORT_SYMBOL(netlink_set_nonroot)
  
  static void netlink_destroy_callback(struct netlink_callback *cb)
  {
 -      if (cb->skb)
 -              kfree_skb(cb->skb);
 +      kfree_skb(cb->skb);
        kfree(cb);
  }
  
@@@ -1758,18 -1743,12 +1766,18 @@@ int nlmsg_notify(struct sock *sk, struc
                        exclude_pid = pid;
                }
  
 -              /* errors reported via destination sk->sk_err */
 -              nlmsg_multicast(sk, skb, exclude_pid, group, flags);
 +              /* errors reported via destination sk->sk_err, but propagate
 +               * delivery errors if the NETLINK_BROADCAST_ERROR flag is set */
 +              err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
        }
  
 -      if (report)
 -              err = nlmsg_unicast(sk, skb, pid);
 +      if (report) {
 +              int err2;
 +
 +              err2 = nlmsg_unicast(sk, skb, pid);
 +              if (!err || err == -ESRCH)
 +                      err = err2;
 +      }
  
        return err;
  }
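
The NETLINK_BROADCAST_ERROR option added above is set by a broadcast listener: when it is enabled on the receiving socket, a failed delivery to that socket makes the in-kernel netlink_broadcast() caller see -ENOBUFS instead of the error being visible only through the listener's sk_err. A hedged userspace sketch (the option value and the SOL_NETLINK fallback define are assumptions taken from the uapi headers of this era, not from this diff; NETLINK_ROUTE is just an example protocol):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270			/* from <linux/socket.h> */
#endif
#ifndef NETLINK_BROADCAST_ERROR
#define NETLINK_BROADCAST_ERROR 4	/* value assumed; new in this series */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Ask the kernel to report failed broadcast deliveries to this
	 * socket back to the broadcasting subsystem (see do_one_broadcast()
	 * above), rather than only raising sk_err on this socket. */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_BROADCAST_ERROR,
		       &one, sizeof(one)) < 0)
		perror("setsockopt(NETLINK_BROADCAST_ERROR)");
	return 0;
}
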
diff --combined net/sctp/protocol.c
@@@ -589,21 -589,46 +589,21 @@@ static int sctp_v4_is_ce(const struct s
  static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
                                             struct sctp_association *asoc)
  {
 -      struct inet_sock *inet = inet_sk(sk);
 -      struct inet_sock *newinet;
        struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
                        sk->sk_prot);
 +      struct inet_sock *newinet;
  
        if (!newsk)
                goto out;
  
        sock_init_data(NULL, newsk);
  
 -      newsk->sk_type = SOCK_STREAM;
 -
 -      newsk->sk_no_check = sk->sk_no_check;
 -      newsk->sk_reuse = sk->sk_reuse;
 -      newsk->sk_shutdown = sk->sk_shutdown;
 -
 -      newsk->sk_destruct = inet_sock_destruct;
 -      newsk->sk_family = PF_INET;
 -      newsk->sk_protocol = IPPROTO_SCTP;
 -      newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
 +      sctp_copy_sock(newsk, sk, asoc);
        sock_reset_flag(newsk, SOCK_ZAPPED);
  
        newinet = inet_sk(newsk);
  
 -      /* Initialize sk's sport, dport, rcv_saddr and daddr for
 -       * getsockname() and getpeername()
 -       */
 -      newinet->sport = inet->sport;
 -      newinet->saddr = inet->saddr;
 -      newinet->rcv_saddr = inet->rcv_saddr;
 -      newinet->dport = htons(asoc->peer.port);
        newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
 -      newinet->pmtudisc = inet->pmtudisc;
 -      newinet->id = asoc->next_tsn ^ jiffies;
 -
 -      newinet->uc_ttl = -1;
 -      newinet->mc_loop = 1;
 -      newinet->mc_ttl = 1;
 -      newinet->mc_index = 0;
 -      newinet->mc_list = NULL;
  
        sk_refcnt_debug_inc(newsk);
  
@@@ -692,15 -717,20 +692,20 @@@ static int sctp_inetaddr_event(struct n
  static int sctp_ctl_sock_init(void)
  {
        int err;
-       sa_family_t family;
+       sa_family_t family = PF_INET;
  
        if (sctp_get_pf_specific(PF_INET6))
                family = PF_INET6;
-       else
-               family = PF_INET;
  
        err = inet_ctl_sock_create(&sctp_ctl_sock, family,
                                   SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
+       /* If IPv6 socket could not be created, try the IPv4 socket */
+       if (err < 0 && family == PF_INET6)
+               err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
+                                          SOCK_SEQPACKET, IPPROTO_SCTP,
+                                          &init_net);
        if (err < 0) {
                printk(KERN_ERR
                       "SCTP: Failed to create the SCTP control socket.\n");
@@@ -1297,9 -1327,8 +1302,8 @@@ SCTP_STATIC __init int sctp_init(void
  out:
        return status;
  err_v6_add_protocol:
-       sctp_v6_del_protocol();
- err_add_protocol:
        sctp_v4_del_protocol();
+ err_add_protocol:
        inet_ctl_sock_destroy(sctp_ctl_sock);
  err_ctl_sock_init:
        sctp_v6_protosw_exit();
@@@ -1310,7 -1339,6 +1314,6 @@@ err_protosw_init
        sctp_v4_pf_exit();
        sctp_v6_pf_exit();
        sctp_sysctl_unregister();
-       list_del(&sctp_af_inet.list);
        free_pages((unsigned long)sctp_port_hashtable,
                   get_order(sctp_port_hashsize *
                             sizeof(struct sctp_bind_hashbucket)));
@@@ -1358,7 -1386,6 +1361,6 @@@ SCTP_STATIC __exit void sctp_exit(void
        sctp_v4_pf_exit();
  
        sctp_sysctl_unregister();
-       list_del(&sctp_af_inet.list);
  
        free_pages((unsigned long)sctp_assoc_hashtable,
                   get_order(sctp_assoc_hashsize *
@@@ -1386,6 -1413,4 +1388,6 @@@ MODULE_ALIAS("net-pf-" __stringify(PF_I
  MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
  MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
  MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
 +module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
 +MODULE_PARM_DESC(no_checksums, "Disable checksum computation and verification");
  MODULE_LICENSE("GPL");
diff --combined net/sctp/sm_sideeffect.c
@@@ -434,8 -434,7 +434,8 @@@ sctp_timer_event_t *sctp_timer_events[S
   *
   */
  static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
 -                                       struct sctp_transport *transport)
 +                                       struct sctp_transport *transport,
 +                                       int is_hb)
  {
        /* The check for association's overall error counter exceeding the
         * threshold is done in the state function.
         * expires, set RTO <- RTO * 2 ("back off the timer").  The
         * maximum value discussed in rule C7 above (RTO.max) may be
         * used to provide an upper bound to this doubling operation.
 +       *
 +       * Special Case:  the first HB doesn't trigger exponential backoff.
 +       * The first unacknowledged HB triggers it.  We do this with a flag
 +       * that indicates that we have an outstanding HB.
         */
 -      transport->last_rto = transport->rto;
 -      transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
 +      if (!is_hb || transport->hb_sent) {
 +              transport->last_rto = transport->rto;
 +              transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
 +      }
  }
  
  /* Worker routine to handle INIT command failure.  */
@@@ -628,11 -621,6 +628,11 @@@ static void sctp_cmd_transport_on(sctp_
        t->error_count = 0;
        t->asoc->overall_error_count = 0;
  
 +      /* Clear the hb_sent flag to signal that we had a good
 +       * acknowledgement.
 +       */
 +      t->hb_sent = 0;
 +
        /* Mark the destination transport address as active if it is not so
         * marked.
         */
                sctp_transport_hold(t);
  }
  
 -/* Helper function to do a transport reset at the expiry of the hearbeat
 - * timer.
 - */
 -static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
 -                                   struct sctp_association *asoc,
 -                                   struct sctp_transport *t)
 -{
 -      sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
 -
 -      /* Mark one strike against a transport.  */
 -      sctp_do_8_2_transport_strike(asoc, t);
 -}
  
  /* Helper function to process the process SACK command.  */
  static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
@@@ -787,36 -787,48 +787,48 @@@ static void sctp_cmd_process_operr(sctp
                                   struct sctp_association *asoc,
                                   struct sctp_chunk *chunk)
  {
-       struct sctp_operr_chunk *operr_chunk;
        struct sctp_errhdr *err_hdr;
-       operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;
-       err_hdr = &operr_chunk->err_hdr;
-       switch (err_hdr->cause) {
-       case SCTP_ERROR_UNKNOWN_CHUNK:
-       {
-               struct sctp_chunkhdr *unk_chunk_hdr;
-               unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;
-               switch (unk_chunk_hdr->type) {
-               /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an
-                * ERROR chunk reporting that it did not recognized the ASCONF
-                * chunk type, the sender of the ASCONF MUST NOT send any
-                * further ASCONF chunks and MUST stop its T-4 timer.
-                */
-               case SCTP_CID_ASCONF:
-                       asoc->peer.asconf_capable = 0;
-                       sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
+       struct sctp_ulpevent *ev;
+       while (chunk->chunk_end > chunk->skb->data) {
+               err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
+               ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
+                                                    GFP_ATOMIC);
+               if (!ev)
+                       return;
+               sctp_ulpq_tail_event(&asoc->ulpq, ev);
+               switch (err_hdr->cause) {
+               case SCTP_ERROR_UNKNOWN_CHUNK:
+               {
+                       sctp_chunkhdr_t *unk_chunk_hdr;
+                       unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
+                       switch (unk_chunk_hdr->type) {
+                       /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
+                        * an ERROR chunk reporting that it did not recognize
+                        * the ASCONF chunk type, the sender of the ASCONF MUST
+                        * NOT send any further ASCONF chunks and MUST stop its
+                        * T-4 timer.
+                        */
+                       case SCTP_CID_ASCONF:
+                               if (asoc->peer.asconf_capable == 0)
+                                       break;
+                               asoc->peer.asconf_capable = 0;
+                               sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
                                        SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+                               break;
+                       default:
+                               break;
+                       }
                        break;
+               }
                default:
                        break;
                }
-               break;
-       }
-       default:
-               break;
        }
  }
  
@@@ -1446,19 -1458,12 +1458,19 @@@ static int sctp_cmd_interpreter(sctp_ev
  
                case SCTP_CMD_STRIKE:
                        /* Mark one strike against a transport.  */
 -                      sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
 +                      sctp_do_8_2_transport_strike(asoc, cmd->obj.transport,
 +                                                  0);
 +                      break;
 +
 +              case SCTP_CMD_TRANSPORT_IDLE:
 +                      t = cmd->obj.transport;
 +                      sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
                        break;
  
 -              case SCTP_CMD_TRANSPORT_RESET:
 +              case SCTP_CMD_TRANSPORT_HB_SENT:
                        t = cmd->obj.transport;
 -                      sctp_cmd_transport_reset(commands, asoc, t);
 +                      sctp_do_8_2_transport_strike(asoc, t, 1);
 +                      t->hb_sent = 1;
                        break;
  
                case SCTP_CMD_TRANSPORT_ON:
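
Taken together, the sm_sideeffect.c changes make a heartbeat timeout back off the RTO only once a heartbeat is already outstanding, while data timeouts keep backing off unconditionally. A simplified, self-contained sketch of that rule (plain structs, not the kernel's sctp_transport; hb_sent is cleared by the acknowledgement path, SCTP_CMD_TRANSPORT_ON above):

struct xport {
	unsigned long rto;	/* current retransmission timeout */
	unsigned long rto_max;	/* upper bound (rule C7) */
	unsigned long last_rto;
	int hb_sent;		/* a heartbeat is still unacknowledged */
};

/* Mirrors the is_hb logic of sctp_do_8_2_transport_strike() above,
 * stripped of error counters and transport state handling. */
static void transport_strike(struct xport *t, int is_hb)
{
	/* Data timeouts always double the RTO; a heartbeat timeout does so
	 * only if the previous heartbeat is still unacknowledged. */
	if (!is_hb || t->hb_sent) {
		t->last_rto = t->rto;
		t->rto = (2 * t->rto < t->rto_max) ? 2 * t->rto : t->rto_max;
	}
}

static void heartbeat_sent(struct xport *t)
{
	transport_strike(t, 1);	/* as SCTP_CMD_TRANSPORT_HB_SENT does */
	t->hb_sent = 1;
}

static void heartbeat_acked(struct xport *t)
{
	t->hb_sent = 0;		/* as SCTP_CMD_TRANSPORT_ON does */
}
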
diff --combined net/sctp/sm_statefuns.c
@@@ -988,9 -988,7 +988,9 @@@ sctp_disposition_t sctp_sf_sendbeat_8_3
                /* Set transport error counter and association error counter
                 * when sending heartbeat.
                 */
 -              sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
 +              sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
 +                              SCTP_TRANSPORT(transport));
 +              sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
                                SCTP_TRANSPORT(transport));
        }
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
@@@ -3165,7 -3163,6 +3165,6 @@@ sctp_disposition_t sctp_sf_operr_notify
                                        sctp_cmd_seq_t *commands)
  {
        struct sctp_chunk *chunk = arg;
-       struct sctp_ulpevent *ev;
  
        if (!sctp_vtag_verify(chunk, asoc))
                return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
                return sctp_sf_violation_chunklen(ep, asoc, type, arg,
                                                  commands);
  
-       while (chunk->chunk_end > chunk->skb->data) {
-               ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
-                                                    GFP_ATOMIC);
-               if (!ev)
-                       goto nomem;
+       sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
+                       SCTP_CHUNK(chunk));
  
-               sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
-                               SCTP_ULPEVENT(ev));
-               sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
-                               SCTP_CHUNK(chunk));
-       }
        return SCTP_DISPOSITION_CONSUME;
- nomem:
-       return SCTP_DISPOSITION_NOMEM;
  }
  
  /*
@@@ -4969,7 -4955,7 +4957,7 @@@ sctp_disposition_t sctp_sf_do_prm_reque
         *    to that address and not acknowledged within one RTO.
         *
         */
 -      sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
 +      sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
                        SCTP_TRANSPORT(arg));
        return SCTP_DISPOSITION_CONSUME;
  }