Merge branch 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur...
author	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 26 Sep 2011 20:26:30 +0000 (13:26 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 26 Sep 2011 20:26:30 +0000 (13:26 -0700)
* 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm:
  ARM: 7099/1: futex: preserve oldval in SMP __futex_atomic_op
  ARM: dma-mapping: free allocated page if unable to map
  ARM: fix vmlinux.lds.S discarding sections
  ARM: nommu: fix warning with checksyscalls.sh
  ARM: 7091/1: errata: D-cache line maintenance operation by MVA may not succeed

212 files changed:
Documentation/networking/dmfe.txt
Documentation/vm/transhuge.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/tegra-harmony.dts
arch/arm/boot/dts/tegra-seaboard.dts
arch/arm/mach-dove/common.c
arch/arm/mach-exynos4/clock.c
arch/arm/mach-exynos4/mct.c
arch/arm/mach-exynos4/platsmp.c
arch/arm/mach-exynos4/setup-keypad.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-integrator/pci_v3.c
arch/arm/mach-s3c64xx/mach-smdk6410.c
arch/arm/plat-samsung/clock.c
arch/arm/plat-samsung/include/plat/clock.h
arch/arm/plat-samsung/include/plat/watchdog-reset.h
arch/s390/include/asm/pgtable.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry64.S
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgtable.c
block/blk-cgroup.c
block/blk-core.c
block/blk-softirq.c
block/blk-sysfs.c
block/cfq-iosched.c
drivers/block/floppy.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm_nsc.c
drivers/firewire/ohci.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/iommu/dmar.c
drivers/mfd/max8997.c
drivers/mfd/omap-usb-host.c
drivers/mfd/tps65910-irq.c
drivers/mfd/twl4030-madc.c
drivers/mfd/wm8350-gpio.c
drivers/mmc/card/block.c
drivers/net/Kconfig
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_reg.h
drivers/net/bnx2x/bnx2x_stats.c
drivers/net/can/ti_hecc.c
drivers/net/e1000/e1000_hw.c
drivers/net/gianfar_ethtool.c
drivers/net/greth.c
drivers/net/greth.h
drivers/net/ibmveth.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/netconsole.c
drivers/net/pch_gbe/pch_gbe.h
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/ppp_generic.c
drivers/net/pxa168_eth.c
drivers/net/r8169.c
drivers/net/sfc/efx.c
drivers/net/sfc/io.h
drivers/net/sfc/mcdi.c
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/siena.c
drivers/net/sfc/workarounds.h
drivers/net/tg3.c
drivers/net/usb/ipheth.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/scsi/Kconfig
drivers/scsi/qla2xxx/qla_isr.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-imx.c
drivers/staging/comedi/drivers/ni_labpc.c
drivers/staging/zcache/zcache-main.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_cdb.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/target/tcm_fc/tfc_io.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/watchdog/hpwdt.c
drivers/watchdog/lantiq_wdt.c
drivers/watchdog/sbc_epx_c3.c
drivers/watchdog/watchdog_dev.c
drivers/zorro/zorro.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/gfs2/log.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/proc/task_mmu.c
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/kvm.h
include/linux/mfd/wm8994/pdata.h
include/linux/skbuff.h
include/linux/snmp.h
include/net/flow.h
include/net/request_sock.h
include/net/sctp/command.h
include/net/tcp.h
include/net/transp_v6.h
init/main.c
kernel/irq/chip.c
kernel/ptrace.c
kernel/taskstats.c
kernel/tsacct.c
lib/xz/xz_dec_bcj.c
mm/backing-dev.c
mm/slub.c
net/bluetooth/hci_event.c
net/bridge/netfilter/Kconfig
net/caif/caif_dev.c
net/can/af_can.c
net/core/dev.c
net/core/fib_rules.c
net/core/flow.c
net/core/skbuff.c
net/ethernet/eth.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/proc.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/irsysctl.c
net/irda/qos.c
net/mac80211/sta_info.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nfnetlink_queue.c
net/netfilter/xt_rateest.c
net/sched/cls_rsvp.h
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/wireless/reg.c
net/wireless/sme.c
net/xfrm/xfrm_input.c
sound/pci/fm801.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/blackfin/bf5xx-ad73311.c
sound/soc/codecs/wm8962.c
sound/soc/soc-core.c
sound/usb/card.c
tools/perf/Makefile
tools/perf/builtin-record.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/probe-finder.c
tools/perf/util/python.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/symbol.c

index 8006c22..25320bf 100644
@@ -1,3 +1,5 @@
+Note: This driver doesn't have a maintainer.
+
 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.
 
 This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.
 Authors:
 
 Sten Wang <sten_wang@davicom.com.tw >   : Original Author
-Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer
 
 Contributors:
 
index 0924aac..29bdf62 100644
@@ -123,10 +123,11 @@ be automatically shutdown if it's set to "never".
 khugepaged runs usually at low frequency so while one may not want to
 invoke defrag algorithms synchronously during the page faults, it
 should be worth invoking defrag at least in khugepaged. However it's
-also possible to disable defrag in khugepaged:
+also possible to disable defrag in khugepaged by writing 0 or enable
+defrag in khugepaged by writing 1:
 
-echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
-echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
 
 You can also control how many pages khugepaged should scan at each
 pass:
index 0acf9ab..ae8820e 100644
@@ -1278,7 +1278,6 @@ F:        drivers/input/misc/ati_remote2.c
 ATLX ETHERNET DRIVERS
 M:     Jay Cliburn <jcliburn@gmail.com>
 M:     Chris Snook <chris.snook@gmail.com>
-M:     Jie Yang <jie.yang@atheros.com>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/atl1
 W:     http://atl1.sourceforge.net
@@ -1574,7 +1573,6 @@ F:        drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 M:     Rasesh Mody <rmody@brocade.com>
-M:     Debashis Dutt <ddutt@brocade.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/bna/
@@ -1758,7 +1756,6 @@ F:        Documentation/zh_CN/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
-M:     Vasanthy Kolluri <vkolluri@cisco.com>
 M:     Roopa Prabhu <roprabhu@cisco.com>
 M:     David Wang <dwang2@cisco.com>
 S:     Supported
@@ -4415,7 +4412,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
index 522fa47..733dcba 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = "Divemaster Edition"
 
 # *DOCUMENTATION*
index 4c05334..e581866 100644
        };
 
        sdhci@c8000200 {
-               gpios = <&gpio 69 0>, /* cd, gpio PI5 */
-                       <&gpio 57 0>, /* wp, gpio PH1 */
-                       <&gpio 155 0>; /* power, gpio PT3 */
+               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+               power-gpios = <&gpio 155 0>; /* gpio PT3 */
        };
 
        sdhci@c8000600 {
-               gpios = <&gpio 58 0>, /* cd, gpio PH2 */
-                       <&gpio 59 0>, /* wp, gpio PH3 */
-                       <&gpio 70 0>; /* power, gpio PI6 */
+               cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+               wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+               power-gpios = <&gpio 70 0>; /* gpio PI6 */
        };
 };
index 1940cae..64cedca 100644
@@ -21,8 +21,8 @@
        };
 
        sdhci@c8000400 {
-               gpios = <&gpio 69 0>, /* cd, gpio PI5 */
-                       <&gpio 57 0>, /* wp, gpio PH1 */
-                       <&gpio 70 0>; /* power, gpio PI6 */
+               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+               power-gpios = <&gpio 70 0>; /* gpio PI6 */
        };
 };
index 83dce85..a9e0dae 100644
@@ -158,7 +158,7 @@ void __init dove_spi0_init(void)
 
 void __init dove_spi1_init(void)
 {
-       orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+       orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
 }
 
 /*****************************************************************************
index 1561b03..79d6cd0 100644
@@ -1160,7 +1160,7 @@ void __init_or_cpufreq exynos4_setup_clocks(void)
 
        vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
        vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
-                               __raw_readl(S5P_VPLL_CON1), pll_4650);
+                               __raw_readl(S5P_VPLL_CON1), pll_4650c);
 
        clk_fout_apll.ops = &exynos4_fout_apll_ops;
        clk_fout_mpll.rate = mpll;
index 1ae059b..ddd8686 100644
@@ -132,12 +132,18 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
        return ((cycle_t)hi << 32) | lo;
 }
 
+static void exynos4_frc_resume(struct clocksource *cs)
+{
+       exynos4_mct_frc_start(0, 0);
+}
+
 struct clocksource mct_frc = {
        .name           = "mct-frc",
        .rating         = 400,
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+       .resume         = exynos4_frc_resume,
 };
 
 static void __init exynos4_clocksource_init(void)
@@ -389,9 +395,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)
 }
 
 /* Setup the local clock events for a CPU */
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
 {
        exynos4_mct_tick_init(evt);
+
+       return 0;
 }
 
 int local_timer_ack(void)
index 7c2282c..df6ef1b 100644
@@ -106,6 +106,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
         */
        spin_lock(&boot_lock);
        spin_unlock(&boot_lock);
+
+       set_cpu_online(cpu, true);
 }
 
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
index 1ee0ebf..7862bfb 100644
@@ -19,15 +19,16 @@ void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 
        if (rows > 8) {
                /* Set all the necessary GPX2 pins: KP_ROW[0~7] */
-               s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3));
+               s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3),
+                                       S3C_GPIO_PULL_UP);
 
                /* Set all the necessary GPX3 pins: KP_ROW[8~] */
-               s3c_gpio_cfgrange_nopull(EXYNOS4_GPX3(0), (rows - 8),
-                                        S3C_GPIO_SFN(3));
+               s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8),
+                                        S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
        } else {
                /* Set all the necessary GPX2 pins: KP_ROW[x] */
-               s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), rows,
-                                        S3C_GPIO_SFN(3));
+               s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3),
+                                       S3C_GPIO_PULL_UP);
        }
 
        /* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */
index fcf0ae9..8cdc730 100644
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/mtd/physmap.h>
+#include <video/vga.h>
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
@@ -154,6 +155,7 @@ static struct map_desc ap_io_desc[] __initdata = {
 static void __init ap_map_io(void)
 {
        iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc));
+       vga_base = PCI_MEMORY_VADDR;
 }
 
 #define INTEGRATOR_SC_VALID_INT        0x003fffff
index dd56bfb..11b86e5 100644
@@ -27,7 +27,6 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/io.h>
-#include <video/vga.h>
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
@@ -505,7 +504,6 @@ void __init pci_v3_preinit(void)
 
        pcibios_min_io = 0x6000;
        pcibios_min_mem = 0x00100000;
-       vga_base = PCI_MEMORY_VADDR;
 
        /*
         * Hook in our fault handler for PCI errors
index ecbea92..a9f3183 100644
@@ -262,45 +262,6 @@ static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = {
        .cols           = 8,
 };
 
-static int smdk6410_backlight_init(struct device *dev)
-{
-       int ret;
-
-       ret = gpio_request(S3C64XX_GPF(15), "Backlight");
-       if (ret) {
-               printk(KERN_ERR "failed to request GPF for PWM-OUT1\n");
-               return ret;
-       }
-
-       /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */
-       s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
-
-       return 0;
-}
-
-static void smdk6410_backlight_exit(struct device *dev)
-{
-       s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT);
-       gpio_free(S3C64XX_GPF(15));
-}
-
-static struct platform_pwm_backlight_data smdk6410_backlight_data = {
-       .pwm_id         = 1,
-       .max_brightness = 255,
-       .dft_brightness = 255,
-       .pwm_period_ns  = 78770,
-       .init           = smdk6410_backlight_init,
-       .exit           = smdk6410_backlight_exit,
-};
-
-static struct platform_device smdk6410_backlight_device = {
-       .name           = "pwm-backlight",
-       .dev            = {
-               .parent         = &s3c_device_timer[1].dev,
-               .platform_data  = &smdk6410_backlight_data,
-       },
-};
-
 static struct map_desc smdk6410_iodesc[] = {};
 
 static struct platform_device *smdk6410_devices[] __initdata = {
index 302c426..3b44519 100644
@@ -64,6 +64,17 @@ static LIST_HEAD(clocks);
  */
 DEFINE_SPINLOCK(clocks_lock);
 
+/* Global watchdog clock used by arch_wtd_reset() callback */
+struct clk *s3c2410_wdtclk;
+static int __init s3c_wdt_reset_init(void)
+{
+       s3c2410_wdtclk = clk_get(NULL, "watchdog");
+       if (IS_ERR(s3c2410_wdtclk))
+               printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
+       return 0;
+}
+arch_initcall(s3c_wdt_reset_init);
+
 /* enable and disable calls for use with the clk struct */
 
 static int clk_null_enable(struct clk *clk, int enable)
index 87d5b38..73c66d4 100644
@@ -9,6 +9,9 @@
  * published by the Free Software Foundation.
 */
 
+#ifndef __ASM_PLAT_CLOCK_H
+#define __ASM_PLAT_CLOCK_H __FILE__
+
 #include <linux/spinlock.h>
 #include <linux/clkdev.h>
 
@@ -121,3 +124,8 @@ extern int s3c64xx_sclk_ctrl(struct clk *clk, int enable);
 
 extern void s3c_pwmclk_init(void);
 
+/* Global watchdog clock used by arch_wtd_reset() callback */
+
+extern struct clk *s3c2410_wdtclk;
+
+#endif /* __ASM_PLAT_CLOCK_H */
index 54b762a..40dbb2b 100644
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
 */
 
+#include <plat/clock.h>
 #include <plat/regs-watchdog.h>
 #include <mach/map.h>
 
 
 static inline void arch_wdt_reset(void)
 {
-       struct clk *wdtclk;
-
        printk("arch_reset: attempting watchdog reset\n");
 
        __raw_writel(0, S3C2410_WTCON);   /* disable watchdog, to be safe  */
 
-       wdtclk = clk_get(NULL, "watchdog");
-       if (!IS_ERR(wdtclk)) {
-               clk_enable(wdtclk);
-       } else
-               printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
+       if (s3c2410_wdtclk)
+               clk_enable(s3c2410_wdtclk);
 
        /* put initial values into count and data */
        __raw_writel(0x80, S3C2410_WTCNT);
index 519eb5f..c0cb794 100644
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
  * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
  * @crst_list: list of all crst tables used in the guest address space
  */
 struct gmap {
        struct list_head list;
        struct mm_struct *mm;
        unsigned long *table;
+       unsigned long asce;
        struct list_head crst_list;
 };
 
index 532fd43..2b45591 100644
@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <asm/vdso.h>
 #include <asm/sigp.h>
+#include <asm/pgtable.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
        DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
        DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
        DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+       DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
        DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
        DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
        DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+       DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
        return 0;
 }
index 5f729d6..713da07 100644
@@ -1076,6 +1076,11 @@ sie_loop:
        lg      %r14,__LC_THREAD_INFO           # pointer thread_info struct
        tm      __TI_flags+7(%r14),_TIF_EXIT_SIE
        jnz     sie_exit
+       lg      %r14,__LC_GMAP                  # get gmap pointer
+       ltgr    %r14,%r14
+       jz      sie_gmap
+       lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
+sie_gmap:
        lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
        SPP     __SF_EMPTY(%r15)                # set guest id
        sie     0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
        SPP     __LC_CMF_HPP                    # set host id
        lg      %r14,__LC_THREAD_INFO           # pointer thread_info struct
 sie_exit:
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        ni      __TI_flags+6(%r14),255-(_TIF_SIE>>8)
        lg      %r14,__SF_EMPTY+8(%r15)         # load guest register save area
        stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
index f17296e..dc2b580 100644
@@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 
        switch (ext) {
        case KVM_CAP_S390_PSW:
+       case KVM_CAP_S390_GMAP:
                r = 1;
                break;
        default:
@@ -263,10 +264,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
+       gmap_enable(vcpu->arch.gmap);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +464,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
-       gmap_enable(vcpu->arch.gmap);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +472,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
-       gmap_disable(vcpu->arch.gmap);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();
index 4d1f2bc..f69ff3c 100644
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
+       gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+                    _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;
 
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
  */
 void gmap_enable(struct gmap *gmap)
 {
-       /* Load primary space page table origin. */
-       S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
-                                _ASCE_USER_BITS | __pa(gmap->table);
-       asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
        S390_lowcore.gmap = (unsigned long) gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-       /* Load primary space page table origin. */
-       S390_lowcore.user_asce =
-               gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
-       asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
        S390_lowcore.gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
index bcaf16e..b596e54 100644
@@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,
 {
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        int ret;
-       unsigned long major, minor, temp;
+       unsigned long major, minor;
        int i = 0;
        dev_t dev;
-       u64 bps, iops;
+       u64 temp;
 
        memset(s, 0, sizeof(s));
 
@@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,
 
        dev = MKDEV(major, minor);
 
-       ret = blkio_check_dev_num(dev);
+       ret = strict_strtoull(s[1], 10, &temp);
        if (ret)
-               return ret;
+               return -EINVAL;
 
-       newpn->dev = dev;
+       /* For rule removal, do not check for device presence. */
+       if (temp) {
+               ret = blkio_check_dev_num(dev);
+               if (ret)
+                       return ret;
+       }
 
-       if (s[1] == NULL)
-               return -EINVAL;
+       newpn->dev = dev;
 
        switch (plid) {
        case BLKIO_POLICY_PROP:
-               ret = strict_strtoul(s[1], 10, &temp);
-               if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
-                       temp > BLKIO_WEIGHT_MAX)
+               if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+                    temp > BLKIO_WEIGHT_MAX)
                        return -EINVAL;
 
                newpn->plid = plid;
@@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,
                switch(fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
-                       ret = strict_strtoull(s[1], 10, &bps);
-                       if (ret)
-                               return -EINVAL;
-
                        newpn->plid = plid;
                        newpn->fileid = fileid;
-                       newpn->val.bps = bps;
+                       newpn->val.bps = temp;
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
-                       ret = strict_strtoull(s[1], 10, &iops);
-                       if (ret)
-                               return -EINVAL;
-
-                       if (iops > THROTL_IOPS_MAX)
+                       if (temp > THROTL_IOPS_MAX)
                                return -EINVAL;
 
                        newpn->plid = plid;
                        newpn->fileid = fileid;
-                       newpn->val.iops = (unsigned int)iops;
+                       newpn->val.iops = (unsigned int)temp;
                        break;
                }
                break;
index 90e1ffd..b2ed78a 100644
@@ -1167,7 +1167,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * true if merge was successful, otherwise false.
  */
 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-                              struct bio *bio)
+                              struct bio *bio, unsigned int *request_count)
 {
        struct blk_plug *plug;
        struct request *rq;
@@ -1176,10 +1176,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
        plug = tsk->plug;
        if (!plug)
                goto out;
+       *request_count = 0;
 
        list_for_each_entry_reverse(rq, &plug->list, queuelist) {
                int el_ret;
 
+               (*request_count)++;
+
                if (rq->q != q)
                        continue;
 
@@ -1219,6 +1222,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        struct blk_plug *plug;
        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
        struct request *req;
+       unsigned int request_count = 0;
 
        /*
         * low level driver can indicate that it wants pages above a
@@ -1237,7 +1241,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (attempt_plug_merge(current, q, bio))
+       if (attempt_plug_merge(current, q, bio, &request_count))
                goto out;
 
        spin_lock_irq(q->queue_lock);
@@ -1302,11 +1306,10 @@ get_rq:
                        if (__rq->q != q)
                                plug->should_sort = 1;
                }
+               if (request_count >= BLK_MAX_REQUEST_COUNT)
+                       blk_flush_plug_list(plug, false);
                list_add_tail(&req->queuelist, &plug->list);
-               plug->count++;
                drive_stat_acct(req, 1);
-               if (plug->count >= BLK_MAX_REQUEST_COUNT)
-                       blk_flush_plug_list(plug, false);
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
@@ -2634,7 +2637,6 @@ void blk_start_plug(struct blk_plug *plug)
        INIT_LIST_HEAD(&plug->list);
        INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
-       plug->count = 0;
 
        /*
         * If this is a nested plug, don't actually assign it. It will be
@@ -2718,7 +2720,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                return;
 
        list_splice_init(&plug->list, &list);
-       plug->count = 0;
 
        if (plug->should_sort) {
                list_sort(NULL, &list, plug_rq_cmp);
index 58340d0..1366a89 100644
@@ -115,7 +115,7 @@ void __blk_complete_request(struct request *req)
        /*
         * Select completion CPU
         */
-       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
+       if (req->cpu != -1) {
                ccpu = req->cpu;
                if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
                        ccpu = blk_cpu_to_group(ccpu);
index 0ee17b5..e681805 100644
@@ -258,11 +258,13 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 
        ret = queue_var_store(&val, page, count);
        spin_lock_irq(q->queue_lock);
-       if (val) {
+       if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-               if (val == 2)
-                       queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
-       } else {
+               queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+       } else if (val == 1) {
+               queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+               queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+       } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
index a33bd43..16ace89 100644
@@ -130,8 +130,8 @@ struct cfq_queue {
        unsigned long slice_end;
        long slice_resid;
 
-       /* pending metadata requests */
-       int meta_pending;
+       /* pending priority requests */
+       int prio_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
 
@@ -684,8 +684,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
        if (rq_is_sync(rq1) != rq_is_sync(rq2))
                return rq_is_sync(rq1) ? rq1 : rq2;
 
-       if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
-               return rq1->cmd_flags & REQ_META ? rq1 : rq2;
+       if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
+               return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
 
        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);
@@ -1612,9 +1612,9 @@ static void cfq_remove_request(struct request *rq)
        cfqq->cfqd->rq_queued--;
        cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(rq), rq_is_sync(rq));
-       if (rq->cmd_flags & REQ_META) {
-               WARN_ON(!cfqq->meta_pending);
-               cfqq->meta_pending--;
+       if (rq->cmd_flags & REQ_PRIO) {
+               WARN_ON(!cfqq->prio_pending);
+               cfqq->prio_pending--;
        }
 }
 
@@ -3372,7 +3372,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
-       if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
+       if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
                return true;
 
        /*
@@ -3439,8 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_io_context *cic = RQ_CIC(rq);
 
        cfqd->rq_queued++;
-       if (rq->cmd_flags & REQ_META)
-               cfqq->meta_pending++;
+       if (rq->cmd_flags & REQ_PRIO)
+               cfqq->prio_pending++;
 
        cfq_update_io_thinktime(cfqd, cfqq, cic);
        cfq_update_io_seektime(cfqd, cfqq, rq);
index 98de8f4..9955a53 100644
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
        use_virtual_dma = can_use_virtual_dma & 1;
        fdc_state[0].address = FDC1;
        if (fdc_state[0].address == -1) {
-               del_timer(&fd_timeout);
+               del_timer_sync(&fd_timeout);
                err = -ENODEV;
                goto out_unreg_region;
        }
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
        fdc = 0;                /* reset fdc in case of unexpected interrupt */
        err = floppy_grab_irq_and_dma();
        if (err) {
-               del_timer(&fd_timeout);
+               del_timer_sync(&fd_timeout);
                err = -EBUSY;
                goto out_unreg_region;
        }
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
                user_reset_fdc(-1, FD_RESET_ALWAYS, false);
        }
        fdc = 0;
-       del_timer(&fd_timeout);
+       del_timer_sync(&fd_timeout);
        current_drive = 0;
        initialized = true;
        if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
        while (dr--) {
-               del_timer(&motor_off_timer[dr]);
+               del_timer_sync(&motor_off_timer[dr]);
                if (disks[dr]->queue)
                        blk_cleanup_queue(disks[dr]->queue);
                put_disk(disks[dr]);
index 9e40b28..00c57c9 100644
@@ -46,7 +46,7 @@
 
 #define DRV_PFX "xen-blkback:"
 #define DPRINTK(fmt, args...)                          \
-       pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",  \
+       pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",          \
                 __func__, __LINE__, ##args)
 
 
index 3f129b4..5fd2010 100644
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
 
                /*
                 * Enforce precondition before potential leak point.
-                * blkif_disconnect() is idempotent.
+                * xen_blkif_disconnect() is idempotent.
                 */
                xen_blkif_disconnect(be->blkif);
 
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosing:
-               xen_blkif_disconnect(be->blkif);
                xenbus_switch_state(dev, XenbusStateClosing);
                break;
 
        case XenbusStateClosed:
+               xen_blkif_disconnect(be->blkif);
                xenbus_switch_state(dev, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
                /* fall through if not online */
        case XenbusStateUnknown:
-               /* implies blkif_disconnect() via blkback_remove() */
+               /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
                device_unregister(&dev->dev);
                break;
 
index 3ef4760..9cbac6b 100644
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
        /* Apple MacBookAir3,1, MacBookAir3,2 */
        { USB_DEVICE(0x05ac, 0x821b) },
 
+       /* Apple MacBookAir4,1 */
+       { USB_DEVICE(0x05ac, 0x821f) },
+
        /* Apple MacBookPro8,2 */
        { USB_DEVICE(0x05ac, 0x821a) },
 
+       /* Apple MacMini5,1 */
+       { USB_DEVICE(0x05ac, 0x8281) },
+
        /* AVM BlueFRITZ! USB v2.0 */
        { USB_DEVICE(0x057c, 0x3800) },
 
index 65d27af..04d353f 100644
@@ -124,6 +124,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
 /* ------- Interfaces to HCI layer ------ */
 /* protocol structure registered with shared transport */
 static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
+       {
+               .chnl_id = HCI_EVENT_PKT, /* HCI Events */
+               .hdr_len = sizeof(struct hci_event_hdr),
+               .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
+               .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+               .reserve = 8,
+       },
        {
                .chnl_id = HCI_ACLDATA_PKT, /* ACL */
                .hdr_len = sizeof(struct hci_acl_hdr),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
                .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
                .reserve = 8,
        },
-       {
-               .chnl_id = HCI_EVENT_PKT, /* HCI Events */
-               .hdr_len = sizeof(struct hci_event_hdr),
-               .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
-               .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
-               .reserve = 8,
-       },
 };
 
 /* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
        if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
                return 0;
 
-       for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+       for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
                err = st_unregister(&ti_st_proto[i]);
                if (err)
                        BT_ERR("st_unregister(%d) failed with error %d",
index f6595ab..fa567f1 100644
@@ -43,6 +43,7 @@ config TCG_NSC
 
 config TCG_ATMEL
        tristate "Atmel TPM Interface"
+       depends on PPC64 || HAS_IOPORT
        ---help---
          If you have a TPM security chip from Atmel say Yes and it 
          will be accessible from within Linux.  To compile this driver 
index caf8012..9ca5c02 100644
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
        u32 count, ordinal;
        unsigned long stop;
 
+       if (bufsiz > TPM_BUFSIZE)
+               bufsiz = TPM_BUFSIZE;
+
        count = be32_to_cpu(*((__be32 *) (buf + 2)));
        ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
        if (count == 0)
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
 {
        struct tpm_chip *chip = file->private_data;
        ssize_t ret_size;
+       int rc;
 
        del_singleshot_timer_sync(&chip->user_read_timer);
        flush_work_sync(&chip->work);
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
                        ret_size = size;
 
                mutex_lock(&chip->buffer_mutex);
-               if (copy_to_user(buf, chip->data_buffer, ret_size))
+               rc = copy_to_user(buf, chip->data_buffer, ret_size);
+               memset(chip->data_buffer, 0, ret_size);
+               if (rc)
                        ret_size = -EFAULT;
+
                mutex_unlock(&chip->buffer_mutex);
        }
 
index 82facc9..4d24648 100644
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
        if (pdev) {
                tpm_nsc_remove(&pdev->dev);
                platform_device_unregister(pdev);
-               kfree(pdev);
-               pdev = NULL;
        }
 
        platform_driver_unregister(&nsc_drv);
index 57cd3a4..fd7170a 100644
@@ -290,6 +290,9 @@ static const struct {
        {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},
 
+       {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
+               QUIRK_NO_MSI},
+
        {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER},
 
index dc0a5b5..e8a7467 100644
@@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
        evergreen_cp_start(rdev);
        rdev->cp.ready = true;
@@ -3171,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence)
+                       uint64_t src_offset,
+                       uint64_t dst_offset,
+                       unsigned num_gpu_pages,
+                       struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        evergreen_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
index cbf57d7..99fbd79 100644
@@ -1187,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB0_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB0_WPTR, rdev->cp.wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1207,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
        rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
        /* ring1  - compute only */
        /* Set ring buffer size */
@@ -1220,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB1_WPTR, 0);
+       rdev->cp1.wptr = 0;
+       WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1232,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
        rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-       rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
        /* ring2 - compute only */
        /* Set ring buffer size */
@@ -1245,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-       WREG32(CP_RB2_WPTR, 0);
+       rdev->cp2.wptr = 0;
+       WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1257,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
        rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-       rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
        /* start the rings */
        cayman_cp_start(rdev);
index f2204cb..7fcdbbb 100644
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence)
 {
        uint32_t cur_pages;
-       uint32_t stride_bytes = PAGE_SIZE;
+       uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
        uint32_t stride_pixels;
        unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
        /* radeon pitch is /64 */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
-       num_loops = DIV_ROUND_UP(num_pages, 8191);
+       num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
        }
-       while (num_pages > 0) {
-               cur_pages = num_pages;
+       while (num_gpu_pages > 0) {
+               cur_pages = num_gpu_pages;
                if (cur_pages > 8191) {
                        cur_pages = 8191;
                }
-               num_pages -= cur_pages;
+               num_gpu_pages -= cur_pages;
 
                /* pages are in Y direction - height
                   page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
                radeon_ring_write(rdev, 0);
                radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, num_pages);
-               radeon_ring_write(rdev, num_pages);
+               radeon_ring_write(rdev, num_gpu_pages);
+               radeon_ring_write(rdev, num_gpu_pages);
                radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
        }
        radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
-       WREG32(RADEON_CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-       /* protect against crazy HW on resume */
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
index f240583..a1f3ba0 100644
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
-                 unsigned num_pages,
+                 unsigned num_gpu_pages,
                  struct radeon_fence *fence)
 {
        uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
        int r = 0;
 
        /* radeon pitch is /64 */
-       size = num_pages << PAGE_SHIFT;
+       size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
index aa5571b..720dd99 100644
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       WREG32(CP_RB_WPTR, 0);
+       rdev->cp.wptr = 0;
+       WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
        r600_cp_start(rdev);
        rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence)
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_gpu_pages,
+                  struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
index 32807ba..c1e056b 100644
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
        dma_addr_t                      table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
        int (*copy_dma)(struct radeon_device *rdev,
                        uint64_t src_offset,
                        uint64_t dst_offset,
-                       unsigned num_pages,
+                       unsigned num_gpu_pages,
                        struct radeon_fence *fence);
        int (*copy)(struct radeon_device *rdev,
                    uint64_t src_offset,
                    uint64_t dst_offset,
-                   unsigned num_pages,
+                   unsigned num_gpu_pages,
                    struct radeon_fence *fence);
        uint32_t (*get_engine_clock)(struct radeon_device *rdev);
        void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
index 3d7a0d7..3dedaa0 100644
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence);
+                  unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
                        uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence);
+                       unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
index 6cc17fb..6adb3e5 100644
@@ -473,8 +473,8 @@ pflip_cleanup:
        spin_lock_irqsave(&dev->event_lock, flags);
        radeon_crtc->unpin_work = NULL;
 unlock_free:
-       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        spin_unlock_irqrestore(&dev->event_lock, flags);
+       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        radeon_fence_unref(&work->fence);
        kfree(work);
 
index 319d85d..13690f3 100644
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
                switch (mode) {
                case DRM_MODE_DPMS_ON:
                        args.ucAction = ATOM_ENABLE;
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       /* workaround for DVOOutputControl on some RS690 systems */
+                       if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+                               u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+                               WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               WREG32(RADEON_BIOS_3_SCRATCH, reg);
+                       } else
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                args.ucAction = ATOM_LCD_BLON;
                                atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
index 9b86fb0..0b5468b 100644
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }
-       r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+       BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+       r = radeon_copy(rdev, old_start, new_start,
+                       new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+                       fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait_reserve, no_wait_gpu, new_mem);
index a4d38d8..ef06194 100644
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
-                       ret = ttm_bo_add_ttm(bo, false);
+                       bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+                       ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }
index 3dc9bef..6dcc7e2 100644
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
                return ret;
        }
 
-       ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+       ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
index 5d1fca0..f83103b 100644
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
        max8997->dev = &i2c->dev;
        max8997->i2c = i2c;
        max8997->type = id->driver_data;
+       max8997->irq = i2c->irq;
 
        if (!pdata)
                goto err;
 
+       max8997->irq_base = pdata->irq_base;
+       max8997->ono = pdata->ono;
        max8997->wakeup = pdata->wakeup;
 
        mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
 
        pm_runtime_set_active(max8997->dev);
 
+       max8997_irq_init(max8997);
+
        mfd_add_devices(max8997->dev, -1, max8997_devs,
                        ARRAY_SIZE(max8997_devs),
                        NULL, 0);
index 29601e7..86e1458 100644 (file)
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
                                | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
                                | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
 
-                       reg |= (1 << (i + 1));
                } else
                        continue;
 
index 2bfad5c..a56be93 100644 (file)
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
        switch (tps65910_chip_id(tps65910)) {
        case TPS65910:
                tps65910->irq_num = TPS65910_NUM_IRQ;
+               break;
        case TPS65911:
                tps65910->irq_num = TPS65911_NUM_IRQ;
+               break;
        }
 
        /* Register with genirq */
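
The two added break statements close a classic switch fallthrough bug: without them the TPS65910 case fell through and picked up the TPS65911 interrupt count. A minimal standalone illustration of the pattern; the chip names and counts are invented, not the driver's values:

#include <stdio.h>

enum chip { CHIP_X, CHIP_Y };

static int irq_count(enum chip c)
{
	int n = 0;

	switch (c) {
	case CHIP_X:
		n = 10;
		break;		/* without this, CHIP_X would report 14 */
	case CHIP_Y:
		n = 14;
		break;
	}
	return n;
}

int main(void)
{
	printf("CHIP_X irqs: %d\n", irq_count(CHIP_X));	/* prints 10, not 14 */
	return 0;
}
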
index b5d598c..7cbf2aa 100644 (file)
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
        u8 ch_msb, ch_lsb;
        int ret;
 
-       if (!req)
+       if (!req || !twl4030_madc)
                return -EINVAL;
+
        mutex_lock(&twl4030_madc->lock);
        if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
                ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
        if (!madc)
                return -ENOMEM;
 
+       madc->dev = &pdev->dev;
+
        /*
         * Phoenix provides 2 interrupt lines. The first one is connected to
         * the OMAP. The other one can be connected to the other processor such
index ebf99be..d584f6b 100644 (file)
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
        return ret;
 }
 
-static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
+static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
 {
        if (db == WM8350_GPIO_DEBOUNCE_ON)
                return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
                goto err;
        if (gpio_set_polarity(wm8350, gpio, pol))
                goto err;
-       if (gpio_set_debounce(wm8350, gpio, debounce))
+       if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
                goto err;
        if (gpio_set_dir(wm8350, gpio, dir))
                goto err;
index 1ff5486..4c1a648 100644 (file)
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
+        *
+        * XXX: this really needs a good explanation of why REQ_META
+        * is treated special.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
index 8d0314d..a44874e 100644 (file)
@@ -2535,7 +2535,7 @@ config S6GMAC
 source "drivers/net/stmmac/Kconfig"
 
 config PCH_GBE
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+       tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
        depends on PCI
        select MII
        ---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
          This driver enables Gigabit Ethernet function.
 
          This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7223.
-         ML7223 IOH is for MP(Media Phone) use.
-         ML7223 is companion chip for Intel Atom E6xx series.
-         ML7223 is completely compatible for Intel EG20T PCH.
+         Output Hub), ML7223/ML7831.
+         ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+         purpose use.
+         ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 config FTGMAC100
        tristate "Faraday FTGMAC100 Gigabit Ethernet support"
index c423504..e46df53 100644 (file)
@@ -315,6 +315,14 @@ union db_prod {
        u32             raw;
 };
 
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp)           (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp)         (CHIP_IS_E1(bp) ? \
+                                       ETH_MAX_AGGREGATION_QUEUES_E1 :\
+                                       ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp)      (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT                16
+#define DROPLESS_FC_HEADROOM   100
 
 /* MC hsi */
 #define BCM_PAGE_SHIFT         12
@@ -331,15 +339,35 @@ union db_prod {
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES       2
 #define RX_SGE_CNT             (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT         (RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT         (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
 /* RX_SGE_CNT is promised to be a power of 2 */
 #define RX_SGE_MASK            (RX_SGE_CNT - 1)
 #define NUM_RX_SGE             (RX_SGE_CNT * NUM_RX_SGE_PAGES)
 #define MAX_RX_SGE             (NUM_RX_SGE - 1)
 #define NEXT_SGE_IDX(x)                ((((x) & RX_SGE_MASK) == \
-                                 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+                                 (MAX_RX_SGE_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+                                       (x) + 1)
 #define RX_SGE(x)              ((x) & MAX_RX_SGE)
 
+/*
+ * Number of required  SGEs is the sum of two:
+ * 1. Number of possible opened aggregations (next packet for
+ *    these aggregations will probably consume an SGE immediately)
+ * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
+ *    after placement on BD for new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ            (MAX_AGG_QS(bp) + \
+                                       (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ         ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+                                               MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp)          (NUM_SGE_REQ + \
+                                NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp)          (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
 /* Manipulate a bit vector defined as an array of u64 */
 
 /* Number of bits in one sge_mask array element */
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
 
 #define NUM_TX_RINGS           16
 #define TX_DESC_CNT            (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT                (TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT  1
+#define MAX_TX_DESC_CNT                (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
 #define NUM_TX_BD              (TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD              (NUM_TX_BD - 1)
 #define MAX_TX_AVAIL           (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 #define NEXT_TX_IDX(x)         ((((x) & MAX_TX_DESC_CNT) == \
-                                 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+                                 (MAX_TX_DESC_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+                                       (x) + 1)
 #define TX_BD(x)               ((x) & MAX_TX_BD)
 #define TX_BD_POFF(x)          ((x) & MAX_TX_DESC_CNT)
 
 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
 #define NUM_RX_RINGS           8
 #define RX_DESC_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT                (RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT  2
+#define MAX_RX_DESC_CNT                (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
 #define RX_DESC_MASK           (RX_DESC_CNT - 1)
 #define NUM_RX_BD              (RX_DESC_CNT * NUM_RX_RINGS)
 #define MAX_RX_BD              (NUM_RX_BD - 1)
 #define MAX_RX_AVAIL           (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL           128
+
+/* dropless fc calculations for BDs
+ *
+ * Number of BDs should match the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ             BRB_SIZE(bp)
+#define NUM_BD_PG_REQ          ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+                                             MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp)           (NUM_BD_REQ + \
+                                NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+                                FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp)           (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL           ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
 
 #define MIN_RX_SIZE_TPA_HW     (CHIP_IS_E1(bp) ? \
                                        ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
                                                                MIN_RX_AVAIL))
 
 #define NEXT_RX_IDX(x)         ((((x) & RX_DESC_MASK) == \
-                                 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+                                 (MAX_RX_DESC_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+                                       (x) + 1)
 #define RX_BD(x)               ((x) & MAX_RX_BD)
 
 /*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
 #define CQE_BD_REL     (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
 #define NUM_RCQ_RINGS          (NUM_RX_RINGS * CQE_BD_REL)
 #define RCQ_DESC_CNT           (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT       (RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT       (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
 #define NUM_RCQ_BD             (RCQ_DESC_CNT * NUM_RCQ_RINGS)
 #define MAX_RCQ_BD             (NUM_RCQ_BD - 1)
 #define MAX_RCQ_AVAIL          (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 #define NEXT_RCQ_IDX(x)                ((((x) & MAX_RCQ_DESC_CNT) == \
-                                 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+                                 (MAX_RCQ_DESC_CNT - 1)) ? \
+                                       (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+                                       (x) + 1)
 #define RCQ_BD(x)              ((x) & MAX_RCQ_BD)
 
+/* dropless fc calculations for RCQs
+ *
+ * Number of RCQs should match the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ            BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ         ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+                                             MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp)          (NUM_RCQ_REQ + \
+                                NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+                                FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp)          (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
 
 /* This is needed for determining of last_max */
 #define SUB_S16(a, b)          (s16)((s16)(a) - (s16)(b))
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
 #define FP_CSB_FUNC_OFF        \
                        offsetof(struct cstorm_status_block_c, func)
 
-#define HC_INDEX_TOE_RX_CQ_CONS                0 /* Formerly Ustorm TOE CQ index */
-                                         /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
-#define HC_INDEX_ETH_RX_CQ_CONS                1 /* Formerly Ustorm ETH CQ index */
-                                         /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
-#define HC_INDEX_ETH_RX_BD_CONS                2 /* Formerly Ustorm ETH BD index */
-                                         /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
-
-#define HC_INDEX_TOE_TX_CQ_CONS                4 /* Formerly Cstorm TOE CQ index   */
-                                         /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0   5 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1   6 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2   7 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
+#define HC_INDEX_ETH_RX_CQ_CONS                1
 
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS  HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_OOO_TX_CQ_CONS                4
 
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0   5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1   6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2   7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS  HC_INDEX_ETH_TX_CQ_CONS_COS0
 
 #define BNX2X_RX_SB_INDEX \
        (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
 #define BP_PORT(bp)                    (bp->pfid & 1)
 #define BP_FUNC(bp)                    (bp->pfid)
 #define BP_ABS_FUNC(bp)                        (bp->pf_num)
-#define BP_E1HVN(bp)                   (bp->pfid >> 1)
-#define BP_VN(bp)                      (BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp)                    (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp)               (BP_PORT(bp) +\
-         BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
+#define BP_VN(bp)                      ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp)              (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp)                    (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn)                (BP_PORT(bp) +\
+         (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
+#define BP_FW_MB_IDX(bp)               BP_FW_MB_IDX_VN(bp, BP_VN(bp))
 
        struct net_device       *dev;
        struct pci_dev          *pdev;
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 #define MAX_DMAE_C_PER_PORT            8
 #define INIT_DMAE_C(bp)                        (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
-                                        BP_E1HVN(bp))
+                                        BP_VN(bp))
 #define PMF_DMAE_C(bp)                 (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
                                         E1HVN_MAX)
 
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 /* must be used on a CID before placing it on a HW ring */
 #define HW_CID(bp, x)                  ((BP_PORT(bp) << 23) | \
-                                        (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+                                        (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
                                         (x))
 
 #define SP_DESC_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_spe))
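
The dropless flow-control thresholds introduced above are plain arithmetic over a handful of chip constants. A standalone sketch that plugs illustrative numbers into the same formulas; the real ETH_MAX_AGGREGATION_QUEUES_* and per-page descriptor counts are not shown in this diff, so the values below are assumptions, not the driver's:

#include <stdio.h>

/* Illustrative constants only. */
#define BRB_SIZE		512	/* non-E3 chip */
#define MAX_AGG_QS		64	/* assumed aggregation queue count */
#define MAX_RX_SGE_CNT		510	/* assumed SGEs per page minus "next" entries */
#define NEXT_PAGE_SGE_DESC_CNT	2
#define DROPLESS_FC_HEADROOM	100

int main(void)
{
	/* Same shape as NUM_SGE_REQ / NUM_SGE_PG_REQ / SGE_TH_LO / SGE_TH_HI. */
	int num_sge_req = MAX_AGG_QS + (BRB_SIZE - MAX_AGG_QS) / 2;
	int num_sge_pg_req = (num_sge_req + MAX_RX_SGE_CNT - 1) / MAX_RX_SGE_CNT;
	int sge_th_lo = num_sge_req + num_sge_pg_req * NEXT_PAGE_SGE_DESC_CNT;
	int sge_th_hi = sge_th_lo + DROPLESS_FC_HEADROOM;

	printf("SGE_TH_LO = %d, SGE_TH_HI = %d\n", sge_th_lo, sge_th_hi);
	return 0;
}
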
index 37e5790..c4cbf97 100644 (file)
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
-       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                             ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
        u16 ring_prod;
        int i, j;
 
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 
                if (!fp->disable_tpa) {
                        /* Fill the per-aggregtion pool */
-                       for (i = 0; i < max_agg_queues; i++) {
+                       for (i = 0; i < MAX_AGG_QS(bp); i++) {
                                struct bnx2x_agg_info *tpa_info =
                                        &fp->tpa_info[i];
                                struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                        bnx2x_free_rx_sge_range(bp, fp,
                                                                ring_prod);
                                        bnx2x_free_tpa_pool(bp, fp,
-                                                           max_agg_queues);
+                                                           MAX_AGG_QS(bp));
                                        fp->disable_tpa = 1;
                                        ring_prod = 0;
                                        break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
                bnx2x_free_rx_bds(fp);
 
                if (!fp->disable_tpa)
-                       bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-                                           ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                           ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+                       bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
        }
 }
 
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        struct bnx2x_fastpath *fp = &bp->fp[index];
        int ring_size = 0;
        u8 cos;
+       int rx_ring_size = 0;
 
        /* if rx_ring_size specified - use it */
-       int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-                          MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+       if (!bp->rx_ring_size) {
 
-       /* allocate at least number of buffers required by FW */
-       rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
-                                                   MIN_RX_SIZE_TPA,
-                                 rx_ring_size);
+               rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+               /* allocate at least number of buffers required by FW */
+               rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+                                    MIN_RX_SIZE_TPA, rx_ring_size);
+
+               bp->rx_ring_size = rx_ring_size;
+       } else
+               rx_ring_size = bp->rx_ring_size;
 
        /* Common */
        sb = &bnx2x_fp(bp, index, status_blk);
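
The rework above computes the default RX ring size only when the user has not configured one and caches it in bp->rx_ring_size, so the dropless-FC WARN_ON() checks elsewhere compare the thresholds against the size that is actually allocated. A reduced, self-contained sketch of the compute-once-and-cache pattern; the names and limits are invented:

#include <stdio.h>

#define MAX_AVAIL	4000
#define MIN_REQUIRED	385	/* e.g. a firmware-imposed minimum */

struct adapter { int rx_ring_size; int num_rx_queues; };

/* Pick a default only when the user did not set one, then remember it. */
static int rx_ring_size(struct adapter *ap)
{
	if (!ap->rx_ring_size) {
		int def = MAX_AVAIL / ap->num_rx_queues;

		ap->rx_ring_size = def > MIN_REQUIRED ? def : MIN_REQUIRED;
	}
	return ap->rx_ring_size;
}

int main(void)
{
	struct adapter ap = { .rx_ring_size = 0, .num_rx_queues = 4 };

	printf("ring size = %d\n", rx_ring_size(&ap));	/* 1000 */
	return 0;
}
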
index 2218630..cf3e479 100644 (file)
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                }
 
                /* advertise the requested speed and duplex if supported */
-               cmd->advertising &= bp->port.supported[cfg_idx];
+               if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+                       DP(NETIF_MSG_LINK, "Advertisement parameters "
+                                          "are not supported\n");
+                       return -EINVAL;
+               }
 
                bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
-               bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
-               bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+               bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+               bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
                                         cmd->advertising);
+               if (cmd->advertising) {
+
+                       bp->link_params.speed_cap_mask[cfg_idx] = 0;
+                       if (cmd->advertising & ADVERTISED_10baseT_Half) {
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+                       }
+                       if (cmd->advertising & ADVERTISED_10baseT_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
 
+                       if (cmd->advertising & ADVERTISED_100baseT_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+                       if (cmd->advertising & ADVERTISED_100baseT_Half) {
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+                       }
+                       if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+                       }
+                       if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+                                               ADVERTISED_1000baseKX_Full))
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+                       if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+                                               ADVERTISED_10000baseKX4_Full |
+                                               ADVERTISED_10000baseKR_Full))
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+               }
        } else { /* forced speed */
                /* advertise the requested speed and duplex if supported */
                switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
        if (bp->rx_ring_size)
                ering->rx_pending = bp->rx_ring_size;
        else
-               if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
-                       ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
-               else
-                       ering->rx_pending = MAX_RX_AVAIL;
+               ering->rx_pending = MAX_RX_AVAIL;
 
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;
index d45b155..ba15bdc 100644 (file)
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 {
        u32 nig_reg_adress_crd_weight = 0;
        u32 pbf_reg_adress_crd_weight = 0;
-       /* Calculate and set BW for this COS*/
-       const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
-       const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+       /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+       const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+       const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
 
        switch (cos_entry) {
        case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
        /* Calculate total BW requested */
        for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
                if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
-                       if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
-                               DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
-                                                  "was set to 0\n");
-                       return -EINVAL;
+                       *total_bw +=
+                               ets_params->cos[cos_idx].params.bw_params.bw;
                }
-               *total_bw +=
-                   ets_params->cos[cos_idx].params.bw_params.bw;
-           }
        }
 
-       /*Check taotl BW is valid */
+       /* Check total BW is valid */
        if ((100 != *total_bw) || (0 == *total_bw)) {
                if (0 == *total_bw) {
                        DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
 
        /* Check loopback mode */
        if (lb)
-               val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+               val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
        REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
        bnx2x_set_xumac_nig(params,
                            ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
                         MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
 
+       /* Advertised and set FEC (Forward Error Correction) */
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                        MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+                        (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+                         MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
        /* Enable CL37 BAM */
        if (REG_RD(bp, params->shmem_base +
                   offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
                                        (tmp | EMAC_LED_OVERRIDE));
                                /*
                                 * return here without enabling traffic
-                                * LED blink andsetting rate in ON mode.
+                                * LED blink and setting rate in ON mode.
                                 * In oper mode, enabling LED blink
                                 * and setting rate is needed.
                                 */
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
                         * This is a work-around for HW issue found when link
                         * is up in CL73
                         */
-                       REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+                       if ((!CHIP_IS_E3(bp)) ||
+                           (CHIP_IS_E3(bp) &&
+                            mode == LED_MODE_ON))
+                               REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
                        if (CHIP_IS_E1x(bp) ||
                            CHIP_IS_E2(bp) ||
                            (mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_HW_LOCK_REQUIRED,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_INIT_XGXS_FIRST |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_INIT_XGXS_FIRST,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
        .addr           = 0xff,
        .def_md_devad   = 0,
        .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_INIT_XGXS_FIRST |
-                          FLAGS_TX_ERROR_CHECK),
+                          FLAGS_INIT_XGXS_FIRST),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
index f74582a..c027e93 100644 (file)
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
 
        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
-       opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
-                  (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+       opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+                  (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
 
 #ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
                if (IS_MF(bp)) {
-                       val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+                       val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
 
        /* init leading/trailing edge */
        if (IS_MF(bp)) {
-               val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+               val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                if (bp->port.pmf)
                        /* enable nig and gpio3 attention */
                        val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
        int vn;
 
        bp->vn_weight_sum = 0;
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                u32 vn_cfg = bp->mf_config[vn];
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+       return 2 * vn + BP_PORT(bp);
+}
+
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
        struct rate_shaping_vars_per_vn m_rs_vn;
        struct fairness_vars_per_vn m_fair_vn;
        u32 vn_cfg = bp->mf_config[vn];
-       int func = 2*vn + BP_PORT(bp);
+       int func = func_by_vn(bp, vn);
        u16 vn_min_rate, vn_max_rate;
        int i;
 
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
         *
         *      and there are 2 functions per port
         */
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 
                if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
                /* calculate and set min-max rate for each vn */
                if (bp->port.pmf)
-                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
                                bnx2x_init_vn_minmax(bp, vn);
 
                /* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 {
-       int port = BP_PORT(bp);
        int func;
        int vn;
 
        /* Set the attention towards other drivers on the same port */
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-               if (vn == BP_E1HVN(bp))
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+               if (vn == BP_VN(bp))
                        continue;
 
-               func = ((vn << 1) | port);
+               func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
        bnx2x_dcbx_pmf_update(bp);
 
        /* enable nig attention */
-       val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+       val = (0xff0f | (1 << (BP_VN(bp) + 4)));
        if (bp->common.int_block == INT_BLOCK_HC) {
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        u16 tpa_agg_size = 0;
 
        if (!fp->disable_tpa) {
-               pause->sge_th_hi = 250;
-               pause->sge_th_lo = 150;
+               pause->sge_th_lo = SGE_TH_LO(bp);
+               pause->sge_th_hi = SGE_TH_HI(bp);
+
+               /* validate the SGE ring has enough entries to cross the high threshold */
+               WARN_ON(bp->dropless_fc &&
+                               pause->sge_th_hi + FW_PREFETCH_CNT >
+                               MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
                tpa_agg_size = min_t(u32,
                        (min_t(u32, 8, MAX_SKB_FRAGS) *
                        SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 
        /* pause - not for e1 */
        if (!CHIP_IS_E1(bp)) {
-               pause->bd_th_hi = 350;
-               pause->bd_th_lo = 250;
-               pause->rcq_th_hi = 350;
-               pause->rcq_th_lo = 250;
+               pause->bd_th_lo = BD_TH_LO(bp);
+               pause->bd_th_hi = BD_TH_HI(bp);
+
+               pause->rcq_th_lo = RCQ_TH_LO(bp);
+               pause->rcq_th_hi = RCQ_TH_HI(bp);
+               /*
+                * validate that rings have enough entries to cross
+                * high thresholds
+                */
+               WARN_ON(bp->dropless_fc &&
+                               pause->bd_th_hi + FW_PREFETCH_CNT >
+                               bp->rx_ring_size);
+               WARN_ON(bp->dropless_fc &&
+                               pause->rcq_th_hi + FW_PREFETCH_CNT >
+                               NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
 
                pause->pri_map = 1;
        }
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
         * For PF Clients it should be the maximum avaliable number.
         * VF driver(s) may want to define it to a smaller value.
         */
-       rxq_init->max_tpa_queues =
-               (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-               ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+       rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
 
        rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
        rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
        hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+       /* zero out state machine indices */
+       /* rx indices */
+       index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+       /* tx indices */
+       index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+       /* map indices */
+       /* rx indices */
+       index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+               SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+       /* tx indices */
+       index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+       index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+               SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                hc_sm_p = sb_data_e2.common.state_machine;
                sb_data_p = (u32 *)&sb_data_e2;
                data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+               bnx2x_map_sb_state_machines(sb_data_e2.index_data);
        } else {
                memset(&sb_data_e1x, 0,
                       sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                hc_sm_p = sb_data_e1x.common.state_machine;
                sb_data_p = (u32 *)&sb_data_e1x;
                data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+               bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
        }
 
        bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5802,7 +5855,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
         * take the UNDI lock to protect undi_unload flow from accessing
         * registers while we're resetting the chip
         */
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
        bnx2x_reset_common(bp);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5867,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        }
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
        bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
 
@@ -6671,12 +6724,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
                        if (CHIP_MODE_IS_4_PORT(bp))
                                dsb_idx = BP_FUNC(bp);
                        else
-                               dsb_idx = BP_E1HVN(bp);
+                               dsb_idx = BP_VN(bp);
 
                        prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
                                       IGU_BC_BASE_DSB_PROD + dsb_idx :
                                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
 
+                       /*
+                        * igu prods come in chunks of E1HVN_MAX (4) -
+                        * it does not matter what the current chip mode is
+                        */
                        for (i = 0; i < (num_segs * E1HVN_MAX);
                             i += E1HVN_MAX) {
                                addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7570,7 +7627,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
                u32 val;
                /* The mac address is written to entries 1-4 to
                   preserve entry 0 which is used by the PMF */
-               u8 entry = (BP_E1HVN(bp) + 1)*8;
+               u8 entry = (BP_VN(bp) + 1)*8;
 
                val = (mac_addr[0] << 8) | mac_addr[1];
                EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8546,10 +8603,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
        /* Check if there is any driver already loaded */
        val = REG_RD(bp, MISC_REG_UNPREPARED);
        if (val == 0x1) {
-               /* Check if it is the UNDI driver
+
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+               /*
+                * Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
                val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                if (val == 0x7) {
                        u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8646,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
                                bnx2x_fw_command(bp, reset_code, 0);
                        }
 
-                       /* now it's safe to release the lock */
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
                        bnx2x_undi_int_disable(bp);
                        port = BP_PORT(bp);
 
@@ -8639,8 +8695,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
                        bp->fw_seq =
                              (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
-               } else
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+               }
+
+               /* now it's safe to release the lock */
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
        }
 }
 
@@ -8777,13 +8835,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
 {
        int pfid = BP_FUNC(bp);
-       int vn = BP_E1HVN(bp);
        int igu_sb_id;
        u32 val;
        u8 fid, igu_sb_cnt = 0;
 
        bp->igu_base_sb = 0xff;
        if (CHIP_INT_MODE_IS_BC(bp)) {
+               int vn = BP_VN(bp);
                igu_sb_cnt = bp->igu_sb_cnt;
                bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
                        FP_SB_MAX_E1x;
@@ -9416,6 +9474,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                bp->igu_base_sb = 0;
        } else {
                bp->common.int_block = INT_BLOCK_IGU;
+
+               /* do not allow device reset during IGU info processing */
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
                val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 
                if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9509,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
                bnx2x_get_igu_cam_info(bp);
 
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
        }
 
        /*
@@ -9473,7 +9536,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
        bp->mf_ov = 0;
        bp->mf_mode = 0;
-       vn = BP_E1HVN(bp);
+       vn = BP_VN(bp);
 
        if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
                BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9656,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* port info */
        bnx2x_get_port_hwinfo(bp);
 
-       if (!BP_NOMCP(bp)) {
-               bp->fw_seq =
-                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                        DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
-
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
@@ -9765,6 +9821,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        if (!BP_NOMCP(bp))
                bnx2x_undi_unload(bp);
 
+       /* init fw_seq after undi_unload! */
+       if (!BP_NOMCP(bp)) {
+               bp->fw_seq =
+                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+                        DRV_MSG_SEQ_NUMBER_MASK);
+               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+       }
+
        if (CHIP_REV_IS_FPGA(bp))
                dev_err(&bp->pdev->dev, "FPGA detected\n");
 
@@ -10259,17 +10323,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        /* clean indirect addresses */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
-       /* Clean the following indirect addresses for all functions since it
+       /*
+        * Clean the following indirect addresses for all functions since it
         * is not used by the driver.
         */
        REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
-       REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+       if (CHIP_IS_E1x(bp)) {
+               REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+               REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+               REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+               REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+       }
 
        /*
         * Enable internal target-read (in case we are probed after PF FLR).
index 40266c1..750e844 100644 (file)
 #define XCM_REG_XX_OVFL_EVNT_ID                                 0x20058
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS   (0x1<<0)
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS  (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK                           (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK                           (0x1<<2)
 #define XMAC_CTRL_REG_RX_EN                                     (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET                                (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN                                     (0x1<<0)
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0                      8
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1                      9
 #define HW_LOCK_RESOURCE_SPIO                                   2
-#define HW_LOCK_RESOURCE_UNDI                                   5
+#define HW_LOCK_RESOURCE_RESET                                  5
 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT                   (0x1<<4)
 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR                   (0x1<<5)
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR                   (0x1<<18)
@@ -6853,6 +6853,9 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP                 0x7
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0      0x10
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1      0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2      0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY    0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ                0x8000
 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL               0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1              0x800e
index 771f680..9908f2b 100644 (file)
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
                break;
 
        case MAC_TYPE_NONE: /* unreached */
-               BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+               DP(BNX2X_MSG_STATS,
+                  "stats updated by DMAE but no MAC active\n");
                return -1;
 
        default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
 
 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 {
-       int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+       int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
        u32 func_stx;
 
        /* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
        func_stx = bp->func_stx;
 
        for (vn = VN_0; vn < vn_max; vn++) {
-               int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+               int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
 
                bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
                bnx2x_func_stats_init(bp);
index a812492..2adc294 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/skbuff.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/io.h>
 
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
index 8545c7a..a5a89ec 100644 (file)
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
                checksum += eeprom_data;
        }
 
+#ifdef CONFIG_PARISC
+       /* This is a signature and not a checksum on HP c8000 */
+       if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+               return E1000_SUCCESS;
+
+#endif
        if (checksum == (u16) EEPROM_SUM)
                return E1000_SUCCESS;
        else {
index 25a8c2a..0caf3c3 100644 (file)
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
        u32 i = 0;
 
        list_for_each_entry(comp, &priv->rx_list.list, list) {
-               if (i <= cmd->rule_cnt) {
-                       rule_locs[i] = comp->fs.location;
-                       i++;
-               }
+               if (i == cmd->rule_cnt)
+                       return -EMSGSIZE;
+               rule_locs[i] = comp->fs.location;
+               i++;
        }
 
        cmd->data = MAX_FILER_IDX;
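
The old loop tested i <= cmd->rule_cnt, so it could store one location past the buffer the caller sized for rule_cnt entries and then silently skipped the remaining rules. The fix stops with -EMSGSIZE as soon as the buffer is full. A standalone sketch of the same bounds guard; the names are invented:

#include <errno.h>
#include <stddef.h>

/* Copy up to max_rules entries into out[]; fail rather than overrun. */
static int copy_rule_locations(const unsigned *rules, size_t nr_rules,
			       unsigned *out, size_t max_rules)
{
	size_t i;

	for (i = 0; i < nr_rules; i++) {
		if (i == max_rules)
			return -EMSGSIZE;	/* caller's buffer is too small */
		out[i] = rules[i];
	}
	return (int)i;				/* number of entries written */
}
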
index 16ce45c..52a3900 100644 (file)
@@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
        status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
+       greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
 
        /* Wrap around descriptor ring */
        if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;
 
-       status |= GRETH_TXBD_CSALL;
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               status |= GRETH_TXBD_CSALL;
        status |= skb_headlen(skb) & GRETH_BD_LEN;
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;
@@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
                greth->tx_skbuff[curr_tx] = NULL;
                bdp = greth->tx_bd_base + curr_tx;
 
-               status = GRETH_TXBD_CSALL | GRETH_BD_EN;
+               status = GRETH_BD_EN;
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       status |= GRETH_TXBD_CSALL;
                status |= frag->size & GRETH_BD_LEN;
 
                /* Wrap around descriptor ring */
@@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)
                                dev->stats.tx_fifo_errors++;
                }
                dev->stats.tx_packets++;
+               dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
                greth->tx_last = NEXT_TX(greth->tx_last);
                greth->tx_free++;
        }
@@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
                greth->tx_skbuff[greth->tx_last] = NULL;
 
                greth_update_tx_stats(dev, stat);
+               dev->stats.tx_bytes += skb->len;
 
                bdp = greth->tx_bd_base + greth->tx_last;
 
@@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)
                                memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
 
                                skb->protocol = eth_type_trans(skb, dev);
+                               dev->stats.rx_bytes += pkt_len;
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);
                        }
@@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
+                               dev->stats.rx_bytes += pkt_len;
                                netif_receive_skb(skb);
 
                                greth->rx_skbuff[greth->rx_cur] = newskb;
index 9a0040d..232a622 100644 (file)
@@ -103,6 +103,7 @@ struct greth_private {
 
        unsigned char *tx_bufs[GRETH_TXBD_NUM];
        unsigned char *rx_bufs[GRETH_RXBD_NUM];
+       u16 tx_bufs_length[GRETH_TXBD_NUM];
 
        u16 tx_next;
        u16 tx_last;
index 3e66792..8dd5fcc 100644 (file)
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        unsigned long set_attr6, clr_attr6;
-       long ret, ret6;
+       long ret, ret4, ret6;
        int rc1 = 0, rc2 = 0;
        int restart = 0;
 
@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 
        set_attr = 0;
        clr_attr = 0;
+       set_attr6 = 0;
+       clr_attr6 = 0;
 
        if (data) {
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
            !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
-               ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+               ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                         set_attr, &ret_attr);
 
-               if (ret != H_SUCCESS) {
+               if (ret4 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv4 checksum "
                                        "offload settings. %d rc=%ld\n",
-                                       data, ret);
+                                       data, ret4);
+
+                       h_illan_attributes(adapter->vdev->unit_address,
+                                          set_attr, clr_attr, &ret_attr);
+
+                       if (data == 1)
+                               dev->features &= ~NETIF_F_IP_CSUM;
 
-                       ret = h_illan_attributes(adapter->vdev->unit_address,
-                                                set_attr, clr_attr, &ret_attr);
                } else {
                        adapter->fw_ipv4_csum_support = data;
                }
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
                if (ret6 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv6 checksum "
                                        "offload settings. %d rc=%ld\n",
-                                       data, ret);
+                                       data, ret6);
+
+                       h_illan_attributes(adapter->vdev->unit_address,
+                                          set_attr6, clr_attr6, &ret_attr);
+
+                       if (data == 1)
+                               dev->features &= ~NETIF_F_IPV6_CSUM;
 
-                       ret = h_illan_attributes(adapter->vdev->unit_address,
-                                                set_attr6, clr_attr6,
-                                                &ret_attr);
                } else
                        adapter->fw_ipv6_csum_support = data;
 
-               if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+               if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
                        adapter->rx_csum = data;
                else
                        rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
        union ibmveth_buf_desc descs[6];
        int last, i;
        int force_bounce = 0;
+       dma_addr_t dma_addr;
 
        /*
         * veth handles a maximum of 6 segments including the header, so
@@ -994,17 +1004,16 @@ retry_bounce:
        }
 
        /* Map the header */
-       descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-                                                skb_headlen(skb),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+       dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+                                 skb_headlen(skb), DMA_TO_DEVICE);
+       if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                goto map_failed;
 
        descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+       descs[0].fields.address = dma_addr;
 
        /* Map the frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               unsigned long dma_addr;
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1026,7 +1035,12 @@ retry_bounce:
                netdev->stats.tx_bytes += skb->len;
        }
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+       dma_unmap_single(&adapter->vdev->dev,
+                        descs[0].fields.address,
+                        descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+                        DMA_TO_DEVICE);
+
+       for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);
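
The ibmveth transmit hunks above make the header mapping failure-safe: the DMA address is kept in a local dma_addr and written into descs[0] only after dma_mapping_error() has passed, and the error path now unmaps the header with dma_unmap_single() before walking the remaining fragments. A minimal sketch of that map-check-then-record pattern (the descriptor type and field names here are assumed, not the driver's):

	/* Sketch only: map, verify, and only then record the address. */
	static int map_tx_header(struct device *dev, struct sk_buff *skb,
				 struct tx_desc *desc)
	{
		dma_addr_t addr;

		addr = dma_map_single(dev, skb->data, skb_headlen(skb),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr))
			return -EIO;	/* nothing recorded, nothing to unmap */

		desc->address = addr;	/* store only a known-good mapping */
		desc->len = skb_headlen(skb);
		return 0;
	}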
index 2279039..e1fcc95 100644 (file)
@@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                if (ring_is_rsc_enabled(rx_ring))
                        pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
 
-               /* if this is a skb from previous receive DMA will be 0 */
-               if (rx_buffer_info->dma) {
+               /* linear means we are building an skb from multiple pages */
+               if (!skb_is_nonlinear(skb)) {
                        u16 hlen;
                        if (pkt_is_rsc &&
                            !(staterr & IXGBE_RXD_STAT_EOP) &&
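
The ixgbe change replaces the "DMA address is zero" heuristic with skb_is_nonlinear(): a receive skb that is still linear has no page fragments attached yet, so the header-processing branch runs exactly once per packet. For reference, the helper is essentially a test on skb->data_len (paraphrased from include/linux/skbuff.h of this era; verify against your tree):

	static inline bool skb_is_nonlinear(const struct sk_buff *skb)
	{
		return skb->data_len;	/* non-zero once paged data is attached */
	}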
index dfc8272..ed2a397 100644 (file)
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)
        }
 }
 
-module_init(init_netconsole);
+/*
+ * Use late_initcall to ensure netconsole is
+ * initialized after network device drivers if built-in.
+ *
+ * late_initcall() and module_init() are identical if built as module.
+ */
+late_initcall(init_netconsole);
 module_exit(cleanup_netconsole);
index 59fac77..a09a071 100644 (file)
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
 
 /* Reset */
 #define PCH_GBE_ALL_RST         0x80000000  /* All reset */
-#define PCH_GBE_TX_RST          0x40000000  /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST          0x04000000  /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST          0x00008000  /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST          0x00004000  /* RX MAC, RX FIFO, RX DMA reset */
 
 /* TCP/IP Accelerator Control */
 #define PCH_GBE_EX_LIST_EN      0x00000008  /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
 #define PCH_GBE_RX_DMA_EN       0x00000002   /* Enables Receive DMA */
 #define PCH_GBE_TX_DMA_EN       0x00000001   /* Enables Transmission DMA */
 
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK       0xFFFFFFFE
+
 /* Wake On LAN Status */
 #define PCH_GBE_WLS_BR          0x00000008 /* Broadcast Address */
 #define PCH_GBE_WLS_MLT         0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
 struct pch_gbe_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
+       unsigned char *rx_buffer;
        unsigned long time_stamp;
        u16 length;
        bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
 struct pch_gbe_rx_ring {
        struct pch_gbe_rx_desc *desc;
        dma_addr_t dma;
+       unsigned char *rx_buff_pool;
+       dma_addr_t rx_buff_pool_logic;
+       unsigned int rx_buff_pool_size;
        unsigned int size;
        unsigned int count;
        unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
        unsigned long rx_buffer_len;
        unsigned long tx_queue_len;
        bool have_msi;
+       bool rx_stop_flag;
 };
 
 extern const char pch_driver_version[];
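
The new rx_buff_pool, rx_buff_pool_logic and rx_buff_pool_size fields describe a single DMA-coherent receive pool that is later carved into one fixed-size buffer per descriptor (see pch_gbe_alloc_rx_buffers_pool() in the main-file diff below). A condensed sketch of that allocation pattern, with assumed variable names:

	/* Sketch only: one coherent block, sliced into per-descriptor buffers. */
	pool = dma_alloc_coherent(&pdev->dev, count * bufsz + reserve,
				  &pool_dma, GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	for (i = 0; i < count; i++)
		buffer_info[i].rx_buffer = pool + i * bufsz;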
index eac3c5c..567ff10 100644 (file)
@@ -20,7 +20,6 @@
 
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
-#include <linux/prefetch.h>
 
 #define DRV_VERSION     "1.00"
 const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_WATCHDOG_PERIOD                (1 * HZ)        /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT      256
 #define PCH_GBE_PCI_BAR                        1
+#define PCH_GBE_RESERVE_MEMORY         0x200000        /* 2MB */
 
 /* Macros for ML7223 */
 #define PCI_VENDOR_ID_ROHM                     0x10db
 #define PCI_DEVICE_ID_ROHM_ML7223_GBE          0x8013
 
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE          0x8802
+
 #define PCH_GBE_TX_WEIGHT         64
 #define PCH_GBE_RX_WEIGHT         64
 #define PCH_GBE_RX_BUFFER_WRITE   16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
        )
 
 /* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
 #define PCH_GBE_FRAME_SIZE_2048         2048
 #define PCH_GBE_FRAME_SIZE_4096         4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_INT_ENABLE_MASK ( \
        PCH_GBE_INT_RX_DMA_CMPLT |    \
        PCH_GBE_INT_RX_DSC_EMP   |    \
+       PCH_GBE_INT_RX_FIFO_ERR  |    \
        PCH_GBE_INT_WOL_DET      |    \
        PCH_GBE_INT_TX_CMPLT          \
        )
 
+#define PCH_GBE_INT_DISABLE_ALL                0
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
        if (!tmp)
                pr_err("Error: busy bit is not cleared\n");
 }
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg:       Pointer to the register
+ * @bit:       Busy bit to wait for
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+       u32 tmp;
+       int ret = -1;
+       /* wait busy */
+       tmp = 20;
+       while ((ioread32(reg) & bit) && --tmp)
+               udelay(5);
+       if (!tmp)
+               pr_err("Error: busy bit is not cleared\n");
+       else
+               ret = 0;
+       return ret;
+}
+
 /**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw:            Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
        return;
 }
 
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+       /* Read the MAC address and store it in the private data */
+       pch_gbe_mac_read_mac_addr(hw);
+       iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+       pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+       /* Setup the MAC address */
+       pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+       return;
+}
+
 /**
  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
  * @hw:        Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 
        tcpip = ioread32(&hw->reg->TCPIP_ACC);
 
-       if (netdev->features & NETIF_F_RXCSUM) {
-               tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
-               tcpip |= PCH_GBE_RX_TCPIPACC_EN;
-       } else {
-               tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
-               tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
-       }
+       tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+       tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
        iowrite32(tcpip, &hw->reg->TCPIP_ACC);
        return;
 }
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
        iowrite32(rdba, &hw->reg->RX_DSC_BASE);
        iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
        iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
-       /* Enables Receive DMA */
-       rxdma = ioread32(&hw->reg->DMA_CTRL);
-       rxdma |= PCH_GBE_RX_DMA_EN;
-       iowrite32(rxdma, &hw->reg->DMA_CTRL);
-       /* Enables Receive */
-       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+       struct pch_gbe_hw *hw = &adapter->hw;
+       u32 rxdma;
+       u16 value;
+       int ret;
+
+       /* Disable Receive DMA */
+       rxdma = ioread32(&hw->reg->DMA_CTRL);
+       rxdma &= ~PCH_GBE_RX_DMA_EN;
+       iowrite32(rxdma, &hw->reg->DMA_CTRL);
+       /* Wait Rx DMA BUS is IDLE */
+       ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+       if (ret) {
+               /* Disable Bus master */
+               pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+               value &= ~PCI_COMMAND_MASTER;
+               pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+               /* Stop Receive */
+               pch_gbe_mac_reset_rx(hw);
+               /* Enable Bus master */
+               value |= PCI_COMMAND_MASTER;
+               pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+       } else {
+               /* Stop Receive */
+               pch_gbe_mac_reset_rx(hw);
+       }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+       u32 rxdma;
+
+       /* Enables Receive DMA */
+       rxdma = ioread32(&hw->reg->DMA_CTRL);
+       rxdma |= PCH_GBE_RX_DMA_EN;
+       iowrite32(rxdma, &hw->reg->DMA_CTRL);
+       /* Enables Receive */
+       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+       return;
+}
+
 /**
  * pch_gbe_intr - Interrupt Handler
  * @irq:   Interrupt number
@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
        if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
                adapter->stats.intr_rx_frame_err_count++;
        if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
-               adapter->stats.intr_rx_fifo_err_count++;
+               if (!adapter->rx_stop_flag) {
+                       adapter->stats.intr_rx_fifo_err_count++;
+                       pr_debug("Rx fifo over run\n");
+                       adapter->rx_stop_flag = true;
+                       int_en = ioread32(&hw->reg->INT_EN);
+                       iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+                                 &hw->reg->INT_EN);
+                       pch_gbe_stop_receive(adapter);
+               }
        if (int_st & PCH_GBE_INT_RX_DMA_ERR)
                adapter->stats.intr_rx_dma_err_count++;
        if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
        /* When Rx descriptor is empty  */
        if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
                adapter->stats.intr_rx_dsc_empty_count++;
-               pr_err("Rx descriptor is empty\n");
+               pr_debug("Rx descriptor is empty\n");
                int_en = ioread32(&hw->reg->INT_EN);
                iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
                if (hw->mac.tx_fc_enable) {
@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
        unsigned int i;
        unsigned int bufsz;
 
-       bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+       bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
        i = rx_ring->next_to_use;
 
        while ((cleaned_count--)) {
                buffer_info = &rx_ring->buffer_info[i];
-               skb = buffer_info->skb;
-               if (skb) {
-                       skb_trim(skb, 0);
-               } else {
-                       skb = netdev_alloc_skb(netdev, bufsz);
-                       if (unlikely(!skb)) {
-                               /* Better luck next round */
-                               adapter->stats.rx_alloc_buff_failed++;
-                               break;
-                       }
-                       /* 64byte align */
-                       skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
-                       buffer_info->skb = skb;
-                       buffer_info->length = adapter->rx_buffer_len;
+               skb = netdev_alloc_skb(netdev, bufsz);
+               if (unlikely(!skb)) {
+                       /* Better luck next round */
+                       adapter->stats.rx_alloc_buff_failed++;
+                       break;
                }
+               /* align */
+               skb_reserve(skb, NET_IP_ALIGN);
+               buffer_info->skb = skb;
+
                buffer_info->dma = dma_map_single(&pdev->dev,
-                                                 skb->data,
+                                                 buffer_info->rx_buffer,
                                                  buffer_info->length,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
        return;
 }
 
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+                        struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct pch_gbe_buffer *buffer_info;
+       unsigned int i;
+       unsigned int bufsz;
+       unsigned int size;
+
+       bufsz = adapter->rx_buffer_len;
+
+       size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+       rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+                                               &rx_ring->rx_buff_pool_logic,
+                                               GFP_KERNEL);
+       if (!rx_ring->rx_buff_pool) {
+               pr_err("Unable to allocate memory for the receive poll buffer\n");
+               return -ENOMEM;
+       }
+       memset(rx_ring->rx_buff_pool, 0, size);
+       rx_ring->rx_buff_pool_size = size;
+       for (i = 0; i < rx_ring->count; i++) {
+               buffer_info = &rx_ring->buffer_info[i];
+               buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+               buffer_info->length = bufsz;
+       }
+       return 0;
+}
+
 /**
  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
  * @adapter:   Board private structure
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
        unsigned int i;
        unsigned int cleaned_count = 0;
        bool cleaned = false;
-       struct sk_buff *skb, *new_skb;
+       struct sk_buff *skb;
        u8 dma_status;
        u16 gbec_status;
        u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                rx_desc->gbec_status = DSC_INIT16;
                buffer_info = &rx_ring->buffer_info[i];
                skb = buffer_info->skb;
+               buffer_info->skb = NULL;
 
                /* unmap dma */
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                   buffer_info->length, DMA_FROM_DEVICE);
                buffer_info->mapped = false;
-               /* Prefetch the packet */
-               prefetch(skb->data);
 
                pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
                         "TCP:0x%08x]  BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        pr_err("Receive CRC Error\n");
                } else {
                        /* get receive length */
-                       /* length convert[-3] */
-                       length = (rx_desc->rx_words_eob) - 3;
-
-                       /* Decide the data conversion method */
-                       if (!(netdev->features & NETIF_F_RXCSUM)) {
-                               /* [Header:14][payload] */
-                               if (NET_IP_ALIGN) {
-                                       /* Because alignment differs,
-                                        * the new_skb is newly allocated,
-                                        * and data is copied to new_skb.*/
-                                       new_skb = netdev_alloc_skb(netdev,
-                                                        length + NET_IP_ALIGN);
-                                       if (!new_skb) {
-                                               /* dorrop error */
-                                               pr_err("New skb allocation "
-                                                       "Error\n");
-                                               goto dorrop;
-                                       }
-                                       skb_reserve(new_skb, NET_IP_ALIGN);
-                                       memcpy(new_skb->data, skb->data,
-                                              length);
-                                       skb = new_skb;
-                               } else {
-                                       /* DMA buffer is used as SKB as it is.*/
-                                       buffer_info->skb = NULL;
-                               }
-                       } else {
-                               /* [Header:14][padding:2][payload] */
-                               /* The length includes padding length */
-                               length = length - PCH_GBE_DMA_PADDING;
-                               if ((length < copybreak) ||
-                                   (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
-                                       /* Because alignment differs,
-                                        * the new_skb is newly allocated,
-                                        * and data is copied to new_skb.
-                                        * Padding data is deleted
-                                        * at the time of a copy.*/
-                                       new_skb = netdev_alloc_skb(netdev,
-                                                        length + NET_IP_ALIGN);
-                                       if (!new_skb) {
-                                               /* dorrop error */
-                                               pr_err("New skb allocation "
-                                                       "Error\n");
-                                               goto dorrop;
-                                       }
-                                       skb_reserve(new_skb, NET_IP_ALIGN);
-                                       memcpy(new_skb->data, skb->data,
-                                              ETH_HLEN);
-                                       memcpy(&new_skb->data[ETH_HLEN],
-                                              &skb->data[ETH_HLEN +
-                                              PCH_GBE_DMA_PADDING],
-                                              length - ETH_HLEN);
-                                       skb = new_skb;
-                               } else {
-                                       /* Padding data is deleted
-                                        * by moving header data.*/
-                                       memmove(&skb->data[PCH_GBE_DMA_PADDING],
-                                               &skb->data[0], ETH_HLEN);
-                                       skb_reserve(skb, NET_IP_ALIGN);
-                                       buffer_info->skb = NULL;
-                               }
-                       }
-                       /* The length includes FCS length */
-                       length = length - ETH_FCS_LEN;
+                       /* convert length (rx_words_eob - 3), then drop the FCS */
+                       length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+                       if (rx_desc->rx_words_eob & 0x02)
+                               length = length - 4;
+                       /*
+                        * buffer_info->rx_buffer: [Header:14][payload]
+                        * skb->data: [Reserve:2][Header:14][payload]
+                        */
+                       memcpy(skb->data, buffer_info->rx_buffer, length);
+
                        /* update status of driver */
                        adapter->stats.rx_bytes += length;
                        adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        pr_debug("Receive skb->ip_summed: %d length: %d\n",
                                 skb->ip_summed, length);
                }
-dorrop:
                /* return some buffers to hardware, one at a time is too slow */
                if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
                        pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
                pr_err("Error: can't bring device up\n");
                return err;
        }
+       err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+       if (err) {
+               pr_err("Error: can't bring device up\n");
+               return err;
+       }
        pch_gbe_alloc_tx_buffers(adapter, tx_ring);
        pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
        adapter->tx_queue_len = netdev->tx_queue_len;
+       pch_gbe_start_receive(&adapter->hw);
 
        mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 void pch_gbe_down(struct pch_gbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 
        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
        pch_gbe_reset(adapter);
        pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
        pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+       pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+                           rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+       rx_ring->rx_buff_pool_logic = 0;
+       rx_ring->rx_buff_pool_size = 0;
+       rx_ring->rx_buff_pool = NULL;
 }
 
 /**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        int max_frame;
+       unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+       int err;
 
        max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
        else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
                adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
        else
-               adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
-       netdev->mtu = new_mtu;
-       adapter->hw.mac.max_frame_size = max_frame;
+               adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
 
-       if (netif_running(netdev))
-               pch_gbe_reinit_locked(adapter);
-       else
+       if (netif_running(netdev)) {
+               pch_gbe_down(adapter);
+               err = pch_gbe_up(adapter);
+               if (err) {
+                       adapter->rx_buffer_len = old_rx_buffer_len;
+                       pch_gbe_up(adapter);
+                       return -ENOMEM;
+               } else {
+                       netdev->mtu = new_mtu;
+                       adapter->hw.mac.max_frame_size = max_frame;
+               }
+       } else {
                pch_gbe_reset(adapter);
+               netdev->mtu = new_mtu;
+               adapter->hw.mac.max_frame_size = max_frame;
+       }
 
        pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
                 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
        int work_done = 0;
        bool poll_end_flag = false;
        bool cleaned = false;
+       u32 int_en;
 
        pr_debug("budget : %d\n", budget);
 
@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
        if (!netif_carrier_ok(netdev)) {
                poll_end_flag = true;
        } else {
-               cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
                pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+               if (adapter->rx_stop_flag) {
+                       adapter->rx_stop_flag = false;
+                       pch_gbe_start_receive(&adapter->hw);
+                       int_en = ioread32(&adapter->hw.reg->INT_EN);
+                       iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+                                       &adapter->hw.reg->INT_EN);
+               }
+               cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
 
                if (cleaned)
                        work_done = budget;
@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
+       {.vendor = PCI_VENDOR_ID_ROHM,
+        .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+        .class_mask = (0xFFFF00)
+        },
        /* required last entry */
        {0}
 };
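
Taken together, the pch_gbe hunks add a recovery path for Rx FIFO overruns: the interrupt handler masks PCH_GBE_INT_RX_FIFO_ERR, stops Rx DMA (falling back to an Rx MAC reset behind a disabled bus master if the DMA bus will not go idle), and sets rx_stop_flag; NAPI polling then drains the ring, restarts receive and unmasks the interrupt. A condensed sketch of that handshake, with invented helper names:

	/* Sketch of the overrun handshake; helpers are illustrative only. */
	static void handle_rx_fifo_overrun(struct pch_gbe_adapter *ad)
	{
		if (ad->rx_stop_flag)
			return;
		ad->rx_stop_flag = true;
		mask_rx_fifo_err_irq(ad);	/* quiet further overrun IRQs */
		stop_rx_dma(ad);		/* wait for idle or reset Rx MAC */
	}

	static void napi_restart_rx(struct pch_gbe_adapter *ad)
	{
		if (!ad->rx_stop_flag)
			return;
		ad->rx_stop_flag = false;
		start_rx_dma(ad);		/* re-enable DMA and MAC receive */
		unmask_rx_fifo_err_irq(ad);
	}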
index 10e5d98..edfa15d 100644 (file)
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
                        continue;
                }
 
-               mtu = pch->chan->mtu - hdrlen;
+               /*
+                * hdrlen includes the 2-byte PPP protocol field, but the
+                * MTU counts only the payload excluding the protocol field.
+                * (RFC1661 Section 2)
+                */
+               mtu = pch->chan->mtu - (hdrlen - 2);
                if (mtu < 4)
                        mtu = 4;
                if (flen > mtu)
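
A worked example of the corrected arithmetic, assuming the long-sequence multilink format (hdrlen = 6: 2-byte protocol field plus 4-byte MP header) and chan->mtu = 1500:

	mtu = 1500 - (6 - 2);	/* 1496 payload bytes per fragment, not 1494 */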
index 1a3033d..d17d062 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/clk.h>
 #include <linux/phy.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/types.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
index 02339b3..c236670 100644 (file)
@@ -407,6 +407,7 @@ enum rtl_register_content {
        RxOK            = 0x0001,
 
        /* RxStatusDesc */
+       RxBOVF  = (1 << 24),
        RxFOVF  = (1 << 23),
        RxRWT   = (1 << 22),
        RxRES   = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
        struct mii_if_info mii;
        struct rtl8169_counters counters;
        u32 saved_wolopts;
+       u32 opts1_mask;
 
        struct rtl_fw {
                const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
 MODULE_FIRMWARE(FIRMWARE_8168E_1);
 MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
 MODULE_FIRMWARE(FIRMWARE_8105E_1);
 
 static int rtl8169_open(struct net_device *dev);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
        netif_err(tp, link, dev, "PHY reset failed\n");
 }
 
+static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+           (RTL_R8(PHYstatus) & TBI_Enable);
+}
+
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
                           ADVERTISED_1000baseT_Half |
                           ADVERTISED_1000baseT_Full : 0));
 
-       if (RTL_R8(PHYstatus) & TBI_Enable)
+       if (rtl_tbi_enabled(tp))
                netif_info(tp, link, dev, "TBI auto-negotiating\n");
 }
 
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
 
 static void r810x_pll_power_down(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
        if (__rtl8169_get_wol(tp) & WAKE_ANY) {
                rtl_writephy(tp, 0x1f, 0x0000);
                rtl_writephy(tp, MII_BMCR, 0x0000);
+
+               if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
+                   tp->mac_version == RTL_GIGA_MAC_VER_30)
+                       RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+                               AcceptMulticast | AcceptMyPhys);
                return;
        }
 
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
                rtl_writephy(tp, MII_BMCR, 0x0000);
 
                if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-                   tp->mac_version == RTL_GIGA_MAC_VER_33)
+                   tp->mac_version == RTL_GIGA_MAC_VER_33 ||
+                   tp->mac_version == RTL_GIGA_MAC_VER_34)
                        RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
                                AcceptMulticast | AcceptMyPhys);
                return;
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
-       if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
-           (RTL_R8(PHYstatus) & TBI_Enable)) {
+       if (rtl_tbi_enabled(tp)) {
                tp->set_speed = rtl8169_set_speed_tbi;
                tp->get_settings = rtl8169_gset_tbi;
                tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->intr_event = cfg->intr_event;
        tp->napi_event = cfg->napi_event;
 
+       tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+               ~(RxBOVF | RxFOVF) : ~0;
+
        init_timer(&tp->timer);
        tp->timer.data = (unsigned long) dev;
        tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
                while (RTL_R8(TxPoll) & NPQ)
                        udelay(20);
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+               RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
                while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
                        udelay(100);
        } else {
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                u32 status;
 
                rmb();
-               status = le32_to_cpu(desc->opts1);
+               status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
 
                if (status & DescOwn)
                        break;
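
The new opts1_mask lets the receive loop ignore RxBOVF/RxFOVF on chips where those status bits are not meaningful, while RTL_GIGA_MAC_VER_01 keeps the full word. In isolation the idea is a per-chip mask chosen once at probe and applied to every descriptor status read, roughly:

	/* Sketch: choose the mask once, apply it on every Rx descriptor. */
	u32 mask = (ver != RTL_GIGA_MAC_VER_01) ? ~(RxBOVF | RxFOVF) : ~0u;
	u32 status = le32_to_cpu(desc->opts1) & mask;
	if (status & DescOwn)
		break;			/* descriptor still owned by the NIC */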
index faca764..b59abc7 100644 (file)
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
 {
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
-       bool use_wc;
        int rc;
 
        netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
                rc = -EIO;
                goto fail3;
        }
-
-       /* bug22643: If SR-IOV is enabled then tx push over a write combined
-        * mapping is unsafe. We need to disable write combining in this case.
-        * MSI is unsupported when SR-IOV is enabled, and the firmware will
-        * have removed the MSI capability. So write combining is safe if
-        * there is an MSI capability.
-        */
-       use_wc = (!EFX_WORKAROUND_22643(efx) ||
-                 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
-       if (use_wc)
-               efx->membase = ioremap_wc(efx->membase_phys,
-                                         efx->type->mem_map_size);
-       else
-               efx->membase = ioremap_nocache(efx->membase_phys,
-                                              efx->type->mem_map_size);
+       efx->membase = ioremap_nocache(efx->membase_phys,
+                                      efx->type->mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
index cc97880..751d1ec 100644 (file)
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
 #endif
-       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
        __raw_writel((__force u32)value->u32[0], membase + addr);
        __raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 
        /* No lock required */
        _efx_writed(efx, value->u32[0], reg);
-       wmb();
 }
 
 /* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
        spin_lock_irqsave(&efx->biu_lock, flags);
        value->u32[0] = _efx_readd(efx, reg + 0);
-       rmb();
        value->u32[1] = _efx_readd(efx, reg + 4);
        value->u32[2] = _efx_readd(efx, reg + 8);
        value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
        value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
        value->u32[0] = (__force __le32)__raw_readl(membase + addr);
-       rmb();
        value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
        spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
 #endif
-       wmb();
 }
 #define efx_writeo_page(efx, value, reg, page)                         \
        _efx_writeo_page(efx, value,                                    \
index 3dd45ed..81a4253 100644 (file)
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
        return &nic_data->mcdi;
 }
 
-static inline void
-efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
-{
-       struct siena_nic_data *nic_data = efx->nic_data;
-       value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
-}
-
-static inline void
-efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
-{
-       struct siena_nic_data *nic_data = efx->nic_data;
-       __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
-}
-
 void efx_mcdi_init(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                            const u8 *inbuf, size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned pdu = MCDI_PDU(efx);
-       unsigned doorbell = MCDI_DOORBELL(efx);
+       unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+       unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
        unsigned int i;
        efx_dword_t hdr;
        u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                             MCDI_HEADER_SEQ, seqno,
                             MCDI_HEADER_XFLAGS, xflags);
 
-       efx_mcdi_writed(efx, &hdr, pdu);
+       efx_writed(efx, &hdr, pdu);
 
        for (i = 0; i < inlen; i += 4)
-               efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
-                               pdu + 4 + i);
+               _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+       /* Ensure the payload is written out before the header */
+       wmb();
 
        /* ring the doorbell with a distinctive value */
-       EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
-       efx_mcdi_writed(efx, &hdr, doorbell);
+       _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
 }
 
 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned int pdu = MCDI_PDU(efx);
+       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        int i;
 
        BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
        BUG_ON(outlen & 3 || outlen >= 0x100);
 
        for (i = 0; i < outlen; i += 4)
-               efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
+               *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
 }
 
 static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int time, finish;
        unsigned int respseq, respcmd, error;
-       unsigned int pdu = MCDI_PDU(efx);
+       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        unsigned int rc, spins;
        efx_dword_t reg;
 
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
 
                time = get_seconds();
 
-               efx_mcdi_readd(efx, &reg, pdu);
+               rmb();
+               efx_readd(efx, &reg, pdu);
 
                /* All 1's indicates that shared memory is in reset (and is
                 * not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                          respseq, mcdi->seqno);
                rc = EIO;
        } else if (error) {
-               efx_mcdi_readd(efx, &reg, pdu + 4);
+               efx_readd(efx, &reg, pdu + 4);
                switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
 #define TRANSLATE_ERROR(name)                                  \
                case MC_CMD_ERR_ ## name:                       \
@@ -222,21 +210,21 @@ out:
 /* Test and clear MC-rebooted flag for this port/function */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-       unsigned int addr = MCDI_REBOOT_FLAG(efx);
+       unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
        efx_dword_t reg;
        uint32_t value;
 
        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return false;
 
-       efx_mcdi_readd(efx, &reg, addr);
+       efx_readd(efx, &reg, addr);
        value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
 
        if (value == 0)
                return 0;
 
        EFX_ZERO_DWORD(reg);
-       efx_mcdi_writed(efx, &reg, addr);
+       efx_writed(efx, &reg, addr);
 
        if (value == MC_STATUS_DWORD_ASSERT)
                return -EINTR;
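
With the dedicated MCDI mapping gone, requests go through the ordinary efx_writed()/_efx_writed() accessors, and the ordering the old wrappers implied is now explicit: the payload is written first, a wmb() makes it visible, and only then is the doorbell rung; the polling side issues an rmb() before re-reading the response header. The write-side rule in isolation (offsets and accessors as in the driver, body illustrative):

	/* Sketch: payload, barrier, then doorbell. */
	for (i = 0; i < inlen; i += 4)
		_efx_writed(efx, *(const __le32 *)(inbuf + i), pdu + 4 + i);
	wmb();				/* payload visible before the doorbell */
	_efx_writed(efx, cpu_to_le32(0x45789abc), doorbell);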
index bafa23a..3edfbaf 100644 (file)
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
                size = min_t(size_t, table->step, 16);
 
-               if (table->offset >= efx->type->mem_map_size) {
-                       /* No longer mapped; return dummy data */
-                       memcpy(buf, "\xde\xc0\xad\xde", 4);
-                       buf += table->rows * size;
-                       continue;
-               }
-
                for (i = 0; i < table->rows; i++) {
                        switch (table->step) {
                        case 4: /* 32-bit register or SRAM */
index 4bd1f28..7443f99 100644 (file)
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 /**
  * struct siena_nic_data - Siena NIC state
  * @mcdi: Management-Controller-to-Driver Interface
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
        struct efx_mcdi_iface mcdi;
-       void __iomem *mcdi_smem;
        int wol_filter_id;
 };
 
index 5735e84..2c3bd93 100644 (file)
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
        efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
        efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
 
-       /* Initialise MCDI */
-       nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
-                                             FR_CZ_MC_TREG_SMEM,
-                                             FR_CZ_MC_TREG_SMEM_STEP *
-                                             FR_CZ_MC_TREG_SMEM_ROWS);
-       if (!nic_data->mcdi_smem) {
-               netif_err(efx, probe, efx->net_dev,
-                         "could not map MCDI at %llx+%x\n",
-                         (unsigned long long)efx->membase_phys +
-                         FR_CZ_MC_TREG_SMEM,
-                         FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
-               rc = -ENOMEM;
-               goto fail1;
-       }
        efx_mcdi_init(efx);
 
        /* Recover from a failed assertion before probing */
        rc = efx_mcdi_handle_assertion(efx);
        if (rc)
-               goto fail2;
+               goto fail1;
 
        /* Let the BMC know that the driver is now in charge of link and
         * filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
 fail3:
        efx_mcdi_drv_attach(efx, false, NULL);
 fail2:
-       iounmap(nic_data->mcdi_smem);
 fail1:
        kfree(efx->nic_data);
        return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
 
 static void siena_remove_nic(struct efx_nic *efx)
 {
-       struct siena_nic_data *nic_data = efx->nic_data;
-
        efx_nic_free_buffer(efx, &efx->irq_status);
 
        siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
                efx_mcdi_drv_attach(efx, false, NULL);
 
        /* Tear down the private nic state */
-       iounmap(nic_data->mcdi_smem);
-       kfree(nic_data);
+       kfree(efx->nic_data);
        efx->nic_data = NULL;
 }
 
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
        .default_mac_ops = &efx_mcdi_mac_operations,
 
        .revision = EFX_REV_SIENA_A0,
-       .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
+       .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+                        FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
        .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
        .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
index 99ff114..e4dd3a7 100644 (file)
@@ -38,8 +38,6 @@
 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
 /* Legacy interrupt storm when interrupt fifo fills */
 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Write combining and sriov=enabled are incompatible */
-#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
 
 /* Spurious parity errors in TSORT buffers */
 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
index dc3fbf6..4a1374d 100644 (file)
@@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-#ifdef BCM_KERNEL_SUPPORTS_8021Q
        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }
-#endif
 
        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
index 15772b1..13c1f04 100644 (file)
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
index 2d4c091..2d394af 100644 (file)
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
        case ADC_DC_CAL:
                /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
                if (!IS_CHAN_B(chan) &&
-                   !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+                   !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+                     IS_CHAN_HT20(chan)))
                        supported = true;
                break;
        }
index 1baca8e..fcafec0 100644 (file)
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
                REG_WRITE_ARRAY(&ah->iniModesAdditional,
                                modesIndex, regWrites);
 
-       if (AR_SREV_9300(ah))
+       if (AR_SREV_9330(ah))
                REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
 
        if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
index 6530694..722967b 100644 (file)
@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
        mutex_lock(&sc->mutex);
        cancel_delayed_work_sync(&sc->tx_complete_work);
 
+       if (ah->ah_flags & AH_UNPLUGGED) {
+               ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+               mutex_unlock(&sc->mutex);
+               return;
+       }
+
        if (sc->sc_flags & SC_OP_INVALID) {
                ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
index 26f1ab8..e293a79 100644 (file)
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
        u32 cmd, beacon0_valid, beacon1_valid;
 
        if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
-           !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
+           !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
+           !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
                return;
 
        /* This is the bottom half of the asynchronous beacon update. */
index 3774dd0..ef9ad79 100644 (file)
@@ -1901,17 +1901,19 @@ static void ipw2100_down(struct ipw2100_priv *priv)
 
 /* Called by register_netdev() */
 static int ipw2100_net_init(struct net_device *dev)
+{
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       return ipw2100_up(priv, 1);
+}
+
+static int ipw2100_wdev_init(struct net_device *dev)
 {
        struct ipw2100_priv *priv = libipw_priv(dev);
        const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
        struct wireless_dev *wdev = &priv->ieee->wdev;
-       int ret;
        int i;
 
-       ret = ipw2100_up(priv, 1);
-       if (ret)
-               return ret;
-
        memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 
        /* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
                       "Error calling register_netdev.\n");
                goto fail;
        }
+       registered = 1;
+
+       err = ipw2100_wdev_init(dev);
+       if (err)
+               goto fail;
 
        mutex_lock(&priv->action_mutex);
-       registered = 1;
 
        IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
 
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 
       fail_unlock:
        mutex_unlock(&priv->action_mutex);
-
+       wiphy_unregister(priv->ieee->wdev.wiphy);
+       kfree(priv->ieee->bg_band.channels);
       fail:
        if (dev) {
                if (registered)
index 87813c3..4ffebed 100644 (file)
@@ -11424,17 +11424,24 @@ static void ipw_bg_down(struct work_struct *work)
 
 /* Called by register_netdev() */
 static int ipw_net_init(struct net_device *dev)
+{
+       int rc = 0;
+       struct ipw_priv *priv = libipw_priv(dev);
+
+       mutex_lock(&priv->mutex);
+       if (ipw_up(priv))
+               rc = -EIO;
+       mutex_unlock(&priv->mutex);
+
+       return rc;
+}
+
+static int ipw_wdev_init(struct net_device *dev)
 {
        int i, rc = 0;
        struct ipw_priv *priv = libipw_priv(dev);
        const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
        struct wireless_dev *wdev = &priv->ieee->wdev;
-       mutex_lock(&priv->mutex);
-
-       if (ipw_up(priv)) {
-               rc = -EIO;
-               goto out;
-       }
 
        memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)
        set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
 
        /* With that information in place, we can now register the wiphy... */
-       if (wiphy_register(wdev->wiphy)) {
+       if (wiphy_register(wdev->wiphy))
                rc = -EIO;
-               goto out;
-       }
-
 out:
-       mutex_unlock(&priv->mutex);
        return rc;
 }
 
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
                goto out_remove_sysfs;
        }
 
+       err = ipw_wdev_init(net_dev);
+       if (err) {
+               IPW_ERROR("failed to register wireless device\n");
+               goto out_unregister_netdev;
+       }
+
 #ifdef CONFIG_IPW2200_PROMISCUOUS
        if (rtap_iface) {
                err = ipw_prom_alloc(priv);
                if (err) {
                        IPW_ERROR("Failed to register promiscuous network "
                                  "device (error %d).\n", err);
-                       unregister_netdev(priv->net_dev);
-                       goto out_remove_sysfs;
+                       wiphy_unregister(priv->ieee->wdev.wiphy);
+                       kfree(priv->ieee->a_band.channels);
+                       kfree(priv->ieee->bg_band.channels);
+                       goto out_unregister_netdev;
                }
        }
 #endif
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
 
        return 0;
 
+      out_unregister_netdev:
+       unregister_netdev(priv->net_dev);
       out_remove_sysfs:
        sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
       out_release_irq:
index 977bd24..164bcae 100644 (file)
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
 
  out:
 
-       rs_sta->last_txrate_idx = index;
-       if (sband->band == IEEE80211_BAND_5GHZ)
-               info->control.rates[0].idx = rs_sta->last_txrate_idx -
-                               IWL_FIRST_OFDM_RATE;
-       else
+       if (sband->band == IEEE80211_BAND_5GHZ) {
+               if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
+                       index = IWL_FIRST_OFDM_RATE;
+               rs_sta->last_txrate_idx = index;
+               info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
+       } else {
+               rs_sta->last_txrate_idx = index;
                info->control.rates[0].idx = rs_sta->last_txrate_idx;
+       }
 
        IWL_DEBUG_RATE(priv, "leave: %d\n", index);
 }
index a895a09..5621100 100644 (file)
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
 
        memset(&cmd, 0, sizeof(cmd));
        iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-       memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
+       memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
        if (!(cmd.radio_sensor_offset))
                cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
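
The one-character fix above is the classic sizeof-on-a-pointer pitfall: sizeof(offset_calib) is the size of the pointer (4 or 8 bytes), while sizeof(*offset_calib) is the size of the calibration value actually being copied. A generic illustration (types invented, not the driver's):

	struct calib { unsigned short raw[8]; } src = { { 42 } }, dst;
	struct calib *offset_calib = &src;

	memcpy(&dst, offset_calib, sizeof(offset_calib));  /* bug: 4 or 8 bytes */
	memcpy(&dst, offset_calib, sizeof(*offset_calib)); /* fix: whole struct */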
 
index b0ae4de..f9c3cd9 100644 (file)
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
+       /*
+        * Including the following line will crash some APs.  This
+        * workaround removes the stimulus that causes the crash until
+        * the AP software can be fixed.
        hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+        */
 
        hw->flags |= IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
index a6b2b1d..222d410 100644 (file)
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];
 
+       txq->time_stamp = jiffies;
+
        iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
 
        /* Input error checking is done when commands are added to queue. */
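
Refreshing txq->time_stamp when a host command completes keeps the stuck-queue watchdog from flagging a queue that is in fact making progress; that style of watchdog compares the stamp against jiffies, along the lines of the sketch below (timeout name and recovery call invented):

	if (time_after(jiffies, txq->time_stamp + wd_timeout))
		schedule_queue_recovery(txq);	/* only if genuinely stalled */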
index ef67f67..0019dfd 100644 (file)
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
        rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
 
        /* Apparently the data is read from end to start */
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
-                                       (u32 *)&rt2x00dev->eeprom[i]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
-                                       (u32 *)&rt2x00dev->eeprom[i + 2]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
-                                       (u32 *)&rt2x00dev->eeprom[i + 4]);
-       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
-                                       (u32 *)&rt2x00dev->eeprom[i + 6]);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+       /* The returned value is in CPU order, but eeprom is le */
+       rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+       rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+       *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
 
        mutex_unlock(&rt2x00dev->csr_mutex);
 }
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
                return -ENODEV;
        }
 
-       if (!rt2x00_rf(rt2x00dev, RF2820) &&
-           !rt2x00_rf(rt2x00dev, RF2850) &&
-           !rt2x00_rf(rt2x00dev, RF2720) &&
-           !rt2x00_rf(rt2x00dev, RF2750) &&
-           !rt2x00_rf(rt2x00dev, RF3020) &&
-           !rt2x00_rf(rt2x00dev, RF2020) &&
-           !rt2x00_rf(rt2x00dev, RF3021) &&
-           !rt2x00_rf(rt2x00dev, RF3022) &&
-           !rt2x00_rf(rt2x00dev, RF3052) &&
-           !rt2x00_rf(rt2x00dev, RF3320) &&
-           !rt2x00_rf(rt2x00dev, RF5370) &&
-           !rt2x00_rf(rt2x00dev, RF5390)) {
-               ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+       switch (rt2x00dev->chip.rf) {
+       case RF2820:
+       case RF2850:
+       case RF2720:
+       case RF2750:
+       case RF3020:
+       case RF2020:
+       case RF3021:
+       case RF3022:
+       case RF3052:
+       case RF3320:
+       case RF5370:
+       case RF5390:
+               break;
+       default:
+               ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
+                     rt2x00dev->chip.rf);
                return -ENODEV;
        }
 
index 1bdc1aa..04c4e9e 100644 (file)
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 
                        mac->link_state = MAC80211_NOLINK;
                        memset(mac->bssid, 0, 6);
+
+                       /* reset sec info */
+                       rtl_cam_reset_sec_info(hw);
+
+                       rtl_cam_reset_all_entry(hw);
                        mac->vendor = PEER_UNKNOWN;
 
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                 *or clear all entry here.
                 */
                rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+
+               rtl_cam_reset_sec_info(hw);
+
                break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 906e7aa..3e52a54 100644 (file)
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                               (tcb_desc->rts_use_shortpreamble ? 1 : 0)
                               : (tcb_desc->rts_use_shortgi ? 1 : 0)));
        if (mac->bw_40) {
-               if (tcb_desc->packet_bw) {
+               if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
                        SET_TX_DESC_DATA_BW(txdesc, 1);
                        SET_TX_DESC_DATA_SC(txdesc, 3);
+               } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+                       SET_TX_DESC_DATA_BW(txdesc, 1);
+                       SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
                } else {
                        SET_TX_DESC_DATA_BW(txdesc, 0);
-                               if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
-                                       SET_TX_DESC_DATA_SC(txdesc,
-                                                         mac->cur_40_prime_sc);
-                       }
+                       SET_TX_DESC_DATA_SC(txdesc, 0);
+               }
        } else {
                SET_TX_DESC_DATA_BW(txdesc, 0);
                SET_TX_DESC_DATA_SC(txdesc, 0);
index 8d9dae8..3878b73 100644 (file)
@@ -837,6 +837,7 @@ config SCSI_ISCI
        # (temporary): known alpha quality driver
        depends on EXPERIMENTAL
        select SCSI_SAS_LIBSAS
+       select SCSI_SAS_HOST_SMP
        ---help---
          This driver supports the 6Gb/s SAS capabilities of the storage
          control unit found in the Intel(R) C600 series chipset.
index 646fc52..8a7591f 100644 (file)
@@ -1507,8 +1507,8 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 
                        if (k != blocks_done) {
                                qla_printk(KERN_WARNING, sp->fcport->vha->hw,
-                                   "unexpected tag values tag:lba=%x:%lx)\n",
-                                   e_ref_tag, lba_s);
+                                   "unexpected tag values tag:lba=%x:%llx)\n",
+                                   e_ref_tag, (unsigned long long)lba_s);
                                return 1;
                        }
 
index d240755..24cacff 100644 (file)
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
 {
        struct device *dev = mspi->dev;
 
+       if (!(mspi->flags & SPI_CPM_MODE))
+               return;
+
        dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
        dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
        cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
index 8ac6542..fa594d6 100644 (file)
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
                int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
                if (cs_gpio < 0)
                        cs_gpio = mxc_platform_info->chipselect[i];
+
+               spi_imx->chipselect[i] = cs_gpio;
                if (cs_gpio < 0)
                        continue;
-               spi_imx->chipselect[i] = cs_gpio;
+
                ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
                if (ret) {
                        while (i > 0) {
index 6859af0..7611def 100644 (file)
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
                                   struct comedi_insn *insn,
                                   unsigned int *data);
 static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_ISA_DMA_API
 static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
+#endif
+#ifdef CONFIG_COMEDI_PCI
 static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
 #endif
 static int labpc_dio_mem_callback(int dir, int port, int data,
index a3f5162..462fbc2 100644 (file)
@@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
        int ret = 0;
 
        BUG_ON(!is_ephemeral(pool));
-       zbud_decompress(virt_to_page(data), pampd);
+       zbud_decompress((struct page *)(data), pampd);
        zbud_free_and_delist((struct zbud_hdr *)pampd);
        atomic_dec(&zcache_curr_eph_pampd_count);
        return ret;
index 497b2e7..5b77316 100644 (file)
@@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
        u8 DataSequenceInOrder = 0;
        u8 ErrorRecoveryLevel = 0, SessionType = 0;
        u8 IFMarker = 0, OFMarker = 0;
-       u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+       u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
        u32 FirstBurstLength = 0, MaxBurstLength = 0;
        struct iscsi_param *param = NULL;
 
index a0d23bc..f00137f 100644 (file)
@@ -874,40 +874,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
        spin_unlock_bh(&sess->session_usage_lock);
 }
 
-/*
- *     Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
- *     array counts needed for sync and steering.
- */
-static int iscsit_determine_sync_and_steering_counts(
-       struct iscsi_conn *conn,
-       struct iscsi_data_count *count)
-{
-       u32 length = count->data_length;
-       u32 marker, markint;
-
-       count->sync_and_steering = 1;
-
-       marker = (count->type == ISCSI_RX_DATA) ?
-                       conn->of_marker : conn->if_marker;
-       markint = (count->type == ISCSI_RX_DATA) ?
-                       (conn->conn_ops->OFMarkInt * 4) :
-                       (conn->conn_ops->IFMarkInt * 4);
-       count->ss_iov_count = count->iov_count;
-
-       while (length > 0) {
-               if (length >= marker) {
-                       count->ss_iov_count += 3;
-                       count->ss_marker_count += 2;
-
-                       length -= marker;
-                       marker = markint;
-               } else
-                       length = 0;
-       }
-
-       return 0;
-}
-
 /*
  *     Setup conn->if_marker and conn->of_marker values based upon
  *     the initial marker-less interval. (see iSCSI v19 A.2)
@@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
        struct kvec iov;
        u32 tx_hdr_size, data_len;
        u32 offset = cmd->first_data_sg_off;
-       int tx_sent;
+       int tx_sent, iov_off;
 
 send_hdr:
        tx_hdr_size = ISCSI_HDR_LEN;
@@ -1310,9 +1276,19 @@ send_hdr:
        }
 
        data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
-       if (conn->conn_ops->DataDigest)
+       /*
+        * Set iov_off used by padding and data digest tx_data() calls below
+        * in order to determine proper offset into cmd->iov_data[]
+        */
+       if (conn->conn_ops->DataDigest) {
                data_len -= ISCSI_CRC_LEN;
-
+               if (cmd->padding)
+                       iov_off = (cmd->iov_data_count - 2);
+               else
+                       iov_off = (cmd->iov_data_count - 1);
+       } else {
+               iov_off = (cmd->iov_data_count - 1);
+       }
        /*
         * Perform sendpage() for each page in the scatterlist
         */
@@ -1341,8 +1317,7 @@ send_pg:
 
 send_padding:
        if (cmd->padding) {
-               struct kvec *iov_p =
-                       &cmd->iov_data[cmd->iov_data_count-1];
+               struct kvec *iov_p = &cmd->iov_data[iov_off++];
 
                tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
                if (cmd->padding != tx_sent) {
@@ -1356,8 +1331,7 @@ send_padding:
 
 send_datacrc:
        if (conn->conn_ops->DataDigest) {
-               struct kvec *iov_d =
-                       &cmd->iov_data[cmd->iov_data_count];
+               struct kvec *iov_d = &cmd->iov_data[iov_off];
 
                tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
                if (ISCSI_CRC_LEN != tx_sent) {
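A short worked example of the iov_off bookkeeping introduced above (the counts are
hypothetical, not taken from the patch):

    /*
     * cmd->iov_data_count == 5, padding and DataDigest both in use:
     *     iov_off = 5 - 2 = 3   ->  pad bytes sent from cmd->iov_data[3],
     *                               then the CRC from cmd->iov_data[4]
     * DataDigest only, no padding:
     *     iov_off = 5 - 1 = 4   ->  CRC sent from cmd->iov_data[4]
     *
     * The padding transfer post-increments iov_off, so the digest always
     * uses the entry that follows the pad entry it just consumed.
     */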
@@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(
        struct iscsi_data_count *count)
 {
        int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
-       u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
-       struct kvec iov[count->ss_iov_count], *iov_p;
+       struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
@@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       if (count->sync_and_steering) {
-               int size = 0;
-               u32 i, orig_iov_count = 0;
-               u32 orig_iov_len = 0, orig_iov_loc = 0;
-               u32 iov_count = 0, per_iov_bytes = 0;
-               u32 *rx_marker, old_rx_marker = 0;
-               struct kvec *iov_record;
-
-               memset(&rx_marker_val, 0,
-                               count->ss_marker_count * sizeof(u32));
-               memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
-               iov_record = count->iov;
-               orig_iov_count = count->iov_count;
-               rx_marker = &conn->of_marker;
-
-               i = 0;
-               size = data;
-               orig_iov_len = iov_record[orig_iov_loc].iov_len;
-               while (size > 0) {
-                       pr_debug("rx_data: #1 orig_iov_len %u,"
-                       " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
-                       pr_debug("rx_data: #2 rx_marker %u, size"
-                               " %u\n", *rx_marker, size);
-
-                       if (orig_iov_len >= *rx_marker) {
-                               iov[iov_count].iov_len = *rx_marker;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &rx_marker_val[rx_marker_iov++];
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &rx_marker_val[rx_marker_iov++];
-                               old_rx_marker = *rx_marker;
-
-                               /*
-                                * OFMarkInt is in 32-bit words.
-                                */
-                               *rx_marker = (conn->conn_ops->OFMarkInt * 4);
-                               size -= old_rx_marker;
-                               orig_iov_len -= old_rx_marker;
-                               per_iov_bytes += old_rx_marker;
-
-                               pr_debug("rx_data: #3 new_rx_marker"
-                                       " %u, size %u\n", *rx_marker, size);
-                       } else {
-                               iov[iov_count].iov_len = orig_iov_len;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               per_iov_bytes = 0;
-                               *rx_marker -= orig_iov_len;
-                               size -= orig_iov_len;
-
-                               if (size)
-                                       orig_iov_len =
-                                       iov_record[++orig_iov_loc].iov_len;
-
-                               pr_debug("rx_data: #4 new_rx_marker"
-                                       " %u, size %u\n", *rx_marker, size);
-                       }
-               }
-               data += (rx_marker_iov * (MARKER_SIZE / 2));
-
-               iov_p   = &iov[0];
-               iov_len = iov_count;
-
-               if (iov_count > count->ss_iov_count) {
-                       pr_err("iov_count: %d, count->ss_iov_count:"
-                               " %d\n", iov_count, count->ss_iov_count);
-                       return -1;
-               }
-               if (rx_marker_iov > count->ss_marker_count) {
-                       pr_err("rx_marker_iov: %d, count->ss_marker"
-                               "_count: %d\n", rx_marker_iov,
-                               count->ss_marker_count);
-                       return -1;
-               }
-       } else {
-               iov_p = count->iov;
-               iov_len = count->iov_count;
-       }
+       iov_p = count->iov;
+       iov_len = count->iov_count;
 
        while (total_rx < data) {
                rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(
                                rx_loop, total_rx, data);
        }
 
-       if (count->sync_and_steering) {
-               int j;
-               for (j = 0; j < rx_marker_iov; j++) {
-                       pr_debug("rx_data: #5 j: %d, offset: %d\n",
-                               j, rx_marker_val[j]);
-                       conn->of_marker_offset = rx_marker_val[j];
-               }
-               total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
-       }
-
        return total_rx;
 }
 
@@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(
        struct iscsi_data_count *count)
 {
        int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
-       u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
-       struct kvec iov[count->ss_iov_count], *iov_p;
+       struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
@@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       if (count->sync_and_steering) {
-               int size = 0;
-               u32 i, orig_iov_count = 0;
-               u32 orig_iov_len = 0, orig_iov_loc = 0;
-               u32 iov_count = 0, per_iov_bytes = 0;
-               u32 *tx_marker, old_tx_marker = 0;
-               struct kvec *iov_record;
-
-               memset(&tx_marker_val, 0,
-                       count->ss_marker_count * sizeof(u32));
-               memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
-               iov_record = count->iov;
-               orig_iov_count = count->iov_count;
-               tx_marker = &conn->if_marker;
-
-               i = 0;
-               size = data;
-               orig_iov_len = iov_record[orig_iov_loc].iov_len;
-               while (size > 0) {
-                       pr_debug("tx_data: #1 orig_iov_len %u,"
-                       " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
-                       pr_debug("tx_data: #2 tx_marker %u, size"
-                               " %u\n", *tx_marker, size);
-
-                       if (orig_iov_len >= *tx_marker) {
-                               iov[iov_count].iov_len = *tx_marker;
-                               iov[iov_count++].iov_base =
-                                       (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               tx_marker_val[tx_marker_iov] =
-                                               (size - *tx_marker);
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &tx_marker_val[tx_marker_iov++];
-                               iov[iov_count].iov_len = (MARKER_SIZE / 2);
-                               iov[iov_count++].iov_base =
-                                       &tx_marker_val[tx_marker_iov++];
-                               old_tx_marker = *tx_marker;
-
-                               /*
-                                * IFMarkInt is in 32-bit words.
-                                */
-                               *tx_marker = (conn->conn_ops->IFMarkInt * 4);
-                               size -= old_tx_marker;
-                               orig_iov_len -= old_tx_marker;
-                               per_iov_bytes += old_tx_marker;
-
-                               pr_debug("tx_data: #3 new_tx_marker"
-                                       " %u, size %u\n", *tx_marker, size);
-                               pr_debug("tx_data: #4 offset %u\n",
-                                       tx_marker_val[tx_marker_iov-1]);
-                       } else {
-                               iov[iov_count].iov_len = orig_iov_len;
-                               iov[iov_count++].iov_base
-                                       = (iov_record[orig_iov_loc].iov_base +
-                                               per_iov_bytes);
-
-                               per_iov_bytes = 0;
-                               *tx_marker -= orig_iov_len;
-                               size -= orig_iov_len;
-
-                               if (size)
-                                       orig_iov_len =
-                                       iov_record[++orig_iov_loc].iov_len;
-
-                               pr_debug("tx_data: #5 new_tx_marker"
-                                       " %u, size %u\n", *tx_marker, size);
-                       }
-               }
-
-               data += (tx_marker_iov * (MARKER_SIZE / 2));
-
-               iov_p = &iov[0];
-               iov_len = iov_count;
-
-               if (iov_count > count->ss_iov_count) {
-                       pr_err("iov_count: %d, count->ss_iov_count:"
-                               " %d\n", iov_count, count->ss_iov_count);
-                       return -1;
-               }
-               if (tx_marker_iov > count->ss_marker_count) {
-                       pr_err("tx_marker_iov: %d, count->ss_marker"
-                               "_count: %d\n", tx_marker_iov,
-                               count->ss_marker_count);
-                       return -1;
-               }
-       } else {
-               iov_p = count->iov;
-               iov_len = count->iov_count;
-       }
+       iov_p = count->iov;
+       iov_len = count->iov_count;
 
        while (total_tx < data) {
                tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(
                                        tx_loop, total_tx, data);
        }
 
-       if (count->sync_and_steering)
-               total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
-
        return total_tx;
 }
 
@@ -1702,12 +1486,6 @@ int rx_data(
        c.data_length = data;
        c.type = ISCSI_RX_DATA;
 
-       if (conn->conn_ops->OFMarker &&
-          (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-               if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-                       return -1;
-       }
-
        return iscsit_do_rx_data(conn, &c);
 }
 
@@ -1728,12 +1506,6 @@ int tx_data(
        c.data_length = data;
        c.type = ISCSI_TX_DATA;
 
-       if (conn->conn_ops->IFMarker &&
-          (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-               if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-                       return -1;
-       }
-
        return iscsit_do_tx_data(conn, &c);
 }
 
index 89ae923..f04d4ef 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 
@@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
        return 0;
 }
 
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+{
+       unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+       unsigned char *buf = buf_off;
+       int cnt = 0, next = 1;
+       /*
+        * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+        * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+        * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+        * to complete the payload.  These are based on the VPD=0x80 PRODUCT SERIAL
+        * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+        * per-device uniqueness.
+        */
+       while (*p != '\0') {
+               if (cnt >= 13)
+                       break;
+               if (!isxdigit(*p)) {
+                       p++;
+                       continue;
+               }
+               if (next != 0) {
+                       buf[cnt++] |= hex_to_bin(*p++);
+                       next = 0;
+               } else {
+                       buf[cnt] = hex_to_bin(*p++) << 4;
+                       next = 1;
+               }
+       }
+}
+
 /*
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
         * VENDOR_SPECIFIC_IDENTIFIER and
         * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
         */
-       buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
-       hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+       target_parse_naa_6h_vendor_specific(dev, &buf[off]);
 
        len = 20;
        off = (len + 4);
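The helper above packs the hex digits of the configured unit serial into the NAA
descriptor one nibble at a time, starting in the low nibble of the first byte and
skipping anything that is not a hex digit. A stand-alone userspace sketch of the
same packing follows; the serial string and the 0x60 seed byte are made-up
placeholders (in the real VPD page byte 0 already carries the NAA nibble and part
of the IEEE company ID), not values from the patch:

    #include <ctype.h>
    #include <stdio.h>

    static unsigned char hexval(char c)
    {
            return isdigit((unsigned char)c) ? c - '0'
                                             : tolower((unsigned char)c) - 'a' + 10;
    }

    int main(void)
    {
            const char *serial = "4f2a-91c3";      /* hypothetical serial */
            unsigned char buf[14] = { 0x60 };      /* stand-in for the NAA byte */
            int cnt = 0, next = 1;
            const char *p;
            int i;

            for (p = serial; *p && cnt < 13; p++) {
                    if (!isxdigit((unsigned char)*p))
                            continue;              /* e.g. the '-' is skipped */
                    if (next) {
                            buf[cnt++] |= hexval(*p);   /* low nibble of current byte */
                            next = 0;
                    } else {
                            buf[cnt] = hexval(*p) << 4; /* high nibble of next byte */
                            next = 1;
                    }
            }
            for (i = 0; i <= cnt && i < 14; i++)
                    printf("%02x ", buf[i]);       /* prints: 64 f2 a9 1c 30 */
            printf("\n");
            return 0;
    }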
index 8d0c58e..a4b0a8d 100644 (file)
@@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
 {
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
+       LIST_HEAD(qf_cmd_list);
        struct se_cmd *cmd, *cmd_tmp;
 
        spin_lock_irq(&dev->qf_cmd_lock);
-       list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+       list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+       spin_unlock_irq(&dev->qf_cmd_lock);
 
+       list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
                smp_mb__after_atomic_dec();
-               spin_unlock_irq(&dev->qf_cmd_lock);
 
                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
                 * has been added to head of queue
                 */
                transport_add_cmd_to_queue(cmd, cmd->t_state);
-
-               spin_lock_irq(&dev->qf_cmd_lock);
        }
-       spin_unlock_irq(&dev->qf_cmd_lock);
 }
 
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
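The hunk above switches target_qf_do_work() to the usual splice-then-walk idiom:
the pending list is moved onto a stack-local list head while the lock is held, and
the entries are then processed with the lock dropped. A minimal sketch of the
pattern with placeholder names (struct item, dev->pending and process() are not
from the patch):

    LIST_HEAD(local);                        /* stack-local list head */
    struct item *it, *tmp;

    spin_lock_irq(&dev->lock);
    list_splice_init(&dev->pending, &local); /* steal every queued entry */
    spin_unlock_irq(&dev->lock);

    /* walk the private copy without holding the lock */
    list_for_each_entry_safe(it, tmp, &local, node) {
            list_del(&it->node);
            process(it);                     /* free to sleep or requeue */
    }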
index bd4fe21..3749d8b 100644 (file)
@@ -98,8 +98,7 @@ struct ft_tpg {
        struct list_head list;          /* linkage in ft_lport_acl tpg_list */
        struct list_head lun_list;      /* head of LUNs */
        struct se_portal_group se_tpg;
-       struct task_struct *thread;     /* processing thread */
-       struct se_queue_obj qobj;       /* queue for processing thread */
+       struct workqueue_struct *workqueue;
 };
 
 struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
        struct se_wwn fc_lport_wwn;
 };
 
-enum ft_cmd_state {
-       FC_CMD_ST_NEW = 0,
-       FC_CMD_ST_REJ
-};
-
 /*
  * Commands
  */
 struct ft_cmd {
-       enum ft_cmd_state state;
        u32 lun;                        /* LUN from request */
        struct ft_sess *sess;           /* session held for cmd */
        struct fc_seq *seq;             /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
        struct fc_frame *req_frame;
        unsigned char *cdb;             /* pointer to CDB inside frame */
        u32 write_data_len;             /* data received on writes */
-       struct se_queue_req se_req;
+       struct work_struct work;
        /* Local sense buffer */
        unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
        u32 was_ddp_setup:1;            /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
 /*
  * other internal functions.
  */
-int ft_thread(void *);
 void ft_recv_req(struct ft_sess *, struct fc_frame *);
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
 struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
index 5654dc2..80fbcde 100644 (file)
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
        int count;
 
        se_cmd = &cmd->se_cmd;
-       pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
-               caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
+       pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+               caller, cmd, cmd->sess, cmd->seq, se_cmd);
        pr_debug("%s: cmd %p cdb %p\n",
                caller, cmd, cmd->cdb);
        pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
                16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
 }
 
-static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
-{
-       struct ft_tpg *tpg = sess->tport->tpg;
-       struct se_queue_obj *qobj = &tpg->qobj;
-       unsigned long flags;
-
-       qobj = &sess->tport->tpg->qobj;
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
-       atomic_inc(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       wake_up_process(tpg->thread);
-}
-
-static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
-{
-       unsigned long flags;
-       struct se_queue_req *qr;
-
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (list_empty(&qobj->qobj_list)) {
-               spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-               return NULL;
-       }
-       qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
-       list_del(&qr->qr_list);
-       atomic_dec(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-       return container_of(qr, struct ft_cmd, se_req);
-}
-
 static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 
 int ft_get_cmd_state(struct se_cmd *se_cmd)
 {
-       struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
-       return cmd->state;
+       return 0;
 }
 
 int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
        return 0;
 }
 
+static void ft_send_work(struct work_struct *work);
+
 /*
  * Handle incoming FCP command.
  */
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
                goto busy;
        }
        cmd->req_frame = fp;            /* hold frame during cmd */
-       ft_queue_cmd(sess, cmd);
+
+       INIT_WORK(&cmd->work, ft_send_work);
+       queue_work(sess->tport->tpg->workqueue, &cmd->work);
        return;
 
 busy:
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
 /*
  * Send new command to target.
  */
-static void ft_send_cmd(struct ft_cmd *cmd)
+static void ft_send_work(struct work_struct *work)
 {
+       struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
        struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
        struct se_cmd *se_cmd;
        struct fcp_cmnd *fcp;
-       int data_dir;
+       int data_dir = 0;
        u32 data_len;
        int task_attr;
        int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 err:
        ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
 }
-
-/*
- * Handle request in the command thread.
- */
-static void ft_exec_req(struct ft_cmd *cmd)
-{
-       pr_debug("cmd state %x\n", cmd->state);
-       switch (cmd->state) {
-       case FC_CMD_ST_NEW:
-               ft_send_cmd(cmd);
-               break;
-       default:
-               break;
-       }
-}
-
-/*
- * Processing thread.
- * Currently one thread per tpg.
- */
-int ft_thread(void *arg)
-{
-       struct ft_tpg *tpg = arg;
-       struct se_queue_obj *qobj = &tpg->qobj;
-       struct ft_cmd *cmd;
-
-       while (!kthread_should_stop()) {
-               schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
-               if (kthread_should_stop())
-                       goto out;
-
-               cmd = ft_dequeue_cmd(qobj);
-               if (cmd)
-                       ft_exec_req(cmd);
-       }
-
-out:
-       return 0;
-}
index b15879d..8fa39b7 100644 (file)
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
        tpg->index = index;
        tpg->lport_acl = lacl;
        INIT_LIST_HEAD(&tpg->lun_list);
-       transport_init_queue_obj(&tpg->qobj);
 
        ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
                                tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
                return NULL;
        }
 
-       tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
-       if (IS_ERR(tpg->thread)) {
+       tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
+       if (!tpg->workqueue) {
                kfree(tpg);
                return NULL;
        }
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
        pr_debug("del tpg %s\n",
                    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
 
-       kthread_stop(tpg->thread);
+       destroy_workqueue(tpg->workqueue);
 
        /* Wait for sessions to be freed thru RCU, for BUG_ON below */
        synchronize_rcu();
index c37f4cd..d35ea5a 100644 (file)
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
        if (cmd->was_ddp_setup) {
                BUG_ON(!ep);
                BUG_ON(!lport);
-       }
-
-       /*
-        * Doesn't expect payload if DDP is setup. Payload
-        * is expected to be copied directly to user buffers
-        * due to DDP (Large Rx offload),
-        */
-       buf = fc_frame_payload_get(fp, 1);
-       if (buf)
-               pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+               /*
+                * Since DDP (Large Rx offload) was set up for this request,
+                * payload is expected to be copied directly to user buffers.
+                */
+               buf = fc_frame_payload_get(fp, 1);
+               if (buf)
+                       pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
                                "cmd->sg_cnt 0x%x. DDP was setup"
                                " hence not expected to receive frame with "
-                               "payload, Frame will be dropped if "
-                               "'Sequence Initiative' bit in f_ctl is "
+                               "payload, Frame will be dropped if "
+                               "'Sequence Initiative' bit in f_ctl is "
                                "not set\n", __func__, ep->xid, f_ctl,
                                cmd->sg, cmd->sg_cnt);
-       /*
-        * Invalidate HW DDP context if it was setup for respective
-        * command. Invalidation of HW DDP context is requited in both
-        * situation (success and error). 
-        */
-       ft_invl_hw_context(cmd);
+               /*
+                * Invalidate the HW DDP context if it was set up for the
+                * respective command. Invalidation of the HW DDP context is
+                * required in both situations (success and error).
+                */
+               ft_invl_hw_context(cmd);
 
-       /*
-        * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
-        * write data frame is received successfully where payload is
-        * posted directly to user buffer and only the last frame's
-        * header is posted in receive queue.
-        *
-        * If "Sequence Initiative (TSI)" bit is not set, means error
-        * condition w.r.t. DDP, hence drop the packet and let explict
-        * ABORTS from other end of exchange timer trigger the recovery.
-        */
-       if (f_ctl & FC_FC_SEQ_INIT)
-               goto last_frame;
-       else
-               goto drop;
+               /*
+                * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+                * write data frame is received successfully where payload is
+                * posted directly to user buffer and only the last frame's
+                * header is posted in receive queue.
+                *
+                * If "Sequence Initiative (TSI)" bit is not set, means error
+                * condition w.r.t. DDP, hence drop the packet and let explicit
+                * ABORTS from other end of exchange timer trigger the recovery.
+                */
+               if (f_ctl & FC_FC_SEQ_INIT)
+                       goto last_frame;
+               else
+                       goto drop;
+       }
 
        rel_off = ntohl(fh->fh_parm_offset);
        frame_len = fr_len(fp);
index 1e96d1f..723f823 100644 (file)
@@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
        memset(buf, 0, retval);
        status = 0;
 
-       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
+       mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
 
        spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
index 54139a2..952e2de 100644 (file)
@@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        int status = -EINPROGRESS;
        struct urb_priv *urb_priv;
        struct xhci_ep_ctx *ep_ctx;
+       struct list_head *tmp;
        u32 trb_comp_code;
        int ret = 0;
+       int td_num = 0;
 
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
@@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                return -ENODEV;
        }
 
+       /* Count the TDs currently on the ring if ep->skip is set */
+       if (ep->skip) {
+               list_for_each(tmp, &ep_ring->td_list)
+                       td_num++;
+       }
+
        event_dma = le64_to_cpu(event->buffer);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        /* Look for common error cases */
@@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        goto cleanup;
                }
 
+               /* We've skipped all the TDs on the ep ring when ep->skip is set */
+               if (ep->skip && td_num == 0) {
+                       ep->skip = false;
+                       xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+                                               "Clear skip flag.\n");
+                       ret = 0;
+                       goto cleanup;
+               }
+
                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+               if (ep->skip)
+                       td_num--;
 
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
index 410fba4..809cbda 100644 (file)
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
                asminline_call(&cmn_regs, cru_rom_addr);
        die_nmi_called = 1;
        spin_unlock_irqrestore(&rom_lock, rom_pl);
+
+       if (allow_kdump)
+               hpwdt_stop();
+
        if (!is_icru) {
                if (cmn_regs.u1.ral == 0) {
-                       printk(KERN_WARNING "hpwdt: An NMI occurred, "
+                       panic("An NMI occurred, "
                                "but unable to determine source.\n");
                }
        }
-
-       if (allow_kdump)
-               hpwdt_stop();
        panic("An NMI occurred, please see the Integrated "
                "Management Log for details.\n");
 
index 7d82ada..102aed0 100644 (file)
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;
 static void
 ltq_wdt_enable(void)
 {
-       ltq_wdt_timeout = ltq_wdt_timeout *
+       unsigned long int timeout = ltq_wdt_timeout *
                        (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
-       if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT)
-               ltq_wdt_timeout = LTQ_MAX_TIMEOUT;
+       if (timeout > LTQ_MAX_TIMEOUT)
+               timeout = LTQ_MAX_TIMEOUT;
 
        /* write the first password magic */
        ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
        /* write the second magic plus the configuration and new timeout */
        ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
-               LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR);
+               LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
 }
 
 static void
index 3066a51..eaca366 100644 (file)
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {
        .notifier_call = epx_c3_notify_sys,
 };
 
-static const char banner[] __initdata = KERN_INFO PFX
+static const char banner[] __initconst = KERN_INFO PFX
        "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
 
 static int __init watchdog_init(void)
index d33520d..1199da0 100644 (file)
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd;
 
 static int watchdog_ping(struct watchdog_device *wddev)
 {
-       if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
                if (wddev->ops->ping)
                        return wddev->ops->ping(wddev);  /* ping the watchdog */
                else
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)
 {
        int err;
 
-       if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
+       if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
                err = wddev->ops->start(wddev);
                if (err < 0)
                        return err;
 
-               set_bit(WDOG_ACTIVE, &wdd->status);
+               set_bit(WDOG_ACTIVE, &wddev->status);
        }
        return 0;
 }
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)
 {
        int err = -EBUSY;
 
-       if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
+       if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
                pr_info("%s: nowayout prevents watchdog to be stopped!\n",
-                                                       wdd->info->identity);
+                                                       wddev->info->identity);
                return err;
        }
 
-       if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
                err = wddev->ops->stop(wddev);
                if (err < 0)
                        return err;
 
-               clear_bit(WDOG_ACTIVE, &wdd->status);
+               clear_bit(WDOG_ACTIVE, &wddev->status);
        }
        return 0;
 }
index e0c2807..181fa81 100644 (file)
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, bus);
 
-       /* Register all devices */
        pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
                 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
 
+       /* First identify all devices ... */
        for (i = 0; i < zorro_num_autocon; i++) {
                z = &zorro_autocon[i];
                z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
                dev_set_name(&z->dev, "%02x", i);
                z->dev.parent = &bus->dev;
                z->dev.bus = &zorro_bus_type;
+       }
+
+       /* ... then register them */
+       for (i = 0; i < zorro_num_autocon; i++) {
+               z = &zorro_autocon[i];
                error = device_register(&z->dev);
                if (error) {
                        dev_err(&bus->dev, "Error registering device %s\n",
index 3c3abff..a381cd2 100644 (file)
@@ -1817,6 +1817,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
                goto out;
        case SEEK_DATA:
        case SEEK_HOLE:
+               if (offset >= i_size_read(inode)) {
+                       mutex_unlock(&inode->i_mutex);
+                       return -ENXIO;
+               }
+
                ret = find_desired_extent(inode, &offset, origin);
                if (ret) {
                        mutex_unlock(&inode->i_mutex);
@@ -1825,11 +1830,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
        }
 
        if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
-               ret = -EINVAL;
+               offset = -EINVAL;
                goto out;
        }
        if (offset > inode->i_sb->s_maxbytes) {
-               ret = -EINVAL;
+               offset = -EINVAL;
                goto out;
        }
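With the check above, SEEK_DATA and SEEK_HOLE on btrfs now fail with -ENXIO when
the starting offset is at or beyond EOF, and errors are reported through offset,
since that is the value the function returns. From user space the behaviour looks
like this (hedged example program, not part of the patch; SEEK_DATA needs
_GNU_SOURCE and a kernel/filesystem that implements it):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            off_t data;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;

            /* find the first byte of data at or after offset 0 */
            data = lseek(fd, 0, SEEK_DATA);
            if (data == (off_t)-1 && errno == ENXIO)
                    printf("no data at or after that offset (e.g. offset >= EOF)\n");
            else if (data != (off_t)-1)
                    printf("first data at %lld\n", (long long)data);

            close(fd);
            return 0;
    }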
 
index 4d14de6..b2d004a 100644 (file)
@@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
                kfree(dentry->d_fsdata);
                dentry->d_fsdata = NULL;
-               d_clear_need_lookup(dentry);
+               /* This thing is hashed, drop it for now */
+               d_drop(dentry);
        } else {
                ret = btrfs_inode_by_name(dir, dentry, &location);
        }
@@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry)
 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
                                   struct nameidata *nd)
 {
-       return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+       struct dentry *ret;
+
+       ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+       if (unlikely(d_need_lookup(dentry))) {
+               spin_lock(&dentry->d_lock);
+               dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+               spin_unlock(&dentry->d_lock);
+       }
+       return ret;
 }
 
 unsigned char btrfs_filetype_table[] = {
@@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 
        /* special case for "." */
        if (filp->f_pos == 0) {
-               over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
+               over = filldir(dirent, ".", 1,
+                              filp->f_pos, btrfs_ino(inode), DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 1;
@@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        if (filp->f_pos == 1) {
                u64 pino = parent_ino(filp->f_path.dentry);
                over = filldir(dirent, "..", 2,
-                              2, pino, DT_DIR);
+                              filp->f_pos, pino, DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 2;
index 3351b1b..538f65a 100644 (file)
@@ -2177,6 +2177,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        if (!(src_file->f_mode & FMODE_READ))
                goto out_fput;
 
+       /* don't make the dst file partly checksummed */
+       if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+           (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+               goto out_fput;
+
        ret = -EISDIR;
        if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
                goto out_fput;
@@ -2226,6 +2231,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        goto out_unlock;
        }
 
+       /* truncate page cache pages from target inode range */
+       truncate_inode_pages_range(&inode->i_data, destoff,
+                                  PAGE_CACHE_ALIGN(destoff + len) - 1);
+
        /* do any pending delalloc/csum calc on src, one way or
           another, and lock file content */
        while (1) {
@@ -2242,10 +2251,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                btrfs_wait_ordered_range(src, off, len);
        }
 
-       /* truncate page cache pages from target inode range */
-       truncate_inode_pages_range(&inode->i_data, off,
-                                  ALIGN(off + len, PAGE_CACHE_SIZE) - 1);
-
        /* clone data */
        key.objectid = btrfs_ino(src);
        key.type = BTRFS_EXTENT_DATA_KEY;
@@ -2323,7 +2328,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        else
                                new_key.offset = destoff;
 
-                       trans = btrfs_start_transaction(root, 1);
+                       /*
+                        * 1 - adjusting old extent (we may have to split it)
+                        * 1 - add new extent
+                        * 1 - inode update
+                        */
+                       trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                goto out;
@@ -2442,7 +2452,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        if (endoff > inode->i_size)
                                btrfs_i_size_write(inode, endoff);
 
-                       BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
                        btrfs_end_transaction(trans, root);
index e76bfeb..30acd22 100644 (file)
@@ -351,9 +351,7 @@ static int
 build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
 {
        unsigned int dlen;
-       unsigned int wlen;
-       unsigned int size = 6 * sizeof(struct ntlmssp2_name);
-       __le64  curtime;
+       unsigned int size = 2 * sizeof(struct ntlmssp2_name);
        char *defdmname = "WORKGROUP";
        unsigned char *blobptr;
        struct ntlmssp2_name *attrptr;
@@ -365,15 +363,14 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
        }
 
        dlen = strlen(ses->domainName);
-       wlen = strlen(ses->server->hostname);
 
-       /* The length of this blob is a size which is
-        * six times the size of a structure which holds name/size +
-        * two times the unicode length of a domain name +
-        * two times the unicode length of a server name +
-        * size of a timestamp (which is 8 bytes).
+       /*
+        * The length of this blob is two times the size of the
+        * av pair structure which holds name/size
+        * (one for NTLMSSP_AV_NB_DOMAIN_NAME followed by one for
+        * NTLMSSP_AV_EOL) + the unicode length of the netbios domain name
         */
-       ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;
+       ses->auth_key.len = size + 2 * dlen;
        ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
        if (!ses->auth_key.response) {
                ses->auth_key.len = 0;
@@ -384,44 +381,15 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
        blobptr = ses->auth_key.response;
        attrptr = (struct ntlmssp2_name *) blobptr;
 
+       /*
+        * As defined in MS-NTLM 3.3.2, just this av pair field
+        * is sufficient as part of the temp blob
+        */
        attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
        attrptr->length = cpu_to_le16(2 * dlen);
        blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
        cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
 
-       blobptr += 2 * dlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
-       attrptr->length = cpu_to_le16(2 * wlen);
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
-
-       blobptr += 2 * wlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
-       attrptr->length = cpu_to_le16(2 * dlen);
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
-
-       blobptr += 2 * dlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
-       attrptr->length = cpu_to_le16(2 * wlen);
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
-
-       blobptr += 2 * wlen;
-       attrptr = (struct ntlmssp2_name *) blobptr;
-
-       attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
-       attrptr->length = cpu_to_le16(sizeof(__le64));
-       blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
-       memcpy(blobptr, &curtime, sizeof(__le64));
-
        return 0;
 }
 
index f93eb94..54b8f1e 100644 (file)
@@ -548,6 +548,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                struct inode *dir = dentry->d_inode;
                struct dentry *child;
 
+               if (!dir) {
+                       dput(dentry);
+                       dentry = ERR_PTR(-ENOENT);
+                       break;
+               }
+
                /* skip separators */
                while (*s == sep)
                        s++;
@@ -563,10 +569,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                mutex_unlock(&dir->i_mutex);
                dput(dentry);
                dentry = child;
-               if (!dentry->d_inode) {
-                       dput(dentry);
-                       dentry = ERR_PTR(-ENOENT);
-               }
        } while (!IS_ERR(dentry));
        _FreeXid(xid);
        kfree(full_path);
index aac37d9..a80f7bd 100644 (file)
@@ -4079,7 +4079,8 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
        T2_FNEXT_RSP_PARMS *parms;
        char *response_data;
        int rc = 0;
-       int bytes_returned, name_len;
+       int bytes_returned;
+       unsigned int name_len;
        __u16 params, byte_count;
 
        cFYI(1, "In FindNext");
index 633c246..f4af4cc 100644 (file)
@@ -1298,7 +1298,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        /* ignore */
                } else if (strnicmp(data, "guest", 5) == 0) {
                        /* ignore */
-               } else if (strnicmp(data, "rw", 2) == 0) {
+               } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
                        /* ignore */
                } else if (strnicmp(data, "ro", 2) == 0) {
                        /* ignore */
@@ -1401,7 +1401,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        vol->server_ino = 1;
                } else if (strnicmp(data, "noserverino", 9) == 0) {
                        vol->server_ino = 0;
-               } else if (strnicmp(data, "rwpidforward", 4) == 0) {
+               } else if (strnicmp(data, "rwpidforward", 12) == 0) {
                        vol->rwpidforward = 1;
                } else if (strnicmp(data, "cifsacl", 7) == 0) {
                        vol->cifs_acl = 1;
index 04da6ac..12661e1 100644 (file)
@@ -1134,7 +1134,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
                return bh;
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ_META, 1, &bh);
+       ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -2807,7 +2807,7 @@ make_io:
                trace_ext3_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ_META, bh);
+               submit_bh(READ | REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        ext3_error(inode->i_sb, "ext3_get_inode_loc",
index 5571708..0629e09 100644 (file)
@@ -922,7 +922,8 @@ restart:
                                bh = ext3_getblk(NULL, dir, b++, 0, &err);
                                bh_use[ra_max] = bh;
                                if (bh)
-                                       ll_rw_block(READ_META, 1, &bh);
+                                       ll_rw_block(READ | REQ_META | REQ_PRIO,
+                                                   1, &bh);
                        }
                }
                if ((bh = bh_use[ra_ptr++]) == NULL)
index 18d2558..986e238 100644 (file)
@@ -647,7 +647,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                return bh;
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ_META, 1, &bh);
+       ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -3298,7 +3298,7 @@ make_io:
                trace_ext4_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ_META, bh);
+               submit_bh(READ | REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        EXT4_ERROR_INODE_BLOCK(inode, block,
index f8068c7..1c924fa 100644 (file)
@@ -922,7 +922,8 @@ restart:
                                bh = ext4_getblk(NULL, dir, b++, 0, &err);
                                bh_use[ra_max] = bh;
                                if (bh)
-                                       ll_rw_block(READ_META, 1, &bh);
+                                       ll_rw_block(READ | REQ_META | REQ_PRIO,
+                                                   1, &bh);
                        }
                }
                if ((bh = bh_use[ra_ptr++]) == NULL)
index 85c6292..5986464 100644 (file)
@@ -624,9 +624,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
-               submit_bh(WRITE_SYNC | REQ_META, bh);
+               submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
        else
-               submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+               submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
 
        if (!buffer_uptodate(bh))
index 747238c..be29858 100644 (file)
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
        struct buffer_head *bh, *head;
        int nr_underway = 0;
-       int write_op = REQ_META |
+       int write_op = REQ_META | REQ_PRIO |
                (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 
        BUG_ON(!PageLocked(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        }
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(READ_SYNC | REQ_META, bh);
+       submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
        if (!(flags & DIO_WAIT))
                return 0;
 
@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+               ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
 
        dblock++;
        extlen--;
index 3bc073a..079587e 100644 (file)
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
-       submit_bio(READ_SYNC | REQ_META, bio);
+       submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
        wait_on_page_locked(page);
        bio_put(bio);
        if (!PageUptodate(page)) {
index 42e8d23..0e8bb13 100644 (file)
@@ -709,7 +709,7 @@ get_a_page:
                set_buffer_uptodate(bh);
 
        if (!buffer_uptodate(bh)) {
-               ll_rw_block(READ_META, 1, &bh);
+               ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock_out;
index 25b6a88..5afaa58 100644 (file)
@@ -877,30 +877,54 @@ struct numa_maps_private {
        struct numa_maps md;
 };
 
-static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
+static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
+                       unsigned long nr_pages)
 {
        int count = page_mapcount(page);
 
-       md->pages++;
+       md->pages += nr_pages;
        if (pte_dirty || PageDirty(page))
-               md->dirty++;
+               md->dirty += nr_pages;
 
        if (PageSwapCache(page))
-               md->swapcache++;
+               md->swapcache += nr_pages;
 
        if (PageActive(page) || PageUnevictable(page))
-               md->active++;
+               md->active += nr_pages;
 
        if (PageWriteback(page))
-               md->writeback++;
+               md->writeback += nr_pages;
 
        if (PageAnon(page))
-               md->anon++;
+               md->anon += nr_pages;
 
        if (count > md->mapcount_max)
                md->mapcount_max = count;
 
-       md->node[page_to_nid(page)]++;
+       md->node[page_to_nid(page)] += nr_pages;
+}
+
+static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+               unsigned long addr)
+{
+       struct page *page;
+       int nid;
+
+       if (!pte_present(pte))
+               return NULL;
+
+       page = vm_normal_page(vma, addr, pte);
+       if (!page)
+               return NULL;
+
+       if (PageReserved(page))
+               return NULL;
+
+       nid = page_to_nid(page);
+       if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+               return NULL;
+
+       return page;
 }
 
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
@@ -912,26 +936,32 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
        pte_t *pte;
 
        md = walk->private;
-       orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-       do {
-               struct page *page;
-               int nid;
+       spin_lock(&walk->mm->page_table_lock);
+       if (pmd_trans_huge(*pmd)) {
+               if (pmd_trans_splitting(*pmd)) {
+                       spin_unlock(&walk->mm->page_table_lock);
+                       wait_split_huge_page(md->vma->anon_vma, pmd);
+               } else {
+                       pte_t huge_pte = *(pte_t *)pmd;
+                       struct page *page;
 
-               if (!pte_present(*pte))
-                       continue;
+                       page = can_gather_numa_stats(huge_pte, md->vma, addr);
+                       if (page)
+                               gather_stats(page, md, pte_dirty(huge_pte),
+                                               HPAGE_PMD_SIZE/PAGE_SIZE);
+                       spin_unlock(&walk->mm->page_table_lock);
+                       return 0;
+               }
+       } else {
+               spin_unlock(&walk->mm->page_table_lock);
+       }
 
-               page = vm_normal_page(md->vma, addr, *pte);
+       orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+       do {
+               struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
                if (!page)
                        continue;
-
-               if (PageReserved(page))
-                       continue;
-
-               nid = page_to_nid(page);
-               if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
-                       continue;
-
-               gather_stats(page, md, pte_dirty(*pte));
+               gather_stats(page, md, pte_dirty(*pte), 1);
 
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
@@ -952,7 +982,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
                return 0;
 
        md = walk->private;
-       gather_stats(page, md, pte_dirty(*pte));
+       gather_stats(page, md, pte_dirty(*pte), 1);
        return 0;
 }
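
The nr_pages argument added above lets a single pmd-mapped transparent huge page be counted as the number of base pages it spans instead of as one page. A minimal standalone sketch of the arithmetic, assuming the common x86-64 defaults of 4 KiB base pages and 2 MiB huge pages (these constants are assumptions, not part of the patch):

        #include <stdio.h>

        int main(void)
        {
                unsigned long page_size = 4096;            /* PAGE_SIZE */
                unsigned long hpage_pmd_size = 2UL << 20;  /* HPAGE_PMD_SIZE, 2 MiB */

                /* gather_stats(page, md, dirty, HPAGE_PMD_SIZE/PAGE_SIZE) bumps
                 * the numa_maps counters by this many base pages per huge mapping */
                printf("%lu\n", hpage_pmd_size / page_size);   /* prints 512 */
                return 0;
        }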
 
index 32f0076..71fc53b 100644 (file)
@@ -124,6 +124,7 @@ enum rq_flag_bits {
 
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
+       __REQ_PRIO,             /* boost priority in cfq */
        __REQ_DISCARD,          /* request to discard sectors */
        __REQ_SECURE,           /* secure discard (used with __REQ_DISCARD) */
 
@@ -161,14 +162,15 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_DRIVER    (1 << __REQ_FAILFAST_DRIVER)
 #define REQ_SYNC               (1 << __REQ_SYNC)
 #define REQ_META               (1 << __REQ_META)
+#define REQ_PRIO               (1 << __REQ_PRIO)
 #define REQ_DISCARD            (1 << __REQ_DISCARD)
 #define REQ_NOIDLE             (1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
-        REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
+        REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
 #define REQ_RAHEAD             (1 << __REQ_RAHEAD)
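
REQ_PRIO splits "boost this request in the I/O scheduler" out of REQ_META, which stays a plain metadata annotation (see the comments in the enum above). A minimal standalone sketch of the flag usage, with made-up bit positions rather than the kernel's real enum values:

        #include <stdio.h>

        #define REQ_META (1u << 0)   /* metadata io request */
        #define REQ_PRIO (1u << 1)   /* boost priority in cfq */

        int main(void)
        {
                unsigned int cmd_flags = REQ_META | REQ_PRIO;  /* as gfs2 now submits */

                /* a scheduler wanting to prioritize keys off REQ_PRIO,
                 * not off REQ_META alone */
                if (cmd_flags & REQ_PRIO)
                        printf("boost this request\n");
                return 0;
        }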
index 84b15d5..7fbaa91 100644 (file)
@@ -873,7 +873,6 @@ struct blk_plug {
        struct list_head list;
        struct list_head cb_list;
        unsigned int should_sort;
-       unsigned int count;
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
index c2bd68f..277f497 100644 (file)
@@ -162,10 +162,8 @@ struct inodes_stat_t {
 #define READA                  RWA_MASK
 
 #define READ_SYNC              (READ | REQ_SYNC)
-#define READ_META              (READ | REQ_META)
 #define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE)
 #define WRITE_ODIRECT          (WRITE | REQ_SYNC)
-#define WRITE_META             (WRITE | REQ_META)
 #define WRITE_FLUSH            (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
 #define WRITE_FUA              (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA                (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
index 2c366b5..aace6b8 100644 (file)
@@ -553,6 +553,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SPAPR_TCE 63
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA        65
+#define KVM_CAP_S390_GMAP 71
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index d12f8d6..97cf4f2 100644 (file)
@@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {
        struct regulator_init_data *init_data;
 };
 
-#define WM8994_CONFIGURE_GPIO 0x8000
+#define WM8994_CONFIGURE_GPIO 0x10000
 
 #define WM8994_DRC_REGS 5
 #define WM8994_EQ_REGS  20
index 7b996ed..8bd383c 100644 (file)
@@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
index 12b2b18..e16557a 100644 (file)
@@ -231,6 +231,8 @@ enum
        LINUX_MIB_TCPDEFERACCEPTDROP,
        LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
        LINUX_MIB_TCPTIMEWAITOVERFLOW,          /* TCPTimeWaitOverflow */
+       LINUX_MIB_TCPREQQFULLDOCOOKIES,         /* TCPReqQFullDoCookies */
+       LINUX_MIB_TCPREQQFULLDROP,              /* TCPReqQFullDrop */
        __LINUX_MIB_MAX
 };
 
index 78113da..a094477 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _NET_FLOW_H
 #define _NET_FLOW_H
 
+#include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
 
@@ -68,7 +69,7 @@ struct flowi4 {
 #define fl4_ipsec_spi          uli.spi
 #define fl4_mh_type            uli.mht.type
 #define fl4_gre_key            uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
@@ -112,7 +113,7 @@ struct flowi6 {
 #define fl6_ipsec_spi          uli.spi
 #define fl6_mh_type            uli.mht.type
 #define fl6_gre_key            uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowidn {
        struct flowi_common     __fl_common;
@@ -127,7 +128,7 @@ struct flowidn {
        union flowi_uli         uli;
 #define fld_sport              uli.ports.sport
 #define fld_dport              uli.ports.dport
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowi {
        union {
@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
        return container_of(fldn, struct flowi, u.dn);
 }
 
+typedef unsigned long flow_compare_t;
+
+static inline size_t flow_key_size(u16 family)
+{
+       switch (family) {
+       case AF_INET:
+               BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
+               return sizeof(struct flowi4) / sizeof(flow_compare_t);
+       case AF_INET6:
+               BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
+               return sizeof(struct flowi6) / sizeof(flow_compare_t);
+       case AF_DECnet:
+               BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
+               return sizeof(struct flowidn) / sizeof(flow_compare_t);
+       }
+       return 0;
+}
+
 #define FLOW_DIR_IN    0
 #define FLOW_DIR_OUT   1
 #define FLOW_DIR_FWD   2
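
flow_key_size() reports how many flow_compare_t words of the flowi union are meaningful for a given family, so the flow cache can hash and compare only those words; the net/core/flow.c hunks further down consume it. A standalone sketch of the size-in-words idea, using a stand-in struct instead of the real flowi4:

        #include <stdio.h>

        typedef unsigned long flow_compare_t;

        struct fake_key {                 /* stand-in for struct flowi4 */
                unsigned long saddr, daddr;
                unsigned int  ports;
        };

        int main(void)
        {
                /* mirrors the BUILD_BUG_ON + division in flow_key_size(): the key
                 * must be a whole number of compare words so word-wise hashing and
                 * comparison never read past the valid bytes */
                _Static_assert(sizeof(struct fake_key) % sizeof(flow_compare_t) == 0,
                               "key not a multiple of the compare word");
                printf("%zu words\n", sizeof(struct fake_key) / sizeof(flow_compare_t));
                return 0;
        }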
index 99e6e19..4c0766e 100644 (file)
@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;
  */
 struct listen_sock {
        u8                      max_qlen_log;
-       /* 3 bytes hole, try to use */
+       u8                      synflood_warned;
+       /* 2 bytes hole, try to use */
        int                     qlen;
        int                     qlen_young;
        int                     clock_hand;
index 6506458..712b3be 100644 (file)
@@ -109,6 +109,7 @@ typedef enum {
        SCTP_CMD_SEND_MSG,       /* Send the whole use message */
        SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
        SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+       SCTP_CMD_SET_ASOC,       /* Restore association context */
        SCTP_CMD_LAST
 } sctp_verb_t;
 
index 149a415..acc620a 100644 (file)
@@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
                                    struct ip_options *opt);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
                                     __u16 *mss);
+#else
+static inline __u32 cookie_v4_init_sequence(struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __u16 *mss)
+{
+       return 0;
+}
+#endif
 
 extern __u32 cookie_init_timestamp(struct request_sock *req);
 extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
 
 /* From net/ipv6/syncookies.c */
 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
                                     __u16 *mss);
-
+#else
+static inline __u32 cookie_v6_init_sequence(struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __u16 *mss)
+{
+       return 0;
+}
+#endif
 /* tcp_output.c */
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
@@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
+extern int tcp_syn_flood_action(struct sock *sk,
+                               const struct sk_buff *skb,
+                               const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
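
tcp_syn_flood_action() replaces the per-protocol open-coded SYN-flood warning: it bumps the new SNMP counters and tells the caller whether to fall back to syncookies. The call site, as it appears in the net/ipv4/tcp_ipv4.c hunk further down in this diff (the IPv6 path passes "TCPv6" instead of "TCP"):

        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
                if (!want_cookie)
                        goto drop;
        }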
index 5271a74..498433d 100644 (file)
@@ -39,6 +39,7 @@ extern int                    datagram_recv_ctl(struct sock *sk,
                                                  struct sk_buff *skb);
 
 extern int                     datagram_send_ctl(struct net *net,
+                                                 struct sock *sk,
                                                  struct msghdr *msg,
                                                  struct flowi6 *fl6,
                                                  struct ipv6_txoptions *opt,
index 9c51ee7..2a9b88a 100644 (file)
@@ -209,8 +209,19 @@ early_param("quiet", quiet_kernel);
 
 static int __init loglevel(char *str)
 {
-       get_option(&str, &console_loglevel);
-       return 0;
+       int newlevel;
+
+       /*
+        * Only update loglevel value when a correct setting was passed,
+        * to prevent blind crashes (when loglevel is set to 0) that
+        * are quite hard to debug.
+        */
+       if (get_option(&str, &newlevel)) {
+               console_loglevel = newlevel;
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 early_param("loglevel", loglevel);
index d5a3009..dc5114b 100644 (file)
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
        desc->depth = 1;
        if (desc->irq_data.chip->irq_shutdown)
                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-       if (desc->irq_data.chip->irq_disable)
+       else if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
index 9de3ecf..a70d2a5 100644 (file)
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
                        break;
 
                si = child->last_siginfo;
-               if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
-                       break;
-
-               child->jobctl |= JOBCTL_LISTENING;
-
-               /*
-                * If NOTIFY is set, it means event happened between start
-                * of this trap and now.  Trigger re-trap immediately.
-                */
-               if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-                       signal_wake_up(child, true);
-
+               if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+                       child->jobctl |= JOBCTL_LISTENING;
+                       /*
+                        * If NOTIFY is set, it means event happened between
+                        * start of this trap and now.  Trigger re-trap.
+                        */
+                       if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+                               signal_wake_up(child, true);
+                       ret = 0;
+               }
                unlock_task_sighand(child, &flags);
-               ret = 0;
                break;
 
        case PTRACE_DETACH:      /* detach a process that was attached. */
index e19ce14..e660464 100644 (file)
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
        .cmd            = TASKSTATS_CMD_GET,
        .doit           = taskstats_user_cmd,
        .policy         = taskstats_cmd_get_policy,
+       .flags          = GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
index 24dc60d..5bbfac8 100644 (file)
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
                stats->hiwater_vm    = get_mm_hiwater_vm(mm)  * PAGE_SIZE / KB;
                mmput(mm);
        }
-       stats->read_char        = p->ioac.rchar;
-       stats->write_char       = p->ioac.wchar;
-       stats->read_syscalls    = p->ioac.syscr;
-       stats->write_syscalls   = p->ioac.syscw;
+       stats->read_char        = p->ioac.rchar & KB_MASK;
+       stats->write_char       = p->ioac.wchar & KB_MASK;
+       stats->read_syscalls    = p->ioac.syscr & KB_MASK;
+       stats->write_syscalls   = p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       stats->read_bytes       = p->ioac.read_bytes;
-       stats->write_bytes      = p->ioac.write_bytes;
-       stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+       stats->read_bytes       = p->ioac.read_bytes & KB_MASK;
+       stats->write_bytes      = p->ioac.write_bytes & KB_MASK;
+       stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
        stats->read_bytes       = 0;
        stats->write_bytes      = 0;
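
KB_MASK is ~(KB-1) == ~1023, so ANDing with it truncates the byte counters down to a whole multiple of 1 KiB before they are exported. A standalone worked example (the value 5000 is arbitrary):

        #include <stdio.h>

        #define KB 1024
        #define KB_MASK (~(KB - 1))

        int main(void)
        {
                unsigned long long rchar = 5000;

                printf("%llu\n", rchar & KB_MASK);   /* 5000 & ~1023 == 4096 */
                return 0;
        }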
index e51e255..a768e6d 100644 (file)
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
         * next filter in the chain. Apply the BCJ filter on the new data
         * in the output buffer. If everything cannot be filtered, copy it
         * to temp and rewind the output buffer position accordingly.
+        *
+        * This needs to be always run when temp.size == 0 to handle a special
+        * case where the output buffer is full and the next filter has no
+        * more output coming but hasn't returned XZ_STREAM_END yet.
         */
-       if (s->temp.size < b->out_size - b->out_pos) {
+       if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
                out_start = b->out_pos;
                memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
                b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
                s->temp.size = b->out_pos - out_start;
                b->out_pos -= s->temp.size;
                memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+
+               /*
+                * If there wasn't enough input to the next filter to fill
+                * the output buffer with unfiltered data, there's no point
+                * to try decoding more data to temp.
+                */
+               if (b->out_pos + s->temp.size < b->out_size)
+                       return XZ_OK;
        }
 
        /*
-        * If we have unfiltered data in temp, try to fill by decoding more
-        * data from the next filter. Apply the BCJ filter on temp. Then we
-        * hopefully can fill the actual output buffer by copying filtered
-        * data from temp. A mix of filtered and unfiltered data may be left
-        * in temp; it will be taken care on the next call to this function.
+        * We have unfiltered data in temp. If the output buffer isn't full
+        * yet, try to fill the temp buffer by decoding more data from the
+        * next filter. Apply the BCJ filter on temp. Then we hopefully can
+        * fill the actual output buffer by copying filtered data from temp.
+        * A mix of filtered and unfiltered data may be left in temp; it will
+        * be taken care of on the next call to this function.
         */
-       if (s->temp.size > 0) {
+       if (b->out_pos < b->out_size) {
                /* Make b->out{,_pos,_size} temporarily point to s->temp. */
                s->out = b->out;
                s->out_pos = b->out_pos;
index d6edf8d..a87da52 100644 (file)
@@ -359,6 +359,17 @@ static unsigned long bdi_longest_inactive(void)
        return max(5UL * 60 * HZ, interval);
 }
 
+/*
+ * Clear pending bit and wake up anybody waiting for flusher thread creation or
+ * shutdown.
+ */
+static void bdi_clear_pending(struct backing_dev_info *bdi)
+{
+       clear_bit(BDI_pending, &bdi->state);
+       smp_mb__after_clear_bit();
+       wake_up_bit(&bdi->state, BDI_pending);
+}
+
 static int bdi_forker_thread(void *ptr)
 {
        struct bdi_writeback *me = ptr;
@@ -390,6 +401,13 @@ static int bdi_forker_thread(void *ptr)
                }
 
                spin_lock_bh(&bdi_lock);
+               /*
+                * In the following loop we are going to check whether we have
+                * some work to do without any synchronization with tasks
+                * waking us up to do work for them. So we have to set task
+                * state already here so that we don't miss wakeups coming
+                * after we verify some condition.
+                */
                set_current_state(TASK_INTERRUPTIBLE);
 
                list_for_each_entry(bdi, &bdi_list, bdi_list) {
@@ -469,11 +487,13 @@ static int bdi_forker_thread(void *ptr)
                                spin_unlock_bh(&bdi->wb_lock);
                                wake_up_process(task);
                        }
+                       bdi_clear_pending(bdi);
                        break;
 
                case KILL_THREAD:
                        __set_current_state(TASK_RUNNING);
                        kthread_stop(task);
+                       bdi_clear_pending(bdi);
                        break;
 
                case NO_ACTION:
@@ -489,16 +509,8 @@ static int bdi_forker_thread(void *ptr)
                        else
                                schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                        try_to_freeze();
-                       /* Back to the main loop */
-                       continue;
+                       break;
                }
-
-               /*
-                * Clear pending bit and wakeup anybody waiting to tear us down.
-                */
-               clear_bit(BDI_pending, &bdi->state);
-               smp_mb__after_clear_bit();
-               wake_up_bit(&bdi->state, BDI_pending);
        }
 
        return 0;
index 9f662d7..7c54fe8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 */
                if (unlikely(!prior)) {
                        remove_full(s, page);
-                       add_partial(n, page, 0);
+                       add_partial(n, page, 1);
                        stat(s, FREE_ADD_PARTIAL);
                }
        }
index a40170e..7ef4eb4 100644 (file)
@@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                       test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_conn_check_pending(hdev);
@@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
                return;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                                       !test_and_set_bit(HCI_INQUIRY,
-                                                       &hdev->flags))
+       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 1);
 }
 
@@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, status);
 
-       if (test_bit(HCI_MGMT, &hdev->flags) &&
-                               test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+                               test_bit(HCI_MGMT, &hdev->flags))
                mgmt_discovering(hdev->id, 0);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY, status);
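
The operand swap in these hunks matters because && short-circuits: test_and_clear_bit(HCI_INQUIRY, ...) has a side effect and must run even when HCI_MGMT is not set, while the mgmt event stays gated on both conditions. A small standalone illustration of the evaluation-order point (not Bluetooth code):

        #include <stdio.h>

        static int inquiry_set = 1;

        static int test_and_clear_inquiry(void)
        {
                int was_set = inquiry_set;
                inquiry_set = 0;        /* side effect must happen regardless */
                return was_set;
        }

        int main(void)
        {
                int mgmt = 0;

                if (test_and_clear_inquiry() && mgmt)
                        printf("mgmt_discovering()\n");
                printf("inquiry_set=%d\n", inquiry_set);   /* 0 even with mgmt off */
                return 0;
        }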
index ba6f73e..a9aff9c 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
-       depends on BRIDGE && BRIDGE_NETFILTER
+       depends on BRIDGE && NETFILTER
        select NETFILTER_XTABLES
        help
          ebtables is a general, extensible frame/packet identification
index 7c2fa0a..7f9ac07 100644 (file)
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
        caifdevs = caif_device_list(dev_net(dev));
        BUG_ON(!caifdevs);
 
-       caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+       caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
        caifd->pcpu_refcnt = alloc_percpu(int);
+       if (!caifd->pcpu_refcnt) {
+               kfree(caifd);
+               return NULL;
+       }
        caifd->netdev = dev;
        dev_hold(dev);
        return caifd;
index 8ce926d..9b0c32a 100644 (file)
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
        struct net_device *dev;
 
        if (stats_timer)
-               del_timer(&can_stattimer);
+               del_timer_sync(&can_stattimer);
 
        can_remove_proc();
 
index 17d67b5..b10ff0a 100644 (file)
@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
+       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+                       atomic_long_inc(&dev->rx_dropped);
+                       kfree_skb(skb);
+                       return NET_RX_DROP;
+               }
+       }
+
        skb_orphan(skb);
        nf_reset(skb);
 
index e7ab0c0..3231b46 100644 (file)
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
-                           r->target == rule->pref) {
-                               BUG_ON(rtnl_dereference(r->ctarget) != NULL);
+                           r->target == rule->pref &&
+                           rtnl_dereference(r->ctarget) == NULL) {
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
index bf32c33..555a456 100644 (file)
@@ -30,6 +30,7 @@ struct flow_cache_entry {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
+       struct net                      *net;
        u16                             family;
        u8                              dir;
        u32                             genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
-                         const struct flowi *key)
+                         const struct flowi *key,
+                         size_t keysize)
 {
        const u32 *k = (const u32 *) key;
+       const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-       return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+       return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+                           size_t keysize)
 {
        const flow_compare_t *k1, *k1_lim, *k2;
-       const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-       BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
        k1 = (const flow_compare_t *) key1;
-       k1_lim = k1 + n_elem;
+       k1_lim = k1 + keysize;
 
        k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
+       size_t keysize;
        unsigned int hash;
 
        local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
        fle = NULL;
        flo = NULL;
+
+       keysize = flow_key_size(family);
+       if (!keysize)
+               goto nocache;
+
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);
 
-       hash = flow_hash_code(fc, fcp, key);
+       hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
-               if (tfle->family == family &&
+               if (tfle->net == net &&
+                   tfle->family == family &&
                    tfle->dir == dir &&
-                   flow_key_compare(key, &tfle->key) == 0) {
+                   flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
+                       fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
-                       memcpy(&fle->key, key, sizeof(*key));
+                       memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
index 27002df..387703f 100644 (file)
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/*     skb_copy_ubufs  -       copy userspace skb frags buffers to kernel
+ *     @skb: the skb to modify
+ *     @gfp_mask: allocation priority
+ *
+ *     This must be called on SKBTX_DEV_ZEROCOPY skb.
+ *     It will copy all frags into kernel and drop the reference
+ *     to userspace pages.
+ *
+ *     If this function is called from an interrupt, @gfp_mask must be
+ *     %GFP_ATOMIC.
+ *
+ *     Returns 0 on success or a negative error code on failure
+ *     to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
        int i;
        int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
                skb_shinfo(skb)->frags[i - 1].page = head;
                head = (struct page *)head->private;
        }
+
+       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        return 0;
 }
 
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                if (skb_copy_ubufs(skb, gfp_mask))
                        return NULL;
-               skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        }
 
        n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
                                n = NULL;
                                goto out;
                        }
-                       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                        if (skb_copy_ubufs(skb, gfp_mask))
                                goto nofrags;
-                       skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                        get_page(skb_shinfo(skb)->frags[i].page);
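
Since skb_copy_ubufs() now clears SKBTX_DEV_ZEROCOPY itself, callers only invoke it and handle failure; the exported form is used this way in the net/core/dev.c hunk earlier in this diff, roughly:

        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return NET_RX_DROP;
                }
                /* flag is cleared inside skb_copy_ubufs() now */
        }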
index 27997d3..a246836 100644 (file)
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
        dev->addr_len           = ETH_ALEN;
        dev->tx_queue_len       = 1000; /* Ethernet wants good queues */
        dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
-       dev->priv_flags         = IFF_TX_SKB_SHARING;
+       dev->priv_flags         |= IFF_TX_SKB_SHARING;
 
        memset(dev->broadcast, 0xFF, ETH_ALEN);
 
index 1b745d4..dd2b947 100644 (file)
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
 
        if (addr->sin_family != AF_INET) {
+               /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
+                * only if s_addr is INADDR_ANY.
+                */
                err = -EAFNOSUPPORT;
-               goto out;
+               if (addr->sin_family != AF_UNSPEC ||
+                   addr->sin_addr.s_addr != htonl(INADDR_ANY))
+                       goto out;
        }
 
        chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
index 33e2c35..80106d8 100644 (file)
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+       struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+       if (fi->fib_metrics != (u32 *) dst_default_metrics)
+               kfree(fi->fib_metrics);
+       kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
        } endfor_nexthops(fi);
        fib_info_cnt--;
        release_net(fi->fib_net);
-       kfree_rcu(fi, rcu);
+       call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
index 5c9b9d9..e59aabd 100644 (file)
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
        return skb;
 
 nlmsg_failure:
+       kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip_queue: error creating packet message\n");
        return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
        struct nf_queue_entry *entry;
 
-       if (vmsg->value > NF_MAX_VERDICT)
+       if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
                return -EINVAL;
 
        entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
                break;
 
        case IPQM_VERDICT:
-               if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-                       status = -EINVAL;
-               else
-                       status = ipq_set_verdict(&pmsg->msg.verdict,
-                                                len - sizeof(*pmsg));
-                       break;
+               status = ipq_set_verdict(&pmsg->msg.verdict,
+                                        len - sizeof(*pmsg));
+               break;
        default:
                status = -EINVAL;
        }
index b14ec7d..4bfad5d 100644 (file)
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
        SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
        SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+       SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+       SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
        SNMP_MIB_SENTINEL
 };
 
index ea0d218..21fab3e 100644 (file)
@@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
                return 0;
 
        /* ...Then it's D-SACK, and must reside below snd_una completely */
-       if (!after(end_seq, tp->snd_una))
+       if (after(end_seq, tp->snd_una))
                return 0;
 
        if (!before(start_seq, tp->undo_marker))
index 1c12b8e..c34f015 100644 (file)
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+                        const struct sk_buff *skb,
+                        const char *proto)
 {
-       const char *msg;
+       const char *msg = "Dropping request";
+       int want_cookie = 0;
+       struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies)
+       if (sysctl_tcp_syncookies) {
                msg = "Sending cookies";
-       else
+               want_cookie = 1;
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+       } else
 #endif
-               msg = "Dropping request";
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-       pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-                               ntohs(tcp_hdr(skb)->dest), msg);
+       lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+       if (!lopt->synflood_warned) {
+               lopt->synflood_warned = 1;
+               pr_info("%s: Possible SYN flooding on port %d. %s. "
+                       " Check SNMP counters.\n",
+                       proto, ntohs(tcp_hdr(skb)->dest), msg);
+       }
+       return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
        /* Never answer to SYNs send to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * evidently real one.
         */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-               if (net_ratelimit())
-                       syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-               if (sysctl_tcp_syncookies) {
-                       want_cookie = 1;
-               } else
-#endif
-               goto drop;
+               want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+               if (!want_cookie)
+                       goto drop;
        }
 
        /* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                while (l-- > 0)
                        *c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
                want_cookie = 0;        /* not our kind of cookie */
-#endif
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
index f012ebd..12368c5 100644 (file)
@@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
                        "%s(): cannot allocate memory for statistics; dev=%s.\n",
                        __func__, dev->name));
                neigh_parms_release(&nd_tbl, ndev->nd_parms);
-               ndev->dead = 1;
-               in6_dev_finish_destroy(ndev);
+               dev_put(dev);
+               kfree(ndev);
                return NULL;
        }
 
index 9ef1831..b46e9f8 100644 (file)
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        return 0;
 }
 
-int datagram_send_ctl(struct net *net,
+int datagram_send_ctl(struct net *net, struct sock *sk,
                      struct msghdr *msg, struct flowi6 *fl6,
                      struct ipv6_txoptions *opt,
                      int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
 
                        if (addr_type != IPV6_ADDR_ANY) {
                                int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-                               if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+                               if (!inet_sk(sk)->transparent &&
+                                   !ipv6_chk_addr(net, &src_info->ipi6_addr,
                                                   strict ? dev : NULL, 0))
                                        err = -EINVAL;
                                else
index f3caf1b..5430394 100644 (file)
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
 }
 
 static struct ip6_flowlabel *
-fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
-         int optlen, int *err_p)
+fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
+         char __user *optval, int optlen, int *err_p)
 {
        struct ip6_flowlabel *fl = NULL;
        int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
                msg.msg_control = (void*)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
+               err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
                                        &junk, &junk);
                if (err)
                        goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
                        return -EINVAL;
 
-               fl = fl_create(net, &freq, optval, optlen, &err);
+               fl = fl_create(net, sk, &freq, optval, optlen, &err);
                if (fl == NULL)
                        return err;
                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
index 147ede3..2fbda5f 100644 (file)
@@ -475,7 +475,7 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
+               retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
                                         &junk);
                if (retv)
                        goto done;
index 2493948..e63c397 100644 (file)
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
        return skb;
 
 nlmsg_failure:
+       kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip6_queue: error creating packet message\n");
        return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
        struct nf_queue_entry *entry;
 
-       if (vmsg->value > NF_MAX_VERDICT)
+       if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
                return -EINVAL;
 
        entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
                break;
 
        case IPQM_VERDICT:
-               if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-                       status = -EINVAL;
-               else
-                       status = ipq_set_verdict(&pmsg->msg.verdict,
-                                                len - sizeof(*pmsg));
-                       break;
+               status = ipq_set_verdict(&pmsg->msg.verdict,
+                                        len - sizeof(*pmsg));
+               break;
        default:
                status = -EINVAL;
        }
index 6a79f30..343852e 100644 (file)
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-                                       &tclass, &dontfrag);
+               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                       &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 9e69eb0..1250f90 100644 (file)
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
        struct inet_peer *peer;
        u32 *p = NULL;
 
+       if (!(rt->dst.flags & DST_HOST))
+               return NULL;
+
        if (!rt->rt6i_peer)
                rt6_bind_peer(rt, 1);
 
@@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
        struct inet6_dev *idev = rt->rt6i_idev;
        struct inet_peer *peer = rt->rt6i_peer;
 
+       if (!(rt->dst.flags & DST_HOST))
+               dst_destroy_metrics_generic(dst);
+
        if (idev != NULL) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
@@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                        ipv6_addr_copy(&rt->rt6i_gateway, daddr);
                }
 
-               rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
-               rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
        struct rt6_info *rt = ip6_rt_copy(ort, daddr);
 
        if (rt) {
-               rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
-               rt->dst.flags |= DST_HOST;
                dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
        }
        return rt;
@@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                        neigh = NULL;
        }
 
-       rt->rt6i_idev     = idev;
+       rt->dst.flags |= DST_HOST;
+       rt->dst.output  = ip6_output;
        dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-       rt->dst.output  = ip6_output;
+
+       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+       rt->rt6i_dst.plen = 128;
+       rt->rt6i_idev     = idev;
 
        spin_lock_bh(&icmp6_dst_lock);
        rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg)
        if (rt->rt6i_dst.plen == 128)
               rt->dst.flags |= DST_HOST;
 
+       if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
+               u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               if (!metrics) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               dst_init_metrics(&rt->dst, metrics, 0);
+       }
 #ifdef CONFIG_IPV6_SUBTREES
        ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
        rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-       nrt->rt6i_dst.plen = 128;
-       nrt->dst.flags |= DST_HOST;
-
        ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
        dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
@@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
        if (rt) {
                rt->dst.input = ort->dst.input;
                rt->dst.output = ort->dst.output;
+               rt->dst.flags |= DST_HOST;
 
                ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
-               rt->rt6i_dst.plen = ort->rt6i_dst.plen;
+               rt->rt6i_dst.plen = 128;
                dst_copy_metrics(&rt->dst, &ort->dst);
                rt->dst.error = ort->dst.error;
                rt->rt6i_idev = ort->rt6i_idev;
index d1fb63f..3c9fa61 100644 (file)
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
        return tcp_v6_send_synack(sk, req, rvp);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
-{
-#ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies)
-               printk(KERN_INFO
-                      "TCPv6: Possible SYN flooding on port %d. "
-                      "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
-       else
-#endif
-               printk(KERN_INFO
-                      "TCPv6: Possible SYN flooding on port %d. "
-                      "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
-}
-
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
        kfree_skb(inet6_rsk(req)->pktopts);
@@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
-#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
-#else
-#define want_cookie 0
-#endif
 
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop;
 
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-               if (net_ratelimit())
-                       syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-               if (sysctl_tcp_syncookies)
-                       want_cookie = 1;
-               else
-#endif
-               goto drop;
+               want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
+               if (!want_cookie)
+                       goto drop;
        }
 
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                while (l-- > 0)
                        *c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
                want_cookie = 0;        /* not our kind of cookie */
-#endif
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
index 29213b5..bb95e8e 100644 (file)
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-                                       &tclass, &dontfrag);
+               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                       &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index d0b70da..2615ffc 100644 (file)
@@ -40,9 +40,9 @@ extern int  sysctl_slot_timeout;
 extern int  sysctl_fast_poll_increase;
 extern char sysctl_devname[];
 extern int  sysctl_max_baud_rate;
-extern int  sysctl_min_tx_turn_time;
-extern int  sysctl_max_tx_data_size;
-extern int  sysctl_max_tx_window;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
 extern int  sysctl_max_noreply_time;
 extern int  sysctl_warn_noreply_time;
 extern int  sysctl_lap_keepalive_time;
index 1b51bcf..4369f7f 100644 (file)
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
  * Default is 10us which means using the unmodified value given by the
  * peer except if it's 0 (0 is likely a bug in the other stack).
  */
-unsigned sysctl_min_tx_turn_time = 10;
+unsigned int sysctl_min_tx_turn_time = 10;
 /*
  * Maximum data size to be used in transmission in payload of LAP frame.
  * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
  * bytes frames or all negotiated frame sizes, but you can use the sysctl
  * to play with this value anyway.
  * Jean II */
-unsigned sysctl_max_tx_data_size = 2042;
+unsigned int sysctl_max_tx_data_size = 2042;
 /*
  * Maximum transmit window, i.e. number of LAP frames between turn-around.
  * This allow to override what the peer told us. Some peers are buggy and
  * don't always support what they tell us.
  * Jean II */
-unsigned sysctl_max_tx_window = 7;
+unsigned int sysctl_max_tx_window = 7;
 
 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
index 3db78b6..21070e9 100644 (file)
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
                BUG_ON(!sdata->bss);
 
                atomic_dec(&sdata->bss->num_sta_ps);
-               __sta_info_clear_tim_bit(sdata->bss, sta);
+               sta_info_clear_tim_bit(sta);
        }
 
        local->num_sta--;
index 2fd4565..31d56b2 100644 (file)
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
                break;
 
        case PPTP_WAN_ERROR_NOTIFY:
+       case PPTP_SET_LINK_INFO:
        case PPTP_ECHO_REQUEST:
        case PPTP_ECHO_REPLY:
                /* I don't have to explain these ;) */
index 37bf943..8235b86 100644 (file)
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
                        if (opsize < 2) /* "silly options" */
                                return;
                        if (opsize > length)
-                               break;  /* don't parse partial options */
+                               return; /* don't parse partial options */
 
                        if (opcode == TCPOPT_SACK_PERM
                            && opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
        BUG_ON(ptr == NULL);
 
        /* Fast path for timestamp-only option */
-       if (length == TCPOLEN_TSTAMP_ALIGNED*4
+       if (length == TCPOLEN_TSTAMP_ALIGNED
            && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
                                       | (TCPOPT_NOP << 16)
                                       | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
                        if (opsize < 2) /* "silly options" */
                                return;
                        if (opsize > length)
-                               break;  /* don't parse partial options */
+                               return; /* don't parse partial options */
 
                        if (opcode == TCPOPT_SACK
                            && opsize >= (TCPOLEN_SACK_BASE
index 00bd475..a80b0cb 100644 (file)
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
                return NULL;
 
        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
-       verdict = ntohl(vhdr->verdict);
-       if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
+       verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
+       if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
 }
index 76a0831..ed0db15 100644 (file)
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 {
        struct xt_rateest_match_info *info = par->matchinfo;
        struct xt_rateest *est1, *est2;
-       int ret = false;
+       int ret = -EINVAL;
 
        if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
                                     XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
        if (!est1)
                goto err1;
 
+       est2 = NULL;
        if (info->flags & XT_RATEEST_MATCH_REL) {
                est2 = xt_rateest_lookup(info->name2);
                if (!est2)
                        goto err2;
-       } else
-               est2 = NULL;
-
+       }
 
        info->est1 = est1;
        info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 err2:
        xt_rateest_put(est1);
 err1:
-       return -EINVAL;
+       return ret;
 }
 
 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
index be4505e..b014279 100644 (file)
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        struct rsvp_filter *f, **fp;
        struct rsvp_session *s, **sp;
        struct tc_rsvp_pinfo *pinfo = NULL;
-       struct nlattr *opt = tca[TCA_OPTIONS-1];
+       struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_RSVP_MAX + 1];
        struct tcf_exts e;
        unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        if (err < 0)
                return err;
 
-       err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+       err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
        if (err < 0)
                return err;
 
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 
                if (f->handle != handle && handle)
                        goto errout2;
-               if (tb[TCA_RSVP_CLASSID-1]) {
-                       f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+               if (tb[TCA_RSVP_CLASSID]) {
+                       f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
                        tcf_bind_filter(tp, &f->res, base);
                }
 
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        err = -EINVAL;
        if (handle)
                goto errout2;
-       if (tb[TCA_RSVP_DST-1] == NULL)
+       if (tb[TCA_RSVP_DST] == NULL)
                goto errout2;
 
        err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
                goto errout2;
 
        h2 = 16;
-       if (tb[TCA_RSVP_SRC-1]) {
-               memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+       if (tb[TCA_RSVP_SRC]) {
+               memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
                h2 = hash_src(f->src);
        }
-       if (tb[TCA_RSVP_PINFO-1]) {
-               pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+       if (tb[TCA_RSVP_PINFO]) {
+               pinfo = nla_data(tb[TCA_RSVP_PINFO]);
                f->spi = pinfo->spi;
                f->tunnelhdr = pinfo->tunnelhdr;
        }
-       if (tb[TCA_RSVP_CLASSID-1])
-               f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+       if (tb[TCA_RSVP_CLASSID])
+               f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 
-       dst = nla_data(tb[TCA_RSVP_DST-1]);
+       dst = nla_data(tb[TCA_RSVP_DST]);
        h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
 
        err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
        return -1;
 }
 
-static struct tcf_proto_ops RSVP_OPS = {
-       .next           =       NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
        .kind           =       RSVP_ID,
        .classify       =       rsvp_classify,
        .init           =       rsvp_init,
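
The cls_rsvp hunks drop the old rtattr-era "- 1" when indexing parsed attributes: nla_parse_nested() fills tb[] directly by attribute type, with slot 0 unused, so subtracting one reads a neighbouring attribute's slot (or the empty slot 0). A toy illustration of the off-by-one; the enum values below are stand-ins rather than the real TCA_RSVP_* numbering:

#include <stdio.h>
#include <string.h>

/* Toy model of a netlink attribute table: nla_parse_nested()-style parsers
 * fill tb[] indexed directly by attribute type, leaving tb[0] unused. */
enum { TCA_RSVP_UNSPEC, TCA_RSVP_CLASSID, TCA_RSVP_DST, TCA_RSVP_MAX = TCA_RSVP_DST };

int main(void)
{
    const char *tb[TCA_RSVP_MAX + 1];

    memset(tb, 0, sizeof(tb));
    tb[TCA_RSVP_CLASSID] = "classid attr";   /* what the parser produces */
    tb[TCA_RSVP_DST]     = "dst attr";

    /* The old "type - 1" convention, applied to a table filled by type,
     * reads the wrong slot (here the always-empty slot 0). */
    printf("tb[TCA_RSVP_CLASSID - 1] = %s\n",
           tb[TCA_RSVP_CLASSID - 1] ? tb[TCA_RSVP_CLASSID - 1] : "(nothing)");
    printf("tb[TCA_RSVP_CLASSID]     = %s\n", tb[TCA_RSVP_CLASSID]);
    return 0;
}
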
index 167c880..76388b0 100644 (file)
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_PURGE_ASCONF_QUEUE:
                        sctp_asconf_queue_teardown(asoc);
                        break;
+
+               case SCTP_CMD_SET_ASOC:
+                       asoc = cmd->obj.asoc;
+                       break;
+
                default:
                        pr_warn("Impossible command: %u, %p\n",
                                cmd->verb, cmd->obj.ptr);
index 49b847b..a0f31e6 100644 (file)
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
+       /* Restore association pointer to provide SCTP command interpreter
+        * with a valid context in case it needs to manipulate
+        * the queues */
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+                        SCTP_ASOC((struct sctp_association *)asoc));
+
        return retval;
 
 nomem:
index 02751db..68a471b 100644 (file)
@@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
                return;
        }
 
+       chan->beacon_found = false;
        chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
        chan->max_antenna_gain = min(chan->orig_mag,
                (int) MBI_TO_DBI(power_rule->max_antenna_gain));
index b7b6ff8..dec0fa2 100644 (file)
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
                             i++, j++)
                                request->channels[i] =
                                        &wdev->wiphy->bands[band]->channels[j];
+                       request->rates[band] =
+                               (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
                }
        }
        request->n_channels = n_channels;
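
The sme.c fix populates request->rates for every band before the connect scan goes out; left at zero, the probe requests advertise no supported rates. The expression simply builds an all-ones mask over the band's bitrates, as in this sketch (valid while n_bitrates stays below 32, which holds for the legacy rate tables involved here):

#include <stdint.h>
#include <stdio.h>

/* Build a mask with the lowest n bits set, as the scan request does for
 * "advertise every bitrate this band supports". */
static uint32_t all_rates_mask(unsigned int n_bitrates)
{
    return (1u << n_bitrates) - 1;
}

int main(void)
{
    printf("12 rates -> 0x%03x\n", all_rates_mask(12)); /* 0xfff */
    printf(" 8 rates -> 0x%03x\n", all_rates_mask(8));  /* 0x0ff */
    return 0;
}
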
index a026b0e..54a0dc2 100644 (file)
@@ -212,6 +212,11 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
+               if (async && x->repl->check(x, skb, seq)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+                       goto drop_unlock;
+               }
+
                x->repl->advance(x, seq);
 
                x->curlft.bytes += skb->len;
index f9123f0..32b02d9 100644 (file)
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(enable, "Enable FM801 soundcard.");
 module_param_array(tea575x_tuner, int, NULL, 0444);
 MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only).");
 
+#define TUNER_DISABLED         (1<<3)
 #define TUNER_ONLY             (1<<4)
 #define TUNER_TYPE_MASK                (~TUNER_ONLY & 0xFFFF)
 
@@ -1150,7 +1151,8 @@ static int snd_fm801_free(struct fm801 *chip)
 
       __end_hw:
 #ifdef CONFIG_SND_FM801_TEA575X_BOOL
-       snd_tea575x_exit(&chip->tea);
+       if (!(chip->tea575x_tuner & TUNER_DISABLED))
+               snd_tea575x_exit(&chip->tea);
 #endif
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
@@ -1236,7 +1238,6 @@ static int __devinit snd_fm801_create(struct snd_card *card,
            (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
                if (snd_tea575x_init(&chip->tea)) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
-                       snd_fm801_free(chip);
                        return -ENODEV;
                }
        } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
@@ -1251,11 +1252,15 @@ static int __devinit snd_fm801_create(struct snd_card *card,
                }
                if (tea575x_tuner == 4) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
-                       snd_fm801_free(chip);
-                       return -ENODEV;
+                       chip->tea575x_tuner = TUNER_DISABLED;
                }
        }
-       strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
+       if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
+               strlcpy(chip->tea.card,
+                       snd_fm801_tea575x_gpios[(tea575x_tuner &
+                                                TUNER_TYPE_MASK) - 1].name,
+                       sizeof(chip->tea.card));
+       }
 #endif
 
        *rchip = chip;
index 7cabd73..0503c99 100644 (file)
@@ -168,7 +168,7 @@ struct alc_spec {
        unsigned int auto_mic_valid_imux:1;     /* valid imux for auto-mic */
        unsigned int automute:1;        /* HP automute enabled */
        unsigned int detect_line:1;     /* Line-out detection enabled */
-       unsigned int automute_lines:1;  /* automute line-out as well */
+       unsigned int automute_lines:1;  /* automute line-out as well; NOP when automute_hp_lo isn't set */
        unsigned int automute_hp_lo:1;  /* both HP and LO available */
 
        /* other flags */
@@ -551,7 +551,7 @@ static void update_speakers(struct hda_codec *codec)
        if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||
            spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])
                return;
-       if (!spec->automute_lines || !spec->automute)
+       if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines))
                on = 0;
        else
                on = spec->jack_present;
@@ -803,7 +803,7 @@ static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,
        unsigned int val;
        if (!spec->automute)
                val = 0;
-       else if (!spec->automute_lines)
+       else if (!spec->automute_hp_lo || !spec->automute_lines)
                val = 1;
        else
                val = 2;
@@ -824,7 +824,8 @@ static int alc_automute_mode_put(struct snd_kcontrol *kcontrol,
                spec->automute = 0;
                break;
        case 1:
-               if (spec->automute && !spec->automute_lines)
+               if (spec->automute &&
+                   (!spec->automute_hp_lo || !spec->automute_lines))
                        return 0;
                spec->automute = 1;
                spec->automute_lines = 0;
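
The Realtek hunks make automute_lines meaningful only when automute_hp_lo says the codec really has both headphone and line-out jacks; otherwise a stale bit could mute, or report, a line-out that does not exist. A compact sketch of the resulting "Auto-Mute Mode" value, mirroring the logic of alc_automute_mode_get() after this change:

#include <stdio.h>

/* 0 = disabled, 1 = speaker-only muting, 2 = mute line-out as well.
 * automute_lines only matters when automute_hp_lo says both HP and
 * line-out jacks exist. */
static int automute_mode(int automute, int automute_hp_lo, int automute_lines)
{
    if (!automute)
        return 0;
    if (!automute_hp_lo || !automute_lines)
        return 1;
    return 2;
}

int main(void)
{
    /* Before the fix, a stale automute_lines bit on an HP-only codec
     * could report mode 2 even though there is no line-out to mute. */
    printf("HP-only codec, automute on : mode %d\n", automute_mode(1, 0, 1));
    printf("HP+LO codec, full automute : mode %d\n", automute_mode(1, 1, 1));
    return 0;
}
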
index 5145b66..1b7c114 100644 (file)
@@ -6573,6 +6573,7 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
        { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
        { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
index 732a247..b94eb7e 100644 (file)
@@ -128,7 +128,7 @@ static int snd_ad73311_configure(void)
        return 0;
 }
 
-static int bf5xx_probe(struct platform_device *pdev)
+static int bf5xx_probe(struct snd_soc_card *card)
 {
        int err;
        if (gpio_request(GPIO_SE, "AD73311_SE")) {
index 1725550..d2c315f 100644 (file)
@@ -3479,31 +3479,6 @@ int wm8962_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
 }
 EXPORT_SYMBOL_GPL(wm8962_mic_detect);
 
-#ifdef CONFIG_PM
-static int wm8962_resume(struct snd_soc_codec *codec)
-{
-       u16 *reg_cache = codec->reg_cache;
-       int i;
-
-       /* Restore the registers */
-       for (i = 1; i < codec->driver->reg_cache_size; i++) {
-               switch (i) {
-               case WM8962_SOFTWARE_RESET:
-                       continue;
-               default:
-                       break;
-               }
-
-               if (reg_cache[i] != wm8962_reg[i])
-                       snd_soc_write(codec, i, reg_cache[i]);
-       }
-
-       return 0;
-}
-#else
-#define wm8962_resume NULL
-#endif
-
 #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
 static int beep_rates[] = {
        500, 1000, 2000, 4000,
@@ -4015,7 +3990,6 @@ static int wm8962_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
        .probe =        wm8962_probe,
        .remove =       wm8962_remove,
-       .resume =       wm8962_resume,
        .set_bias_level = wm8962_set_bias_level,
        .reg_cache_size = WM8962_MAX_REGISTER + 1,
        .reg_word_size = sizeof(u16),
index d2ef014..ef69f5a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/platform_device.h>
+#include <linux/ctype.h>
 #include <linux/slab.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
@@ -1434,9 +1435,20 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
                 "%s", card->name);
        snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
                 "%s", card->long_name ? card->long_name : card->name);
-       if (card->driver_name)
-               strlcpy(card->snd_card->driver, card->driver_name,
-                       sizeof(card->snd_card->driver));
+       snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
+                "%s", card->driver_name ? card->driver_name : card->name);
+       for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
+               switch (card->snd_card->driver[i]) {
+               case '_':
+               case '-':
+               case '\0':
+                       break;
+               default:
+                       if (!isalnum(card->snd_card->driver[i]))
+                               card->snd_card->driver[i] = '_';
+                       break;
+               }
+       }
 
        if (card->late_probe) {
                ret = card->late_probe(card);
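
soc-core now always derives the ALSA driver string (falling back to the card name) and scrubs it so only alphanumerics, '_' and '-' survive; the driver field is used by alsa-lib for card-specific configuration matching, where stray spaces or punctuation are awkward. A stand-alone sketch of the scrubbing step:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Keep alphanumerics, '_' and '-', and squash anything else (spaces,
 * dots, ...) to '_' so the result is a well-formed driver identifier. */
static void sanitize_driver_name(char *s, size_t size)
{
    size_t i;

    for (i = 0; i < size; i++) {
        switch (s[i]) {
        case '_':
        case '-':
        case '\0':
            break;
        default:
            if (!isalnum((unsigned char)s[i]))
                s[i] = '_';
            break;
        }
    }
}

int main(void)
{
    char drv[16] = "";

    snprintf(drv, sizeof(drv), "%s", "My Board v1.2");
    sanitize_driver_name(drv, sizeof(drv));
    printf("%s\n", drv);   /* My_Board_v1_2 */
    return 0;
}
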
index 781d9e6..ed120ca 100644 (file)
@@ -532,6 +532,7 @@ snd_usb_audio_probe(struct usb_device *dev,
  __error:
        if (chip && !chip->num_interfaces)
                snd_card_free(chip->card);
+       chip->probing = 0;
        mutex_unlock(&register_mutex);
  __err_val:
        return NULL;
index 3b8f7b8..e9d5c27 100644 (file)
@@ -30,6 +30,8 @@ endif
 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
 #
 # Define NO_DWARF if you do not want debug-info analysis feature at all.
+#
+# Define WERROR=0 to disable treating any warnings as errors.
 
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
@@ -63,6 +65,11 @@ ifeq ($(ARCH),x86_64)
        endif
 endif
 
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+       CFLAGS_WERROR := -Werror
+endif
+
 #
 # Include saner warnings here, which can catch bugs:
 #
@@ -95,7 +102,7 @@ ifndef PERF_DEBUG
   CFLAGS_OPTIMIZE = -O6
 endif
 
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
index 6b0519f..f4c3fbe 100644 (file)
@@ -161,6 +161,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */
 
+       attr->disabled          = 1;
        attr->inherit           = !no_inherit;
        attr->read_format       = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                  PERF_FORMAT_TOTAL_TIME_RUNNING |
@@ -671,6 +672,8 @@ static int __cmd_record(int argc, const char **argv)
                }
        }
 
+       perf_evlist__enable(evsel_list);
+
        /*
         * Let the child rip
         */
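
perf record now opens every counter with attr->disabled = 1 and only enables the whole evlist once the output file and mmap rings are ready, so nothing is counted or sampled during setup. The same create-disabled-then-ioctl-enable pattern in a minimal stand-alone program, close to the perf_event_open(2) example but trimmed down:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
{
    return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
    struct perf_event_attr attr;
    uint64_t count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;
    attr.disabled = 1;              /* created quiescent, like the patched perf record */
    attr.exclude_kernel = 1;
    attr.exclude_hv = 1;

    fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    /* ... this is where perf record sets up its mmap rings and output ... */

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* counting starts only now */

    for (volatile int i = 0; i < 1000000; i++)
        ;                                   /* stand-in for the real workload */

    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
    if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
        printf("instructions: %llu\n", (unsigned long long)count);
    close(fd);
    return 0;
}
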
index 55f4c76..efe696f 100644 (file)
@@ -561,7 +561,7 @@ static int test__basic_mmap(void)
                }
 
                err = perf_event__parse_sample(event, attr.sample_type, sample_size,
-                                              false, &sample);
+                                              false, &sample, false);
                if (err) {
                        pr_err("Can't parse sample, err = %d\n", err);
                        goto out_munmap;
index a43433f..d28013b 100644 (file)
@@ -191,7 +191,8 @@ static void __zero_source_counters(struct sym_entry *syme)
        symbol__annotate_zero_histograms(sym);
 }
 
-static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
+static void record_precise_ip(struct sym_entry *syme, struct map *map,
+                             int counter, u64 ip)
 {
        struct annotation *notes;
        struct symbol *sym;
@@ -205,8 +206,8 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
        if (pthread_mutex_trylock(&notes->lock))
                return;
 
-       ip = syme->map->map_ip(syme->map, ip);
-       symbol__inc_addr_samples(sym, syme->map, counter, ip);
+       ip = map->map_ip(map, ip);
+       symbol__inc_addr_samples(sym, map, counter, ip);
 
        pthread_mutex_unlock(&notes->lock);
 }
@@ -810,7 +811,7 @@ static void perf_event__process_sample(const union perf_event *event,
                evsel = perf_evlist__id2evsel(top.evlist, sample->id);
                assert(evsel != NULL);
                syme->count[evsel->idx]++;
-               record_precise_ip(syme, evsel->idx, ip);
+               record_precise_ip(syme, al.map, evsel->idx, ip);
                pthread_mutex_lock(&top.active_symbols_lock);
                if (list_empty(&syme->node) || !syme->node.next) {
                        static bool first = true;
index 3c1b8a6..437f8ca 100644 (file)
@@ -169,12 +169,17 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
                        continue;
                pbf += n + 3;
                if (*pbf == 'x') { /* vm_exec */
+                       char anonstr[] = "//anon\n";
                        char *execname = strchr(bf, '/');
 
                        /* Catch VDSO */
                        if (execname == NULL)
                                execname = strstr(bf, "[vdso]");
 
+                       /* Catch anonymous mmaps */
+                       if ((execname == NULL) && !strstr(bf, "["))
+                               execname = anonstr;
+
                        if (execname == NULL)
                                continue;
 
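The synthesizer used to skip executable mappings that had neither a backing file nor a [vdso]-style marker, so samples landing in JIT-emitted code could never be resolved to a map; such mappings are now reported under the //anon placeholder. A stand-alone sketch applying the same classification to the current process's own /proc/self/maps:

#include <stdio.h>
#include <string.h>

/* Classify one /proc/<pid>/maps line the way the patched synthesizer does:
 * executable mappings with neither a path nor a "[...]" pseudo-name are
 * reported as "//anon" instead of being skipped. */
int main(void)
{
    char line[1024];
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f)
        return 1;

    while (fgets(line, sizeof(line), f)) {
        char prot[5] = "";
        const char *name;

        if (sscanf(line, "%*lx-%*lx %4s", prot) != 1 || prot[2] != 'x')
            continue;                         /* only executable mappings */

        name = strchr(line, '/');             /* file-backed mapping? */
        if (!name)
            name = strstr(line, "[");         /* [vdso], [vsyscall], ... */
        if (!name)
            name = "//anon\n";                /* anonymous executable mapping */

        printf("exec map: %s", name);
    }
    fclose(f);
    return 0;
}
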
index 1d7f664..357a85b 100644 (file)
@@ -186,6 +186,6 @@ const char *perf_event__name(unsigned int id);
 
 int perf_event__parse_sample(const union perf_event *event, u64 type,
                             int sample_size, bool sample_id_all,
-                            struct perf_sample *sample);
+                            struct perf_sample *sample, bool swapped);
 
 #endif /* __PERF_RECORD_H */
index c12bd47..72e9f48 100644 (file)
@@ -113,6 +113,19 @@ void perf_evlist__disable(struct perf_evlist *evlist)
        }
 }
 
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+       int cpu, thread;
+       struct perf_evsel *pos;
+
+       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+               list_for_each_entry(pos, &evlist->entries, node) {
+                       for (thread = 0; thread < evlist->threads->nr; thread++)
+                               ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+               }
+       }
+}
+
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
        int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
index ce85ae9..f349150 100644 (file)
@@ -54,6 +54,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
 void perf_evlist__disable(struct perf_evlist *evlist);
+void perf_evlist__enable(struct perf_evlist *evlist);
 
 static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
                                         struct cpu_map *cpus,
index a03a36b..c5748c5 100644 (file)
@@ -7,6 +7,8 @@
  * Released under the GPL v2. (and only v2, not any later version)
  */
 
+#include <byteswap.h>
+#include "asm/bug.h"
 #include "evsel.h"
 #include "evlist.h"
 #include "util.h"
@@ -342,10 +344,20 @@ static bool sample_overlap(const union perf_event *event,
 
 int perf_event__parse_sample(const union perf_event *event, u64 type,
                             int sample_size, bool sample_id_all,
-                            struct perf_sample *data)
+                            struct perf_sample *data, bool swapped)
 {
        const u64 *array;
 
+       /*
+        * used for cross-endian analysis. See git commit 65014ab3
+        * for why this goofiness is needed.
+        */
+       union {
+               u64 val64;
+               u32 val32[2];
+       } u;
+
+
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
 
@@ -366,9 +378,16 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_TID) {
-               u32 *p = (u32 *)array;
-               data->pid = p[0];
-               data->tid = p[1];
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+                       u.val32[1] = bswap_32(u.val32[1]);
+               }
+
+               data->pid = u.val32[0];
+               data->tid = u.val32[1];
                array++;
        }
 
@@ -395,8 +414,15 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_CPU) {
-               u32 *p = (u32 *)array;
-               data->cpu = *p;
+
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+               }
+
+               data->cpu = u.val32[0];
                array++;
        }
 
@@ -423,18 +449,24 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_RAW) {
-               u32 *p = (u32 *)array;
+               u.val64 = *array;
+               if (WARN_ONCE(swapped,
+                             "Endianness of raw data not corrected!\n")) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+                       u.val32[1] = bswap_32(u.val32[1]);
+               }
 
                if (sample_overlap(event, array, sizeof(u32)))
                        return -EFAULT;
 
-               data->raw_size = *p;
-               p++;
+               data->raw_size = u.val32[0];
 
-               if (sample_overlap(event, p, data->raw_size))
+               if (sample_overlap(event, &u.val32[1], data->raw_size))
                        return -EFAULT;
 
-               data->raw_data = p;
+               data->raw_data = &u.val32[1];
        }
 
        return 0;
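
The evsel.c hunks handle cross-endian perf.data files: the session layer already byte-swaps every u64 in the sample, which scrambles fields that are really two u32s packed into one slot (pid/tid, cpu, raw size), so the parser undoes the 64-bit swap and then swaps each 32-bit half. A stand-alone demonstration of that recovery; the pid/tid values are arbitrary:

#include <byteswap.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

union u64_as_u32s {
    uint64_t val64;
    uint32_t val32[2];
};

int main(void)
{
    union u64_as_u32s u;
    uint32_t pid = 0x00001234, tid = 0x00005678;

    /* Lay the bytes out as a foreign-endian writer would have stored them,
     * then apply the reader's blanket 64-bit swap to reach the broken state. */
    u.val32[0] = bswap_32(pid);
    u.val32[1] = bswap_32(tid);
    u.val64 = bswap_64(u.val64);

    /* The fix: undo the u64 swap, then swap the individual u32 halves. */
    u.val64 = bswap_64(u.val64);
    u.val32[0] = bswap_32(u.val32[0]);
    u.val32[1] = bswap_32(u.val32[1]);

    printf("pid=%#" PRIx32 " tid=%#" PRIx32 "\n", u.val32[0], u.val32[1]);
    return 0;
}
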
index 555fc38..5d73262 100644 (file)
@@ -659,7 +659,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
                if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
                        ret = -ENOENT;
        }
-       if (ret == 0)
+       if (ret >= 0)
                ret = convert_variable(&vr_die, pf);
 
        if (ret < 0)
index cbc8f21..7624324 100644 (file)
@@ -803,7 +803,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                first = list_entry(evlist->entries.next, struct perf_evsel, node);
                err = perf_event__parse_sample(event, first->attr.sample_type,
                                               perf_evsel__sample_size(first),
-                                              sample_id_all, &pevent->sample);
+                                              sample_id_all, &pevent->sample, false);
                if (err)
                        return PyErr_Format(PyExc_OSError,
                                            "perf: can't parse sample, err=%d", err);
index 170601e..974d0cb 100644 (file)
@@ -162,7 +162,8 @@ static inline int perf_session__parse_sample(struct perf_session *session,
 {
        return perf_event__parse_sample(event, session->sample_type,
                                        session->sample_size,
-                                       session->sample_id_all, sample);
+                                       session->sample_id_all, sample,
+                                       session->header.needs_swap);
 }
 
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
index 401e220..1ee8f1e 100644 (file)
@@ -151,11 +151,17 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
 {
        u64 ip_l, ip_r;
 
+       if (!left->ms.sym && !right->ms.sym)
+               return right->level - left->level;
+
+       if (!left->ms.sym || !right->ms.sym)
+               return cmp_null(left->ms.sym, right->ms.sym);
+
        if (left->ms.sym == right->ms.sym)
                return 0;
 
-       ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
-       ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
+       ip_l = left->ms.sym->start;
+       ip_r = right->ms.sym->start;
 
        return (int64_t)(ip_r - ip_l);
 }
index 469c026..40eeaf0 100644 (file)
@@ -74,16 +74,104 @@ static void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
 
 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
 {
+       symbol_type = toupper(symbol_type);
+
        switch (map_type) {
        case MAP__FUNCTION:
                return symbol_type == 'T' || symbol_type == 'W';
        case MAP__VARIABLE:
-               return symbol_type == 'D' || symbol_type == 'd';
+               return symbol_type == 'D';
        default:
                return false;
        }
 }
 
+static int prefix_underscores_count(const char *str)
+{
+       const char *tail = str;
+
+       while (*tail == '_')
+               tail++;
+
+       return tail - str;
+}
+
+#define SYMBOL_A 0
+#define SYMBOL_B 1
+
+static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
+{
+       s64 a;
+       s64 b;
+
+       /* Prefer a symbol with non-zero length */
+       a = syma->end - syma->start;
+       b = symb->end - symb->start;
+       if ((b == 0) && (a > 0))
+               return SYMBOL_A;
+       else if ((a == 0) && (b > 0))
+               return SYMBOL_B;
+
+       /* Prefer a non-weak symbol over a weak one */
+       a = syma->binding == STB_WEAK;
+       b = symb->binding == STB_WEAK;
+       if (b && !a)
+               return SYMBOL_A;
+       if (a && !b)
+               return SYMBOL_B;
+
+       /* Prefer a global symbol over a non-global one */
+       a = syma->binding == STB_GLOBAL;
+       b = symb->binding == STB_GLOBAL;
+       if (a && !b)
+               return SYMBOL_A;
+       if (b && !a)
+               return SYMBOL_B;
+
+       /* Prefer a symbol with less underscores */
+       a = prefix_underscores_count(syma->name);
+       b = prefix_underscores_count(symb->name);
+       if (b > a)
+               return SYMBOL_A;
+       else if (a > b)
+               return SYMBOL_B;
+
+       /* If all else fails, choose the symbol with the longest name */
+       if (strlen(syma->name) >= strlen(symb->name))
+               return SYMBOL_A;
+       else
+               return SYMBOL_B;
+}
+
+static void symbols__fixup_duplicate(struct rb_root *symbols)
+{
+       struct rb_node *nd;
+       struct symbol *curr, *next;
+
+       nd = rb_first(symbols);
+
+       while (nd) {
+               curr = rb_entry(nd, struct symbol, rb_node);
+again:
+               nd = rb_next(&curr->rb_node);
+               next = rb_entry(nd, struct symbol, rb_node);
+
+               if (!nd)
+                       break;
+
+               if (curr->start != next->start)
+                       continue;
+
+               if (choose_best_symbol(curr, next) == SYMBOL_A) {
+                       rb_erase(&next->rb_node, symbols);
+                       goto again;
+               } else {
+                       nd = rb_next(&curr->rb_node);
+                       rb_erase(&curr->rb_node, symbols);
+               }
+       }
+}
+
 static void symbols__fixup_end(struct rb_root *symbols)
 {
        struct rb_node *nd, *prevnd = rb_first(symbols);
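
choose_best_symbol() settles which of two symbols sharing an address survives: prefer the one with a non-zero size, then a non-weak binding, then a global binding, then fewer leading underscores, and finally the longer name. A toy chooser following the same preference order; bindings are modelled with a plain enum rather than the ELF STB_* values:

#include <stdio.h>
#include <string.h>

enum binding { LOCAL, GLOBAL, WEAK };

struct sym {
    const char *name;
    unsigned long start, end;
    enum binding bind;
};

static int underscores(const char *s)
{
    int n = 0;

    while (s[n] == '_')
        n++;
    return n;
}

/* Returns the symbol that should be kept out of two sharing an address. */
static const struct sym *choose(const struct sym *a, const struct sym *b)
{
    if ((a->end - a->start) && !(b->end - b->start)) return a;
    if ((b->end - b->start) && !(a->end - a->start)) return b;
    if (a->bind != WEAK && b->bind == WEAK) return a;
    if (b->bind != WEAK && a->bind == WEAK) return b;
    if (a->bind == GLOBAL && b->bind != GLOBAL) return a;
    if (b->bind == GLOBAL && a->bind != GLOBAL) return b;
    if (underscores(a->name) < underscores(b->name)) return a;
    if (underscores(b->name) < underscores(a->name)) return b;
    return strlen(a->name) >= strlen(b->name) ? a : b;
}

int main(void)
{
    struct sym a = { "__memcpy", 0x1000, 0x1000, WEAK };
    struct sym b = { "memcpy",   0x1000, 0x1080, GLOBAL };

    printf("kept: %s\n", choose(&a, &b)->name);   /* memcpy */
    return 0;
}
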
@@ -438,18 +526,11 @@ int kallsyms__parse(const char *filename, void *arg,
        char *line = NULL;
        size_t n;
        int err = -1;
-       u64 prev_start = 0;
-       char prev_symbol_type = 0;
-       char *prev_symbol_name;
        FILE *file = fopen(filename, "r");
 
        if (file == NULL)
                goto out_failure;
 
-       prev_symbol_name = malloc(KSYM_NAME_LEN);
-       if (prev_symbol_name == NULL)
-               goto out_close;
-
        err = 0;
 
        while (!feof(file)) {
@@ -470,7 +551,7 @@ int kallsyms__parse(const char *filename, void *arg,
                if (len + 2 >= line_len)
                        continue;
 
-               symbol_type = toupper(line[len]);
+               symbol_type = line[len];
                len += 2;
                symbol_name = line + len;
                len = line_len - len;
@@ -480,24 +561,18 @@ int kallsyms__parse(const char *filename, void *arg,
                        break;
                }
 
-               if (prev_symbol_type) {
-                       u64 end = start;
-                       if (end != prev_start)
-                               --end;
-                       err = process_symbol(arg, prev_symbol_name,
-                                            prev_symbol_type, prev_start, end);
-                       if (err)
-                               break;
-               }
-
-               memcpy(prev_symbol_name, symbol_name, len + 1);
-               prev_symbol_type = symbol_type;
-               prev_start = start;
+               /*
+                * module symbols are not sorted so we add all
+                * symbols with zero length and rely on
+                * symbols__fixup_end() to fix it up.
+                */
+               err = process_symbol(arg, symbol_name,
+                                    symbol_type, start, start);
+               if (err)
+                       break;
        }
 
-       free(prev_symbol_name);
        free(line);
-out_close:
        fclose(file);
        return err;
 
@@ -703,6 +778,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
        if (dso__load_all_kallsyms(dso, filename, map) < 0)
                return -1;
 
+       symbols__fixup_duplicate(&dso->symbols[map->type]);
+       symbols__fixup_end(&dso->symbols[map->type]);
+
        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
                dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
        else
@@ -1092,8 +1170,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
        if (dso->has_build_id) {
                u8 build_id[BUILD_ID_SIZE];
 
-               if (elf_read_build_id(elf, build_id,
-                                     BUILD_ID_SIZE) != BUILD_ID_SIZE)
+               if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
                        goto out_elf_end;
 
                if (!dso__build_id_equal(dso, build_id))
@@ -1111,6 +1188,8 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
        }
 
        opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
+       if (opdshdr.sh_type != SHT_PROGBITS)
+               opdsec = NULL;
        if (opdsec)
                opddata = elf_rawdata(opdsec, NULL);
 
@@ -1276,6 +1355,7 @@ new_symbol:
         * For misannotated, zeroed, ASM function sizes.
         */
        if (nr > 0) {
+               symbols__fixup_duplicate(&dso->symbols[map->type]);
                symbols__fixup_end(&dso->symbols[map->type]);
                if (kmap) {
                        /*
@@ -1362,8 +1442,8 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
        ptr = data->d_buf;
        while (ptr < (data->d_buf + data->d_size)) {
                GElf_Nhdr *nhdr = ptr;
-               int namesz = NOTE_ALIGN(nhdr->n_namesz),
-                   descsz = NOTE_ALIGN(nhdr->n_descsz);
+               size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
+                      descsz = NOTE_ALIGN(nhdr->n_descsz);
                const char *name;
 
                ptr += sizeof(*nhdr);
@@ -1372,8 +1452,10 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
                if (nhdr->n_type == NT_GNU_BUILD_ID &&
                    nhdr->n_namesz == sizeof("GNU")) {
                        if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
-                               memcpy(bf, ptr, BUILD_ID_SIZE);
-                               err = BUILD_ID_SIZE;
+                               size_t sz = min(size, descsz);
+                               memcpy(bf, ptr, sz);
+                               memset(bf + sz, 0, size - sz);
+                               err = descsz;
                                break;
                        }
                }
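
elf_read_build_id() no longer assumes the note descriptor is exactly BUILD_ID_SIZE bytes: it copies at most the buffer size, zero-fills the remainder, and reports the descriptor's real length so callers can decide whether that is acceptable. A small sketch of the clamped copy; 20 bytes matches the SHA-1 build-id size used here:

#include <stdio.h>
#include <string.h>

/* Clamp to the smaller of the two sizes and zero-fill the tail, so a short
 * (or oversized) descriptor can never overrun the buffer or leave stale
 * bytes behind. */
static size_t copy_build_id(void *dst, size_t dst_size,
                            const void *desc, size_t descsz)
{
    size_t sz = descsz < dst_size ? descsz : dst_size;

    memcpy(dst, desc, sz);
    memset((char *)dst + sz, 0, dst_size - sz);
    return descsz;                /* caller can compare against what it expected */
}

int main(void)
{
    unsigned char id[20];
    const unsigned char short_desc[8] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };
    size_t i, n = copy_build_id(id, sizeof(id), short_desc, sizeof(short_desc));

    printf("descsz=%zu id=", n);
    for (i = 0; i < sizeof(id); i++)
        printf("%02x", id[i]);
    printf("\n");
    return 0;
}
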
@@ -1425,7 +1507,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
        while (1) {
                char bf[BUFSIZ];
                GElf_Nhdr nhdr;
-               int namesz, descsz;
+               size_t namesz, descsz;
 
                if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
                        break;
@@ -1434,15 +1516,16 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
                descsz = NOTE_ALIGN(nhdr.n_descsz);
                if (nhdr.n_type == NT_GNU_BUILD_ID &&
                    nhdr.n_namesz == sizeof("GNU")) {
-                       if (read(fd, bf, namesz) != namesz)
+                       if (read(fd, bf, namesz) != (ssize_t)namesz)
                                break;
                        if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
-                               if (read(fd, build_id,
-                                   BUILD_ID_SIZE) == BUILD_ID_SIZE) {
+                               size_t sz = min(descsz, size);
+                               if (read(fd, build_id, sz) == (ssize_t)sz) {
+                                       memset(build_id + sz, 0, size - sz);
                                        err = 0;
                                        break;
                                }
-                       } else if (read(fd, bf, descsz) != descsz)
+                       } else if (read(fd, bf, descsz) != (ssize_t)descsz)
                                break;
                } else {
                        int n = namesz + descsz;