Merge tag 'clk-fixes-for-linus' of git://git.linaro.org/people/mike.turquette/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 May 2014 20:59:18 +0000 (13:59 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 May 2014 20:59:18 +0000 (13:59 -0700)
Pull clock fixes from Mike Turquette:
 "Small number of user-visible regression fixes for clock drivers.

  There is a memory leak fix for an ST platform, an infinite loop of
  doom fix for the recent changes to the basic clock divider (hopefully
  the last fix for those recent changes) and some Tegra PLL changes
  which keep PCI from being hosed on that platform"
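
[Editor's note: for context on the divider item above, the regression was a
round-up helper that could hand back a divider not actually reachable from
the table, letting the caller's search loop retry the same value forever.
A minimal sketch of that failure mode and its cure, in hypothetical code
with assumed names -- not the actual drivers/clk/clk-divider.c
implementation:

    #include <limits.h>
    #include <stdio.h>

    /*
     * Hypothetical sketch: round 'div' up to the nearest entry in a
     * zero-terminated divider table.  If no entry is >= div, returning
     * 'div' unchanged would let a caller that steps through candidate
     * dividers via this helper spin forever on the same value;
     * returning INT_MAX instead gives the caller a bound to stop on.
     */
    static int round_up_table(const int *table, int div)
    {
            int up = INT_MAX;

            for (; *table; table++)
                    if (*table >= div && *table < up)
                            up = *table;

            return up;
    }

    int main(void)
    {
            static const int table[] = { 1, 2, 4, 8, 0 };

            printf("%d\n", round_up_table(table, 3));   /* 4 */
            printf("%d\n", round_up_table(table, 16));  /* INT_MAX: nothing fits */
            return 0;
    }]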

* tag 'clk-fixes-for-linus' of git://git.linaro.org/people/mike.turquette/linux:
  clk: st: Fix memory leak
  clk: divider: Fix table round up function
  clk: tegra: Fix enabling of PLLE
  clk: tegra: Introduce divider mask and shift helpers
  clk: tegra: Fix PLLE programming
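
[Editor's note: the "divider mask and shift helpers" item refers to the
common pattern of deriving a register mask from a field's width and bit
offset instead of open-coding constants at each use site. A generic sketch
of that pattern with assumed names -- not the actual drivers/clk/tegra
code:

    #include <stdint.h>
    #include <stdio.h>

    /* A divider field 'width' bits wide at bit offset 'shift'. */
    #define div_mask(width)            ((1u << (width)) - 1u)
    #define div_get(reg, shift, width) (((reg) >> (shift)) & div_mask(width))
    #define div_set(reg, shift, width, val) \
            (((reg) & ~(div_mask(width) << (shift))) | \
             (((uint32_t)(val) & div_mask(width)) << (shift)))

    int main(void)
    {
            uint32_t reg = 0xffffffffu;

            /* program a 5-bit divider field at bit 8 */
            reg = div_set(reg, 8, 5, 0x13);
            printf("reg=0x%08x field=%u\n", reg, div_get(reg, 8, 5));
            return 0;
    }]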

226 files changed:
Documentation/DocBook/drm.tmpl
Documentation/DocBook/media/Makefile
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/email-clients.txt
Documentation/filesystems/proc.txt
Documentation/hwmon/sysfs-interface
Documentation/java.txt
Documentation/networking/filter.txt
Documentation/networking/packet_mmap.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-380.dtsi
arch/arm/boot/dts/armada-385.dtsi
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos5250-arndale.dts
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/configs/exynos_defconfig
arch/arm/include/asm/trusted_foundations.h
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-exynos/firmware.c
arch/arm/mach-imx/devices/platform-ipu-core.c
arch/arm/mach-mvebu/mvebu-soc-id.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/cclock3xxx_data.c
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/plat-omap/dma.c
arch/parisc/kernel/syscall_table.S
arch/powerpc/Makefile
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/sections.h
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/mm/hash_utils_64.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/sysfs.c
arch/sparc/lib/NG2memcpy.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/tsb.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/net/bpf_jit_comp.c
drivers/block/virtio_blk.c
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/timer-marco.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/mv_xor.c
drivers/dma/sa11x0-dma.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/sid.h
drivers/hwmon/Kconfig
drivers/hwmon/ntc_thermistor.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bonding.h
drivers/net/can/c_can/Kconfig
drivers/net/can/c_can/c_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/altera/Makefile
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_msgdmahw.h
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdmahw.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/altera/altera_utils.c
drivers/net/ethernet/altera/altera_utils.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/ec_bhf.c [new file with mode: 0644]
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/usb/cdc_mbim.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
drivers/ptp/Kconfig
drivers/scsi/scsi_transport_sas.c
fs/afs/cmservice.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4state.c
fs/ocfs2/dlm/dlmmaster.c
fs/splice.c
include/linux/dmaengine.h
include/linux/if_macvlan.h
include/linux/if_vlan.h
include/linux/mlx4/qp.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of_mdio.h
include/linux/omap-dma.h
include/linux/perf_event.h
include/linux/rtnetlink.h
include/linux/sched.h
include/net/cfg80211.h
include/net/ip6_route.h
include/net/netns/ipv4.h
include/uapi/linux/nl80211.h
kernel/events/core.c
kernel/kexec.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpupri.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
mm/filemap.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/bridge/br_netfilter.c
net/core/dev.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/utils.c
net/dsa/dsa.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_protocol.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/tcpv6_offload.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_protocol.c
net/iucv/af_iucv.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/util.c
net/mac80211/vht.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink.c
net/rxrpc/ar-key.c
net/sched/cls_tcindex.c
net/wireless/scan.c
net/wireless/sme.c
sound/core/pcm_dmaengine.c
sound/pci/hda/hda_intel.c
tools/Makefile
tools/lib/lockdep/Makefile

index 677a025..ba60d93 100644 (file)
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -79,7 +79,7 @@
   <partintro>
     <para>
       This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
       exposed by DRM drivers.
     </para>
   </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
       providing a solution to every graphics memory-related problems, GEM
       identified common code between drivers and created a support library to
       share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabitilies and is thus limited to
+      TTM, but has no video RAM management capabilities and is thus limited to
       UMA devices.
     </para>
     <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
            vice versa. Drivers must use the kernel dma-buf buffer sharing framework
            to manage the PRIME file descriptors. Similar to the mode setting
            API PRIME is agnostic to the underlying buffer object manager, as
-           long as handles are 32bit unsinged integers.
+           long as handles are 32bit unsigned integers.
          </para>
          <para>
            While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
       first create properties and then create and associate individual instances
       of those properties to objects. A property can be instantiated multiple
       times and associated with different objects. Values are stored in property
-      instances, and all other property information are stored in the propery
+      instances, and all other property information are stored in the property
       and shared between all instances of the property.
     </para>
     <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
   <sect1>
     <title>Legacy Support Code</title>
     <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
       is only used by old DRM drivers which have done a so-called shadow-attach
       to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
       submission code. Do not use any of this in new and modern drivers.
     </para>
 
index f9fd615..1d27f0a 100644 (file)
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-       $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+       $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
        $(Q)base64 -d $< >$@
index c79bab0..8dbcf82 100644 (file)
--- a/Documentation/devicetree/bindings/net/mdio-gpio.txt
+++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-       mdio-gpio0 = <&mdio0>;
+       mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
index e9f5dac..4e30eba 100644 (file)
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension.  Download the file from:
-    https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
index 8b9cd8e..264bcde 100644 (file)
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts  serviced since boot time, for each
 of the  possible system interrupts.   The first  column  is the  total of  all
-interrupts serviced; each  subsequent column is the  total for that particular
-interrupt.
+interrupts serviced  including  unnumbered  architecture specific  interrupts;
+each  subsequent column is the  total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
index 79f8257..2cc95ad 100644 (file)
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
                from the max value.
                RW
 
+temp[1-*]_min_hyst
+               Temperature hysteresis value for min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the min value.
+               RW
+
 temp[1-*]_input Temperature input value.
                Unit: millidegree Celsius
                RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit    Temperature critical min value, typically lower than
                Unit: millidegree Celsius
                RW
 
+temp[1-*]_lcrit_hyst
+               Temperature hysteresis value for critical min limit.
+               Unit: millidegree Celsius
+               Must be reported as an absolute temperature, NOT a delta
+               from the critical min value.
+               RW
+
 temp[1-*]_offset
                Temperature offset which is added to the temperature reading
                by the chip.
index e6a7232..4180205 100644 (file)
--- a/Documentation/java.txt
+++ b/Documentation/java.txt
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
                break;
        case CP_CLASS:
        case CP_STRING:
+       case CP_METHODTYPE:
                seekerr = fseek(classfile, 2, SEEK_CUR);
                break;
+       case CP_METHODHANDLE:
+               seekerr = fseek(classfile, 3, SEEK_CUR);
+               break;
        case CP_INTEGER:
        case CP_FLOAT:
        case CP_FIELDREF:
        case CP_METHODREF:
        case CP_INTERFACEMETHODREF:
        case CP_NAMEANDTYPE:
+       case CP_INVOKEDYNAMIC:
                seekerr = fseek(classfile, 4, SEEK_CUR);
                break;
        case CP_LONG:
index 81f940f..e3ba753 100644 (file)
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
index 6fea79e..38112d5 100644 (file)
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
index a9380ba..b4f5365 100644 (file)
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2126,7 +2126,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
index 6846c7c..8ccf31c 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -537,7 +537,7 @@ L:  linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridgers2013@gmail.com
+M:     Vince Bridgers <vbridgers2013@gmail.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -6514,10 +6514,10 @@ T:      git git://openrisc.net/~jonas/linux
 F:     arch/openrisc/
 
 OPENVSWITCH
-M:     Jesse Gross <jesse@nicira.com>
+M:     Pravin Shelar <pshelar@nicira.com>
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:     Maintained
 F:     net/openvswitch/
 
@@ -7405,6 +7405,14 @@ F:       drivers/rpmsg/
 F:     Documentation/rpmsg.txt
 F:     include/linux/rpmsg.h
 
+RESET CONTROLLER FRAMEWORK
+M:     Philipp Zabel <p.zabel@pengutronix.de>
+S:     Maintained
+F:     drivers/reset/
+F:     Documentation/devicetree/bindings/reset/
+F:     include/linux/reset.h
+F:     include/linux/reset-controller.h
+
 RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
index 9d99378..cf3412d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 068031f..6d0f03c 100644 (file)
--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -99,7 +99,7 @@
                        pcie@3,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x1800 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
index e2919f0..da80196 100644 (file)
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
                        pcie@3,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x1800 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
                        pcie@4,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x2000 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
index 366fc2c..c0e0eae 100644 (file)
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
                                trigger@3 {
                                        reg = <3>;
                                        trigger-name = "external";
-                                       trigger-value = <0x13>;
+                                       trigger-value = <0xd>;
                                        trigger-external;
                                };
                        };
index 9583563..8a558b7 100644 (file)
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
                status = "okay";
 
                ak8975@0c {
-                       compatible = "ak,ak8975";
+                       compatible = "asahi-kasei,ak8975";
                        reg = <0x0c>;
                        gpios = <&gpj0 7 0>;
                };
index 090f983..cde19c8 100644 (file)
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
                                        regulator-name = "VDD_IOPERI_1.8V";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
+                                       regulator-always-on;
                                        op_mode = <1>;
                                };
 
index 80a3bf4..896a2a6 100644 (file)
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
                        gpio-key,wakeup;
                };
        };
-
-       amba {
-               mdma1: mdma@11C10000 {
-                       /*
-                        * MDMA1 can support both secure and non-secure
-                        * AXI transactions. When this is enabled in the kernel
-                        * for boards that run in secure mode, we are getting
-                        * imprecise external aborts causing the kernel to oops.
-                        */
-                       status = "disabled";
-               };
-       };
 };
index c3a9a66..b69fbcb 100644 (file)
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
                reg = <0x100440C0 0x20>;
        };
 
-       mau_pd: power-domain@100440E0 {
-               compatible = "samsung,exynos4210-pd";
-               reg = <0x100440E0 0x20>;
-       };
-
-       g2d_pd: power-domain@10044100 {
-               compatible = "samsung,exynos4210-pd";
-               reg = <0x10044100 0x20>;
-       };
-
        msc_pd: power-domain@10044120 {
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044120 0x20>;
                        #dma-cells = <1>;
                        #dma-channels = <8>;
                        #dma-requests = <1>;
+                       /*
+                        * MDMA1 can support both secure and non-secure
+                        * AXI transactions. When this is enabled in the kernel
+                        * for boards that run in secure mode, we are getting
+                        * imprecise external aborts causing the kernel to oops.
+                        */
+                       status = "disabled";
                };
        };
 
        spi_0: spi@12d20000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d20000 0x100>;
-               interrupts = <0 66 0>;
+               interrupts = <0 68 0>;
                dmas = <&pdma0 5
                        &pdma0 4>;
                dma-names = "tx", "rx";
        spi_1: spi@12d30000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d30000 0x100>;
-               interrupts = <0 67 0>;
+               interrupts = <0 69 0>;
                dmas = <&pdma1 5
                        &pdma1 4>;
                dma-names = "tx", "rx";
        spi_2: spi@12d40000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d40000 0x100>;
-               interrupts = <0 68 0>;
+               interrupts = <0 70 0>;
                dmas = <&pdma0 7
                        &pdma0 6>;
                dma-names = "tx", "rx";
                interrupts = <0 112 0>;
                clocks = <&clock 471>;
                clock-names = "secss";
-               samsung,power-domain = <&g2d_pd>;
        };
 };
index 4ce7b70..e07a227 100644 (file)
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -65,6 +65,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_S3C2410=y
 CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
index b5f7705..624e1d4 100644 (file)
--- a/arch/arm/include/asm/trusted_foundations.h
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -54,7 +54,9 @@ static inline void register_trusted_foundations(
         */
        pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
        pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
        setup_max_cpus = 0;
+#endif
        cpu_idle_poll_ctrl(true);
 }
 
index a028292..7cd6f19 100644 (file)
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = {
 static struct at91_adc_trigger at91_adc_triggers[] = {
        [0] = {
                .name = "timer-counter-0",
-               .value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN,
+               .value = 0x1,
        },
        [1] = {
                .name = "timer-counter-1",
-               .value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN,
+               .value = 0x3,
        },
        [2] = {
                .name = "timer-counter-2",
-               .value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN,
+               .value = 0x5,
        },
        [3] = {
                .name = "external",
-               .value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN,
+               .value = 0xd,
                .is_external = true,
        },
 };
index 932129e..aa01c42 100644 (file)
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -18,6 +18,8 @@
 
 #include <mach/map.h>
 
+#include <plat/cpu.h>
+
 #include "smc.h"
 
 static int exynos_do_idle(void)
@@ -28,13 +30,24 @@ static int exynos_do_idle(void)
 
 static int exynos_cpu_boot(int cpu)
 {
+       /*
+        * The second parameter of the SMC_CMD_CPU1BOOT command is the CPU id.
+        * But Exynos4212 has only one secondary CPU, so the second parameter
+        * isn't used for informing secure firmware about the CPU id.
+        */
+       if (soc_is_exynos4212())
+               cpu = 0;
+
        exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
        return 0;
 }
 
 static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 {
-       void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c + 4*cpu;
+       void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c;
+
+       if (!soc_is_exynos4212())
+               boot_reg += 4*cpu;
 
        __raw_writel(boot_addr, boot_reg);
        return 0;
index fc4dd7c..6bd7c3f 100644 (file)
--- a/arch/arm/mach-imx/devices/platform-ipu-core.c
+++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
 
        pdev = platform_device_alloc("mx3-camera", 0);
        if (!pdev)
-               goto err;
+               return ERR_PTR(-ENOMEM);
 
        pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
        if (!pdev->dev.dma_mask)
index f3d4cf5..09520e1 100644 (file)
--- a/arch/arm/mach-mvebu/mvebu-soc-id.c
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
@@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void)
        iounmap(pci_base);
 
 res_ioremap:
-       clk_disable_unprepare(clk);
+       /*
+        * If the PCIe unit is actually enabled and we have PCI
+        * support in the kernel, we intentionally do not release the
+        * reference to the clock. We want to keep it running since
+        * the bootloader does some PCIe link configuration that the
+        * kernel is for now unable to do, and gating the clock would
+        * make us lose this precious configuration.
+        */
+       if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+               clk_disable_unprepare(clk);
+               clk_put(clk);
+       }
 
 clk_err:
        of_node_put(child);
index ac82512..b6885e4 100644 (file)
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
        board_nand_data.nr_parts        = nr_parts;
        board_nand_data.devsize         = nand_type;
 
-       board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+       board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
        gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
index 8f5121b..eb8c75e 100644 (file)
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
        .clkdm_name     = "dpll4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+                       dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
 
 static struct clk dpll4_m5x2_ck_3630 = {
        .name           = "dpll4_m5x2_ck",
index 01fc710..2498ab0 100644 (file)
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
+#include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
        struct idle_statedata *cx = state_ptr + index;
        u32 mpuss_can_lose_context = 0;
+       int cpu_id = smp_processor_id();
 
        /*
         * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
                                 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP and per CPU interrupt context is saved.
@@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        if (dev->cpu == 0 && mpuss_can_lose_context)
                cpu_cluster_pm_exit();
 
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
 fail:
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
        cpu_done[dev->cpu] = false;
@@ -172,6 +178,16 @@ fail:
        return index;
 }
 
+/*
+ * For each cpu, set up the broadcast timer because local timers
+ * stop for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+       int cpu = smp_processor_id();
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 static struct cpuidle_driver omap4_idle_driver = {
        .name                           = "omap4_idle",
        .owner                          = THIS_MODULE,
@@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = {
                        /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                        .exit_latency = 328 + 440,
                        .target_residency = 960,
-                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-                                CPUIDLE_FLAG_TIMER_STOP,
+                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap_enter_idle_coupled,
                        .name = "C2",
                        .desc = "CPUx OFF, MPUSS CSWR",
@@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = {
                        /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
                        .exit_latency = 460 + 518,
                        .target_residency = 1100,
-                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-                                CPUIDLE_FLAG_TIMER_STOP,
+                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap_enter_idle_coupled,
                        .name = "C3",
                        .desc = "CPUx OFF, MPUSS OSWR",
@@ -231,5 +245,8 @@ int __init omap4_idle_init(void)
        if (!cpu_clkdm[0] || !cpu_clkdm[1])
                return -ENODEV;
 
+       /* Configure the broadcast timer on each cpu */
+       on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
        return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
index 8923172..e829664 100644 (file)
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
         * current exception.
         */
 
-       .flags          = HWMOD_EXT_OPT_MAIN_CLK,
+       .flags          = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
        .main_clk       = "pad_clks_ck",
        .prcm = {
                .omap4 = {
index 5f5b975..b5608b1 100644 (file)
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -70,6 +70,7 @@ static u32 errata;
 
 static struct omap_dma_global_context_registers {
        u32 dma_irqenable_l0;
+       u32 dma_irqenable_l1;
        u32 dma_ocp_sysconfig;
        u32 dma_gcr;
 } omap_dma_global_context;
@@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq;
 
 /*----------------------------------------------------------------------------*/
 
+/*
+ * Note that we are currently using only IRQENABLE_L0 and L1.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now.
+ */
 void omap_dma_global_context_save(void)
 {
        omap_dma_global_context.dma_irqenable_l0 =
                p->dma_read(IRQENABLE_L0, 0);
+       omap_dma_global_context.dma_irqenable_l1 =
+               p->dma_read(IRQENABLE_L1, 0);
        omap_dma_global_context.dma_ocp_sysconfig =
                p->dma_read(OCP_SYSCONFIG, 0);
        omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
@@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void)
                OCP_SYSCONFIG, 0);
        p->dma_write(omap_dma_global_context.dma_irqenable_l0,
                IRQENABLE_L0, 0);
+       p->dma_write(omap_dma_global_context.dma_irqenable_l1,
+               IRQENABLE_L1, 0);
 
        if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
                p->dma_write(0x3 , IRQSTATUS_L0, 0);
index f1432da..c5fa7a6 100644 (file)
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
        ENTRY_SAME(sched_setattr)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
-       ENTRY_COMP(renameat2)
+       ENTRY_SAME(renameat2)
 
        /* Nothing yet */
 
index 4c0cedf..ce4c68a 100644 (file)
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -150,7 +150,9 @@ endif
 
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
-KBUILD_CPPFLAGS        += -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS        += -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS  += -Iarch/$(ARCH)
 KBUILD_CFLAGS  += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP            = $(CC) -E $(KBUILD_CFLAGS)
index 6586a40..cded7c1 100644 (file)
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -318,11 +318,16 @@ n:
        addi    reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)           \
        lis     reg,(expr)@highest;             \
        ori     reg,reg,(expr)@higher;  \
        rldicr  reg,reg,32,31;          \
-       oris    reg,reg,(expr)@h;               \
+       oris    reg,reg,(expr)@__AS_ATHIGH;     \
        ori     reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)                        \
index d0e784e..5217903 100644 (file)
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
                (unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+       extern char kvm_tmp[];
+       return start < (unsigned long)kvm_tmp &&
+               (unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+       return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
index 6a01752..dd8695f 100644 (file)
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN                0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
index 59d229a..879b3aa 100644 (file)
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
                if (!cpu_online(cpu)) {
                        printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
                               cpu);
-                       cpu_up(cpu);
+                       WARN_ON(cpu_up(cpu));
                }
        }
 }
index 94e597e..7af190a 100644 (file)
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        r = kvmppc_book3s_init_pr();
 #endif
        return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kvmppc_book3s_exit_pr();
 #endif
        kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
index 1d6c56a..8fcc363 100644 (file)
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                pte_size = psize;
                pte = lookup_linux_pte_and_update(pgdir, hva, writing,
                                                  &pte_size);
-               if (pte_present(pte)) {
+               if (pte_present(pte) && !pte_numa(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
index b031f93..07c8b5b 100644 (file)
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        mr      r3, r9
        bl      kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+       b       2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+       /* Turn on TM. */
+       mfmsr   r8
+       li      r0, 1
+       rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+       mtmsrd  r8
+
+       ld      r5, VCPU_MSR(r9)
+       rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+       beq     1f      /* TM not active in guest. */
+
+       li      r3, TM_CAUSE_KVM_RESCHED
+
+       /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+       li      r5, 0
+       mtmsrd  r5, 1
+
+       /* All GPRs are volatile at this point. */
+       TRECLAIM(R3)
+
+       /* Temporarily store r13 and r9 so we have some regs to play with */
+       SET_SCRATCH0(r13)
+       GET_PACA(r13)
+       std     r9, PACATMSCRATCH(r13)
+       ld      r9, HSTATE_KVM_VCPU(r13)
+
+       /* Get a few more GPRs free. */
+       std     r29, VCPU_GPRS_TM(29)(r9)
+       std     r30, VCPU_GPRS_TM(30)(r9)
+       std     r31, VCPU_GPRS_TM(31)(r9)
+
+       /* Save away PPR and DSCR soon so we don't run with user values. */
+       mfspr   r31, SPRN_PPR
+       HMT_MEDIUM
+       mfspr   r30, SPRN_DSCR
+       ld      r29, HSTATE_DSCR(r13)
+       mtspr   SPRN_DSCR, r29
+
+       /* Save all but r9, r13 & r29-r31 */
+       reg = 0
+       .rept   29
+       .if (reg != 9) && (reg != 13)
+       std     reg, VCPU_GPRS_TM(reg)(r9)
+       .endif
+       reg = reg + 1
+       .endr
+       /* ... now save r13 */
+       GET_SCRATCH0(r4)
+       std     r4, VCPU_GPRS_TM(13)(r9)
+       /* ... and save r9 */
+       ld      r4, PACATMSCRATCH(r13)
+       std     r4, VCPU_GPRS_TM(9)(r9)
+
+       /* Reload stack pointer and TOC. */
+       ld      r1, HSTATE_HOST_R1(r13)
+       ld      r2, PACATOC(r13)
+
+       /* Set MSR RI now we have r1 and r13 back. */
+       li      r5, MSR_RI
+       mtmsrd  r5, 1
+
+       /* Save away checkpointed SPRs. */
+       std     r31, VCPU_PPR_TM(r9)
+       std     r30, VCPU_DSCR_TM(r9)
+       mflr    r5
+       mfcr    r6
+       mfctr   r7
+       mfspr   r8, SPRN_AMR
+       mfspr   r10, SPRN_TAR
+       std     r5, VCPU_LR_TM(r9)
+       stw     r6, VCPU_CR_TM(r9)
+       std     r7, VCPU_CTR_TM(r9)
+       std     r8, VCPU_AMR_TM(r9)
+       std     r10, VCPU_TAR_TM(r9)
+
+       /* Restore r12 as trap number. */
+       lwz     r12, VCPU_TRAP(r9)
+
+       /* Save FP/VSX. */
+       addi    r3, r9, VCPU_FPRS_TM
+       bl      .store_fp_state
+       addi    r3, r9, VCPU_VRS_TM
+       bl      .store_vr_state
+       mfspr   r6, SPRN_VRSAVE
+       stw     r6, VCPU_VRSAVE_TM(r9)
+1:
+       /*
+        * We need to save these SPRs after the treclaim so that the software
+        * error code is recorded correctly in the TEXASR.  Also the user may
+        * change these outside of a transaction, so they must always be
+        * context switched.
+        */
+       mfspr   r5, SPRN_TFHAR
+       mfspr   r6, SPRN_TFIAR
+       mfspr   r7, SPRN_TEXASR
+       std     r5, VCPU_TFHAR(r9)
+       std     r6, VCPU_TFIAR(r9)
+       std     r7, VCPU_TEXASR(r9)
+2:
+#endif
+
        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        cmpdi   r8, 0
index c5c052a..02f1def 100644 (file)
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
 #endif
        vfree(vcpu_book3s);
index d766d6e..06ba83b 100644 (file)
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;
 
+               /* Make kvm guest trampolines executable */
+               if (overlaps_kvm_tmp(vaddr, vaddr + step))
+                       tprot &= ~HPTE_R_N;
+
                /*
                 * If relocatable, check if it overlaps interrupt vectors that
                 * are copied down to real 0. For relocatable kernel
index b3ecb8f..9ae6664 100644 (file)
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
+       case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
index 452d3eb..e9f8fa9 100644 (file)
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
                return NULL;
        memset(header, 0, sz);
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (bpfsize + sizeof(*header));
+       hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
        /* Insert random number of illegal instructions before BPF code
         * and make sure the first instruction starts at an even address.
         */
index fde5aba..1a49ffd 100644 (file)
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define        TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define        TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define        TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define        TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR          _AC(0x0000000010000000,UL)
 #define MODULES_LEN            _AC(0x00000000e0000000,UL)
 #define MODULES_END            _AC(0x00000000f0000000,UL)
index a364000..7f41d40 100644 (file)
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
                        size_t count)
 {
        unsigned long val, err;
-       int ret = sscanf(buf, "%ld", &val);
+       int ret = sscanf(buf, "%lu", &val);
 
        if (ret != 1)
                return -EINVAL;
index 2c20ad6..30eee6e 100644 (file)
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -236,6 +236,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add             %o1, (64 - 1), %o4
index a8ff0d1..4ced3fc 100644 (file)
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                        unsigned long addr)
-{
-       static int times;
-
-       if (times++ < 10)
-               printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                      "reports 64-bit fault address [%lx]\n",
-                      current->comm, current->pid, addr);
-       show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                goto intr_or_no_mm;
                        }
                }
-               if (unlikely((address >> 32) != 0)) {
-                       bogus_32bit_fault_address(regs, address);
+               if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
-               }
        }
 
        if (regs->tstate & TSTATE_PRIV) {
index f5d506f..fe19b81 100644 (file)
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);
 
-       base = TSBMAP_BASE;
+       switch (tsb_idx) {
+       case MM_TSB_BASE:
+               base = TSBMAP_8K_BASE;
+               break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       case MM_TSB_HUGE:
+               base = TSBMAP_4M_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
index aa333d9..adb02aa 100644 (file)
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-       FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
index 33e8c02..138ceff 100644 (file)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        exec_control = vmcs12->pin_based_vm_exec_control;
        exec_control |= vmcs_config.pin_based_exec_ctrl;
-       exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+       exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
+                          PIN_BASED_POSTED_INTR);
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
 
        vmx->nested.preemption_timer_expired = false;
@@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                if (!vmx->rdtscp_enabled)
                        exec_control &= ~SECONDARY_EXEC_RDTSCP;
                /* Take the following fields only from vmcs12 */
-               exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+                                  SECONDARY_EXEC_APIC_REGISTER_VIRT);
                if (nested_cpu_has(vmcs12,
                                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                        exec_control |= vmcs12->secondary_vm_exec_control;
index b6c0bac..20316c6 100644 (file)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+static bool backwards_tsc_observed = false;
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
                                        &ka->master_kernel_ns,
                                        &ka->master_cycle_now);
 
-       ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+       ka->use_master_clock = host_tsc_clocksource && vcpus_matched
+                               && !backwards_tsc_observed;
 
        if (ka->use_master_clock)
                atomic_set(&kvm_guest_has_master_clock, 1);
@@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
         */
        if (backwards_tsc) {
                u64 delta_cyc = max_tsc - local_tsc;
+               backwards_tsc_observed = true;
                list_for_each_entry(kvm, &vm_list, vm_list) {
                        kvm_for_each_vcpu(i, vcpu, kvm) {
                                vcpu->arch.tsc_offset_adjustment += delta_cyc;
index dc01773..6d5663a 100644 (file)
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
        memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (proglen + sizeof(*header));
+       hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
        /* insert a random number of int3 instructions before BPF code */
        *image_ptr = &header->image[prandom_u32() % hole];
index 6d8a87f..cb9b1f8 100644 (file)
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
-       spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+       spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
        err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vq);
-               spin_unlock_irqrestore(&vblk->vq_lock, flags);
                blk_mq_stop_hw_queue(hctx);
+               spin_unlock_irqrestore(&vblk->vq_lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
index 00fdd11..a8d7ea1 100644 (file)
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
                        || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
                __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
                __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-               clk_disable_unprepare(tcd->clk);
+               clk_disable(tcd->clk);
        }
 
        switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
         * of oneshot, we get lower overhead and improved accuracy.
         */
        case CLOCK_EVT_MODE_PERIODIC:
-               clk_prepare_enable(tcd->clk);
+               clk_enable(tcd->clk);
 
                /* slow clock, count up to RC, then irq and restart */
                __raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
                break;
 
        case CLOCK_EVT_MODE_ONESHOT:
-               clk_prepare_enable(tcd->clk);
+               clk_enable(tcd->clk);
 
                /* slow clock, count up to RC, then irq and stop */
                __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
        ret = clk_prepare_enable(t2_clk);
        if (ret)
                return ret;
-       clk_disable_unprepare(t2_clk);
+       clk_disable(t2_clk);
 
        clkevt.regs = tc->regs;
        clkevt.clk = t2_clk;
index b52e1c0..7f5374d 100644 (file)
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 
        action->dev_id = ce;
        BUG_ON(setup_irq(ce->irq, action));
-       irq_set_affinity(action->irq, cpumask_of(cpu));
+       irq_force_affinity(action->irq, cpumask_of(cpu));
 
        clockevents_register_device(ce);
        return 0;
index a886713..d5d30ed 100644 (file)
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
+       cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
+       unmap->map_cnt = nr;
 
        return unmap;
 }
index cfdbb92..7a74076 100644 (file)
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        /* Disable BLOCK interrupts as well */
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-       err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
-                              IRQF_SHARED, "dw_dmac", dw);
-       if (err)
-               return err;
-
        /* Create a pool of consistent memory blocks for hardware descriptors */
        dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
                                         sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
+       err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+                         "dw_dmac", dw);
+       if (err)
+               return err;
+
        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);
 
+       free_irq(chip->irq, dw);
        tasklet_kill(&dw->tasklet);
 
        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
index 766b68e..394cbc5 100644 (file)
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-       u32 activation;
-
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-       activation = readl_relaxed(XOR_ACTIVATION(chan));
-       activation |= 0x1;
-       writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+       /* writel ensures all descriptors are flushed before activation */
+       writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
index ab26d46..5ebdfbc 100644 (file)
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
        struct sa11x0_dma_desc  *txd_load;
        unsigned                sg_done;
        struct sa11x0_dma_desc  *txd_done;
-#ifdef CONFIG_PM_SLEEP
        u32                     dbs[2];
        u32                     dbt[2];
        u32                     dcsr;
-#endif
 };
 
 struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int sa11x0_dma_suspend(struct device *dev)
 {
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
        .suspend_noirq = sa11x0_dma_suspend,
index 7762665..876de9a 100644 (file)
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
        }
 
        if (outp == 8)
-               return false;
+               return conf;
 
        data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
        if (data == 0x0000)
index 43fec17..bbf117b 100644 (file)
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
+               case 0x1e: return 2;
                default:
                        break;
                }
index 6852861..8149e7c 100644 (file)
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,6 +1642,7 @@ struct radeon_vce {
        unsigned                fb_version;
        atomic_t                handles[RADEON_MAX_VCE_HANDLES];
        struct drm_file         *filp[RADEON_MAX_VCE_HANDLES];
+       unsigned                img_size[RADEON_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
 };
 
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
                               struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+                            (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
                              (rdev->ddev->pdev->device == 0x6850) || \
index b3633d9..9ab3097 100644 (file)
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
                }
        }
 
+       if (!found) {
+               while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+                       dhandle = ACPI_HANDLE(&pdev->dev);
+                       if (!dhandle)
+                               continue;
+
+                       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
        if (!found)
                return false;
 
index 408b6ac..f00dbbf 100644 (file)
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 
        /* avoid high jitter with small fractional dividers */
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
-               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
                if (fb_div < fb_div_min) {
                        unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
                        fb_div *= tmp;
index 0cc47f1..eaaedba 100644 (file)
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return r;
                }
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
-               }
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
 
-               /* map the ib pool buffer read only into
-                * virtual address space */
-               bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                        rdev->ring_tmp_bo.bo);
-               r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-                                         RADEON_VM_PAGE_READABLE |
-                                         RADEON_VM_PAGE_SNOOPED);
+                       /* map the ib pool buffer read only into
+                        * virtual address space */
+                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+                                                rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                                                 RADEON_VM_PAGE_READABLE |
+                                                 RADEON_VM_PAGE_SNOOPED);
 
-               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
+                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
                }
-
                file_priv->driver_priv = fpriv;
        }
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                struct radeon_bo_va *bo_va;
                int r;
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (!r) {
-                       bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                 rdev->ring_tmp_bo.bo);
-                       if (bo_va)
-                               radeon_vm_bo_rmv(rdev, bo_va);
-                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (!r) {
+                               bo_va = radeon_vm_bo_find(&fpriv->vm,
+                                                         rdev->ring_tmp_bo.bo);
+                               if (bo_va)
+                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       }
                }
 
                radeon_vm_fini(rdev, &fpriv->vm);
index 19bec0d..4faa4d6 100644 (file)
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+                       if ((lobj->alt_domain & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               size = bo->mem.num_pages << PAGE_SHIFT;
-               offset = bo->mem.start << PAGE_SHIFT;
-               if ((offset + size) > rdev->mc.visible_vram_size) {
-                       /* hurrah the memory is not visible ! */
-                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
-                       if (unlikely(r != 0))
-                               return r;
-                       offset = bo->mem.start << PAGE_SHIFT;
-                       /* this should not happen */
-                       if ((offset + size) > rdev->mc.visible_vram_size)
-                               return -EINVAL;
-               }
+       if (bo->mem.mem_type != TTM_PL_VRAM)
+               return 0;
+
+       size = bo->mem.num_pages << PAGE_SHIFT;
+       offset = bo->mem.start << PAGE_SHIFT;
+       if ((offset + size) <= rdev->mc.visible_vram_size)
+               return 0;
+
+       /* hurrah the memory is not visible ! */
+       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       r = ttm_bo_validate(bo, &rbo->placement, false, false);
+       if (unlikely(r == -ENOMEM)) {
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+               return ttm_bo_validate(bo, &rbo->placement, false, false);
+       } else if (unlikely(r != 0)) {
+               return r;
        }
+
+       offset = bo->mem.start << PAGE_SHIFT;
+       /* this should never happen */
+       if ((offset + size) > rdev->mc.visible_vram_size)
+               return -EINVAL;
+
        return 0;
 }
 
index f30b842..53d6e1b 100644 (file)
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set profile when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set method when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               count = -EINVAL;
+               goto fail;
+       }
+
        /* we don't support the legacy modes with dpm */
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
                count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set dpm state when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
        enum radeon_dpm_forced_level level;
        int ret = 0;
 
+       /* Can't force performance level when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      char *buf)
 {
        struct radeon_device *rdev = dev_get_drvdata(dev);
+       struct drm_device *ddev = rdev->ddev;
        int temp;
 
+       /* Can't get temperature when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (rdev->asic->pm.get_temperature)
                temp = radeon_get_temperature(rdev);
        else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_device *ddev = rdev->ddev;
 
-       if (rdev->pm.dpm_enabled) {
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               seq_printf(m, "PX asic powered off\n");
+       } else if (rdev->pm.dpm_enabled) {
                mutex_lock(&rdev->pm.mutex);
                if (rdev->asic->dpm.debugfs_print_current_performance_level)
                        radeon_dpm_debugfs_print_current_performance_level(rdev, m);
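
Every pm sysfs and debugfs entry point above now begins with the same guard: on a PowerXpress (RADEON_IS_PX) system, vga_switcheroo can power the discrete GPU off completely, and reading temperature or power-state registers from a powered-down ASIC returns garbage at best. The repeated condition folds into one predicate (hypothetical helper; the patch open-codes it at every call site):

static bool radeon_px_asic_off(struct radeon_device *rdev)
{
        /* PX dGPU whose switcheroo state is anything but ON */
        return (rdev->flags & RADEON_IS_PX) &&
               (rdev->ddev->switch_power_state != DRM_SWITCH_POWER_ON);
}
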
index f73324c..3971d96 100644 (file)
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: minimum size in bytes that the relocated buffer must cover
  *
  * Patch relocation inside command stream with real buffer address
  */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+                       unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       uint64_t offset;
+       struct radeon_cs_reloc *reloc;
+       uint64_t start, end, offset;
        unsigned idx;
 
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,14 +465,59 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
                return -EINVAL;
        }
 
-       offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+       reloc = p->relocs_ptr[(idx / 4)];
+       start = reloc->gpu_offset;
+       end = start + radeon_bo_size(reloc->robj);
+       start += offset;
 
-        p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-        p->ib.ptr[hi] = offset >> 32;
+       p->ib.ptr[lo] = start & 0xFFFFFFFF;
+       p->ib.ptr[hi] = start >> 32;
+
+       if (end <= start) {
+               DRM_ERROR("invalid reloc offset %llX!\n", offset);
+               return -EINVAL;
+       }
+       if ((end - start) < size) {
+               DRM_ERROR("buffer to small (%d / %d)!\n",
+                       (unsigned)(end - start), size);
+               return -EINVAL;
+       }
 
        return 0;
 }
 
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index, or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+       unsigned i;
+
+       /* validate the handle */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+                       return i;
+       }
+
+       /* handle not found, try to alloc a new one */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+                       p->rdev->vce.filp[i] = p->filp;
+                       p->rdev->vce.img_size[i] = 0;
+                       return i;
+               }
+       }
+
+       DRM_ERROR("No more free VCE handles!\n");
+       return -EINVAL;
+}
+
 /**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
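
radeon_vce_cs_reloc() now receives the minimum footprint each command implies and rejects relocations whose buffer cannot hold it, instead of blindly patching in addresses. Worked through with assumed numbers:

        /*
         * A 64 KiB BO at gpu_offset 0x100000, an IB-supplied offset of
         * 0xf000, and a command that requires size = 8192:
         *
         *   start = 0x100000 + 0xf000  = 0x10f000
         *   end   = 0x100000 + 0x10000 = 0x110000
         *   end - start = 0x1000 (4096) < 8192  ->  -EINVAL
         *
         * An offset at or past the end of the BO makes end <= start and
         * is rejected as an invalid reloc offset.
         */
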
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-       uint32_t handle = 0;
-       bool destroy = false;
+       int session_idx = -1;
+       bool destroyed = false;
+       uint32_t tmp, handle = 0;
+       uint32_t *size = &tmp;
        int i, r;
 
        while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (destroyed) {
+                       DRM_ERROR("No other command allowed after destroy!\n");
+                       return -EINVAL;
+               }
+
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
+                       session_idx = radeon_vce_validate_handle(p, handle);
+                       if (session_idx < 0)
+                               return session_idx;
+                       size = &p->rdev->vce.img_size[session_idx];
                        break;
 
                case 0x00000002: // task info
+                       break;
+
                case 0x01000001: // create
+                       *size = radeon_get_ib_value(p, p->idx + 8) *
+                               radeon_get_ib_value(p, p->idx + 10) *
+                               8 * 3 / 2;
+                       break;
+
                case 0x04000001: // config extension
                case 0x04000002: // pic control
                case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x03000001: // encode
-                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+                                               *size);
                        if (r)
                                return r;
 
-                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+                                               *size / 3);
                        if (r)
                                return r;
                        break;
 
                case 0x02000001: // destroy
-                       destroy = true;
+                       destroyed = true;
                        break;
 
                case 0x05000001: // context buffer
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               *size * 2);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000004: // video bitstream buffer
+                       tmp = radeon_get_ib_value(p, p->idx + 4);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               tmp);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000005: // feedback buffer
-                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               4096);
                        if (r)
                                return r;
                        break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (session_idx == -1) {
+                       DRM_ERROR("no session command at start of IB\n");
+                       return -EINVAL;
+               }
+
                p->idx += len / 4;
        }
 
-       if (destroy) {
+       if (destroyed) {
                /* IB contains a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-               return 0;
-        }
-
-       /* create or encode, validate the handle */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-                       return 0;
        }
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-                       p->rdev->vce.filp[i] = p->filp;
-                       return 0;
-               }
-       }
-
-       DRM_ERROR("No more free VCE handles!\n");
-       return -EINVAL;
+       return 0;
 }
 
 /**
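
Beyond the size bookkeeping, the rewritten parser enforces a fixed shape on every VCE IB: the session command must come first, so each later command has a session_idx and a per-session img_size to validate against, and nothing may follow a destroy. Schematically (img_size factors taken from the create case above):

        /*
         * A well-formed VCE IB, in order:
         *   0x00000001 session  - must come first; selects session_idx
         *   0x01000001 create   - records img_size (two IB dwords * 8 * 3 / 2)
         *   0x03000001 encode   - relocs checked against img_size, img_size/3
         *   0x02000001 destroy  - must come last; the handle is freed after
         *                         the loop
         * Any command before the first session, or after a destroy, fails
         * with -EINVAL.
         */
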
index 2aae6ce..d9ab99f 100644 (file)
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        ndw = 64;
 
        /* assume the worst case */
-       ndw += vm->max_pde_used * 12;
+       ndw += vm->max_pde_used * 16;
 
        /* update too big for an IB */
        if (ndw > 0xfffff)
index 683532f..7321283 100644 (file)
 #define                SPLL_CHG_STATUS                         (1 << 1)
 #define        SPLL_CNTL_MODE                                  0x618
 #define                SPLL_SW_DIR_CONTROL                     (1 << 0)
-#      define SPLL_REFCLK_SEL(x)                       ((x) << 8)
-#      define SPLL_REFCLK_SEL_MASK                     0xFF00
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
 
 #define        CG_SPLL_SPREAD_SPECTRUM                         0x620
 #define                SSEN                                    (1 << 0)
index bc196f4..4af0da9 100644 (file)
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
        tristate "NTC thermistor support"
-       depends on (!OF && !IIO) || (OF && IIO)
+       depends on !OF || IIO=n || IIO
        help
          This driver supports NTC thermistors sensor reading and its
          interpretation. The driver can also monitor the temperature and
index 8a17f01..e76feb8 100644 (file)
@@ -44,6 +44,7 @@ struct ntc_compensation {
        unsigned int    ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
        { "ncp15wb473", TYPE_NCPXXWB473 },
        { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
        char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
        struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
        { .compatible = "ntc,ncp15wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[0] },
        { .compatible = "ntc,ncp18wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[1] },
        { .compatible = "ntc,ncp21wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[2] },
        { .compatible = "ntc,ncp03wb473",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+               .data = &ntc_thermistor_id[3] },
        { .compatible = "ntc,ncp15wl333",
-               .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+               .data = &ntc_thermistor_id[4] },
        { },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        return NULL;
 }
 
+#define ntc_match      NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif
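
The old table pointed .data at ntc_thermistor_id[TYPE_*], but the TYPE_* enum values do not match the array layout (four wb473 compatibles share TYPE_NCPXXWB473), so some compatibles resolved to the wrong platform_device_id and hence the wrong compensation table. The fix indexes by position, and the new comment documents that coupling. A less position-dependent alternative would carry the type itself (a sketch, not the applied fix):

static const struct of_device_id ntc_match_alt[] = {
        /* .data holds the TYPE_* value directly instead of a table slot */
        { .compatible = "ntc,ncp15wb473",
                .data = (void *)(uintptr_t)TYPE_NCPXXWB473 },
        { .compatible = "ntc,ncp15wl333",
                .data = (void *)(uintptr_t)TYPE_NCPXXWL333 },
        { },
};
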
index 1b6dbe1..199c789 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC    ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+                              struct net_device *dev,
+                              int port)
+{
+       u64 new_smac = 0;
+       u64 release_mac = MLX4_IB_INVALID_MAC;
+       struct mlx4_ib_qp *qp;
+
+       read_lock(&dev_base_lock);
+       new_smac = mlx4_mac_to_u64(dev->dev_addr);
+       read_unlock(&dev_base_lock);
+
+       mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+       qp = ibdev->qp1_proxy[port - 1];
+       if (qp) {
+               int new_smac_index;
+               u64 old_smac = qp->pri.smac;
+               struct mlx4_update_qp_params update_params;
+
+               if (new_smac == old_smac)
+                       goto unlock;
+
+               new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+               if (new_smac_index < 0)
+                       goto unlock;
+
+               update_params.smac_index = new_smac_index;
+               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+                                  &update_params)) {
+                       release_mac = new_smac;
+                       goto unlock;
+               }
+
+               qp->pri.smac = new_smac;
+               qp->pri.smac_index = new_smac_index;
+
+               release_mac = old_smac;
+       }
+
+unlock:
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+       if (release_mac != MLX4_IB_INVALID_MAC)
+               mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
                                 struct mlx4_ib_dev *ibdev, u8 port)
 {
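
mlx4_ib_update_qps() keeps the proxy QP1's source MAC in step with its netdev. Its discipline, reduced to a decision table (the release is deliberately done after qp1_proxy_lock is dropped, since mlx4_unregister_mac() may sleep):

        /*
         *   mlx4_register_mac(new) fails -> nothing to undo
         *   mlx4_update_qp() fails       -> release_mac = new (QP keeps old)
         *   both succeed                 -> release_mac = old (QP now on new)
         */
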
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+                                struct net_device *dev,
+                                unsigned long event)
+
 {
        struct mlx4_ib_iboe *iboe;
+       int update_qps_port = -1;
        int port;
 
        iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
                }
                curr_master = iboe->masters[port - 1];
 
+               if (dev == iboe->netdevs[port - 1] &&
+                   (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+                    event == NETDEV_UP || event == NETDEV_CHANGE))
+                       update_qps_port = port;
+
                if (curr_netdev) {
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
        }
 
        spin_unlock(&iboe->lock);
+
+       if (update_qps_port > 0)
+               mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-       mlx4_ib_scan_netdevs(ibdev);
+       mlx4_ib_scan_netdevs(ibdev, dev, event);
 
        return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                goto err_map;
 
        for (i = 0; i < ibdev->num_ports; ++i) {
+               mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
                        err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                for (i = 1 ; i <= ibdev->num_ports ; ++i)
                        reset_gid_table(ibdev, i);
                rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev);
+               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
                rtnl_unlock();
                mlx4_ib_init_gid_table(ibdev);
        }
index f589522..66b0b7d 100644 (file)
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
        int steer_qpn_count;
        int steer_qpn_base;
        int steering_support;
+       struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+       /* lock when destroying qp1_proxy and getting netdev events */
+       struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
 };
 
 struct ib_event_work {
index 41308af..dc57482 100644 (file)
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
        if (is_qp0(dev, mqp))
                mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+       if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+               mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+               dev->qp1_proxy[mqp->port - 1] = NULL;
+               mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+       }
+
        pd = get_pd(mqp);
        destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
                                if (err)
                                        return -EINVAL;
+                               if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+                                       dev->qp1_proxy[qp->port - 1] = qp;
                        }
                }
        }
index 9f69e81..93580a4 100644 (file)
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr);
+       alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                           u16 vid)
+                           __be16 vlan_proto, u16 vid)
 {
        struct learning_pkt pkt;
        struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        skb->dev = slave->dev;
 
        if (vid) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+               skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
                        pr_err("%s: Error: failed to insert VLAN tag\n",
                               slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
 
        /* send untagged */
-       alb_send_lp_vid(slave, mac_addr, 0);
+       alb_send_lp_vid(slave, mac_addr, 0, 0);
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-               if (upper->priv_flags & IFF_802_1Q_VLAN)
-                       alb_send_lp_vid(slave, mac_addr,
-                                       vlan_dev_vlan_id(upper));
+               if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                       if (strict_match &&
+                           ether_addr_equal_64bits(mac_addr,
+                                                   upper->dev_addr)) {
+                               alb_send_lp_vid(slave, mac_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       } else if (!strict_match) {
+                               alb_send_lp_vid(slave, upper->dev_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       }
+               }
        }
        rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
        /* fasten the change in the switch */
        if (SLAVE_IS_OK(slave1)) {
-               alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+               alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
        }
 
        if (SLAVE_IS_OK(slave2)) {
-               alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+               alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
        /* send learning packets */
        if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+               bool strict_match;
+
                /* change of curr_active_slave involves swapping of mac addresses.
                 * in order to avoid this swapping from happening while
                 * sending the learning packets, the curr_slave_lock must be
                 * held for read.
                 */
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave_rcu(bond, slave, iter)
-                       alb_send_learning_packets(slave, slave->dev->dev_addr);
+               bond_for_each_slave_rcu(bond, slave, iter) {
+                       /* If updating current_active, use all the in-use
+                        * user mac addresses (!strict_match). Otherwise,
+                        * only use the mac of the slave device.
+                        */
+                       strict_match = (slave != bond->curr_active_slave);
+                       alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                 strict_match);
+               }
 
                read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        } else {
                /* set the new_slave to the bond mac address */
                alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-               alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+               alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                         false);
        }
 
        write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_send_learning_packets(bond->curr_active_slave,
+                                         bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
                        rlb_req_update_slave_clients(bond, bond->curr_active_slave);
index 69aff72..d3a6789 100644 (file)
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          __be32 dest_ip, __be32 src_ip,
-                         struct bond_vlan_tag *inner,
-                         struct bond_vlan_tag *outer)
+                         struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
+       int i;
 
        pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (outer->vlan_id) {
-               if (inner->vlan_id) {
-                       pr_debug("inner tag: proto %X vid %X\n",
-                                ntohs(inner->vlan_proto), inner->vlan_id);
-                       skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                            inner->vlan_id);
-                       if (!skb) {
-                               net_err_ratelimited("failed to insert inner VLAN tag\n");
-                               return;
-                       }
-               }
 
-               pr_debug("outer reg: proto %X vid %X\n",
-                        ntohs(outer->vlan_proto), outer->vlan_id);
-               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+       /* Go through all the tags backwards and add them to the packet */
+       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+               if (!tags[i].vlan_id)
+                       continue;
+
+               pr_debug("inner tag: proto %X vid %X\n",
+                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                    tags[i].vlan_id);
+               if (!skb) {
+                       net_err_ratelimited("failed to insert inner VLAN tag\n");
+                       return;
+               }
+       }
+       /* Set the outer tag */
+       if (tags[0].vlan_id) {
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
        arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+                                   struct net_device *end_dev,
+                                   struct bond_vlan_tag *tags)
+{
+       struct net_device *upper;
+       struct list_head  *iter;
+       int  idx;
+
+       if (start_dev == end_dev)
+               return true;
+
+       netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+               if (bond_verify_device_path(upper, end_dev, tags)) {
+                       if (is_vlan_dev(upper)) {
+                               idx = vlan_get_encap_level(upper);
+                               if (idx >= BOND_MAX_VLAN_ENCAP)
+                                       return false;
+
+                               tags[idx].vlan_proto =
+                                                   vlan_dev_vlan_proto(upper);
+                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                       }
+                       return true;
+               }
+       }
+
+       return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       struct net_device *upper, *vlan_upper;
-       struct list_head *iter, *vlan_iter;
        struct rtable *rt;
-       struct bond_vlan_tag inner, outer;
+       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
+       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
-               inner.vlan_proto = 0;
-               inner.vlan_id = 0;
-               outer.vlan_proto = 0;
-               outer.vlan_id = 0;
+               memset(tags, 0, sizeof(tags));
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                                net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                     bond->dev->name,
                                                     &targets[i]);
-                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                     0, tags);
                        continue;
                }
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               /* first we search only for vlan devices. for every vlan
-                * found we verify its upper dev list, searching for the
-                * rt->dst.dev. If found we save the tag of the vlan and
-                * proceed to send the packet.
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                 vlan_iter) {
-                       if (!is_vlan_dev(vlan_upper))
-                               continue;
-
-                       if (vlan_upper == rt->dst.dev) {
-                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                               rcu_read_unlock();
-                               goto found;
-                       }
-                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                         iter) {
-                               if (upper == rt->dst.dev) {
-                                       /* If the upper dev is a vlan dev too,
-                                        *  set the vlan tag to inner tag.
-                                        */
-                                       if (is_vlan_dev(upper)) {
-                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                               inner.vlan_id = vlan_dev_vlan_id(upper);
-                                       }
-                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                       rcu_read_unlock();
-                                       goto found;
-                               }
-                       }
-               }
-
-               /* if the device we're looking for is not on top of any of
-                * our upper vlans, then just search for any dev that
-                * matches, and in case it's a vlan - save the id
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                       if (upper == rt->dst.dev) {
-                               rcu_read_unlock();
-                               goto found;
-                       }
-               }
+               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                rcu_read_unlock();
 
+               if (ret)
+                       goto found;
+
                /* Not our device - skip */
                pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                         bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, &inner, &outer);
+                             addr, tags);
        }
 }
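
bond_verify_device_path() replaces the two special-cased vlan scans: a single recursion both proves that rt->dst.dev sits somewhere above the bond and records each vlan crossed, indexed by vlan_get_encap_level(). A worked example with an assumed Q-in-Q stack (encapsulation levels assumed to be 0 for the vlan closest to the bond):

        /*
         * Stack: vlan100 (802.1Q) on vlan2000 (802.1ad) on bond0, and
         * rt->dst.dev == vlan100.
         *
         * The recursion bottoms out at vlan100 and fills tags[] while
         * unwinding:
         *   vlan100  -> level 1 -> tags[1] = { ETH_P_8021Q,  100  }
         *   vlan2000 -> level 0 -> tags[0] = { ETH_P_8021AD, 2000 }
         *
         * bond_arp_send() then inserts tags[BOND_MAX_VLAN_ENCAP-1..1] as
         * inner tags and tags[0] as the outer tag, so the ARP leaves the
         * slave with outer vid 2000 and inner vid 100.
         */
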
 
index 724e30f..8320702 100644 (file)
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+       { NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
index b8bdd0a..00bea32 100644 (file)
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
index 8ab7103..61ffc12 100644 (file)
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
          SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
          boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-       bool "Force a strict RX CAN frame order (may cause frame loss)"
-       ---help---
-         The RX split buffer prevents packet reordering but can cause packet
-         loss. Only enable this option when you accept to lose CAN frames
-         in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
        tristate "Generic PCI Bus based C_CAN/D_CAN driver"
        depends on PCI
index a2ca820..95e04e2 100644 (file)
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-       else
-#endif
                c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               priv->rxmasked |= BIT(obj - 1);
-       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-               priv->rxmasked = 0;
-               /* activate all lower message objects */
-               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-       }
-#endif
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
        u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       pend &= ~priv->rxmasked;
-#endif
        return pend;
 }
 
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
  * We clear the newdat bit right away.
  *
  * This can result in packet reordering when the readout is slow.
index c540e3d..564933a 100644 (file)
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct sja1000_priv *priv;
        struct peak_pci_chan *chan;
-       struct net_device *dev;
+       struct net_device *dev, *prev_dev;
        void __iomem *cfg_base, *reg_base;
        u16 sub_sys_id, icr;
        int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
        writew(0x0, cfg_base + PITA_ICR + 2);
 
        chan = NULL;
-       for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-               unregister_sja1000dev(dev);
-               free_sja1000dev(dev);
+       for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
+               prev_dev = chan->prev_dev;
+
+               unregister_sja1000dev(dev);
+               free_sja1000dev(dev);
        }
 
        /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
        /* Loop over all registered devices */
        while (1) {
+               struct net_device *prev_dev = chan->prev_dev;
+
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
-               dev = chan->prev_dev;
+               dev = prev_dev;
 
                if (!dev) {
                        /* do that only for first channel */
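
Both the probe error path and peak_pci_remove() used to read chan->prev_dev after unregister_sja1000dev()/free_sja1000dev() had already destroyed the netdev that chan lives inside: a use-after-free. The cure is the usual save-the-link-before-freeing walk, the same idea behind list_for_each_entry_safe():

        for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
                prev_dev = chan->prev_dev;      /* read the link first ... */

                unregister_sja1000dev(dev);
                free_sja1000dev(dev);           /* ... because chan dies here */
        }
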
index 39b26fe..d740101 100644 (file)
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for EtherCAT master module located on CCAT FPGA
+         that can be found on the Beckhoff CX5020 and possibly other
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
index 545d0b3..35190e3 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
index d4a187e..3eff2fd 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
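
-D__CHECK_ENDIAN__ enables sparse's __le32/__be32 bitwise checking for the whole driver, which is what the msgdma rework below prepares for: rather than dereferencing a struct msgdma_csr * aimed at device memory, every register access goes through base-plus-offset accessors, giving sparse one typed choke point to verify. Plausible definitions (assumed here; the real ones live in the driver's headers):

#include <linux/io.h>
#include <linux/kernel.h>

#define msgdma_csroffs(a)       offsetof(struct msgdma_csr, a)
#define msgdma_descroffs(a)     offsetof(struct msgdma_extended_desc, a)

static inline void csrwr32(u32 val, void __iomem *csr, size_t offs)
{
        writel(val, csr + offs);
}

static inline u32 csrrd32(void __iomem *csr, size_t offs)
{
        return readl(csr + offs);
}
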
index 4d1f2fd..0fb986b 100644 (file)
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
 void msgdma_reset(struct altera_tse_private *priv)
 {
        int counter;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
 
        /* Reset Rx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+               msgdma_csroffs(status));
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&rxcsr->status,
+               if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Rx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
        /* Reset Tx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+               msgdma_csroffs(status));
+
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&txcsr->status,
+               if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Tx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-       iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-       iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-       iowrite32(0, &desc->write_addr_lo);
-       iowrite32(0, &desc->write_addr_hi);
-       iowrite32(buffer->len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-       iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+       csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_lo));
+       csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_hi));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+       csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+               msgdma_descroffs(stride));
+       csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+               msgdma_descroffs(control));
        return 0;
 }
 
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        u32 ready = 0;
        u32 inuse;
        u32 status;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
 
        /* Get number of sent descriptors */
-       inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+       inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+                       & 0xffff;
 
        if (inuse) { /* Tx FIFO is not empty */
                ready = priv->tx_prod - priv->tx_cons - inuse - 1;
        } else {
                /* Check for buffered last packet */
-               status = ioread32(&txcsr->status);
+               status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
                if (status & MSGDMA_CSR_STAT_BUSY)
                        ready = priv->tx_prod - priv->tx_cons - 1;
                else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        struct tse_buffer *rxbuffer)
 {
-       struct msgdma_extended_desc *desc = priv->rx_dma_desc;
        u32 len = priv->rx_dma_buf_sz;
        dma_addr_t dma_addr = rxbuffer->dma_addr;
        u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        | MSGDMA_DESC_CTL_TR_ERR_IRQ
                        | MSGDMA_DESC_CTL_GO);
 
-       iowrite32(0, &desc->read_addr_lo);
-       iowrite32(0, &desc->read_addr_hi);
-       iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-       iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-       iowrite32(len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(0x00010001, &desc->stride);
-       iowrite32(control, &desc->control);
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+       csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_lo));
+       csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_hi));
+       csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+       csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
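
The stride word packs two half-word strides; the 0x00010001 written above requests a read stride and a write stride of 1. A hedged sketch of the composition (the high/low assignment is inferred from the MSGDMA_DESC_TX_STRIDE usage and is an assumption, not spelled out in this patch):

	/* Assumed layout: write stride in bits 31:16, read stride in 15:0. */
	u32 stride = (1 << 16) | 1;	/* == 0x00010001 for stride 1/1 */
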
 
 /* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
        u32 rxstatus = 0;
        u32 pktlength;
        u32 pktstatus;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
-       struct msgdma_response *rxresp =
-               (struct msgdma_response *)priv->rx_dma_resp;
-
-       if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-               pktlength = ioread32(&rxresp->bytes_transferred);
-               pktstatus = ioread32(&rxresp->status);
+
+       if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+           & 0xffff) {
+               pktlength = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(bytes_transferred));
+               pktstatus = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(status));
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
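
Both DMA variants pack the RX result the same way: response status in the upper 16 bits, frame length in the lower 16. A minimal decode sketch (not part of the patch; helper names are hypothetical):

	/* Sketch: unpack the value returned by msgdma_rx_status() or
	 * sgdma_rx_status().
	 */
	static inline u16 tse_rx_err_bits(u32 rxstatus)
	{
		return rxstatus >> 16;		/* response status bits */
	}

	static inline u16 tse_rx_frame_len(u32 rxstatus)
	{
		return rxstatus & 0xffff;	/* frame length in bytes */
	}
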
index d7b59ba..e335626 100644
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-       u32 read_addr;  /* data buffer source address */
-       u32 write_addr; /* data buffer destination address */
-       u32 len;        /* the number of bytes to transfer per descriptor */
-       u32 control;    /* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
        u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM BIT(8)
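
These offsetof() macros are the heart of the conversion: the register structs stay as pure layout descriptions while every access goes through an __iomem-typed helper, instead of dereferencing a struct pointer into MMIO space. A simplified sketch of what a converted write expands to (per the csrwr32() definition added in altera_tse.h later in this patch):

	csrwr32(val, priv->rx_dma_csr, msgdma_csroffs(control));

	/* ... is effectively ... */
	writel(val, (void __iomem *)((uintptr_t)priv->rx_dma_csr +
				     offsetof(struct msgdma_csr, control)));
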
index 9ce8630..99cc56f 100644
@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;
 
-       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+       priv->rxdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
-       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+       priv->txdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);
 
        if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
        }
 
        /* Initialize descriptor memory to all 0's, sync memory to cache */
-       memset(priv->tx_dma_desc, 0, priv->txdescmem);
-       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-       u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-       u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
        /* Initialize descriptor memory to 0 */
-       memset(ptxdescripmem, 0, txdescriplen);
-       memset(prxdescripmem, 0, rxdescriplen);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-       iowrite32(0, &ptxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-       iowrite32(0, &prxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       int pktstx = 0;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
 
-       pktstx = sgdma_async_write(priv, cdesc);
+       sgdma_async_write(priv, cdesc);
 
        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
        u32 ready = 0;
-       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
        if (!sgdma_txbusy(priv) &&
-           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+            & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-       struct sgdma_descrip *desc = NULL;
-       int pktsrx;
-       unsigned int rxstatus = 0;
-       unsigned int pktlength = 0;
-       unsigned int pktstatus = 0;
+       struct sgdma_descrip __iomem *base =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
+       unsigned int rxstatus = 0;
 
-       u32 sts = ioread32(&csr->status);
+       u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
+               unsigned int pktlength = 0;
+               unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);
 
-               pktlength = desc->bytes_xferred;
-               pktstatus = desc->status & 0x3f;
-               rxstatus = pktstatus;
+               pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+               pktstatus = csrrd8(desc, sgdma_descroffs(status));
+               rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
                if (rxstatus) {
-                       desc->status = 0;
+                       csrwr8(0, desc, sgdma_descroffs(status));
 
                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                                            "sgdma rx and rx queue empty!\n");
 
                        /* Clear control */
-                       iowrite32(0, &csr->control);
+                       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
-                       iowrite32(0xf, &csr->status);
+                       csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
 
                        /* kick the rx sgdma after reaping this descriptor */
-                       pktsrx = sgdma_async_read(priv);
+                       sgdma_async_read(priv);
 
                } else {
                        /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
-                                  sts, desc->status, rxstatus);
+                                  sts, csrrd8(desc, sgdma_descroffs(status)),
+                                  rxstatus);
                }
        } else if (sts == 0) {
-               pktsrx = sgdma_async_read(priv);
+               sgdma_async_read(priv);
        }
 
        return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                int wfixed)
 {
        /* Clear the next descriptor as not owned by hardware */
-       u32 ctrl = ndesc->control;
+
+       u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-       ndesc->control = ctrl;
+       csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-       ctrl = 0;
        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;
 
        /* Channel is implicitly zero, initialized to 0 by default */
-
-       desc->raddr = raddr;
-       desc->waddr = waddr;
-       desc->next = lower_32_bits(ndesc_phys);
-       desc->control = ctrl;
-       desc->status = 0;
-       desc->rburst = 0;
-       desc->wburst = 0;
-       desc->bytes = length;
-       desc->bytes_xferred = 0;
+       csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+       csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+       csrwr32(0, desc, sgdma_descroffs(pad1));
+       csrwr32(0, desc, sgdma_descroffs(pad2));
+       csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+       csrwr8(ctrl, desc, sgdma_descroffs(control));
+       csrwr8(0, desc, sgdma_descroffs(status));
+       csrwr8(0, desc, sgdma_descroffs(wburst));
+       csrwr8(0, desc, sgdma_descroffs(rburst));
+       csrwr16(length, desc, sgdma_descroffs(bytes));
+       csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        struct tse_buffer *rxbuffer = NULL;
 
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);
 
-               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                         &csr->next_descrip);
+               csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(next_descrip));
 
-               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                         &csr->control);
+               csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(control));
 
                return 1;
        }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                            struct sgdma_descrip *desc)
+                            struct sgdma_descrip __iomem *desc)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
        if (sgdma_txbusy(priv))
                return 0;
 
        /* clear control and status */
-       iowrite32(0, &csr->control);
-       iowrite32(0x1f, &csr->status);
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                 &csr->next_descrip);
+       csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+               priv->tx_dma_csr,
+               sgdma_csroffs(next_descrip));
 
-       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                 &csr->control);
+       csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+               priv->tx_dma_csr,
+               sgdma_csroffs(control));
 
        return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+       return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+                      & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish its current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
        int delay = 0;
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
 	/* if DMA is busy, wait for the current transaction to finish */
-       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+       while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);
 
-       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+       if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+           & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
index ba3334f..85bc33b 100644
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-       unsigned int    raddr; /* address of data to be read */
-       unsigned int    pad1;
-       unsigned int    waddr;
-       unsigned int    pad2;
-       unsigned int    next;
-       unsigned int    pad3;
-       unsigned short  bytes;
-       unsigned char   rburst;
-       unsigned char   wburst;
-       unsigned short  bytes_xferred;  /* 16 bits, bytes xferred */
+       u32     raddr; /* address of data to be read */
+       u32     pad1;
+       u32     waddr;
+       u32     pad2;
+       u32     next;
+       u32     pad3;
+       u16     bytes;
+       u8      rburst;
+       u8      wburst;
+       u16     bytes_xferred;  /* 16 bits, bytes xferred */
 
        /* bit 0: error
         * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
         * bit 6: reserved
         * bit 7: status eop for recv case
         */
-       unsigned char   status;
+       u8      status;
 
        /* bit 0: eop
         * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
         * bits 3,4,5,6: Channel (always 0)
         * bit 7: hardware owned
         */
-       unsigned char   control;
+       u8      control;
 } __packed;
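
Fixed-width types plus __packed are what make the offsetof()-based accessors trustworthy against the hardware layout. A hypothetical compile-time guard (not in the patch; BUILD_BUG_ON must sit inside a function, e.g. an init routine) could pin the layout down:

	/* Hypothetical layout checks; offsets follow the struct above. */
	BUILD_BUG_ON(offsetof(struct sgdma_descrip, bytes) != 24);
	BUILD_BUG_ON(offsetof(struct sgdma_descrip, control) != 31);
	BUILD_BUG_ON(sizeof(struct sgdma_descrip) != 32);
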
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
        u32     pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR       BIT(0) /* Error */
 #define SGDMA_STSREG_EOP       BIT(1) /* EOP */
index 465c4aa..2adb24d 100644
@@ -357,6 +357,8 @@ struct altera_tse_mac {
        u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC                BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
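
A short usage sketch: the width-specific variants matter because the SGDMA descriptor mixes u32, u16 and u8 fields, so each access must match the field width. Assuming desc is the struct sgdma_descrip __iomem * cursor from sgdma_rx_status() above:

	u16 xferred = csrrd16(desc, sgdma_descroffs(bytes_xferred));
	u8  status  = csrrd8(desc, sgdma_descroffs(status));

	csrwr8(0, desc, sgdma_descroffs(status));	/* ack the descriptor */
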
index 76133ca..54c25ef 100644
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                           u64 *buf)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;
 
-       buf[0] = ioread32(&mac->frames_transmitted_ok);
-       buf[1] = ioread32(&mac->frames_received_ok);
-       buf[2] = ioread32(&mac->frames_check_sequence_errors);
-       buf[3] = ioread32(&mac->alignment_errors);
+       buf[0] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_transmitted_ok));
+       buf[1] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_received_ok));
+       buf[2] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_check_sequence_errors));
+       buf[3] = csrrd32(priv->mac_dev,
+                        tse_csroffs(alignment_errors));
 
        /* Extended aOctetsTransmittedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-       ext |= ioread32(&mac->octets_transmitted_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_transmitted_ok));
        buf[4] = ext;
 
        /* Extended aOctetsReceivedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-       ext |= ioread32(&mac->octets_received_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_received_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_received_ok));
        buf[5] = ext;
 
-       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-       buf[8] = ioread32(&mac->if_in_errors);
-       buf[9] = ioread32(&mac->if_out_errors);
-       buf[10] = ioread32(&mac->if_in_ucast_pkts);
-       buf[11] = ioread32(&mac->if_in_multicast_pkts);
-       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-       buf[13] = ioread32(&mac->if_out_discards);
-       buf[14] = ioread32(&mac->if_out_ucast_pkts);
-       buf[15] = ioread32(&mac->if_out_multicast_pkts);
-       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-       buf[17] = ioread32(&mac->ether_stats_drop_events);
+       buf[6] = csrrd32(priv->mac_dev,
+                        tse_csroffs(tx_pause_mac_ctrl_frames));
+       buf[7] = csrrd32(priv->mac_dev,
+                        tse_csroffs(rx_pause_mac_ctrl_frames));
+       buf[8] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_in_errors));
+       buf[9] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_out_errors));
+       buf[10] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_ucast_pkts));
+       buf[11] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_multicast_pkts));
+       buf[12] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_broadcast_pkts));
+       buf[13] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_discards));
+       buf[14] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_ucast_pkts));
+       buf[15] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_multicast_pkts));
+       buf[16] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_broadcast_pkts));
+       buf[17] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_drop_events));
 
        /* Extended etherStatsOctets counter */
-       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-       ext |= ioread32(&mac->ether_stats_octets);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_ether_stats_octets)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(ether_stats_octets));
        buf[18] = ext;
 
-       buf[19] = ioread32(&mac->ether_stats_pkts);
-       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-       buf[29] = ioread32(&mac->ether_stats_jabbers);
-       buf[30] = ioread32(&mac->ether_stats_fragments);
+       buf[19] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts));
+       buf[20] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_undersize_pkts));
+       buf[21] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_oversize_pkts));
+       buf[22] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_64_octets));
+       buf[23] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_65to127_octets));
+       buf[24] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_128to255_octets));
+       buf[25] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_256to511_octets));
+       buf[26] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_512to1023_octets));
+       buf[27] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1024to1518_octets));
+       buf[28] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1519tox_octets));
+       buf[29] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_jabbers));
+       buf[30] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
        int i;
        struct altera_tse_private *priv = netdev_priv(dev);
-       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
        u32 *buf = regbuf;
 
        /* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        regs->version = 1;
 
        for (i = 0; i < TSE_NUM_REGS; i++)
-               buf[i] = ioread32(&tse_mac_regs[i]);
+               buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index e44a4ae..7330681 100644
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-       u32 data;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* get the data */
-       data = ioread32(&mdio_regs[regnum]) & 0xffff;
-       return data;
+       return csrrd32(priv->mac_dev,
+                      tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 value)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* write the data */
-       iowrite32((u32) value, &mdio_regs[regnum]);
+       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
        return 0;
 }
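
Since the 32 MDIO shadow registers sit at consecutive 4-byte offsets from mdio_phy0, any PHY register is reachable by offset arithmetic once the PHY address is latched. A hypothetical PHY-ID probe using this scheme (MII_PHYSID1/MII_PHYSID2 come from linux/mii.h; the probe itself is not part of the patch):

	u32 id;

	/* latch the PHY address, as the read/write paths above do */
	csrwr32(phy_addr & 0x1f, priv->mac_dev, tse_csroffs(mdio_phy0_addr));

	id  = csrrd32(priv->mac_dev,
		      tse_csroffs(mdio_phy0) + MII_PHYSID1 * 4) << 16;
	id |= csrrd32(priv->mac_dev,
		      tse_csroffs(mdio_phy0) + MII_PHYSID2 * 4) & 0xffff;
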
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdio->irq[i] = PHY_POLL;
 
-       mdio->priv = priv->mac_dev;
+       mdio->priv = dev;
        mdio->parent = priv->device;
 
        ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        enum netdev_tx ret = NETDEV_TX_OK;
        dma_addr_t dma_addr;
-       int txcomplete = 0;
 
        spin_lock_bh(&priv->tx_lock);
 
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(priv->device, buffer->dma_addr,
                                   buffer->len, DMA_TO_DEVICE);
 
-       txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+       priv->dmaops->tx_buffer(priv, buffer);
 
        skb_tx_timestamp(skb);
 
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-       int ret;
 
        if (priv->phy_addr != POLL_PHY) {
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
                        netdev_err(dev, "Could not attach to PHY\n");
 
        } else {
+               int ret;
                phydev = phy_find_first(priv->mdio);
                if (phydev == NULL) {
                        netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        u32 msb;
        u32 lsb;
 
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
        lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
        /* Set primary MAC address */
-       iowrite32(msb, &mac->mac_addr_0);
-       iowrite32(lsb, &mac->mac_addr_1);
+       csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+       csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-       void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
        int counter;
        u32 dat;
 
-       dat = ioread32(cmd_cfg_reg);
+       dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
        dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-       iowrite32(dat, cmd_cfg_reg);
+       csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+               if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+                                    MAC_CMDCFG_SW_RESET))
                        break;
                udelay(1);
        }
 
        if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               dat = ioread32(cmd_cfg_reg);
+               dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
                dat &= ~MAC_CMDCFG_SW_RESET;
-               iowrite32(dat, cmd_cfg_reg);
+               csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
                return -1;
        }
        return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        unsigned int cmd = 0;
        u32 frm_length;
 
        /* Setup Rx FIFO */
-       iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-                 &mac->rx_section_empty);
-       iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-       iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-       iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+       csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(rx_section_empty));
+
+       csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(rx_section_full));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(rx_almost_empty));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(rx_almost_full));
 
        /* Setup Tx FIFO */
-       iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-                 &mac->tx_section_empty);
-       iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-       iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-       iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+       csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(tx_section_empty));
+
+       csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(tx_section_full));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(tx_almost_empty));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(tx_almost_full));
 
        /* MAC Address Configuration */
        tse_update_mac_addr(priv, priv->dev->dev_addr);
 
        /* MAC Function Configuration */
        frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-       iowrite32(frm_length, &mac->frm_length);
-       iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+       csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+       csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+               tse_csroffs(tx_ipg_length));
 
        /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
         * start address
         */
-       tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-       tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-                                        ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+       tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+                   ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+       tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+                     ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+                     ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
-       cmd = ioread32(&mac->command_config);
+       cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
        cmd &= ~MAC_CMDCFG_ETH_SPEED;
        cmd &= ~MAC_CMDCFG_ENA_10;
 
-       iowrite32(cmd, &mac->command_config);
+       csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 
-       iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+       csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+               tse_csroffs(pause_quanta));
 
        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
-       u32 value = ioread32(&mac->command_config);
+       u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
        if (enable)
                value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
        else
                value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-       iowrite32(value, &mac->command_config);
+       csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
        struct netdev_hw_addr *ha;
 
        /* clear the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(0, &(mac->hash_table[i]));
+               csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
        netdev_for_each_mc_addr(ha, dev) {
                unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
                        hash = (hash << 1) | xor_bit;
                }
-               iowrite32(1, &(mac->hash_table[hash]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
        }
 }
 
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
 
        /* set the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(1, &(mac->hash_table[i]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if (dev->flags & IFF_PROMISC)
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
 
        if (dev->flags & IFF_ALLMULTI)
                altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
            !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
        else
-               tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+                             MAC_CMDCFG_PROMIS_EN);
 
        spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-hash-multicast-filter");
 
+       /* Force the hash filter off for now, until the multicast
+        * filter receive issue is debugged
+        */
+       priv->hash_filter = 0;
+
        /* get supplemental address settings for this instance */
        priv->added_unicast =
                of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
        .altera_dtype = ALTERA_DTYPE_SGDMA,
        .dmamask = 32,
        .reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
        .start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,
        .reset_dma = msgdma_reset,
index 70fa13f..d7eeb17 100644
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value |= bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value &= ~bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 0 : 1;
 }
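
These helpers are plain read-modify-write with no atomicity of their own; callers serialize around them, as tse_set_rx_mode() above does with mac_cfg_lock. Usage sketch:

	spin_lock(&priv->mac_cfg_lock);
	tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
		    MAC_CMDCFG_PROMIS_EN);
	spin_unlock(&priv->mac_cfg_lock);
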
index ce1db36..baf100c 100644
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
index b260913..3b0d431 100644
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)     (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)     (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
        u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
+               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         * cleaning methods - might be redundant but harmless.
                         */
                        if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               bnx2x_prev_unload_undi_mf(bp);
+                               if (need_write) {
+                                       bnx2x_prev_unload_undi_mf(bp);
+                                       need_write = false;
+                               }
                        } else if (prev_undi) {
                                /* If UNDI resides in memory,
                                 * manually increment it
index 81cc2d9..b8078d5 100644
@@ -2695,7 +2695,7 @@ out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
-       return 0;
+       return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
index 0c067e8..784c715 100644
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-       return 0;
+       return rc;
 }
 
 /* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 0000000..4884205
--- /dev/null
@@ -0,0 +1,706 @@
+/*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
+ * These can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC    20000
+
+#define INFO_BLOCK_SIZE                0x10
+#define INFO_BLOCK_TYPE                0x0
+#define INFO_BLOCK_REV         0x2
+#define INFO_BLOCK_BLK_CNT     0x4
+#define INFO_BLOCK_TX_CHAN     0x4
+#define INFO_BLOCK_RX_CHAN     0x5
+#define INFO_BLOCK_OFFSET      0x8
+
+#define EC_MII_OFFSET          0x4
+#define EC_FIFO_OFFSET         0x8
+#define EC_MAC_OFFSET          0xc
+
+#define MAC_FRAME_ERR_CNT      0x0
+#define MAC_RX_ERR_CNT         0x1
+#define MAC_CRC_ERR_CNT                0x2
+#define MAC_LNK_LST_ERR_CNT    0x3
+#define MAC_TX_FRAME_CNT       0x10
+#define MAC_RX_FRAME_CNT       0x14
+#define MAC_TX_FIFO_LVL                0x20
+#define MAC_DROPPED_FRMS       0x28
+#define MAC_CONNECTED_CCAT_FLAG        0x78
+
+#define MII_MAC_ADDR           0x8
+#define MII_MAC_FILT_FLAG      0xe
+#define MII_LINK_STATUS                0xf
+
+#define FIFO_TX_REG            0x0
+#define FIFO_TX_RESET          0x8
+#define FIFO_RX_REG            0x10
+#define FIFO_RX_ADDR_VALID     (1u << 31)
+#define FIFO_RX_RESET          0x18
+
+#define DMA_CHAN_OFFSET                0x1000
+#define DMA_CHAN_SIZE          0x8
+
+#define DMA_WINDOW_SIZE_MASK   0xfffffffc
+
+static struct pci_device_id ids[] = {
+       { PCI_DEVICE(0x15ec, 0x5000), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK   0xffffffu
+#define RXHDR_NEXT_VALID       (1u << 31)
+       __le32 next;
+#define RXHDR_NEXT_RECV_FLAG   0x1
+       __le32 recv;
+#define RXHDR_LEN_MASK         0xfffu
+       __le16 len;
+       __le16 port;
+       __le32 reserved;
+       u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE       0x7e8
+struct rx_desc {
+       struct rx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+       __le16 len;
+#define TX_HDR_PORT_0          0x1
+#define TX_HDR_PORT_1          0x2
+       u8 port;
+       u8 ts_enable;
+#define TX_HDR_SENT            0x1
+       __le32 sent;
+       u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+       struct tx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE              64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+       u8 *buf;
+       size_t len;
+       dma_addr_t buf_phys;
+
+       u8 *alloc;
+       size_t alloc_len;
+       dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+       struct net_device *net_dev;
+
+       struct pci_dev *dev;
+
+       void * __iomem io;
+       void * __iomem dma_io;
+
+       struct hrtimer hrtimer;
+
+       int tx_dma_chan;
+       int rx_dma_chan;
+       void * __iomem ec_io;
+       void * __iomem fifo_io;
+       void * __iomem mii_io;
+       void * __iomem mac_io;
+
+       struct bhf_dma rx_buf;
+       struct rx_desc *rx_descs;
+       int rx_dnext;
+       int rx_dcount;
+
+       struct bhf_dma tx_buf;
+       struct tx_desc *tx_descs;
+       int tx_dcount;
+       int tx_dnext;
+
+       u64 stat_rx_bytes;
+       u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID     0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       dev_dbg(dev, "Frame error counter: %d\n",
+               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+       dev_dbg(dev, "RX error counter: %d\n",
+               ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+       dev_dbg(dev, "CRC error counter: %d\n",
+               ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+       dev_dbg(dev, "TX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+       dev_dbg(dev, "RX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+       dev_dbg(dev, "TX fifo level: %d\n",
+               ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+       dev_dbg(dev, "Dropped frames: %d\n",
+               ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+       dev_dbg(dev, "Connected with CCAT slot: %d\n",
+               ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+       dev_dbg(dev, "Link status: %d\n",
+               ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+       iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+       iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+       iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+       iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+       iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+       iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+       iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+       u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+       u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+       iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+       dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+       return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+       if (unlikely(netif_queue_stopped(priv->net_dev))) {
+               /* Make sure that we perceive changes to tx_dnext. */
+               smp_rmb();
+
+               if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+                       netif_wake_queue(priv->net_dev);
+       }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+       return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+       iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+                 priv->fifo_io + FIFO_RX_REG);
+}
+
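+/* Pass completed RX descriptors up the stack and recycle each one
+ * back into the RX FIFO; called from the polling timer.
+ */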
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+       struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       while (ec_bhf_pkt_received(desc)) {
+               int pkt_size = (le16_to_cpu(desc->header.len) &
+                              RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+               u8 *data = desc->data;
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+               dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, pkt_size), data, pkt_size);
+                       skb->protocol = eth_type_trans(skb, priv->net_dev);
+                       dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+                       priv->stat_rx_bytes += pkt_size;
+
+                       netif_rx(skb);
+               } else {
+                       dev_err_ratelimited(dev,
+                               "Couldn't allocate a skb_buff for a packet of size %u\n",
+                               pkt_size);
+               }
+
+               desc->header.recv = 0;
+
+               ec_bhf_add_rx_desc(priv, desc);
+
+               priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+               desc = &priv->rx_descs[priv->rx_dnext];
+       }
+}
+
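+/* The device is serviced purely by polling: process RX and TX
+ * completions, then re-arm the timer unless the interface has been
+ * brought down in the meantime.
+ */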
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+       struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+                                               hrtimer);
+       ec_bhf_process_rx(priv);
+       ec_bhf_process_tx(priv);
+
+       if (!netif_running(priv->net_dev))
+               return HRTIMER_NORESTART;
+
+       hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+       return HRTIMER_RESTART;
+}
+
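+/* Scan the card's info block directory for the EtherCAT master
+ * function and derive the DMA channel numbers and the MII, FIFO and
+ * MAC register bases from its descriptor.
+ */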
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+       unsigned int block_count, i;
+       void __iomem *ec_info;
+
+       dev_dbg(dev, "Info block:\n");
+       dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+       dev_dbg(dev, "Revision of function: %x\n",
+               (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+       block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+       dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+       for (i = 0; i < block_count; i++) {
+               u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+                                   INFO_BLOCK_TYPE);
+               if (type == ETHERCAT_MASTER_ID)
+                       break;
+       }
+       if (i == block_count) {
+               dev_err(dev, "EtherCAT master with DMA block not found\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+       ec_info = priv->io + i * INFO_BLOCK_SIZE;
+       dev_dbg(dev, "EtherCAT master revision: %d\n",
+               ioread16(ec_info + INFO_BLOCK_REV));
+
+       priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+       dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+               priv->tx_dma_chan);
+
+       priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+       dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+                priv->rx_dma_chan);
+
+       priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+       priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+       priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+       priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+       dev_dbg(dev,
+               "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+               priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+       return 0;
+}
+
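+/* Copy the skb into the next free TX descriptor and kick the FIFO.
+ * The queue is stopped when the following descriptor has not been
+ * reclaimed by the hardware yet.
+ */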
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+                                    struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct tx_desc *desc;
+       unsigned int len;
+
+       dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+       desc = &priv->tx_descs[priv->tx_dnext];
+
+       skb_copy_and_csum_dev(skb, desc->data);
+       len = skb->len;
+
+       memset(&desc->header, 0, sizeof(desc->header));
+       desc->header.len = cpu_to_le16(len);
+       desc->header.port = TX_HDR_PORT_0;
+
+       ec_bhf_send_packet(priv, desc);
+
+       priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+       if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+               /* Make sure that updates to tx_dnext are perceived
+                * by the timer routine.
+                */
+               smp_wmb();
+
+               netif_stop_queue(net_dev);
+
+               dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+               ec_bhf_print_status(priv);
+       }
+
+       priv->stat_tx_bytes += len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
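+/* Discover the size and alignment of the channel's DMA window by
+ * writing all ones to its address register and reading back the
+ * resulting mask, then allocate a suitably aligned coherent buffer.
+ */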
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+                               struct bhf_dma *buf,
+                               int channel,
+                               int size)
+{
+       int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+       struct device *dev = PRIV_TO_DEV(priv);
+       u32 mask;
+
+       iowrite32(0xffffffff, priv->dma_io + offset);
+
+       mask = ioread32(priv->dma_io + offset);
+       mask &= DMA_WINDOW_SIZE_MASK;
+       dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+       /* We want to allocate a chunk of memory that is:
+        * - aligned to the mask we just read
+        * - at most 2^mask bytes in size.
+        * To guarantee both, we allocate twice that much and place
+        * the buffer in an aligned window inside the allocation.
+        */
+       buf->len = min_t(int, ~mask + 1, size);
+       buf->alloc_len = 2 * buf->len;
+
+       dev_dbg(dev, "Allocating %d bytes for channel %d",
+               (int)buf->alloc_len, channel);
+       buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+                                       GFP_KERNEL);
+       if (buf->alloc == NULL) {
+               dev_info(dev, "Failed to allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+       buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+       iowrite32(0, priv->dma_io + offset + 4);
+       iowrite32(buf->buf_phys, priv->dma_io + offset);
+       dev_dbg(dev, "Buffer: %x and read from dev: %x",
+               (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+       return 0;
+}
+
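+/* Carve the TX buffer into descriptors and mark each one as already
+ * sent, so that all of them are immediately available for transmission.
+ */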
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+       int i = 0;
+
+       priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+       priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+       priv->tx_dnext = 0;
+
+       for (i = 0; i < priv->tx_dcount; i++)
+               priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
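+/* Carve the RX buffer into descriptors, link each one to the next via
+ * its in-buffer offset and hand them all to the RX FIFO.
+ */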
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+       int i;
+
+       priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+       priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+       priv->rx_dnext = 0;
+
+       for (i = 0; i < priv->rx_dcount; i++) {
+               struct rx_desc *desc = &priv->rx_descs[i];
+               u32 next;
+
+               if (i != priv->rx_dcount - 1)
+                       next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+               else
+                       next = 0;
+               next |= RXHDR_NEXT_VALID;
+               desc->header.next = cpu_to_le32(next);
+               desc->header.recv = 0;
+               ec_bhf_add_rx_desc(priv, desc);
+       }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+       int err = 0;
+
+       dev_info(dev, "Opening device\n");
+
+       ec_bhf_reset(priv);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct rx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate rx buffer\n");
+               goto out;
+       }
+       ec_bhf_setup_rx_descs(priv);
+
+       dev_info(dev, "RX buffer allocated, address: %x\n",
+                (unsigned)priv->rx_buf.buf_phys);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct tx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate tx buffer\n");
+               goto error_rx_free;
+       }
+       dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+               (unsigned)priv->tx_buf.buf_phys);
+
+       iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+       ec_bhf_setup_tx_descs(priv);
+
+       netif_start_queue(net_dev);
+
+       hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->hrtimer.function = ec_bhf_timer_fun;
+       hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+                     HRTIMER_MODE_REL);
+
+       dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+       ec_bhf_print_status(priv);
+
+       return 0;
+
+error_rx_free:
+       dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+                         priv->rx_buf.alloc_phys);
+out:
+       return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       hrtimer_cancel(&priv->hrtimer);
+
+       ec_bhf_reset(priv);
+
+       netif_tx_disable(net_dev);
+
+       dma_free_coherent(dev, priv->tx_buf.alloc_len,
+                         priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+       dma_free_coherent(dev, priv->rx_buf.alloc_len,
+                         priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+       return 0;
+}
+
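+/* Byte counts are accumulated in software; packet and error counts
+ * are read straight from the MAC counters.
+ */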
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+                struct rtnl_link_stats64 *stats)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+       stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+       stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+       stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+       stats->tx_bytes = priv->stat_tx_bytes;
+       stats->rx_bytes = priv->stat_rx_bytes;
+
+       return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+       .ndo_start_xmit         = ec_bhf_start_xmit,
+       .ndo_open               = ec_bhf_open,
+       .ndo_stop               = ec_bhf_stop,
+       .ndo_get_stats64        = ec_bhf_get_stats,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr
+};
+
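+/* Map BAR 0 (function info and register blocks) and BAR 2 (DMA window
+ * configuration), locate the EtherCAT master block and register the
+ * net device.
+ */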
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct net_device *net_dev;
+       struct ec_bhf_priv *priv;
+       void __iomem *dma_io;
+       void __iomem *io;
+       int err = 0;
+
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+
+       pci_set_master(dev);
+
+       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               err = -EIO;
+               goto err_disable_dev;
+       }
+
+       err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               goto err_disable_dev;
+       }
+
+       err = pci_request_regions(dev, "ec_bhf");
+       if (err) {
+               dev_err(&dev->dev, "Failed to request pci memory regions\n");
+               goto err_disable_dev;
+       }
+
+       io = pci_iomap(dev, 0, 0);
+       if (!io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+               err = -EIO;
+               goto err_release_regions;
+       }
+
+       dma_io = pci_iomap(dev, 2, 0);
+       if (!dma_io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+               err = -EIO;
+               goto err_unmap;
+       }
+
+       net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+       if (!net_dev) {
+               err = -ENOMEM;
+               goto err_unmap_dma_io;
+       }
+
+       pci_set_drvdata(dev, net_dev);
+       SET_NETDEV_DEV(net_dev, &dev->dev);
+
+       net_dev->features = 0;
+       net_dev->flags |= IFF_NOARP;
+
+       net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+       priv = netdev_priv(net_dev);
+       priv->net_dev = net_dev;
+       priv->io = io;
+       priv->dma_io = dma_io;
+       priv->dev = dev;
+
+       err = ec_bhf_setup_offsets(priv);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+       dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+               net_dev->dev_addr);
+
+       err = register_netdev(net_dev);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       return 0;
+
+err_free_net_dev:
+       free_netdev(net_dev);
+err_unmap_dma_io:
+       pci_iounmap(dev, dma_io);
+err_unmap:
+       pci_iounmap(dev, io);
+err_release_regions:
+       pci_release_regions(dev);
+err_disable_dev:
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+
+       return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+       struct net_device *net_dev = pci_get_drvdata(dev);
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       unregister_netdev(net_dev);
+       free_netdev(net_dev);
+
+       pci_iounmap(dev, priv->dma_io);
+       pci_iounmap(dev, priv->io);
+       pci_release_regions(dev);
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+       .name           = "ec_bhf",
+       .id_table       = ids,
+       .probe          = ec_bhf_probe,
+       .remove         = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+       return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+       pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
index a186454..dc19bc5 100644 (file)
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
+       /* On some BE3 FW versions, after a HW reset,
+        * interrupts will remain disabled for each function.
+        * So, explicitly enable interrupts
+        */
+       be_intr_set(adapter, true);
+
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
index b0c6050..b78378c 100644 (file)
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
        return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
                struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
                                len,
                                PCI_DMA_TODEVICE);
 
+       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+               return -EINVAL;
+
        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
        txbi->mapping = dmaaddr;
        txbi->len = len;
+       return 0;
 }
 
-static void
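+/* Unmap the fragments already mapped for a TX skb after a later
+ * mapping attempt failed; the indexing mirrors jme_map_tx_skb().
+ */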
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+       struct jme_ring *txring = &(jme->txring[0]);
+       struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+       int mask = jme->tx_ring_mask;
+       int j;
+
+       for (j = 0 ; j < count ; j++) {
+               ctxbi = txbi + ((startidx + j + 2) & (mask));
+               pci_unmap_page(jme->pdev,
+                               ctxbi->mapping,
+                               ctxbi->len,
+                               PCI_DMA_TODEVICE);
+
+               ctxbi->mapping = 0;
+               ctxbi->len = 0;
+       }
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
        struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
        u32 len;
+       int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+               ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
                                skb_frag_page(frag),
                                frag->page_offset, skb_frag_size(frag), hidma);
+               if (ret) {
+                       jme_drop_tx_map(jme, idx, i);
+                       goto out;
+               }
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
-       jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+       ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
+       if (ret)
+               jme_drop_tx_map(jme, idx, i);
+
+out:
+       return ret;
 
 }
 
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        u8 flags;
+       int ret = 0;
 
        txdesc = (struct txdesc *)txring->desc + idx;
        txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-       jme_map_tx_skb(jme, skb, idx);
+       ret = jme_map_tx_skb(jme, skb, idx);
+       if (ret)
+               return ret;
+
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       jme_fill_tx_desc(jme, skb, idx);
+       if (jme_fill_tx_desc(jme, skb, idx))
+               return NETDEV_TX_OK;
 
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
index 78099ea..92d3249 100644 (file)
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
-               .has_inbox = false,
+               .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_CMD_EPERM_wrapper
+               .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
index f9c4651..212cea4 100644 (file)
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
index 61d64eb..fbd32af 100644 (file)
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_update_qp_context *cmd;
+       u64 pri_addr_path_mask = 0;
+       int err = 0;
+
+       if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+               return -EINVAL;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+       if (attr & MLX4_UPDATE_QP_SMAC) {
+               pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+               cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+       }
+
+       cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
index 1c3fdd4..8f1254a 100644 (file)
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd_info)
+{
+       int err;
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       struct res_qp *rqp;
+       u64 mac;
+       unsigned port;
+       u64 pri_addr_path_mask;
+       struct mlx4_update_qp_context *cmd;
+       int smac_index;
+
+       cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+       pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+       if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+           (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+               return -EPERM;
+
+       /* Just change the smac for the QP */
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err) {
+               mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+               return err;
+       }
+
+       port = (rqp->sched_queue >> 6 & 1) + 1;
+       smac_index = cmd->qp_context.pri_path.grh_mylmc;
+       err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                       smac_index, &mac);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                        qpn, smac_index);
+               goto err_mac;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma,
+                      vhcr->in_modifier, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+               goto err_mac;
+       }
+
+err_mac:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
index 7b52a88..f785d01 100644 (file)
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-                                            struct net_device *netdev)
-{
-       int err;
-
-       netdev->num_tx_queues = adapter->drv_tx_rings;
-       netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-       err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-       if (err)
-               netdev_err(netdev, "failed to set %d Tx queues\n",
-                          adapter->drv_tx_rings);
-
-       return err;
-}
-
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
index 0bc9148..7e55e88 100644 (file)
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
-       err = qlcnic_set_real_num_queues(adapter, netdev);
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
        if (err)
                return err;
 
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                            tx_ring->tx_stats.xmit_called,
                            tx_ring->tx_stats.xmit_on,
                            tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
                netdev_info(netdev,
-                           "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-                           readl(tx_ring->crb_intr_mask),
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
                            readl(tx_ring->crb_cmd_producer),
                            tx_ring->producer, tx_ring->sw_consumer,
                            le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
        int err;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
        netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
                return err;
        }
 
-       netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change Tx/Rx rings size
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
 
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_initialize_nic(adapter, 1);
index 32d969e..89b83e5 100644 (file)
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
        efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-       /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel(channel, efx)
-               free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-       /* Disable legacy interrupt */
-       if (efx->legacy_irq)
+       if (EFX_INT_MODE_USE_MSI(efx)) {
+               /* Disable MSI/MSI-X interrupts */
+               efx_for_each_channel(channel, efx)
+                       free_irq(channel->irq,
+                                &efx->msi_context[channel->channel]);
+       } else {
+               /* Disable legacy interrupt */
                free_irq(efx->legacy_irq, efx);
+       }
 }
 
 /* Register dump */
index d940034..0f4841d 100644 (file)
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
                if (ret) {
                        pr_err("%s: Cannot attach to PHY (error: %d)\n",
                               __func__, ret);
-                       goto phy_error;
+                       return ret;
                }
        }
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
-phy_error:
-       clk_disable_unprepare(priv->stmmac_clk);
 
        return ret;
 }
index df8d383..b9ac20f 100644 (file)
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
+               spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
index 36aa109..c331b7e 100644 (file)
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
-
-               if (strncmp(mdio->name, "gpio", 4) == 0) {
-                       /* GPIO bitbang MDIO driver attached */
-                       struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, bus->id, phyid);
-               } else {
-                       /* davinci MDIO driver attached */
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, mdio->name, phyid);
+               of_node_put(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
                }
+               snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+                        PHY_ID_FMT, mdio->name, phyid);
 
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr)
index b0e2865..d53e299 100644 (file)
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
 
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       }
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class(&dev->addr_list_lock,
-                         &macvlan_netdev_addr_lock_key);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macvlan_netdev_addr_lock_key,
+                                      macvlan_get_nest_level(dev));
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
+       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
+       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 9c4defd..5f1a225 100644 (file)
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
                bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
        } else {
                pdata = dev_get_platdata(&pdev->dev);
                bus_id = pdev->id;
index a972056..3bc079a 100644 (file)
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       int needs_aneg = 0, do_suspend = 0;
+       bool needs_aneg = false, do_suspend = false, do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
        case PHY_PENDING:
                break;
        case PHY_UP:
-               needs_aneg = 1;
+               needs_aneg = true;
 
                phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->adjust_link(phydev->attached_dev);
 
                } else if (0 == phydev->link_timeout--)
-                       needs_aneg = 1;
+                       needs_aneg = true;
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--)
-                               needs_aneg = 1;
+                               needs_aneg = true;
                }
 
                phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
-                       do_suspend = 1;
+                       do_suspend = true;
                }
                break;
        case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
+               do_resume = true;
                break;
        }
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
        if (needs_aneg)
                err = phy_start_aneg(phydev);
-
-       if (do_suspend)
+       else if (do_suspend)
                phy_suspend(phydev);
+       else if (do_resume)
+               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
index 0ce6066..4987a1c 100644 (file)
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        err = phy_init_hw(phydev);
        if (err)
                phy_detach(phydev);
-
-       phy_resume(phydev);
+       else
+               phy_resume(phydev);
 
        return err;
 }
index c9f3281..2e025dd 100644 (file)
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
        cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+       switch (proto) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
        struct cdc_ncm_ctx *ctx = info->ctx;
        __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        u16 tci = 0;
+       bool is_ip;
        u8 *c;
 
        if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                if (skb->len <= ETH_HLEN)
                        goto error;
 
+               /* Some applications using e.g. packet sockets will
+                * bypass the VLAN acceleration and create tagged
+                * ethernet frames directly.  We primarily look for
+                * the accelerated out-of-band tag, but fall back if
+                * required
+                */
+               skb_reset_mac_header(skb);
+               if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+                   __vlan_get_tag(skb, &tci) == 0) {
+                       is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+                       skb_pull(skb, VLAN_ETH_HLEN);
+               } else {
+                       is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+                       skb_pull(skb, ETH_HLEN);
+               }
+
                /* mapping VLANs to MBIM sessions:
                 *   no tag     => IPS session <0>
                 *   1 - 255    => IPS session <vlanid>
                 *   256 - 511  => DSS session <vlanid - 256>
                 *   512 - 4095 => unsupported, drop
                 */
-               vlan_get_tag(skb, &tci);
-
                switch (tci & 0x0f00) {
                case 0x0000: /* VLAN ID 0 - 255 */
-                       /* verify that datagram is IPv4 or IPv6 */
-                       skb_reset_mac_header(skb);
-                       switch (eth_hdr(skb)->h_proto) {
-                       case htons(ETH_P_IP):
-                       case htons(ETH_P_IPV6):
-                               break;
-                       default:
+                       if (!is_ip)
                                goto error;
-                       }
                        c = (u8 *)&sign;
                        c[3] = tci;
                        break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                                  "unsupported tci=0x%04x\n", tci);
                        goto error;
                }
-               skb_pull(skb, ETH_HLEN);
        }
 
        spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                return;
 
        /* need to send the NA on the VLAN dev, if any */
-       if (tci)
+       rcu_read_lock();
+       if (tci) {
                netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
                                              tci);
-       else
+               if (!netdev) {
+                       rcu_read_unlock();
+                       return;
+               }
+       } else {
                netdev = dev->net;
-       if (!netdev)
-               return;
+       }
+       dev_hold(netdev);
+       rcu_read_unlock();
 
        in6_dev = in6_dev_get(netdev);
        if (!in6_dev)
-               return;
+               goto out;
        is_router = !!in6_dev->cnf.forwarding;
        in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                                 true /* solicited */,
                                 false /* override */,
                                 true /* inc_opt */);
+out:
+       dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
index f46cd02..5627917 100644 (file)
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
        if ((vif->type == NL80211_IFTYPE_AP ||
             vif->type == NL80211_IFTYPE_MESH_POINT) &&
-           bss_conf->enable_beacon)
+           bss_conf->enable_beacon) {
                priv->reconfig_beacon = true;
+               priv->rearm_ani = true;
+       }
 
        if (bss_conf->assoc) {
                priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
        ath9k_htc_ps_wakeup(priv);
 
+       ath9k_htc_stop_ani(priv);
        del_timer_sync(&priv->tx.cleanup_timer);
        ath9k_htc_tx_drain(priv);
 
index afb3d15..be19852 100644 (file)
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
        if (!err) {
                /* only set 2G bandwidth using bw_cap command */
                band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
                err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
                                               sizeof(band_bwcap));
        } else {
index fa858d5..0489314 100644 (file)
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
        if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                   BT_VALID_CORUN_LUT_40);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
        }
 
        if (IWL_MVM_BT_COEX_MPLUT) {
                bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
        }
 
        if (mvm->cfg->bt_shared_single_ant)
index 9426905..d73a89e 100644 (file)
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *     this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in usecs (suspend before executing)
+ *     bits 0-19: beacon interval in TUs (suspend before executing)
  *     bits 20-23: reserved
  *     bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:     quiet channel num of packets threshold
  * @good_CRC_th:       passive to active promotion threshold
  * @rx_chain:          RXON rx chain.
- * @max_out_time:      max uSec to be out of assoceated channel
- * @suspend_time:      pause scan this long when returning to service channel
+ * @max_out_time:      max TUs to be out of associated channel
+ * @suspend_time:      pause scan this many TUs when returning to service channel
  * @flags:             RXON flags
  * @filter_flags:      RXONfilter
  * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
index f0cebf1..b41dc84 100644 (file)
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
                return;
 
-       ieee80211_iterate_active_interfaces(
+       ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (!iwl_mvm_is_idle(mvm)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_OS:
                IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
index d564233..f1ec098 100644 (file)
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
        return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
index 9f52c5b..e1c8388 100644 (file)
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                return;
        }
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
        if (lq_sta->dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
index c91dc84..c28de54 100644 (file)
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_bound);
-       /*
-        * Under low latency traffic passive scan is fragmented meaning
-        * that dwell on a particular channel will be fragmented. Each fragment
-        * dwell time is 20ms and fragments period is 105ms. Skipping to next
-        * channel will be delayed by the same period - 105ms. So suspend_time
-        * parameter describing both fragments and channels skipping periods is
-        * set to 105ms. This value is chosen so that overall passive scan
-        * duration will not be too long. Max_out_time in this case is set to
-        * 70ms, so for active scanning operating channel will be left for 70ms
-        * while for passive still for 20ms (fragment dwell).
-        */
-       if (global_bound) {
-               if (!iwl_mvm_low_latency(mvm)) {
-                       params->suspend_time = ieee80211_tu_to_usec(100);
-                       params->max_out_time = ieee80211_tu_to_usec(600);
-               } else {
-                       params->suspend_time = ieee80211_tu_to_usec(105);
-                       /* P2P doesn't support fragmented passive scan, so
-                        * configure max_out_time to be at least longest dwell
-                        * time for passive scan.
-                        */
-                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-                               params->max_out_time = ieee80211_tu_to_usec(70);
-                               params->passive_fragmented = true;
-                       } else {
-                               u32 passive_dwell;
 
-                               /*
-                                * Use band G so that passive channel dwell time
-                                * will be assigned with maximum value.
-                                */
-                               band = IEEE80211_BAND_2GHZ;
-                               passive_dwell = iwl_mvm_get_passive_dwell(band);
-                               params->max_out_time =
-                                       ieee80211_tu_to_usec(passive_dwell);
-                       }
-               }
+       if (!global_bound)
+               goto not_bound;
+
+       params->suspend_time = 100;
+       params->max_out_time = 600;
+
+       if (iwl_mvm_low_latency(mvm)) {
+               params->suspend_time = 250;
+               params->max_out_time = 250;
        }
 
+not_bound:
+
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].passive = 20;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(band,
                                                                      n_ssids);
        }
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz;
+       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
index d619851..2180902 100644 (file)
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
        return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *idle = _data;
+
+       if (!vif->bss_conf.idle)
+               *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+       bool idle = true;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_idle_iter, &idle);
+
+       return idle;
+}
index dcfd6d8..2365553 100644 (file)
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+       trans->dev = &pdev->dev;
+       trans_pcie->pci_dev = pdev;
+       iwl_disable_interrupts(trans);
+
        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        }
 
-       trans->dev = &pdev->dev;
-       trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_pci_disable_msi;
        }
 
-       trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_free_ict;
        }
 
+       trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
        return trans;
 
 out_free_ict:
index 630a3fc..0d4a285 100644 (file)
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
                              grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
index ef05c5c..20e9def 100644 (file)
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        work_done = xenvif_tx_action(vif, budget);
 
        if (work_done < budget) {
-               int more_to_do = 0;
-               unsigned long flags;
-
-               /* It is necessary to disable IRQ before calling
-                * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-                * lose event from the frontend.
-                *
-                * Consider:
-                *   RING_HAS_UNCONSUMED_REQUESTS
-                *   <frontend generates event to trigger napi_schedule>
-                *   __napi_complete
-                *
-                * This handler is still in scheduled state so the
-                * event has no effect at all. After __napi_complete
-                * this handler is descheduled and cannot get
-                * scheduled again. We lose event in this case and the ring
-                * will be completely stalled.
-                */
-
-               local_irq_save(flags);
-
-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-               if (!more_to_do)
-                       __napi_complete(napi);
-
-               local_irq_restore(flags);
+               napi_complete(napi);
+               xenvif_napi_schedule_or_enable_events(vif);
        }
 
        return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
index 7666540..7367208 100644 (file)
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
@@ -322,6 +322,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        }
 }
 
+/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+                                               const int i,
+                                               const struct ubuf_info *ubuf)
+{
+       struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+       do {
+               u16 pending_idx = ubuf->desc;
+
+               if (skb_shinfo(skb)->frags[i].page.p ==
+                   foreign_vif->mmap_pages[pending_idx])
+                       break;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+       } while (ubuf);
+
+       return ubuf;
+}
+
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
-       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-       struct xenvif *foreign_vif = NULL;
+       const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       const struct ubuf_info *const head_ubuf = ubuf;
 
        old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
-       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                (ubuf->callback == &xenvif_zerocopy_callback)) {
-               int i = 0;
-               foreign_vif = ubuf_to_vif(ubuf);
-
-               do {
-                       u16 pending_idx = ubuf->desc;
-                       foreign_grefs[i++] =
-                               foreign_vif->pending_tx_info[pending_idx].req.gref;
-                       ubuf = (struct ubuf_info *) ubuf->ctx;
-               } while (ubuf);
-       }
-
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        }
 
        for (i = 0; i < nr_frags; i++) {
+               /* This variable also signals whether foreign_gref has a real
+                * value or not.
+                */
+               struct xenvif *foreign_vif = NULL;
+               grant_ref_t foreign_gref;
+
+               if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                       (ubuf->callback == &xenvif_zerocopy_callback)) {
+                       const struct ubuf_info *const startpoint = ubuf;
+
+                       /* Ideally ubuf points to the chain element which
+                        * belongs to this frag. Or if frags were removed from
+                        * the beginning, then shortly before it.
+                        */
+                       ubuf = xenvif_find_gref(skb, i, ubuf);
+
+                       /* Try again from the beginning of the list, if we
+                        * haven't tried from there. This only makes sense in
+                        * the unlikely event of reordering the original frags.
+                        * For injected local pages it's an unnecessary second
+                        * run.
+                        */
+                       if (unlikely(!ubuf) && startpoint != head_ubuf)
+                               ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+                       if (likely(ubuf)) {
+                               u16 pending_idx = ubuf->desc;
+
+                               foreign_vif = ubuf_to_vif(ubuf);
+                               foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+                               /* Just a safety measure. If this was the last
+                                * element on the list, the for loop will
+                                * iterate again if a local page was added to
+                                * the end. Using head_ubuf here prevents the
+                                * second search on the chain. Or the original
+                                * frags changed order, but that's less likely.
+                                * In any case, ubuf shouldn't be NULL.
+                                */
+                               ubuf = ubuf->ctx ?
+                                       (struct ubuf_info *) ubuf->ctx :
+                                       head_ubuf;
+                       } else
+                               /* This frag was a local page, added to the
+                                * array after the skb left netback.
+                                */
+                               ubuf = head_ubuf;
+               }
                xenvif_gop_frag_copy(vif, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head,
                                     foreign_vif,
-                                    foreign_grefs[i]);
+                                    foreign_vif ? foreign_gref : UINT_MAX);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
                notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
        int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
index 9802b67..2c61281 100644
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
                return GPIOF_DIR_IN;
 }
 
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-                                    int value)
-{
-       return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
 static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
 {
        struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
                wmt_clearbits(data, reg_data_out, BIT(bit));
 }
 
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                    int value)
+{
+       wmt_gpio_set_value(chip, offset, value);
+       return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
 static struct gpio_chip wmt_gpio_chip = {
        .label = "gpio-wmt",
        .owner = THIS_MODULE,
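
The fix above latches the requested level with wmt_gpio_set_value() before pinctrl flips the pin to output, so the line never briefly drives a stale value. The same ordering applies to any direction_output callback; a sketch with hypothetical foo_* helpers (not the wmt driver's real functions):

	static int foo_gpio_direction_output(struct gpio_chip *chip,
					     unsigned offset, int value)
	{
		/* Program the output latch first ... */
		foo_gpio_set_value(chip, offset, value);
		/* ... then switch the pad to output mode. */
		return foo_gpio_set_direction_output(chip, offset);
	}
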
index 6963bdf..6aea373 100644
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
        tristate "PTP clock support"
+       depends on NET
        select PPS
        select NET_PTP_CLASSIFY
        help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86 || COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index 1b68142..c341f85 100644
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
        list_del(&rphy->list);
        mutex_unlock(&sas_host->lock);
 
-       sas_bsg_remove(shost, rphy);
-
        transport_destroy_device(dev);
 
        put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
        }
 
        sas_rphy_unlink(rphy);
+       sas_bsg_remove(NULL, rphy);
        transport_remove_device(dev);
        device_del(dev);
 }
index 1c8c6cc..4b0eff6 100644
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
        _enter("");
 
+       /* Break the callbacks here so that we do it after the final ACK is
+        * received.  The step number here must match the final number in
+        * afs_deliver_cb_callback().
+        */
+       if (call->unmarshall == 6) {
+               ASSERT(call->server && call->count && call->request);
+               afs_break_callbacks(call->server, call->count, call->request);
+       }
+
        afs_put_server(call->server);
        call->server = NULL;
        kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
                _debug("trailer");
                if (skb->len != 0)
                        return -EBADMSG;
+
+               /* Record that the message was unmarshalled successfully so
+                * that the call destructor can know to do the callback-breaking
+                * work, even if the final ACK isn't received.
+                *
+                * If the step number changes, then afs_cm_destructor() must be
+                * updated also.
+                */
+               call->unmarshall++;
+       case 6:
                break;
        }
 
index be75b50..590b55f 100644
@@ -75,7 +75,7 @@ struct afs_call {
        const struct afs_call_type *type;       /* type of call */
        const struct afs_wait_mode *wait_mode;  /* completion wait mode */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
-       work_func_t             async_workfn;
+       void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
        struct work_struct      async_work;     /* asynchronous work processor */
        struct work_struct      work;           /* actual work processor */
        struct sk_buff_head     rx_queue;       /* received packets */
index ef943df..03a3beb 100644
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct afs_call *);
 static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct work_struct *);
+static void afs_process_async_call(struct afs_call *);
 static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
 static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
 
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
 
+static void afs_async_workfn(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+       call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -183,6 +190,28 @@ static void afs_free_call(struct afs_call *call)
        kfree(call);
 }
 
+/*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+       if (call->rxcall) {
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+       }
+       if (call->type->destructor)
+               call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+       afs_end_call_nofree(call);
+       afs_free_call(call);
+}
+
 /*
  * allocate a call with flat request and reply buffers
  */
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
               atomic_read(&afs_outstanding_calls));
 
        call->wait_mode = wait_mode;
-       INIT_WORK(&call->async_work, afs_process_async_call);
+       call->async_workfn = afs_process_async_call;
+       INIT_WORK(&call->async_work, afs_async_workfn);
 
        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
        rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
        while ((skb = skb_dequeue(&call->rx_queue)))
                afs_free_skb(skb);
-       rxrpc_kernel_end_call(rxcall);
-       call->rxcall = NULL;
 error_kill_call:
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
        if (call->state >= AFS_CALL_COMPLETE) {
                while ((skb = skb_dequeue(&call->rx_queue)))
                        afs_free_skb(skb);
-               if (call->incoming) {
-                       rxrpc_kernel_end_call(call->rxcall);
-                       call->rxcall = NULL;
-                       call->type->destructor(call);
-                       afs_free_call(call);
-               }
+               if (call->incoming)
+                       afs_end_call(call);
        }
 
        _leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        }
 
        _debug("call complete");
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
 /*
  * delete an asynchronous call
  */
-static void afs_delete_async_call(struct work_struct *work)
+static void afs_delete_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
  * - on a multiple-thread workqueue this work item may try to run on several
  *   CPUs at the same time
  */
-static void afs_process_async_call(struct work_struct *work)
+static void afs_process_async_call(struct afs_call *call)
 {
-       struct afs_call *call =
-               container_of(work, struct afs_call, async_work);
-
        _enter("");
 
        if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
                call->reply = NULL;
 
                /* kill the call */
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               if (call->type->destructor)
-                       call->type->destructor(call);
+               afs_end_call_nofree(call);
 
                /* we can't just delete the call because the work item may be
                 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
        call->reply_size += len;
 }
 
-static void afs_async_workfn(struct work_struct *work)
-{
-       struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-       call->async_workfn(work);
-}
-
 /*
  * accept the backlog of incoming calls
  */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        default:
-               rxrpc_kernel_end_call(call->rxcall);
-               call->rxcall = NULL;
-               call->type->destructor(call);
-               afs_free_call(call);
+               afs_end_call(call);
                _leave(" [error]");
                return;
        }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        call->state = AFS_CALL_AWAIT_ACK;
        n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
        if (n >= 0) {
+               /* Success */
                _leave(" [replied]");
                return;
        }
+
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        }
-       rxrpc_kernel_end_call(call->rxcall);
-       call->rxcall = NULL;
-       call->type->destructor(call);
-       afs_free_call(call);
+       afs_end_call(call);
        _leave(" [error]");
 }
 
index b6f4601..f66c66b 100644
@@ -590,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
                add_to_mask(state, &state->groups->aces[i].perms);
        }
 
-       if (!state->users->n && !state->groups->n) {
+       if (state->users->n || state->groups->n) {
                pace++;
                pace->e_tag = ACL_MASK;
                low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
index 32b699b..9a77a5a 100644
@@ -3717,9 +3717,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-       if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+       struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+       if (check_for_locks(stp->st_file, lo))
                return nfserr_locks_held;
-       release_lock_stateid(stp);
+       /*
+        * correspondence, and we have to delete the lockowner when we
+        * correspondance, and we have to delete the lockowner when we
+        * delete the lock stateid:
+        */
+       unhash_lockowner(lo);
        return nfs_ok;
 }
 
@@ -4159,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
 
        if (!same_owner_str(&lo->lo_owner, owner, clid))
                return false;
+       if (list_empty(&lo->lo_owner.so_stateids)) {
+               WARN_ON_ONCE(1);
+               return false;
+       }
        lst = list_first_entry(&lo->lo_owner.so_stateids,
                               struct nfs4_ol_stateid, st_perstateowner);
        return lst->st_file->fi_inode == inode;
index af3f7aa..ee1f884 100644
@@ -472,11 +472,15 @@ bail:
 
 void dlm_destroy_master_caches(void)
 {
-       if (dlm_lockname_cache)
+       if (dlm_lockname_cache) {
                kmem_cache_destroy(dlm_lockname_cache);
+               dlm_lockname_cache = NULL;
+       }
 
-       if (dlm_lockres_cache)
+       if (dlm_lockres_cache) {
                kmem_cache_destroy(dlm_lockres_cache);
+               dlm_lockres_cache = NULL;
+       }
 }
 
 static void dlm_lockres_release(struct kref *kref)
index 9bc07d2..e246954 100644
@@ -1537,7 +1537,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
-       ssize_t count = 0;
+       ssize_t count;
 
        pipe = get_pipe_info(file);
        if (!pipe)
@@ -1546,8 +1546,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        ret = rw_copy_check_uvector(READ, uiov, nr_segs,
                                    ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret <= 0)
-               return ret;
+               goto out;
 
+       count = ret;
        iov_iter_init(&iter, iov, nr_segs, count, 0);
 
        sd.len = 0;
@@ -1560,6 +1561,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
        pipe_unlock(pipe);
 
+out:
        if (iov != iovstack)
                kfree(iov);
 
index 8300fb8..72cb0dd 100644
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+       u8 map_cnt;
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;
index 7c8b20b..a9a53b1 100644
@@ -56,6 +56,7 @@ struct macvlan_dev {
        int                     numqueues;
        netdev_features_t       tap_features;
        int                     minor;
+       int                     nest_level;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
index 13bbbde..b2acc4a 100644
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
+       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
 
 extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG_ON(!is_vlan_dev(dev));
+       return vlan_dev_priv(dev)->nest_level;
+}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG();
+       return 0;
+}
 #endif
 
 static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
                 */
                skb->protocol = htons(ETH_P_802_2);
 }
+
 #endif /* !(_LINUX_IF_VLAN_H_) */
index b66e761..7040dc9 100644
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
        __be32                  byte_count;
 };
 
+enum mlx4_update_qp_attr {
+       MLX4_UPDATE_QP_SMAC             = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+       u8      smac_index;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
index 94734a6..17d8339 100644
@@ -248,24 +248,17 @@ do {                                                              \
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
                           struct static_key *done_key);
 
-#ifdef HAVE_JUMP_LABEL
-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
-               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
-#else /* !HAVE_JUMP_LABEL */
-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#endif /* HAVE_JUMP_LABEL */
-
 #define net_get_random_once(buf, nbytes)                               \
        ({                                                              \
                bool ___ret = false;                                    \
                static bool ___done = false;                            \
-               static struct static_key ___done_key =                  \
-                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
-               if (!static_key_true(&___done_key))                     \
+               static struct static_key ___once_key =                  \
+                       STATIC_KEY_INIT_TRUE;                           \
+               if (static_key_true(&___once_key))                      \
                        ___ret = __net_get_random_once(buf,             \
                                                       nbytes,          \
                                                       &___done,        \
-                                                      &___done_key);   \
+                                                      &___once_key);   \
                ___ret;                                                 \
        })
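
The rewritten macro starts the static key as true and lets __net_get_random_once() flip it after the first call, so an initialized fast path reduces to a single patched branch without open-coding jump-label internals. Typical usage is unchanged; a sketch with a hypothetical per-boot secret fed to jhash():

	#include <linux/jhash.h>

	static u32 hash_secret;	/* hypothetical; filled once per boot */

	static u32 example_hash(const void *data, u32 len)
	{
		/* The first caller draws the random bytes; later callers
		 * skip straight past the patched-out branch.
		 */
		net_get_random_once(&hash_secret, sizeof(hash_secret));
		return jhash(data, len, hash_secret);
	}
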
 
index 7ed3a3a..b42d07b 100644
@@ -1144,6 +1144,7 @@ struct net_device_ops {
        netdev_tx_t             (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                                        struct net_device *dev,
                                                        void *priv);
+       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
 };
 
 /**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
 
 static inline void netif_addr_lock_nested(struct net_device *dev)
 {
-       spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+       int subclass = SINGLE_DEPTH_NESTING;
+
+       if (dev->netdev_ops->ndo_get_lock_subclass)
+               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+       spin_lock_nested(&dev->addr_list_lock, subclass);
 }
 
 static inline void netif_addr_lock_bh(struct net_device *dev)
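
With the hook in place, a stacked device can report how deep it sits so each level of addr_list_lock gets its own lockdep subclass instead of the single SINGLE_DEPTH_NESTING value. A sketch of how a driver might wire it up, with hypothetical foo_* names around the dev_get_nest_level() helper declared later in this header:

	static bool is_foo_dev(struct net_device *dev);	/* hypothetical type check */

	static int foo_get_lock_subclass(struct net_device *dev)
	{
		/* Nesting depth of foo devices below us = lock subclass. */
		return dev_get_nest_level(dev, is_foo_dev);
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_get_lock_subclass	= foo_get_lock_subclass,
	};
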
@@ -3050,9 +3056,18 @@ extern int               weight_p;
 extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
 
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->adj_list.upper, \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
+void *netdev_lower_get_next(struct net_device *dev,
+                               struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            ldev = netdev_lower_get_next(dev, &(iter)); \
+            ldev; \
+            ldev = netdev_lower_get_next(dev, &(iter)))
+
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
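
netdev_for_each_upper_dev_rcu() walks only the immediate adjacency list, unlike the existing all_adj_list iterator that covers the whole transitive set. Callers hold the RCU read lock; a usage sketch:

	struct net_device *updev;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, updev, iter)
		pr_info("%s is an immediate upper dev of %s\n",
			updev->name, dev->name);
	rcu_read_unlock();
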
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev));
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
@@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev);
-static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
-{
-       return netif_skb_dev_features(skb, skb->dev);
-}
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
index 6fe8464..881a7c3 100644
@@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
-       return -ENOSYS;
+       /*
+        * Fall back to the non-DT function to register a bus.
+        * This way, we don't have to keep compat bits around in drivers.
+        */
+
+       return mdiobus_register(mdio);
 }
 
 static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
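
The fallback lets MDIO drivers call of_mdiobus_register() unconditionally: on kernels without CONFIG_OF it now degrades to a plain mdiobus_register() instead of failing with -ENOSYS. A caller sketch (priv and pdev are hypothetical driver names; of_node is simply NULL in the !CONFIG_OF case):

	err = of_mdiobus_register(priv->mii_bus, pdev->dev.of_node);
	if (err) {
		dev_err(&pdev->dev, "cannot register MDIO bus\n");
		return err;
	}
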
index 41a13e7..7944cdc 100644
@@ -10,7 +10,7 @@
 
 struct dma_chan;
 
-#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
 bool omap_dma_filter_fn(struct dma_chan *, void *);
 #else
 static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
index 3356abc..3ef6ea1 100644
@@ -402,6 +402,8 @@ struct perf_event {
 
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
+       unsigned long                   rcu_batches;
+       int                             rcu_pending;
 
        /* poll related */
        wait_queue_head_t               waitq;
index 8e3e66a..953937e 100644
@@ -4,6 +4,7 @@
 
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/wait.h>
 #include <uapi/linux/rtnetlink.h>
 
 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
 #else
index 25f54c7..221b2bd 100644
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_PARKED            512
 #define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
         *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
-        * exit the critical section).
+        * exit the critical section);
+        *
+        * @dl_yielded tells if the task gave up the cpu before consuming
+        * all its available runtime during the last job.
         */
-       int dl_throttled, dl_new, dl_boosted;
+       int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index f3539a1..f856e5a 100644
@@ -3668,6 +3668,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
  */
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason.  The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
 /**
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
index 6c4f5ea..216cecc 100644
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 
 
 /*
index 80f500a..b2704fd 100644
@@ -20,6 +20,11 @@ struct local_ports {
        int             range[2];
 };
 
+struct ping_group_range {
+       seqlock_t       lock;
+       kgid_t          range[2];
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -66,13 +71,13 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       struct local_ports sysctl_local_ports;
+       struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
 
-       kgid_t sysctl_ping_group_range[2];
+       struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
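
Guarding the two kgid_t values with a seqlock means a sysctl write can never be observed half-applied. The reader side presumably follows the usual seqlock retry loop; a sketch (the real accessors live in net/ipv4/, and example_get_ping_range is a hypothetical name):

	static void example_get_ping_range(struct net *net,
					   kgid_t *low, kgid_t *high)
	{
		unsigned int seq;

		do {
			seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
			*low  = net->ipv4.ping_group_range.range[0];
			*high = net->ipv4.ping_group_range.range[1];
		} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
	}
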
 
index 1ba9d62..194c1ea 100644
@@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
 *     to work properly to support receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+ *     here to reserve the value for API/ABI compatibility)
  * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
  *     equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
  *     mode
@@ -3897,7 +3899,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       /* bit 4 is reserved - don't use */
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
index f83a71a..440eefc 100644
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+       struct perf_event *event;
+       bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-       struct perf_event *event = info;
+       struct remove_event *re = info;
+       struct perf_event *event = re->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
+       if (re->detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
                ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
+       struct remove_event re = {
+               .event = event,
+               .detach_group = detach_group,
+       };
 
        lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
-               cpu_function_call(event->cpu, __perf_remove_from_context, event);
+               cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
        }
 
 retry:
-       if (!task_function_call(task, __perf_remove_from_context, event))
+       if (!task_function_call(task, __perf_remove_from_context, &re))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
         * Since the task isn't running, it's safe to remove the event, us
         * holding the ctx->lock ensures the task won't get scheduled in.
         */
+       if (detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
        unaccount_event(event);
 
        if (event->rb) {
-               struct ring_buffer *rb;
-
                /*
                 * Can happen when we close an event with re-directed output.
                 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
                 * over us; possibly making our ring_buffer_put() the last.
                 */
                mutex_lock(&event->mmap_mutex);
-               rb = event->rb;
-               if (rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* could be last */
-               }
+               ring_buffer_attach(event, NULL);
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
         *     to trigger the AB-BA case.
         */
        mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-       raw_spin_lock_irq(&ctx->lock);
-       perf_group_detach(event);
-       raw_spin_unlock_irq(&ctx->lock);
-       perf_remove_from_context(event);
+       perf_remove_from_context(event, true);
        mutex_unlock(&ctx->mutex);
 
        free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb)
 {
+       struct ring_buffer *old_rb = NULL;
        unsigned long flags;
 
-       if (!list_empty(&event->rb_entry))
-               return;
+       if (event->rb) {
+               /*
+                * Should be impossible; we set this when removing
+                * event->rb_entry and wait/clear when adding event->rb_entry.
+                */
+               WARN_ON_ONCE(event->rcu_pending);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       if (list_empty(&event->rb_entry))
-               list_add(&event->rb_entry, &rb->event_list);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+               old_rb = event->rb;
+               event->rcu_batches = get_state_synchronize_rcu();
+               event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-       unsigned long flags;
+               spin_lock_irqsave(&old_rb->event_lock, flags);
+               list_del_rcu(&event->rb_entry);
+               spin_unlock_irqrestore(&old_rb->event_lock, flags);
+       }
 
-       if (list_empty(&event->rb_entry))
-               return;
+       if (event->rcu_pending && rb) {
+               cond_synchronize_rcu(event->rcu_batches);
+               event->rcu_pending = 0;
+       }
+
+       if (rb) {
+               spin_lock_irqsave(&rb->event_lock, flags);
+               list_add_rcu(&event->rb_entry, &rb->event_list);
+               spin_unlock_irqrestore(&rb->event_lock, flags);
+       }
+
+       rcu_assign_pointer(event->rb, rb);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_del_init(&event->rb_entry);
-       wake_up_all(&event->waitq);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb (so that we
+                * could attach the new rb), we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
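
The get_state_synchronize_rcu()/cond_synchronize_rcu() pair above amortizes the grace-period wait: the counter is snapshotted when the event is unpublished from the old buffer, and a later attach only blocks if a full grace period has not already elapsed on its own. The pattern in isolation, as a generic sketch (obj is a hypothetical RCU-protected object):

	unsigned long snap;

	list_del_rcu(&obj->node);		/* unpublish */
	snap = get_state_synchronize_rcu();	/* snapshot the GP counter */

	/* ... other work; a grace period may complete meanwhile ... */

	cond_synchronize_rcu(snap);	/* no-op if one already elapsed */
	kfree(obj);			/* safe: no readers can remain */
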
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        atomic_dec(&rb->mmap_count);
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-               return;
+               goto out_put;
 
-       /* Detach current event from the buffer. */
-       rcu_assign_pointer(event->rb, NULL);
-       ring_buffer_detach(event, rb);
+       ring_buffer_attach(event, NULL);
        mutex_unlock(&event->mmap_mutex);
 
        /* If there's still other mmap()s of this buffer, we're done. */
-       if (atomic_read(&rb->mmap_count)) {
-               ring_buffer_put(rb); /* can't be last */
-               return;
-       }
+       if (atomic_read(&rb->mmap_count))
+               goto out_put;
 
        /*
         * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
                 * still restart the iteration to make sure we're not now
                 * iterating the wrong list.
                 */
-               if (event->rb == rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* can't be last, we still have one */
-               }
+               if (event->rb == rb)
+                       ring_buffer_attach(event, NULL);
+
                mutex_unlock(&event->mmap_mutex);
                put_event(event);
 
@@ -4007,6 +4025,7 @@ again:
        vma->vm_mm->pinned_vm -= mmap_locked;
        free_uid(mmap_user);
 
+out_put:
        ring_buffer_put(rb); /* could be last */
 }
 
@@ -4124,7 +4143,6 @@ again:
        vma->vm_mm->pinned_vm += extra;
 
        ring_buffer_attach(event, rb);
-       rcu_assign_pointer(event->rb, rb);
 
        perf_event_init_userpage(event);
        perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
 
        /* Recursion avoidance in each contexts */
        int                             recursion[PERF_NR_CONTEXTS];
+
+       /* Keeps track of cpu being initialized/exited */
+       bool                            online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        hwc->state = !(flags & PERF_EF_START);
 
        head = find_swevent_head(swhash, event);
-       if (WARN_ON_ONCE(!head))
+       if (!head) {
+               /*
+                * We can race with cpu hotplug code. Do not
+                * WARN if the cpu just got unplugged.
+                */
+               WARN_ON_ONCE(swhash->online);
                return -EINVAL;
+       }
 
        hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6914,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct ring_buffer *rb = NULL, *old_rb = NULL;
+       struct ring_buffer *rb = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
@@ -6942,8 +6969,6 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
-       old_rb = event->rb;
-
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
                        goto unlock;
        }
 
-       if (old_rb)
-               ring_buffer_detach(event, old_rb);
-
-       if (rb)
-               ring_buffer_attach(event, rb);
-
-       rcu_assign_pointer(event->rb, rb);
-
-       if (old_rb) {
-               ring_buffer_put(old_rb);
-               /*
-                * Since we detached before setting the new rb, so that we
-                * could attach the new rb, we could have missed a wakeup.
-                * Provide it now.
-                */
-               wake_up_all(&event->waitq);
-       }
+       ring_buffer_attach(event, rb);
 
        ret = 0;
 unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_event_sample_rate)
                        return -EINVAL;
+       } else {
+               if (attr.sample_period & (1ULL << 63))
+                       return -EINVAL;
        }
 
        /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_context *gctx = group_leader->ctx;
 
                mutex_lock(&gctx->mutex);
-               perf_remove_from_context(group_leader);
+               perf_remove_from_context(group_leader, false);
 
                /*
                 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling);
+                       perf_remove_from_context(sibling, false);
                        perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock(&src_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event);
+               perf_remove_from_context(event, false);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       if (child_event->parent) {
-               raw_spin_lock_irq(&child_ctx->lock);
-               perf_group_detach(child_event);
-               raw_spin_unlock_irq(&child_ctx->lock);
-       }
-
-       perf_remove_from_context(child_event);
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);
+       if (!parent_ctx)
+               return 0;
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = true;
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+       struct remove_event re = { .detach_group = false };
        struct perf_event_context *ctx = __info;
-       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(event);
+       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(&re);
        rcu_read_unlock();
 }
 
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
        perf_event_exit_cpu_context(cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = false;
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
 }
index c8380ad..28c5706 100644
@@ -1683,6 +1683,14 @@ int kernel_kexec(void)
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
                migrate_to_reboot_cpu();
+
+               /*
+                * migrate_to_reboot_cpu() disables CPU hotplug assuming that
+                * no further code needs to use CPU hotplug (which is true in
+                * the reboot case). However, the kexec path depends on using
+                * CPU hotplug again; so re-enable it here.
+                */
+               cpu_hotplug_enable();
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }
index d9d8ece..204d3d2 100644
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
        if (likely(prev->sched_class == class &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq, prev);
-               if (likely(p && p != RETRY_TASK))
-                       return p;
+               if (unlikely(p == RETRY_TASK))
+                       goto again;
+
+               /* assumes fair_sched_class->next == idle_sched_class */
+               if (unlikely(!p))
+                       p = idle_sched_class.pick_next_task(rq, prev);
+
+               return p;
        }
 
 again:
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_throttled = 0;
        dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                                        ,
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
        };
        SD_INIT_NAME(sd, NUMA);
        sd->private = &tl->data;
index 5b9bb42..ab001b5 100644
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
  */
 void cpudl_cleanup(struct cpudl *cp)
 {
-       /*
-        * nothing to do for the moment
-        */
+       free_cpumask_var(cp->free_cpus);
 }
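
The one-line fix pairs cpudl_cleanup() with the cpumask allocation done in cpudl_init(), closing a leak on every root-domain rebuild. The pairing in isolation, as a sketch (the example_* names are hypothetical):

	int example_init(struct cpudl *cp)
	{
		if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
			return -ENOMEM;
		cpumask_setall(cp->free_cpus);
		return 0;
	}

	void example_cleanup(struct cpudl *cp)
	{
		free_cpumask_var(cp->free_cpus);	/* matches the alloc */
	}
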
index 8b836b3..3031bac 100644
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
        int idx = 0;
        int task_pri = convert_prio(p->prio);
 
-       if (task_pri >= MAX_RT_PRIO)
-               return 0;
+       BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
index a95097c..72fdf06 100644
@@ -332,50 +332,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq)
+                                        struct rq *rq, int ticks)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+       u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
+       cputime *= ticks;
+       scaled *= ticks;
+
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SOFTIRQ);
+               __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
-               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_guest_time(p, cputime, scaled);
        } else {
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SYSTEM);
+               __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       int i;
        struct rq *rq = this_rq();
 
-       for (i = 0; i < ticks; i++)
-               irqtime_account_process_tick(current, 0, rq);
+       irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq) {}
+                                               struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq);
+               irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }
 
index b080957..800e99b 100644
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        sched_clock_tick();
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
+       dl_se->dl_yielded = 0;
        if (p->on_rq) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
-        * new scheduling parameters (thanks to dl_new=1).
+        * new scheduling parameters (thanks to dl_yielded=1).
         */
        if (p->dl.runtime > 0) {
-               rq->curr->dl.dl_new = 1;
+               rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
        update_curr_dl(rq);
index 7570dd9..0fdb96d 100644
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
        int this_cpu = this_rq->cpu;
 
        idle_enter_fair(this_rq);
+
        /*
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
        raw_spin_lock(&this_rq->lock);
 
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
+
        /*
-        * While browsing the domains, we released the rq lock.
-        * A task could have be enqueued in the meantime
+        * While browsing the domains, we released the rq lock, a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
         */
-       if (this_rq->cfs.h_nr_running && !pulled_task) {
+       if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
-               goto out;
-       }
 
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
                this_rq->next_balance = next_balance;
        }
 
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
-
 out:
        /* Is there a task of a high priority class? */
        if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
index 000a220..088358c 100644 (file)
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
        int ret = 0;
        /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+       if (test_bit(AS_ENOSPC, &mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
+       if (test_bit(AS_EIO, &mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
 }
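The reshaped checks read the bit with test_bit() before attempting test_and_clear_bit(): the plain read leaves the cacheline shared, while the locked read-modify-write dirties it, so the atomic is now paid only when a writeback error is actually pending. A hedged distillation of the idiom, with a hypothetical helper name:

    /* hypothetical helper -- test-before-RMW to avoid cacheline bouncing */
    static inline int test_and_clear_bit_if_set(int nr, unsigned long *addr)
    {
            if (!test_bit(nr, addr))                /* cheap shared read */
                    return 0;
            return test_and_clear_bit(nr, addr);    /* locked RMW, rare path */
    }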
index 539eeb9..a402f8f 100644 (file)
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-               page = find_get_page(mapping, index);
+               page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
                                page_cache_release(page);
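Unlike find_get_page(), find_get_entry() can return an exceptional radix-tree entry (for instance a shadow entry left by reclaim) instead of a page pointer, so the result has to be screened before being treated as a page. A hedged caller sketch:

    /* sketch -- screen exceptional entries before touching the page */
    page = find_get_entry(mapping, index);
    if (radix_tree_exceptional_entry(page))
            page = NULL;                    /* shadow entry, nothing to release */
    if (page)
            page_cache_release(page);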
index c47dffd..5177c6d 100644 (file)
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
        rcu_read_lock();
        do {
-               memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-               if (unlikely(!memcg))
+               /*
+                * Page cache insertions can happen without an
+                * actual mm context, e.g. during disk probing
+                * on boot, loopback IO, acct() writes etc.
+                */
+               if (unlikely(!mm))
                        memcg = root_mem_cgroup;
+               else {
+                       memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+                       if (unlikely(!memcg))
+                               memcg = root_mem_cgroup;
+               }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                return 0;
        }
 
-       /*
-        * Page cache insertions can happen without an actual mm
-        * context, e.g. during disk probing on boot.
-        */
-       if (unlikely(!mm))
-               memcg = root_mem_cgroup;
-       else {
-               memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-               if (!memcg)
-                       return -ENOMEM;
-       }
+       memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+       if (!memcg)
+               return -ENOMEM;
        __mem_cgroup_commit_charge(memcg, page, 1, type, false);
        return 0;
 }
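The two hunks cooperate: once get_mem_cgroup_from_mm() tolerates mm == NULL and always returns a css-referenced memcg, charge paths no longer need their own fallback. A hedged caller sketch under that guarantee:

    struct mem_cgroup *memcg;

    memcg = get_mem_cgroup_from_mm(mm);     /* mm may be NULL; result never is */
    /* ... charge the page against memcg ... */
    css_put(&memcg->css);                   /* balance the css_tryget() above */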
index 35ef28a..9ccef39 100644 (file)
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
-                        * Check "just unpoisoned", "filter hit", and
-                        * "race with other subpage."
+                        * Check "filter hit" and "race with other subpage."
                         */
                        lock_page(hpage);
-                       if (!PageHWPoison(hpage)
-                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-                           || (p != hpage && TestSetPageHWPoison(hpage))) {
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                               return 0;
+                       if (PageHWPoison(hpage)) {
+                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+                                   || (p != hpage && TestSetPageHWPoison(hpage))) {
+                                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                       unlock_page(hpage);
+                                       return 0;
+                               }
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               put_page(hpage);
                res = 0;
                goto out;
        }
index 175273f..44ebd5c 100644 (file)
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
+       vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
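As a worked illustration of the nest_level assigned above (the values follow mechanically from dev_get_nest_level() in the net/core/dev.c hunk further down; the devices are hypothetical):

    eth0            dev_get_nest_level(eth0, is_vlan_dev) == -1
    eth0.100        nest_level = -1 + 1 == 0
    eth0.100.200    nest_level =  0 + 1 == 1    /* one more per stacked VLAN */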
index 733ec28..019efb7 100644 (file)
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
        }
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-       int subclass = 0;
-
-       while (is_vlan_dev(real_dev)) {
-               subclass++;
-               real_dev = vlan_dev_priv(real_dev)->real_dev;
-       }
-
-       return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-       vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+       return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
 
        netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       subclass = vlan_calculate_locking_subclass(dev);
-       vlan_dev_set_lockdep_class(dev, subclass);
+       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
+       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
index b3bd4ec..f04224c 100644 (file)
@@ -1545,6 +1545,8 @@ out_neigh:
        if ((orig_neigh_node) && (!is_single_hop_neigh))
                batadv_orig_node_free_ref(orig_neigh_node);
 out:
+       if (router_ifinfo)
+               batadv_neigh_ifinfo_free_ref(router_ifinfo);
        if (router)
                batadv_neigh_node_free_ref(router);
        if (router_router)
index b25fd64..aa5d494 100644 (file)
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-                                       BATADV_NO_FLAGS)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
                        ret = true;
                        goto out;
                }
index bcc4bea..f14e54a 100644 (file)
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_neigh_node *neigh_node)
 {
        struct batadv_priv *bat_priv;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
        unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned header_size = sizeof(frag_header);
        unsigned max_fragment_size, max_packet_size;
+       bool ret = false;
 
        /* To avoid merge and refragmentation at next-hops we never send
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                           skb->len + ETH_HLEN);
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-       return true;
+       ret = true;
+
 out_err:
-       return false;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+
+       return ret;
 }
index c835e13..90cff58 100644 (file)
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-       if (atomic_dec_and_test(&gw_node->refcount))
+       if (atomic_dec_and_test(&gw_node->refcount)) {
+               batadv_orig_node_free_ref(gw_node->orig_node);
                kfree_rcu(gw_node, rcu);
+       }
 }
 
 static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        if (gateway->bandwidth_down == 0)
                return;
 
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               return;
+
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-       if (!gw_node)
+       if (!gw_node) {
+               batadv_orig_node_free_ref(orig_node);
                return;
+       }
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
index b851cc5..fbda6b5 100644 (file)
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == net_dev->ifindex)
+       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
index ffd9dfb..6a48451 100644 (file)
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_neigh_node *router;
 
        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+       /* this is the last reference to this object */
+       router = rcu_dereference_protected(orig_ifinfo->router, true);
+       if (router)
+               batadv_neigh_node_free_ref_now(router);
        kfree(orig_ifinfo);
 }
 
@@ -701,6 +706,47 @@ free_orig_node:
        return NULL;
 }
 
+/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: the neighbor node whose ifinfo entries are to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+                         struct batadv_neigh_node *neigh)
+{
+       struct batadv_neigh_ifinfo *neigh_ifinfo;
+       struct batadv_hard_iface *if_outgoing;
+       struct hlist_node *node_tmp;
+
+       spin_lock_bh(&neigh->ifinfo_lock);
+
+       /* for all ifinfo objects for this neighbor */
+       hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+                                 &neigh->ifinfo_list, list) {
+               if_outgoing = neigh_ifinfo->if_outgoing;
+
+               /* always keep the default interface */
+               if (if_outgoing == BATADV_IF_DEFAULT)
+                       continue;
+
+               /* don't purge if the interface is not (going) down */
+               if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+                   (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+                   (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+                       continue;
+
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+                          neigh->addr, if_outgoing->net_dev->name);
+
+               hlist_del_rcu(&neigh_ifinfo->list);
+               batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+       }
+
+       spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
 /**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
+               } else {
+                       /* only necessary when the neighbor itself is kept
+                        * but one of its interfaces has been removed.
+                        */
+                       batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
                }
        }
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
        struct batadv_neigh_node *best_neigh_node;
        struct batadv_hard_iface *hard_iface;
-       bool changed;
+       bool changed_ifinfo, changed_neigh;
 
        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }
-       changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-       changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+       changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+       changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-       if (!changed)
+       if (!changed_ifinfo && !changed_neigh)
                return false;
 
        /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-       batadv_hardif_free_ref(hard_iface);
+       if (hard_iface)
+               batadv_hardif_free_ref(hard_iface);
        return 0;
 }
 
index 80e1b0f..2acf7fa 100644 (file)
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
 
-       if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+       if (skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
index d2c8a06..9abc503 100644 (file)
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                           const struct net_device *dev,
-                                           netdev_features_t features)
+       netdev_features_t features)
 {
        int tmp;
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
-       } else if (illegal_highdma(dev, skb)) {
+       } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
 
        return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       netdev_features_t features = dev->features;
+       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, dev, features);
+               return harmonize_features(skb, features);
        }
 
-       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_STAG_TX;
 
-       return harmonize_features(skb, dev, features);
+       return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
+       NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
@@ -4542,6 +4541,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                struct list_head **iter)
+{
+       struct netdev_adjacent *upper;
+
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+       upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+
+       return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
@@ -4623,6 +4648,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+
+       return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                    lower neighbour list, RCU
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev))
+{
+       struct net_device *lower = NULL;
+       struct list_head *iter;
+       int max_nest = -1;
+       int nest;
+
+       ASSERT_RTNL();
+
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               nest = dev_get_nest_level(lower, type_check);
+               if (max_nest < nest)
+                       max_nest = nest;
+       }
+
+       if (type_check(dev))
+               max_nest++;
+
+       return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
        if (ops->ndo_set_rx_mode)
                ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
index 8f8a96e..32d872e 100644 (file)
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
-       neigh->nud_state = NUD_PROBE;
-       atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+       neigh->nud_state = NUD_INCOMPLETE;
+       atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
index 81d3a9a..7c8ffd9 100644 (file)
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
index 9837beb..2d8d8fc 100644 (file)
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               for_each_net(net) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-       rtnl_lock();
+       /* Close the race with cleanup_net() */
+       mutex_lock(&net_mutex);
+       rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
+       mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
index 1b62343..8383b2b 100644 (file)
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       lp = NAPI_GRO_CB(p)->last ?: p;
+       lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);
 
        if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
        __skb_pull(skb, offset);
 
-       if (!NAPI_GRO_CB(p)->last)
+       if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
index 2f737bf..eed3433 100644 (file)
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
        struct __net_random_once_work *work =
                container_of(w, struct __net_random_once_work, work);
-       if (!static_key_enabled(work->key))
-               static_key_slow_inc(work->key);
+       BUG_ON(!static_key_enabled(work->key));
+       static_key_slow_dec(work->key);
        kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-                          struct static_key *done_key)
+                          struct static_key *once_key)
 {
        static DEFINE_SPINLOCK(lock);
        unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
        *done = true;
        spin_unlock_irqrestore(&lock, flags);
 
-       __net_random_once_disable_jump(done_key);
+       __net_random_once_disable_jump(once_key);
 
        return true;
 }
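For context, __net_get_random_once() is the slow path behind the net_get_random_once() macro, which pairs it with a per-call-site static key so the initialization branch is patched out after first use. A hedged usage sketch (the secret and the mixing step are hypothetical):

    static u32 hash_secret;

    static u32 my_hash(u32 x)
    {
            net_get_random_once(&hash_secret, sizeof(hash_secret));
            return x ^ hash_secret;         /* placeholder mixing */
    }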
index 0eb5d5e..5db37ce 100644 (file)
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
                goto out_free;
        }
 
-       chip_index = 0;
+       chip_index = -1;
        for_each_available_child_of_node(np, child) {
+               chip_index++;
                cd = &pd->chip[chip_index];
 
                cd->mii_bus = &mdio_bus->dev;
index 8c54870..6d6dd34 100644 (file)
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
        return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] =  32768;
+       net->ipv4.ip_local_ports.range[1] =  61000;
+
+       seqlock_init(&net->ipv4.ping_group_range.lock);
+       /*
+        * Sane defaults - nobody may create ping sockets.
+        * Boot scripts should set this to distro-specific group.
+        */
+       net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+       return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+       .init = inet_init_net,
+       .exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+       return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+       if (init_inet_pernet_ops())
+               pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */
index 8a043f0..b10cd43 100644 (file)
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
-       fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
index 0d1e2cb..a56b8e6 100644 (file)
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-               *low = net->ipv4.sysctl_local_ports.range[0];
-               *high = net->ipv4.sysctl_local_ports.range[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+               *low = net->ipv4.ip_local_ports.range[0];
+               *high = net->ipv4.ip_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
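The read_seqbegin()/read_seqretry() loop above is the lockless reader half of a seqlock; it assumes a writer that bumps the same lock around its update, as the sysctl_net_ipv4.c hunk later in this series does. The writer half, sketched:

    write_seqlock(&net->ipv4.ip_local_ports.lock);
    net->ipv4.ip_local_ports.range[0] = low;
    net->ipv4.ip_local_ports.range[1] = high;
    write_sequnlock(&net->ipv4.ip_local_ports.lock);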
 
index be8abe7..6f111e4 100644 (file)
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
        return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-              !skb->local_df;
+               skb->local_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-       unsigned int mtu;
-
-       if (skb->local_df || !skb_is_gso(skb))
-               return false;
-
-       mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-       /* if seglen > mtu, do software segmentation for IP fragmentation on
-        * output.  DF bit cannot be set since ip_forward would have sent
-        * icmp error.
-        */
-       return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       netdev_features_t features;
-       struct sk_buff *segs;
-       int ret = 0;
-
-       features = netif_skb_dev_features(skb, dst->dev);
-       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       consume_skb(skb);
-
-       do {
-               struct sk_buff *nskb = segs->next;
-               int err;
-
-               segs->next = NULL;
-               err = dst_output(segs);
-
-               if (err && ret == 0)
-                       ret = err;
-               segs = nskb;
-       } while (segs);
-
-       return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       if (ip_gso_exceeds_dst_mtu(skb))
-               return ip_forward_finish_gso(skb);
-
        return dst_output(skb);
 }
 
index c10a3ce..ed32313 100644 (file)
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
 
index 1cbeba5..a52f501 100644 (file)
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+       netdev_features_t features;
+       struct sk_buff *segs;
+       int ret = 0;
+
+       /* common case: locally created skb or seglen is <= mtu */
+       if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+               return ip_finish_output2(skb);
+
+       /* Slowpath - GSO segment length exceeds the dst MTU.
+        *
+        * This can happen in two cases:
+        * 1) TCP GRO packet, DF bit not set
+        * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+        * from host network stack.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       do {
+               struct sk_buff *nskb = segs->next;
+               int err;
+
+               segs->next = NULL;
+               err = ip_fragment(segs, ip_finish_output2);
+
+               if (err && ret == 0)
+                       ret = err;
+               segs = nskb;
+       } while (segs);
+
+       return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(skb);
+
+       if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);
-       else
-               return ip_finish_output2(skb);
+
+       return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
index b3f8597..2acc233 100644 (file)
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
-       bool connected = true;
+       bool connected;
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
 
        dst = tnl_params->daddr;
        if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();
index afcee51..13ef00f 100644 (file)
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
        if (!tunnel)
                return -1;
 
+       mark = be32_to_cpu(tunnel->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
                return 0;
        }
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;
index 12e13bd..f40f321 100644 (file)
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        err = ip_defrag(skb, user);
        local_bh_enable();
 
-       if (!err)
+       if (!err) {
                ip_send_check(ip_hdr(skb));
+               skb->local_df = 1;
+       }
 
        return err;
 }
index 8210964..044a0dd 100644 (file)
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
                                          kgid_t *high)
 {
-       kgid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.ping_group_range.range;
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
index db1e0da..5e676be 100644 (file)
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
        struct in_device *out_dev;
        unsigned int flags = 0;
        bool do_cache;
-       u32 itag;
+       u32 itag = 0;
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
index 44eba05..5cde8f2 100644 (file)
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] = range[0];
-       net->ipv4.sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] = range[0];
+       net->ipv4.ip_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 size_t *lenp, loff_t *ppos)
 {
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+               container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ping_group_range",
-               .data           = &init_net.ipv4.sysctl_ping_group_range,
+               .data           = &init_net.ipv4.ping_group_range.range,
                .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ip_local_port_range",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
-               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
+               .data           = &init_net.ipv4.ip_local_ports.range,
                .mode           = 0644,
                .proc_handler   = ipv4_local_port_range,
        },
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       /*
-        * Sane defaults - nobody may create ping sockets.
-        * Boot scripts should set this to distro-specific group.
-        */
-       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-       /*
-        * Set defaults for local port range
-        */
-       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] =  32768;
-       net->ipv4.sysctl_local_ports.range[1] =  61000;
-
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
index 40e701f..186a8ec 100644 (file)
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-       skb->protocol = htons(ETH_P_IP);
+       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+       skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+       return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
 #ifdef CONFIG_NETFILTER
-       if (!skb_dst(skb)->xfrm) {
+       if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
-
-       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IP);
-       return xfrm_output(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
-
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, dst->dev,
-                           x->outer_mode->afinfo->output_finish,
+                           NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
index 7f7b243..a2ce010 100644 (file)
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+       if (!head)
+               return 0;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
                        return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-       for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+       if (!head)
+               goto out;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
                        return ret;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
        struct xfrm4_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 59f95af..b2f0915 100644 (file)
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        unsigned int off;
        u16 flush = 1;
        int proto;
-       __wsum csum;
 
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        NAPI_GRO_CB(skb)->flush |= flush;
 
-       csum = skb->csum;
-       skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+       skb_gro_postpull_rcsum(skb, iph, nlen);
 
        pp = ops->callbacks.gro_receive(head, skb);
 
-       skb->csum = csum;
-
 out_unlock:
        rcu_read_unlock();
 
index 40e7581..fbf1156 100644 (file)
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
+       /* ipv6 conntrack defrag sets max_frag_size + local_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
 
+       if (skb->local_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                unsigned int maxnonfragsize, headersize;
 
                headersize = sizeof(struct ipv6hdr) +
-                            (opt ? opt->tot_len : 0) +
+                            (opt ? opt->opt_flen + opt->opt_nflen : 0) +
                             (dst_allfrag(&rt->dst) ?
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
index b05b609..f6a66bb 100644 (file)
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
        u8 proto;
 
-       if (!data)
+       if (!data || !data[IFLA_IPTUN_PROTO])
                return 0;
 
        proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
index b7c0f82..6cc9f93 100644 (file)
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip6_tnl *t;
        struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!t)
                return -1;
 
+       mark = be32_to_cpu(t->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
            type != NDISC_REDIRECT)
                return 0;
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET6);
        if (!x)
                return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
 
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
index 09a22f4..ca8d4ea 100644 (file)
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
        struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-       const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
        u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
                        /*
                         * Change: router to host
                         */
-                       struct rt6_info *rt;
-                       rt = rt6_get_dflt_router(saddr, dev);
-                       if (rt)
-                               ip6_del_rt(rt);
+                       rt6_clean_tohost(dev_net(dev), saddr);
                }
 
 out:
index 95f3f1d..d38e6a8 100644 (file)
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
+       int err;
 
        dst = ip6_route_output(net, skb->sk, &fl6);
-       if (dst->error) {
+       err = dst->error;
+       if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
                dst_release(dst);
-               return dst->error;
+               return err;
        }
 
        /* Drop old route. */
index 004fffb..6ebdb7b 100644 (file)
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
        fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER          (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY      (RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when a gateway turns into a host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+       struct in6_addr *gateway = (struct in6_addr *)arg;
+
+       if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+            ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+            ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+               return -1;
+       }
+       return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+       fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
        struct net_device *dev;
        struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
 
+       if (tb[RTA_MARK])
+               fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
        if (iif) {
                struct net_device *dev;
                int flags = 0;
index 0d78132..8517d3c 100644 (file)
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
        if (NAPI_GRO_CB(skb)->flush)
                goto skip_csum;
 
-       wsum = skb->csum;
+       wsum = NAPI_GRO_CB(skb)->csum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
index 19ef329..b930d08 100644 (file)
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-       IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-       skb->protocol = htons(ETH_P_IPV6);
        skb->local_df = 1;
 
        return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+       skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IPV6);
        return xfrm_output(skb);
 }
 
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
        struct xfrm_state *x = dst->xfrm;
        int mtu;
 
+#ifdef CONFIG_NETFILTER
+       if (!x) {
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(skb);
+       }
+#endif
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-                      skb_dst(skb)->dev, __xfrm6_output);
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
index 6ab989c..54d13f8 100644 (file)
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm6_protocol *handler;
+       struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+       if (!head)
+               return 0;
 
        for_each_protocol_rcu(*proto_handlers(protocol), handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
        struct xfrm6_protocol __rcu **pprev;
        struct xfrm6_protocol *t;
        bool add_netproto = false;
-
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
        struct xfrm6_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
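
Both paths above now reject protocols for which proto_handlers() has no slot before anything dereferences the lookup. A small standalone sketch of the guard, assuming an invented dispatch table:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

typedef int (*handler_t)(int);

static int esp_handler(int x) { return x; }

/* stand-in for proto_handlers(): NULL for unsupported protocols */
static handler_t *proto_handlers(int protocol)
{
        static handler_t esp = esp_handler;

        return protocol == 50 ? &esp : NULL;    /* 50 = IPPROTO_ESP */
}

static int protocol_register(int protocol)
{
        if (!proto_handlers(protocol))
                return -EINVAL; /* reject before any dereference */
        return 0;
}

int main(void)
{
        printf("%d %d\n", protocol_register(50), protocol_register(6));
        return 0;
}
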
index 01e77b0..8c9d730 100644
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
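
The af_iucv one-liner flips an inverted comparison: the transmit-done handler must pick the queued skb whose tag equals the completed message's tag, where the old test matched the first non-equal entry. A minimal analogue:

#include <stdio.h>

struct entry { int tag; };

static struct entry *find_by_tag(struct entry *list, int n, int tag)
{
        for (int i = 0; i < n; i++)
                if (list[i].tag == tag) /* was '!=' before the fix */
                        return &list[i];
        return NULL;
}

int main(void)
{
        struct entry q[] = { { 3 }, { 7 }, { 9 } };
        struct entry *e = find_by_tag(q, 3, 7);

        printf("found tag %d\n", e ? e->tag : -1);
        return 0;
}
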
index 222c28b..f169b6e 100644
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
        bool started, abort, hw_begun, notified;
        bool to_be_freed;
+       bool on_channel;
 
        unsigned long hw_start_time;
 
index dee50ae..27600a9 100644
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
        sdata_lock(sdata);
 
-       if (ifmgd->auth_data) {
+       if (ifmgd->auth_data || ifmgd->assoc_data) {
+               const u8 *bssid = ifmgd->auth_data ?
+                               ifmgd->auth_data->bss->bssid :
+                               ifmgd->assoc_data->bss->bssid;
+
                /*
-                * If we are trying to authenticate while suspending, cfg80211
-                * won't know and won't actually abort those attempts, thus we
-                * need to do that ourselves.
+                * If we are trying to authenticate / associate while suspending,
+                * cfg80211 won't know and won't actually abort those attempts,
+                * thus we need to do that ourselves.
                 */
-               ieee80211_send_deauth_disassoc(sdata,
-                                              ifmgd->auth_data->bss->bssid,
+               ieee80211_send_deauth_disassoc(sdata, bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               WLAN_REASON_DEAUTH_LEAVING,
                                               false, frame_buf);
-               ieee80211_destroy_auth_data(sdata, false);
+               if (ifmgd->assoc_data)
+                       ieee80211_destroy_assoc_data(sdata, false);
+               if (ifmgd->auth_data)
+                       ieee80211_destroy_auth_data(sdata, false);
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
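
The quiesce fix extends the suspend-time abort from pending authentications to pending associations, taking the BSSID from whichever attempt exists. A standalone sketch with invented stand-in types:

#include <stddef.h>
#include <stdio.h>

struct attempt { const char *bssid; };

static void quiesce(struct attempt *auth, struct attempt *assoc)
{
        if (auth || assoc) {
                const char *bssid = auth ? auth->bssid : assoc->bssid;

                printf("deauth to %s\n", bssid);
                if (assoc)
                        printf("destroy assoc data\n");
                if (auth)
                        printf("destroy auth data\n");
        }
}

int main(void)
{
        struct attempt assoc = { "aa:bb:cc:dd:ee:ff" };

        quiesce(NULL, &assoc);  /* the old code ignored this case */
        return 0;
}
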
index 6fb3855..7a17dec 100644
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                container_of(work, struct ieee80211_roc_work, work.work);
        struct ieee80211_sub_if_data *sdata = roc->sdata;
        struct ieee80211_local *local = sdata->local;
-       bool started;
+       bool started, on_channel;
 
        mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
        if (!roc->started) {
                struct ieee80211_roc_work *dep;
 
-               /* start this ROC */
-               ieee80211_offchannel_stop_vifs(local);
+               WARN_ON(local->use_chanctx);
+
+               /* If actually operating on the desired channel (with at least
+                * 20 MHz channel width) don't stop all the operations but still
+                * treat it as though the ROC operation started properly, so
+                * other ROC operations won't interfere with this one.
+                */
+               roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-               /* switch channel etc */
+               /* start this ROC */
                ieee80211_recalc_idle(local);
 
-               local->tmp_channel = roc->chan;
-               ieee80211_hw_config(local, 0);
+               if (!roc->on_channel) {
+                       ieee80211_offchannel_stop_vifs(local);
+
+                       local->tmp_channel = roc->chan;
+                       ieee80211_hw_config(local, 0);
+               }
 
                /* tell userspace or send frame */
                ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
  finish:
                list_del(&roc->list);
                started = roc->started;
+               on_channel = roc->on_channel;
                ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-               if (started) {
+               if (started && !on_channel) {
                        ieee80211_flush_queues(local, NULL);
 
                        local->tmp_channel = NULL;
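
The remain-on-channel rework records whether the requested channel is already the operating channel; if so, it skips the off-channel switch at start and, symmetrically, the restore at teardown. A sketch of that bookkeeping with invented names (the 20 MHz width check is omitted):

#include <stdbool.h>
#include <stdio.h>

struct roc { int chan; bool started, on_channel; };

static void roc_start(struct roc *r, int oper_chan)
{
        r->on_channel = (r->chan == oper_chan);
        if (!r->on_channel)
                printf("switching to channel %d\n", r->chan);
        r->started = true;
}

static void roc_finish(const struct roc *r, int oper_chan)
{
        /* only undo what roc_start() actually did */
        if (r->started && !r->on_channel)
                printf("restoring channel %d\n", oper_chan);
}

int main(void)
{
        struct roc a = { .chan = 6 }, b = { .chan = 11 };

        roc_start(&a, 6);  roc_finish(&a, 6);  /* no switch, no restore */
        roc_start(&b, 6);  roc_finish(&b, 6);  /* switch and restore */
        return 0;
}
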
index 216c45b..2b608b2 100644
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
                    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        sta->last_rx = jiffies;
-                       if (ieee80211_is_data(hdr->frame_control)) {
+                       if (ieee80211_is_data(hdr->frame_control) &&
+                           !is_multicast_ether_addr(hdr->addr1)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
                                sta->last_rx_rate_vht_flag = status->vht_flag;
index 137a192..847d92f 100644
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
-       if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+       if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
                                           sdata->smps_mode) &&
            sta->known_smps_mode != sdata->bss->req_smps &&
            sta_info_tx_streams(sta) != 1) {
index 00ba90b..60cb7a6 100644
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
            !is_multicast_ether_addr(hdr->addr1))
                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-       if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-           (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-       else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
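
The radiotap change replaces an unreachable 'else if': the old first test already covered the RTS bit, so the RTS flag could never be reported. Two independent tests fix that, as this standalone sketch shows (flag values invented):

#include <stdio.h>

#define RC_USE_RTS 0x1
#define RC_USE_CTS 0x2

static int txflags_old(int f)
{
        int out = 0;

        if ((f & RC_USE_RTS) || (f & RC_USE_CTS))
                out |= 0x10;            /* CTS */
        else if (f & RC_USE_RTS)
                out |= 0x20;            /* RTS: dead code */
        return out;
}

static int txflags_new(int f)
{
        int out = 0;

        if (f & RC_USE_CTS)
                out |= 0x10;
        if (f & RC_USE_RTS)
                out |= 0x20;
        return out;
}

int main(void)
{
        printf("old=%#x new=%#x\n", txflags_old(RC_USE_RTS),
               txflags_new(RC_USE_RTS));        /* old=0x10 new=0x20 */
        return 0;
}
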
index a0b0aea..cec5b60 100644
 
 #define VIF_ENTRY      __field(enum nl80211_iftype, vif_type) __field(void *, sdata)   \
                        __field(bool, p2p)                                              \
-                       __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __string(vif_name, sdata->name)
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+                       __assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
index 275c94f..3c36583 100644
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
+               cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
        /*
         * If this is for hw restart things are still running.
index e9e36a2..9265adf 100644
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
-       /* A VHT STA must support 40 MHz */
-       if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-               return;
+       /*
+        * A VHT STA must support 40 MHz, but if we verify that here
+        * then we break a few things - some APs (e.g. Netgear R6300v2
+        * and others based on the BCM4360 chipset) will unset this
+        * capability bit when operating in 20 MHz.
+        */
 
        vht_cap->vht_supported = true;
 
index ccc46fa..5857963 100644
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
+
        ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
                                        cda[CTA_NAT_DST]);
        if (ret < 0)
index 8041053..345acfb 100644
@@ -66,20 +66,6 @@ struct nft_jumpstack {
        int                     rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-               struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-       struct nft_stats __percpu *stats;
-       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-       rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(chain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
-       rcu_read_unlock_bh();
-}
-
 enum nft_trace {
        NFT_TRACE_RULE,
        NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-       const struct nft_chain *chain = ops->priv;
+       const struct nft_chain *chain = ops->priv, *basechain = chain;
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       int rulenum = 0;
+       struct nft_stats __percpu *stats;
+       int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
         * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+       rulenum = 0;
        rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
                switch (data[NFT_REG_VERDICT].verdict) {
                case NFT_BREAK:
                        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-                       /* fall through */
+                       continue;
                case NFT_CONTINUE:
+                       if (unlikely(pkt->skb->nf_trace))
+                               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                        continue;
                }
                break;
@@ -183,37 +173,44 @@ next_rule:
                jumpstack[stackptr].rule  = rule;
                jumpstack[stackptr].rulenum = rulenum;
                stackptr++;
-               /* fall through */
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
        case NFT_GOTO:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
                chain = data[NFT_REG_VERDICT].chain;
                goto do_chain;
        case NFT_RETURN:
                if (unlikely(pkt->skb->nf_trace))
                        nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
-               /* fall through */
+               break;
        case NFT_CONTINUE:
+               if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
                break;
        default:
                WARN_ON(1);
        }
 
        if (stackptr > 0) {
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
                stackptr--;
                chain = jumpstack[stackptr].chain;
                rule  = jumpstack[stackptr].rule;
                rulenum = jumpstack[stackptr].rulenum;
                goto next_rule;
        }
-       nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
        if (unlikely(pkt->skb->nf_trace))
-               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+               nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(basechain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
 
-       return nft_base_chain(chain)->policy;
+       return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
 
index e009087..23ef77c 100644
@@ -256,15 +256,15 @@ replay:
 #endif
                {
                        nfnl_unlock(subsys_id);
-                       kfree_skb(nskb);
-                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       return kfree_skb(nskb);
                }
        }
 
        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
-               kfree_skb(nskb);
-               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               netlink_ack(skb, nlh, -EOPNOTSUPP);
+               return kfree_skb(skb);
        }
 
        while (skb->len >= nlmsg_total_size(0)) {
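
The nfnetlink hunks reorder an ack against a free: the message must be acknowledged while its backing buffer is still alive, and released afterwards. The classic use-then-free shape, in a trivial standalone form:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void ack(const char *msg)
{
        printf("ack: %s\n", msg);
}

int main(void)
{
        char *buf = malloc(32);

        if (!buf)
                return 1;
        strcpy(buf, "batch message");
        /* wrong order would read freed memory: free(buf); ack(buf); */
        ack(buf);       /* use first... */
        free(buf);      /* ...free last */
        return 0;
}
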
index 7633a75..0ad0807 100644
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
        _debug("tktlen: %x", tktlen);
        if (tktlen > AFSTOKEN_RK_TIX_MAX)
                return -EKEYREJECTED;
-       if (8 * 4 + tktlen != toklen)
+       if (toklen < 8 * 4 + tktlen)
                return -EKEYREJECTED;
 
        plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
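
The rxrpc check relaxes an exact-length test to a minimum-length test, so a token that carries trailing padding after the ticket is no longer rejected. A sketch with invented sizes:

#include <stddef.h>
#include <stdio.h>

#define HDR_LEN (8 * 4)         /* fixed header, as in the hunk */

static int check_token(size_t toklen, size_t tktlen)
{
        if (toklen < HDR_LEN + tktlen)
                return -1;      /* truncated: reject */
        return 0;               /* exact or trailing padding: accept */
}

int main(void)
{
        printf("%d %d %d\n",
               check_token(40, 8),      /* exact length:  0 */
               check_token(44, 8),      /* padded:        0 */
               check_token(36, 8));     /* short:        -1 */
        return 0;
}
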
index eed8404..f435a88 100644
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+       memset(r, 0, sizeof(*r));
+       tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                return err;
 
        memcpy(&cp, p, sizeof(cp));
-       memset(&new_filter_result, 0, sizeof(new_filter_result));
-       tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       tcindex_filter_result_init(&new_filter_result);
 
+       tcindex_filter_result_init(&cr);
        if (old_r)
-               memcpy(&cr, r, sizeof(cr));
-       else {
-               memset(&cr, 0, sizeof(cr));
-               tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       }
+               cr.res = r->res;
 
        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
+                       int i;
+
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
+                       for (i = 0; i < cp.hash; i++)
+                               tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+                                             TCA_TCINDEX_POLICE);
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                tcf_bind_filter(tp, &cr.res, base);
        }
 
-       tcf_exts_change(tp, &cr.exts, &e);
+       if (old_r)
+               tcf_exts_change(tp, &r->exts, &e);
+       else
+               tcf_exts_change(tp, &cr.exts, &e);
 
        tcf_tree_lock(tp);
        if (old_r && old_r != r)
-               memset(old_r, 0, sizeof(*old_r));
+               tcindex_filter_result_init(old_r);
 
        memcpy(p, &cp, sizeof(cp));
-       memcpy(r, &cr, sizeof(cr));
+       r->res = cr.res;
 
        if (r == &new_filter_result) {
                struct tcindex_filter **fp;
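
The tcindex changes fold the repeated memset-plus-tcf_exts_init pairs into one tcindex_filter_result_init() helper. The refactoring pattern in standalone form, struct contents invented:

#include <stdio.h>
#include <string.h>

struct result {
        int classid;
        int exts;
};

/* one helper replaces every repeated memset + init pair */
static void result_init(struct result *r)
{
        memset(r, 0, sizeof(*r));
        r->exts = 1;    /* stand-in for tcf_exts_init() */
}

int main(void)
{
        struct result new_filter_result, cr;

        result_init(&new_filter_result);
        result_init(&cr);
        printf("%d %d\n", new_filter_result.exts, cr.exts);
        return 0;
}
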
index 7d09a71..88f108e 100644
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+       ASSERT_RTNL();
+
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+       rtnl_lock();
+       cfg80211_sched_scan_stopped_rtnl(wiphy);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
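
The scan.c split introduces the common locked/unlocked function pair: the new _rtnl variant asserts the caller already holds the RTNL, and the old entry point becomes a lock-taking wrapper, so ieee80211_reconfig() (which runs under the RTNL) can call in without deadlocking. A dependency-free sketch of the pattern, with an invented flag standing in for the RTNL:

#include <assert.h>
#include <stdio.h>

static int rtnl_held;   /* stand-in for the RTNL mutex */

static void rtnl_lock(void)   { rtnl_held = 1; }
static void rtnl_unlock(void) { rtnl_held = 0; }

/* _rtnl variant: the caller must already hold the lock */
static void stop_sched_scan_rtnl(void)
{
        assert(rtnl_held);      /* analogue of ASSERT_RTNL() */
        printf("sched scan stopped\n");
}

/* plain variant: wraps the _rtnl one for unlocked callers */
static void stop_sched_scan(void)
{
        rtnl_lock();
        stop_sched_scan_rtnl();
        rtnl_unlock();
}

int main(void)
{
        stop_sched_scan();      /* driver context, lock not held */
        rtnl_lock();
        stop_sched_scan_rtnl(); /* reconfig path, lock already held */
        rtnl_unlock();
        return 0;
}
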
index acdcb4a..3546a77 100644
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
-                       cfg80211_sme_free(wdev);
                }
                wdev_unlock(wdev);
        }
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
                }
+               cfg80211_sme_free(wdev);
                return;
        }
 
index 94d0873..76cbb9e 100644
@@ -182,6 +182,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
        struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+       struct snd_pcm_runtime *runtime = substream->runtime;
        int ret;
 
        switch (cmd) {
@@ -196,6 +197,11 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                dmaengine_resume(prtd->dma_chan);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+                       dmaengine_pause(prtd->dma_chan);
+               else
+                       dmaengine_terminate_all(prtd->dma_chan);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                dmaengine_pause(prtd->dma_chan);
                break;
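
The pcm_dmaengine change stops routing SNDRV_PCM_TRIGGER_SUSPEND through the pause path unconditionally: pausing is only valid when the hardware advertises it, otherwise the channel must be terminated. A standalone sketch, names invented:

#include <stdio.h>

#define INFO_PAUSE 0x1  /* stand-in for SNDRV_PCM_INFO_PAUSE */

static void dma_pause(void)     { printf("dma paused\n"); }
static void dma_terminate(void) { printf("dma terminated\n"); }

static void trigger_suspend(unsigned int info)
{
        if (info & INFO_PAUSE)
                dma_pause();    /* resumable later */
        else
                dma_terminate();/* stream restarted from scratch */
}

int main(void)
{
        trigger_suspend(INFO_PAUSE);
        trigger_suspend(0);
        return 0;
}
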
index 2c54629..6cc3cf2 100644
@@ -1743,6 +1743,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* 9 Series */
+       { PCI_DEVICE(0x8086, 0x8ca0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Wellsburg */
        { PCI_DEVICE(0x8086, 0x8d20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
index bcae806..9a617ad 100644
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
        $(call descend,$@)
 
+liblockdep: FORCE
+       $(call descend,lib/lockdep)
+
 libapikfs: FORCE
        $(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
        $(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+       $(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
        $(call descend,lib/api,clean)
 
index cb09d3f..bba2f52 100644
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+       $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
        $(RM) tags TAGS
 
 endif # skip-makefile