Merge branches 'arm/exynos', 'arm/omap', 'arm/smmu', 'x86/vt-d', 'x86/amd' and 'core...
author Joerg Roedel <jroedel@suse.de>
Thu, 2 Oct 2014 10:24:45 +0000 (12:24 +0200)
committer Joerg Roedel <jroedel@suse.de>
Thu, 2 Oct 2014 10:24:45 +0000 (12:24 +0200)
Conflicts:
drivers/iommu/arm-smmu.c

299 files changed:
Documentation/DocBook/media/v4l/compat.xml
Documentation/DocBook/media/v4l/func-poll.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
Documentation/cgroups/cpusets.txt
Documentation/devicetree/bindings/iommu/arm,smmu.txt
Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
Documentation/devicetree/of_selftest.txt [new file with mode: 0644]
Documentation/networking/filter.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/k2e-clocks.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/mach-imx/clk-gate2.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-pxa/generic.c
arch/arm/plat-omap/Kconfig
arch/ia64/configs/bigsur_defconfig
arch/ia64/configs/generic_defconfig
arch/ia64/configs/gensparse_defconfig
arch/ia64/configs/sim_defconfig
arch/ia64/configs/tiger_defconfig
arch/ia64/configs/zx1_defconfig
arch/mips/configs/gpr_defconfig
arch/mips/configs/ip27_defconfig
arch/mips/configs/jazz_defconfig
arch/mips/configs/loongson3_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig
arch/mips/configs/malta_kvm_guest_defconfig
arch/mips/configs/mtx1_defconfig
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/configs/rm200_defconfig
arch/mips/kernel/mcount.S
arch/mips/math-emu/cp1emu.c
arch/mips/mm/init.c
arch/parisc/Makefile
arch/parisc/configs/a500_defconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/kernel/ptrace.c
arch/powerpc/configs/c2k_defconfig
arch/powerpc/configs/pmac32_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/pseries_le_defconfig
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/mm/init.c
arch/sh/configs/sdk7780_defconfig
arch/sh/configs/sh2007_defconfig
arch/sparc/configs/sparc64_defconfig
arch/sparc/net/bpf_jit_asm.S
arch/sparc/net/bpf_jit_comp.c
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/aslr.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/crypto/aesni-intel_glue.c
arch/x86/include/asm/efi.h
arch/x86/include/asm/fixmap.h
arch/x86/kernel/smpboot.c
block/blk-exec.c
block/blk-mq.c
block/genhd.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exprep.c
drivers/acpi/container.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/base/core.c
drivers/bus/omap_l3_noc.h
drivers/clk/at91/clk-slow.c
drivers/clk/clk-efm32gg.c
drivers/clk/clk.c
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/divider.c
drivers/cpufreq/cpufreq.c
drivers/crypto/ccp/ccp-crypto-main.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/dma/omap-dma.c
drivers/firmware/efi/Makefile
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/si.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/tmp103.c
drivers/i2c/Makefile
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-acpi.c [deleted file]
drivers/i2c/i2c-core.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_user_pages.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/input/serio/i8042-x86ia64io.h
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iommu.h
drivers/md/raid1.c
drivers/media/common/cx2341x.c
drivers/media/dvb-frontends/cx24123.c
drivers/media/i2c/adv7604.c
drivers/media/radio/radio-miropcm20.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/em28xx/em28xx.h
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-dma-sg.c
drivers/message/fusion/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/phy/micrel.c
drivers/net/usb/r8152.c
drivers/net/wireless/ath/ath9k/common-beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
drivers/net/wireless/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/nfc/microread/microread.c
drivers/nfc/st21nfca/Makefile
drivers/nfc/st21nfcb/Makefile
drivers/of/base.c
drivers/of/dynamic.c
drivers/of/fdt.c
drivers/parisc/pdc_stable.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/probe.c
drivers/rtc/rtc-efi.c
drivers/scsi/Kconfig
drivers/scsi/bnx2fc/Kconfig
drivers/scsi/bnx2i/Kconfig
drivers/scsi/csiostor/Kconfig
drivers/scsi/qla2xxx/Kconfig
drivers/soc/qcom/qcom_gsbi.c
fs/buffer.c
fs/cachefiles/bind.c
fs/cachefiles/daemon.c
fs/cachefiles/internal.h
fs/cachefiles/main.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/cachefiles/xattr.c
fs/dcache.c
fs/direct-io.c
fs/fscache/object.c
fs/fscache/page.c
fs/fuse/file.c
fs/nilfs2/inode.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/super.c
fs/proc/task_mmu.c
fs/ufs/ialloc.c
fs/ufs/namei.c
include/acpi/acpi_bus.h
include/linux/ccp.h
include/linux/cpuset.h
include/linux/device.h
include/linux/dmar.h
include/linux/i2c.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/percpu-refcount.h
include/linux/sched.h
include/linux/uio.h
include/linux/workqueue.h
include/media/videobuf2-core.h
include/net/addrconf.h
include/net/dst.h
include/net/genetlink.h
include/net/sch_generic.h
include/rdma/ib_umem.h
kernel/cgroup.c
kernel/cpuset.c
kernel/power/snapshot.c
lib/genalloc.c
lib/percpu-refcount.c
lib/rhashtable.c
mm/iov_iter.c
mm/memory.c
mm/shmem.c
mm/slab.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/core/dev.c
net/core/sock.c
net/ipv4/ip_tunnel.c
net/ipv4/route.c
net/ipv6/addrconf.c
net/ipv6/anycast.c
net/ipv6/ip6_output.c
net/mac80211/sta_info.c
net/openvswitch/datapath.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-key.c
net/sched/sch_choke.c
net/socket.c
net/wireless/nl80211.c
net/xfrm/xfrm_policy.c
scripts/tags.sh
sound/core/pcm_lib.c
sound/pci/hda/patch_conexant.c
sound/usb/caiaq/control.c
virt/kvm/arm/vgic-v2.c
virt/kvm/kvm_main.c

index eee6f0f..3a626d1 100644 (file)
@@ -2545,6 +2545,30 @@ fields changed from _s32 to _u32.
       </orderedlist>
     </section>
 
+    <section>
+      <title>V4L2 in Linux 3.16</title>
+      <orderedlist>
+        <listitem>
+         <para>Added event V4L2_EVENT_SOURCE_CHANGE.
+         </para>
+        </listitem>
+      </orderedlist>
+    </section>
+
+    <section>
+      <title>V4L2 in Linux 3.17</title>
+      <orderedlist>
+        <listitem>
+         <para>Extended &v4l2-pix-format;. Added format flags.
+         </para>
+        </listitem>
+        <listitem>
+         <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
+         </para>
+        </listitem>
+      </orderedlist>
+    </section>
+
     <section id="other">
       <title>Relation of V4L2 to other Linux multimedia APIs</title>
 
index 85cad8b..4c73f11 100644 (file)
@@ -29,9 +29,12 @@ can suspend execution until the driver has captured data or is ready
 to accept data for output.</para>
 
     <para>When streaming I/O has been negotiated this function waits
-until a buffer has been filled or displayed and can be dequeued with
-the &VIDIOC-DQBUF; ioctl. When buffers are already in the outgoing
-queue of the driver the function returns immediately.</para>
+until a buffer has been filled by the capture device and can be dequeued
+with the &VIDIOC-DQBUF; ioctl. For output devices this function waits
+until the device is ready to accept a new buffer to be queued up with
+the &VIDIOC-QBUF; ioctl for display. When buffers are already in the outgoing
+queue of the driver (capture) or the incoming queue isn't full (display)
+the function returns immediately.</para>
 
     <para>On success <function>poll()</function> returns the number of
 file descriptors that have been selected (that is, file descriptors
@@ -44,10 +47,22 @@ Capture devices set the <constant>POLLIN</constant> and
 flags. When the function timed out it returns a value of zero, on
 failure it returns <returnvalue>-1</returnvalue> and the
 <varname>errno</varname> variable is set appropriately. When the
-application did not call &VIDIOC-QBUF; or &VIDIOC-STREAMON; yet the
+application did not call &VIDIOC-STREAMON; the
 <function>poll()</function> function succeeds, but sets the
 <constant>POLLERR</constant> flag in the
-<structfield>revents</structfield> field.</para>
+<structfield>revents</structfield> field. When the
+application has called &VIDIOC-STREAMON; for a capture device but hasn't
+yet called &VIDIOC-QBUF;, the <function>poll()</function> function
+succeeds and sets the <constant>POLLERR</constant> flag in the
+<structfield>revents</structfield> field. For output devices this
+same situation will cause <function>poll()</function> to succeed
+as well, but it sets the <constant>POLLOUT</constant> and
+<constant>POLLWRNORM</constant> flags in the <structfield>revents</structfield>
+field.</para>
+
+    <para>If an event occurred (see &VIDIOC-DQEVENT;) then
+<constant>POLLPRI</constant> will be set in the <structfield>revents</structfield>
+field and <function>poll()</function> will return.</para>
 
     <para>When use of the <function>read()</function> function has
 been negotiated and the driver does not capture yet, the
@@ -58,10 +73,18 @@ continuously (as opposed to, for example, still images) the function
 may return immediately.</para>
 
     <para>When use of the <function>write()</function> function has
-been negotiated the <function>poll</function> function just waits
+been negotiated and the driver does not stream yet, the
+<function>poll</function> function starts streaming. When that fails
+it returns a <constant>POLLERR</constant> as above. Otherwise it waits
 until the driver is ready for a non-blocking
 <function>write()</function> call.</para>
 
+    <para>If the caller is only interested in events (just
+<constant>POLLPRI</constant> is set in the <structfield>events</structfield>
+field), then <function>poll()</function> will <emphasis>not</emphasis>
+start streaming if the driver does not stream yet. This makes it
+possible to just poll for events and not for buffers.</para>
+
     <para>All drivers implementing the <function>read()</function> or
 <function>write()</function> function or streaming I/O must also
 support the <function>poll()</function> function.</para>
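
An illustrative user-space sketch of the poll() semantics documented above (a
sketch only, not part of this patch; /dev/video0 is a placeholder device node):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR | O_NONBLOCK);
		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };

		if (fd < 0)
			return 1;

		/* Wait up to 5 s for a dequeueable buffer or a pending event. */
		if (poll(&pfd, 1, 5000) > 0) {
			if (pfd.revents & POLLERR)
				printf("streaming not started or queue error\n");
			if (pfd.revents & POLLPRI)
				printf("event pending: fetch it with VIDIOC_DQEVENT\n");
			if (pfd.revents & (POLLIN | POLLRDNORM))
				printf("buffer ready: dequeue it with VIDIOC_DQBUF\n");
		}
		close(fd);
		return 0;
	}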
index f2f81f0..7cfe618 100644 (file)
@@ -152,10 +152,11 @@ structs, ioctls) must be noted in more detail in the history chapter
 applications. -->
 
       <revision>
-       <revnumber>3.16</revnumber>
-       <date>2014-05-27</date>
-       <authorinitials>lp</authorinitials>
-       <revremark>Extended &v4l2-pix-format;. Added format flags.
+       <revnumber>3.17</revnumber>
+       <date>2014-08-04</date>
+       <authorinitials>lp, hv</authorinitials>
+       <revremark>Extended &v4l2-pix-format;. Added format flags. Added compound control types
+and VIDIOC_QUERY_EXT_CTRL.
        </revremark>
       </revision>
 
@@ -538,7 +539,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.14</subtitle>
+ <subtitle>Revision 3.17</subtitle>
 
   <chapter id="common">
     &sub-common;
index 1ba9e99..c62a736 100644 (file)
          </row>
          <row>
            <entry>&v4l2-rect;</entry>
-           <entry><structfield>rect</structfield></entry>
+           <entry><structfield>r</structfield></entry>
            <entry>Selection rectangle, in pixels.</entry>
          </row>
          <row>
index 7740038..3c94ff3 100644 (file)
@@ -345,14 +345,14 @@ the named feature on.
 The implementation is simple.
 
 Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag
-PF_SPREAD_PAGE for each task that is in that cpuset or subsequently
+PFA_SPREAD_PAGE for each task that is in that cpuset or subsequently
 joins that cpuset.  The page allocation calls for the page cache
-is modified to perform an inline check for this PF_SPREAD_PAGE task
+is modified to perform an inline check for this PFA_SPREAD_PAGE task
 flag, and if set, a call to a new routine cpuset_mem_spread_node()
 returns the node to prefer for the allocation.
 
 Similarly, setting 'cpuset.memory_spread_slab' turns on the flag
-PF_SPREAD_SLAB, and appropriately marked slab caches will allocate
+PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate
 pages from the node returned by cpuset_mem_spread_node().
 
 The cpuset_mem_spread_node() routine is also simple.  It uses the
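
A rough sketch of the inline check described in the hunk above (kernel-style
pseudo-code, not the exact mainline implementation; the helper names are taken
from include/linux/cpuset.h and include/linux/gfp.h):

	#include <linux/cpuset.h>
	#include <linux/gfp.h>

	/*
	 * Page-cache allocation path, sketched: if the current task carries
	 * PFA_SPREAD_PAGE (tested by cpuset_do_page_mem_spread()), prefer the
	 * node returned by cpuset_mem_spread_node() over the local node.
	 */
	static struct page *page_cache_alloc_sketch(gfp_t gfp)
	{
		if (cpuset_do_page_mem_spread()) {
			int nid = cpuset_mem_spread_node();

			return alloc_pages_exact_node(nid, gfp, 0);
		}
		return alloc_pages(gfp, 0);
	}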
index 2d0f7cd..0676050 100644 (file)
@@ -14,6 +14,7 @@ conditions.
                         "arm,smmu-v1"
                         "arm,smmu-v2"
                         "arm,mmu-400"
+                        "arm,mmu-401"
                         "arm,mmu-500"
 
                   depending on the particular implementation and/or the
index 578a1fc..443bcb6 100644 (file)
@@ -56,6 +56,9 @@ Required properties:
  - fsl,data-width : should be <18> or <24>
  - port: A port node with endpoint definitions as defined in
    Documentation/devicetree/bindings/media/video-interfaces.txt.
+   On i.MX5, the internal two-input-multiplexer is used.
+   Due to hardware limitations, only one port (port@[0,1])
+   can be used for each channel (lvds-channel@[0,1], respectively)
    On i.MX6, there should be four ports (port@[0-3]) that correspond
    to the four LVDS multiplexer inputs.
 
@@ -78,6 +81,8 @@ ldb: ldb@53fa8008 {
                      "di0", "di1";
 
        lvds-channel@0 {
+               #address-cells = <1>;
+               #size-cells = <0>;
                reg = <0>;
                fsl,data-mapping = "spwg";
                fsl,data-width = <24>;
@@ -86,7 +91,9 @@ ldb: ldb@53fa8008 {
                        /* ... */
                };
 
-               port {
+               port@0 {
+                       reg = <0>;
+
                        lvds0_in: endpoint {
                                remote-endpoint = <&ipu_di0_lvds0>;
                        };
@@ -94,6 +101,8 @@ ldb: ldb@53fa8008 {
        };
 
        lvds-channel@1 {
+               #address-cells = <1>;
+               #size-cells = <0>;
                reg = <1>;
                fsl,data-mapping = "spwg";
                fsl,data-width = <24>;
@@ -102,7 +111,9 @@ ldb: ldb@53fa8008 {
                        /* ... */
                };
 
-               port {
+               port@1 {
+                       reg = <1>;
+
                        lvds1_in: endpoint {
                                remote-endpoint = <&ipu_di1_lvds1>;
                        };
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt
new file mode 100644 (file)
index 0000000..3a2f54d
--- /dev/null
@@ -0,0 +1,211 @@
+Open Firmware Device Tree Selftest
+----------------------------------
+
+Author: Gaurav Minocha <gaurav.minocha.os@gmail.com>
+
+1. Introduction
+
+This document explains how the test data required for executing OF selftest
+is attached to the live tree dynamically, independent of the machine's
+architecture.
+
+It is recommended to read the following documents before moving ahead.
+
+[1] Documentation/devicetree/usage-model.txt
+[2] http://www.devicetree.org/Device_Tree_Usage
+
+OF Selftest has been designed to test the interface (include/linux/of.h)
+provided to device driver developers to fetch the device information, etc.,
+from the unflattened device tree data structure. This interface is used by
+most of the device drivers in various use cases.
+
+
+2. Test-data
+
+The Device Tree Source file (drivers/of/testcase-data/testcases.dts) contains
+the test data required for executing the unit tests automated in
+drivers/of/selftests.c. Currently, the following Device Tree Source Include
+files (.dtsi) are included in testcases.dts:
+
+drivers/of/testcase-data/tests-interrupts.dtsi
+drivers/of/testcase-data/tests-platform.dtsi
+drivers/of/testcase-data/tests-phandle.dtsi
+drivers/of/testcase-data/tests-match.dtsi
+
+When the kernel is built with OF_SELFTEST enabled, the following make rule
+
+$(obj)/%.dtb: $(src)/%.dts FORCE
+       $(call if_changed_dep, dtc)
+
+is used to compile the DT source file (testcases.dts) into a binary blob
+(testcases.dtb), also referred to as a flattened DT.
+
+After that, using the following rule, the binary blob above is wrapped in an
+assembly file (testcases.dtb.S).
+
+$(obj)/%.dtb.S: $(obj)/%.dtb
+       $(call cmd, dt_S_dtb)
+
+The assembly file is compiled into an object file (testcases.dtb.o), and is
+linked into the kernel image.
+
+
+2.1. Adding the test data
+
+Un-flattened device tree structure:
+
+The un-flattened device tree consists of connected device_node(s) in the form
+of a tree structure, as described below.
+
+// following struct members are used to construct the tree
+struct device_node {
+    ...
+    struct  device_node *parent;
+    struct  device_node *child;
+    struct  device_node *sibling;
+    struct  device_node *allnext;   /* next in list of all nodes */
+    ...
+ };
+
+Figure 1 describes a generic structure of a machine’s un-flattened device tree,
+considering only the child and sibling pointers. There is another pointer,
+*parent, that is used to traverse the tree in the reverse direction. So, at
+a particular level, the child node and all of its sibling nodes have a parent
+pointer pointing to a common node (e.g. child1, sibling2, sibling3 and
+sibling4 have parent pointers to the root node).
+
+root (‘/’)
+   |
+child1 -> sibling2 -> sibling3 -> sibling4 -> null
+   |         |           |           |
+   |         |           |          null
+   |         |           |
+   |         |        child31 -> sibling32 -> null
+   |         |           |          |
+   |         |          null       null
+   |         |
+   |      child21 -> sibling22 -> sibling23 -> null
+   |         |          |            |
+   |        null       null         null
+   |
+child11 -> sibling12 -> sibling13 -> sibling14 -> null
+   |           |           |            |
+   |           |           |           null
+   |           |           |
+  null        null       child131 -> null
+                           |
+                          null
+
+Figure 1: Generic structure of un-flattened device tree
+
+
+*allnext: it is used to link all the nodes of DT into a list. So, for the
+ above tree the list would be as follows:
+
+root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2->
+child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null
+
+Before executing OF selftest, the test data must be attached to the
+machine's device tree (if present). So, when selftest_data_add() is called,
+it first reads the flattened device tree data linked into the kernel image
+via the following kernel symbols:
+
+__dtb_testcases_begin - address marking the start of test data blob
+__dtb_testcases_end   - address marking the end of test data blob
+
+Secondly, it calls of_fdt_unflatten_device_tree() to unflatten the flattened
+blob. Finally, if the machine’s device tree (i.e. the live tree) is present,
+it attaches the unflattened test data tree to the live tree; otherwise the
+test data tree itself becomes the live device tree.
+
+attach_node_and_children() uses of_attach_node() to attach the nodes to the
+live tree as explained below. To illustrate this, the test data tree described
+in Figure 2 is attached to the live tree described in Figure 1.
+
+root (‘/’)
+    |
+ testcase-data
+    |
+ test-child0 -> test-sibling1 -> test-sibling2 -> test-sibling3 -> null
+    |               |                |                |
+ test-child01      null             null             null
+
+
+allnext list:
+
+root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2
+->test-sibling3->null
+
+Figure 2: Example test data tree to be attached to live tree.
+
+In the scenario above, the live tree is already present, so it isn’t
+necessary to attach the root (‘/’) node. All other nodes are attached by
+calling of_attach_node() on each node.
+
+In the function of_attach_node(), the new node is attached as the child of the
+given parent in the live tree. But if the parent already has a child, the new
+node replaces the current child and turns it into its sibling. So, when the
+testcase data node is attached to the live tree above (Figure 1), the final
+structure is as shown in Figure 3.
+
+root (‘/’)
+   |
+testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+   |               |          |           |           |
+ (...)             |          |           |          null
+                   |          |         child31 -> sibling32 -> null
+                   |          |           |           |
+                   |          |          null        null
+                   |          |
+                   |        child21 -> sibling22 -> sibling23 -> null
+                   |          |           |            |
+                   |         null        null         null
+                   |
+                child11 -> sibling12 -> sibling13 -> sibling14 -> null
+                   |          |            |            |
+                  null       null          |           null
+                                           |
+                                        child131 -> null
+                                           |
+                                          null
+-----------------------------------------------------------------------
+
+root (‘/’)
+   |
+testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+   |               |          |           |           |
+   |             (...)      (...)       (...)        null
+   |
+test-sibling3 -> test-sibling2 -> test-sibling1 -> test-child0 -> null
+   |                |                   |                |
+  null             null                null         test-child01
+
+
+Figure 3: Live device tree structure after attaching the testcase-data.
+
+
+Astute readers will have noticed that the test-child0 node becomes the last
+sibling compared to the earlier structure (Figure 2). test-child0 is attached
+first, and then test-sibling1 is attached, which pushes the existing child
+node (i.e. test-child0) down to become a sibling and makes itself the child
+node, as described above.
+
+If a duplicate node is found (i.e. a node with the same full_name property is
+already present in the live tree), then the node isn’t attached; rather, its
+properties are merged into the live tree’s node by calling the function
+update_node_properties().
+
+
+2.2. Removing the test data
+
+Once the test case execution is complete, selftest_data_remove() is called to
+remove the device nodes attached initially (first the leaf nodes are detached,
+then, moving up, the parent nodes are removed, and eventually the whole tree).
+selftest_data_remove() calls detach_node_and_children(), which uses
+of_detach_node() to detach the nodes from the live device tree.
+
+To detach a node, of_detach_node() first updates the allnext linked list by
+pointing the previous node’s allnext to the current node’s allnext. Then it
+either updates the child pointer of the given node’s parent to point to its
+sibling, or attaches the previous sibling to the given node’s sibling, as
+appropriate. That is it :)
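
A simplified sketch of the pointer manipulation performed by of_attach_node()
as described above (an approximation for illustration only, not the exact
kernel code; locking, sysfs registration and flag handling are omitted):

	#include <linux/of.h>

	/*
	 * The new node becomes its parent's first child, the previous first
	 * child becomes the new node's sibling, and the node is spliced into
	 * the allnext list right after its parent.
	 */
	static void attach_node_sketch(struct device_node *np)
	{
		np->sibling = np->parent->child;	/* old child becomes a sibling */
		np->allnext = np->parent->allnext;	/* splice into the allnext list */
		np->parent->allnext = np;
		np->parent->child = np;			/* new node is the first child */
	}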
index c48a970..d16f424 100644 (file)
@@ -462,9 +462,9 @@ JIT compiler
 ------------
 
 The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
-ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
-transparently invoked for each attached filter from user space or for internal
-kernel users if it has been previously enabled by root:
+ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
+is transparently invoked for each attached filter from user space or for
+internal kernel users if it has been previously enabled by root:
 
   echo 1 > /proc/sys/net/core/bpf_jit_enable
 
index 670b3dc..3705430 100644 (file)
@@ -3012,9 +3012,8 @@ S:        Supported
 F:     drivers/acpi/dock.c
 
 DOCUMENTATION
-M:     Randy Dunlap <rdunlap@infradead.org>
+M:     Jiri Kosina <jkosina@suse.cz>
 L:     linux-doc@vger.kernel.org
-T:     quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:     Maintained
 F:     Documentation/
 X:     Documentation/ABI/
@@ -4477,7 +4476,6 @@ M:        Mika Westerberg <mika.westerberg@linux.intel.com>
 L:     linux-i2c@vger.kernel.org
 L:     linux-acpi@vger.kernel.org
 S:     Maintained
-F:     drivers/i2c/i2c-acpi.c
 
 I2C-TAOS-EVM DRIVER
 M:     Jean Delvare <jdelvare@suse.de>
index a192280..be79944 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index e03fbf3..b40cdad 100644 (file)
                gpmc,device-width = <2>;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <40>;
-               gpmc,cs-wr-off-ns = <40>;
+               gpmc,cs-rd-off-ns = <80>;
+               gpmc,cs-wr-off-ns = <80>;
                gpmc,adv-on-ns = <0>;
-               gpmc,adv-rd-off-ns = <30>;
-               gpmc,adv-wr-off-ns = <30>;
-               gpmc,we-on-ns = <5>;
-               gpmc,we-off-ns = <25>;
-               gpmc,oe-on-ns = <2>;
-               gpmc,oe-off-ns = <20>;
-               gpmc,access-ns = <20>;
-               gpmc,wr-access-ns = <40>;
-               gpmc,rd-cycle-ns = <40>;
-               gpmc,wr-cycle-ns = <40>;
-               gpmc,wait-pin = <0>;
-               gpmc,wait-on-read;
-               gpmc,wait-on-write;
+               gpmc,adv-rd-off-ns = <60>;
+               gpmc,adv-wr-off-ns = <60>;
+               gpmc,we-on-ns = <10>;
+               gpmc,we-off-ns = <50>;
+               gpmc,oe-on-ns = <4>;
+               gpmc,oe-off-ns = <40>;
+               gpmc,access-ns = <40>;
+               gpmc,wr-access-ns = <80>;
+               gpmc,rd-cycle-ns = <80>;
+               gpmc,wr-cycle-ns = <80>;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,clk-activation-ns = <0>;
index c6c58c1..6b675a0 100644 (file)
                                status = "disabled";
 
                                lvds-channel@0 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
                                        reg = <0>;
                                        status = "disabled";
 
-                                       port {
+                                       port@0 {
+                                               reg = <0>;
+
                                                lvds0_in: endpoint {
                                                        remote-endpoint = <&ipu_di0_lvds0>;
                                                };
                                };
 
                                lvds-channel@1 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
                                        reg = <1>;
                                        status = "disabled";
 
-                                       port {
+                                       port@1 {
+                                               reg = <1>;
+
                                                lvds1_in: endpoint {
                                                        remote-endpoint = <&ipu_di1_lvds1>;
                                                };
index 598afe9..4773d6a 100644 (file)
@@ -40,7 +40,7 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,psc-clock";
                clocks = <&chipclk16>;
-               clock-output-names = "usb";
+               clock-output-names = "usb1";
                reg = <0x02350004 0xb00>, <0x02350000 0x400>;
                reg-names = "control", "domain";
                domain-id = <0>;
@@ -60,8 +60,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,psc-clock";
                clocks = <&chipclk12>;
-               clock-output-names = "pcie";
-               reg = <0x0235006c 0xb00>, <0x02350000 0x400>;
+               clock-output-names = "pcie1";
+               reg = <0x0235006c 0xb00>, <0x02350048 0x400>;
                reg-names = "control", "domain";
                domain-id = <18>;
        };
index b8698ca..429471a 100644 (file)
                                };
 
                                ldo8_reg: ldo8 {
-                                       /* VDD_3v0: Does not go anywhere */
+                                       /* VDD_3V_GP: act led/serial console */
                                        regulator-name = "ldo8";
                                        regulator-min-microvolt = <3000000>;
                                        regulator-max-microvolt = <3000000>;
+                                       regulator-always-on;
                                        regulator-boot-on;
-                                       /* Unused */
-                                       status = "disabled";
                                };
 
                                ldo9_reg: ldo9 {
index 84acdfd..5a75cdc 100644 (file)
@@ -97,7 +97,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
        struct clk_gate2 *gate = to_clk_gate2(hw);
 
        if (gate->share_count)
-               return !!(*gate->share_count);
+               return !!__clk_get_enable_count(hw->clk);
        else
                return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
 }
@@ -127,10 +127,6 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
        gate->bit_idx = bit_idx;
        gate->flags = clk_gate2_flags;
        gate->lock = lock;
-
-       /* Initialize share_count per hardware state */
-       if (share_count)
-               *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
        gate->share_count = share_count;
 
        init.name = name;
index e7189dc..08d4167 100644 (file)
@@ -1,9 +1,6 @@
 menu "TI OMAP/AM/DM/DRA Family"
        depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
 
-config ARCH_OMAP
-       bool
-
 config ARCH_OMAP2
        bool "TI OMAP2"
        depends on ARCH_MULTI_V6
index 8fd87a3..9e91a4e 100644 (file)
@@ -2065,7 +2065,7 @@ static void _reconfigure_io_chain(void)
 
        spin_lock_irqsave(&io_chain_lock, flags);
 
-       if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl())
+       if (cpu_is_omap34xx())
                omap3xxx_prm_reconfigure_io_chain();
        else if (cpu_is_omap44xx())
                omap44xx_prm_reconfigure_io_chain();
index 2458be6..372de3e 100644 (file)
@@ -45,7 +45,7 @@ static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
        .ocp_barrier            = &omap3xxx_prm_ocp_barrier,
        .save_and_clear_irqen   = &omap3xxx_prm_save_and_clear_irqen,
        .restore_irqen          = &omap3xxx_prm_restore_irqen,
-       .reconfigure_io_chain   = &omap3xxx_prm_reconfigure_io_chain,
+       .reconfigure_io_chain   = NULL,
 };
 
 /*
@@ -369,15 +369,30 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 }
 
 /**
- * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
+ * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain
+ *
+ * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only
+ * thing we can do is toggle EN_IO bit for earlier omaps.
+ */
+void omap3430_pre_es3_1_reconfigure_io_chain(void)
+{
+       omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+                                    PM_WKEN);
+       omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+                                  PM_WKEN);
+       omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
+}
+
+/**
+ * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
  *
  * Clear any previously-latched I/O wakeup events and ensure that the
  * I/O wakeup gates are aligned with the current mux settings.  Works
  * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then
  * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit.  No
- * return value.
+ * return value. These registers are only available in 3430 es3.1 and later.
  */
-void omap3xxx_prm_reconfigure_io_chain(void)
+void omap3_prm_reconfigure_io_chain(void)
 {
        int i = 0;
 
@@ -399,6 +414,15 @@ void omap3xxx_prm_reconfigure_io_chain(void)
        omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST);
 }
 
+/**
+ * omap3xxx_prm_reconfigure_io_chain - reconfigure I/O chain
+ */
+void omap3xxx_prm_reconfigure_io_chain(void)
+{
+       if (omap3_prcm_irq_setup.reconfigure_io_chain)
+               omap3_prcm_irq_setup.reconfigure_io_chain();
+}
+
 /**
  * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
  *
@@ -656,6 +680,13 @@ static int omap3xxx_prm_late_init(void)
        if (!(prm_features & PRM_HAS_IO_WAKEUP))
                return 0;
 
+       if (omap3_has_io_chain_ctrl())
+               omap3_prcm_irq_setup.reconfigure_io_chain =
+                       omap3_prm_reconfigure_io_chain;
+       else
+               omap3_prcm_irq_setup.reconfigure_io_chain =
+                       omap3430_pre_es3_1_reconfigure_io_chain;
+
        omap3xxx_prm_enable_io_wakeup();
        ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
        if (!ret)
index 630fa91..04b013f 100644 (file)
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(get_clock_tick_rate);
 /*
  * For non device-tree builds, keep legacy timer init
  */
-void pxa_timer_init(void)
+void __init pxa_timer_init(void)
 {
        pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
                            get_clock_tick_rate());
index 02fc10d..d055db3 100644 (file)
@@ -1,3 +1,6 @@
+config ARCH_OMAP
+       bool
+
 if ARCH_OMAP
 
 menu "TI OMAP Common Features"
index 4c4ac16..b6bda18 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -6,6 +5,8 @@ CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
@@ -51,9 +52,6 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -85,7 +83,6 @@ CONFIG_EXT3_FS=y
 CONFIG_XFS_FS=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -95,17 +92,13 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
 CONFIG_CIFS=m
 CONFIG_CIFS_STATS=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=m
index e8ed3ae..81f686d 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -6,13 +5,13 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=20
 CONFIG_CGROUPS=y
 CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
 CONFIG_IA64_CYCLONE=y
@@ -29,14 +28,13 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -82,16 +80,13 @@ CONFIG_FUSION_FC=m
 CONFIG_FUSION_SAS=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
 CONFIG_IGB=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -151,6 +146,7 @@ CONFIG_USB_STORAGE=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INTEL_IOMMU=y
 CONFIG_MSPEC=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -164,7 +160,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -175,16 +170,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -225,11 +214,7 @@ CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
-CONFIG_INTEL_IOMMU=y
index d663efd..5b4fcdd 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -9,6 +8,8 @@ CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_CYCLONE=y
 CONFIG_SMP=y
@@ -24,14 +25,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -71,15 +70,12 @@ CONFIG_FUSION_SPI=y
 CONFIG_FUSION_FC=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -146,7 +142,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -157,16 +152,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
index b4548a3..f0f69fd 100644 (file)
@@ -1,13 +1,12 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_SIM=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=y
@@ -49,8 +47,6 @@ CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
index c8a3f40..192ed15 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -11,6 +10,8 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
 CONFIG_IA64_DIG=y
 CONFIG_MCKINLEY=y
 CONFIG_IA64_PAGE_SIZE_64KB=y
@@ -29,14 +30,12 @@ CONFIG_BINFMT_MISC=m
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-CONFIG_ARPD=y
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_BLK_DEV_LOOP=m
@@ -53,6 +52,7 @@ CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_MD=y
@@ -72,15 +72,12 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
+CONFIG_NETCONSOLE=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=m
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=y
-CONFIG_TIGON3=y
-CONFIG_NETCONSOLE=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_GAMEPORT=m
 CONFIG_SERIAL_NONSTANDARD=y
@@ -118,7 +115,6 @@ CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -129,16 +125,10 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
 CONFIG_NFSD=m
 CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
 CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
 CONFIG_NLS_CODEPAGE_775=m
@@ -180,6 +170,5 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_IA64_GRANULE_16MB=y
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
index 54bc72e..b504c8e 100644 (file)
@@ -1,9 +1,9 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_IA64_HP_ZX1=y
 CONFIG_MCKINLEY=y
 CONFIG_SMP=y
@@ -18,6 +18,7 @@ CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -37,9 +38,9 @@ CONFIG_CHR_DEV_OSST=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_FUSION=y
@@ -48,18 +49,15 @@ CONFIG_FUSION_FC=y
 CONFIG_FUSION_CTL=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
+CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_TULIP=y
 CONFIG_TULIP_MWI=y
 CONFIG_TULIP_MMIO=y
 CONFIG_TULIP_NAPI=y
 CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_NET_PCI=y
-CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=y
 CONFIG_E1000=y
-CONFIG_TIGON3=y
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,7 +98,6 @@ CONFIG_USB_STORAGE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_UDF_FS=y
@@ -110,12 +107,9 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=y
 CONFIG_NLS_CODEPAGE_775=y
index 8f219da..e24feb0 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
 CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
index cc07560..48e16d9 100644 (file)
@@ -28,6 +28,7 @@ CONFIG_MIPS32_COMPAT=y
 CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 2575302..4f37a59 100644 (file)
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=m
 CONFIG_UNIX=y
 CONFIG_NET_KEY=m
index 4cb787f..1c6191e 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_MIPS32_COMPAT=y
 CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index e18741e..f57b96d 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index cf0e01f..d41742d 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index edd9ec9..a7806e8 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index d269a53..9b6926d 100644 (file)
@@ -27,6 +27,7 @@ CONFIG_PD6729=m
 CONFIG_I82092=m
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=m
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 2f660e9..70509a4 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_PM_RUNTIME=y
 CONFIG_PM_DEBUG=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index c6f8465..82207e8 100644 (file)
@@ -43,6 +43,7 @@ CONFIG_PCI_DEBUG=y
 CONFIG_BINFMT_MISC=m
 CONFIG_PM_RUNTIME=y
 CONFIG_PM_DEBUG=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 29d79ae..db029f4 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_MODVERSIONS=y
 CONFIG_PCI=y
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
+CONFIG_NET=y
 CONFIG_PACKET=m
 CONFIG_UNIX=y
 CONFIG_NET_KEY=m
index 5d25462..2f7c734 100644 (file)
@@ -129,7 +129,11 @@ NESTED(_mcount, PT_SIZE, ra)
         nop
 #endif
        b       ftrace_stub
+#ifdef CONFIG_32BIT
+        addiu sp, sp, 8
+#else
         nop
+#endif
 
 static_trace:
        MCOUNT_SAVE_REGS
@@ -139,6 +143,9 @@ static_trace:
         move   a1, AT          /* arg2: parent's return address */
 
        MCOUNT_RESTORE_REGS
+#ifdef CONFIG_32BIT
+       addiu sp, sp, 8
+#endif
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
@@ -183,6 +190,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
        jal     prepare_ftrace_return
         nop
        MCOUNT_RESTORE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_32BIT
+       addiu sp, sp, 8
+#endif
+#endif
        RETURN_BACK
        END(ftrace_graph_caller)
 
index bf0fc6b..7a47277 100644 (file)
@@ -650,9 +650,9 @@ static inline int cop1_64bit(struct pt_regs *xcp)
 #define SIFROMREG(si, x)                                               \
 do {                                                                   \
        if (cop1_64bit(xcp))                                            \
-               (si) = get_fpr32(&ctx->fpr[x], 0);                      \
+               (si) = (int)get_fpr32(&ctx->fpr[x], 0);                 \
        else                                                            \
-               (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1);         \
+               (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1);    \
 } while (0)
 
 #define SITOREG(si, x)                                                 \
@@ -667,7 +667,7 @@ do {                                                                        \
        }                                                               \
 } while (0)
 
-#define SIFROMHREG(si, x)      ((si) = get_fpr32(&ctx->fpr[x], 1))
+#define SIFROMHREG(si, x)      ((si) = (int)get_fpr32(&ctx->fpr[x], 1))
 
 #define SITOHREG(si, x)                                                        \
 do {                                                                   \
index 571aab0..f42e35e 100644 (file)
@@ -53,6 +53,7 @@
  */
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 /*
  * Not static inline because used by IP27 special magic initialization code
index 7187664..5db8882 100644 (file)
@@ -48,7 +48,12 @@ cflags-y     := -pipe
 
 # These flags should be implied by an hppa-linux configuration, but they
 # are not in gcc 3.2.
-cflags-y       += -mno-space-regs -mfast-indirect-calls
+cflags-y       += -mno-space-regs
+
+# -mfast-indirect-calls is only relevant for 32-bit kernels.
+ifndef CONFIG_64BIT
+cflags-y       += -mfast-indirect-calls
+endif
 
 # Currently we save and restore fpregs on all kernel entry/interruption paths.
 # If that gets optimized, we might need to disable the use of fpregs in the
index 9002532..0490199 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_PD6729=m
 CONFIG_I82092=m
 # CONFIG_SUPERIO is not set
 # CONFIG_CHASSIS_LCD_LED is not set
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 8249ac9..269c23d 100644 (file)
@@ -33,6 +33,7 @@ CONFIG_PCI_LBA=y
 # CONFIG_PDC_CHASSIS_WARN is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 3bab724..92438c2 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/user.h>
 #include <linux/personality.h>
 #include <linux/security.h>
+#include <linux/seccomp.h>
 #include <linux/compat.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
@@ -271,10 +272,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
        long ret = 0;
 
        /* Do the secure computing check first. */
-       if (secure_computing(regs->gr[20])) {
-               /* seccomp failures shouldn't expose any additional code. */
-               return -1;
-       }
+       secure_computing_strict(regs->gr[20]);
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
index 5e2aa43..5973491 100644 (file)
@@ -29,6 +29,7 @@ CONFIG_PM=y
 CONFIG_PCI_MSI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index 553e662..0351b5f 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_HIBERNATION=y
 CONFIG_APM_EMULATION=y
 CONFIG_PCCARD=m
 CONFIG_YENTA=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index f6c02f8..3651887 100644 (file)
@@ -58,6 +58,7 @@ CONFIG_ELECTRA_CF=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 587f551..c3a3269 100644 (file)
@@ -33,6 +33,7 @@ CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_PCI_MSI=y
 CONFIG_PCCARD=y
 CONFIG_HOTPLUG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 50375f1..dd2a9ca 100644 (file)
@@ -53,6 +53,7 @@ CONFIG_SCHED_SMT=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 4428ee4..63392f4 100644 (file)
@@ -55,6 +55,7 @@ CONFIG_SCHED_SMT=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_RPA=m
 CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 3ca1894..9d94fdd 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
index 4830aa6..90f514b 100644 (file)
@@ -61,6 +61,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
index 61db449..13559d3 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
index 948e0e0..e376789 100644 (file)
@@ -23,6 +23,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SECCOMP is not set
 # CONFIG_IUCV is not set
+CONFIG_NET=y
 CONFIG_ATM=y
 CONFIG_ATM_LANE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
index 2e56498..fab35a8 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_CMA=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_NET_KEY=y
index 0c1073e..c7235e0 100644 (file)
@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
index 6a96b9a..bbd4c22 100644 (file)
@@ -30,6 +30,7 @@ CONFIG_PCI_DEBUG=y
 CONFIG_PCCARD=y
 CONFIG_YENTA=y
 CONFIG_HOTPLUG_PCI=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
index e741b1e..df25ae7 100644 (file)
@@ -25,6 +25,7 @@ CONFIG_CMDLINE_OVERWRITE=y
 CONFIG_CMDLINE="console=ttySC1,115200 ip=dhcp root=/dev/nfs rw nfsroot=/nfs/rootfs,rsize=1024,wsize=1024 earlyprintk=sh-sci.1"
 CONFIG_PCCARD=y
 CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
index 9d8521b..6b68f12 100644 (file)
@@ -29,6 +29,7 @@ CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_SUN_OPENPROMFS=m
 CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
index 9d016c7..8c83f4b 100644 (file)
@@ -6,10 +6,12 @@
 #define SAVE_SZ                176
 #define SCRATCH_OFF    STACK_BIAS + 128
 #define BE_PTR(label)  be,pn %xcc, label
+#define SIGN_EXTEND(reg)       sra reg, 0, reg
 #else
 #define SAVE_SZ                96
 #define SCRATCH_OFF    72
 #define BE_PTR(label)  be label
+#define SIGN_EXTEND(reg)
 #endif
 
 #define SKF_MAX_NEG_OFF        (-0x200000) /* SKF_LL_OFF from filter.h */
@@ -135,6 +137,7 @@ bpf_slow_path_byte_msh:
        save    %sp, -SAVE_SZ, %sp;                     \
        mov     %i0, %o0;                               \
        mov     r_OFF, %o1;                             \
+       SIGN_EXTEND(%o1);                               \
        call    bpf_internal_load_pointer_neg_helper;   \
         mov    (LEN), %o2;                             \
        mov     %o0, r_TMP;                             \
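
The SIGN_EXTEND() hunk above matters because the 64-bit JIT hands the offset to a C helper in a full 64-bit register, and negative offsets (see SKF_MAX_NEG_OFF in the context) must not be zero-extended. A standalone illustration, plain C rather than sparc assembly, of the value the "sra reg, 0, reg" idiom preserves:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t k = -0x1000;              /* a negative packet offset */
	uint64_t zext = (uint32_t)k;      /* 0x00000000fffff000 - bogus huge offset */
	int64_t  sext = k;                /* 0xfffffffffffff000 - the intended value */

	printf("zero-extended %#llx, sign-extended %#llx\n",
	       (unsigned long long)zext, (unsigned long long)sext);
	return 0;
}

On sparc64, "sra %o1, 0, %o1" performs exactly this 32-to-64-bit sign extension before bpf_internal_load_pointer_neg_helper() is called.
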
index 1f76c22..ece4af0 100644 (file)
@@ -184,7 +184,7 @@ do {                                                                \
         */
 #define emit_alu_K(OPCODE, K)                                  \
 do {                                                           \
-       if (K) {                                                \
+       if (K || OPCODE == AND || OPCODE == MUL) {              \
                unsigned int _insn = OPCODE;                    \
                _insn |= RS1(r_A) | RD(r_A);                    \
                if (is_simm13(K)) {                             \
@@ -234,12 +234,18 @@ do {      BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));        \
        __emit_load8(BASE, STRUCT, FIELD, DEST);                        \
 } while (0)
 
-#define emit_ldmem(OFF, DEST)                                  \
-do {   *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);     \
+#ifdef CONFIG_SPARC64
+#define BIAS (STACK_BIAS - 4)
+#else
+#define BIAS (-4)
+#endif
+
+#define emit_ldmem(OFF, DEST)                                          \
+do {   *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST);       \
 } while (0)
 
-#define emit_stmem(OFF, SRC)                                   \
-do {   *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC);      \
+#define emit_stmem(OFF, SRC)                                           \
+do {   *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC);        \
 } while (0)
 
 #ifdef CONFIG_SMP
@@ -615,10 +621,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
                        case BPF_ANC | SKF_AD_VLAN_TAG:
                        case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                                emit_skb_load16(vlan_tci, r_A);
-                               if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
-                                       emit_andi(r_A, VLAN_VID_MASK, r_A);
+                               if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                                       emit_alu_K(SRL, 12);
+                                       emit_andi(r_A, 1, r_A);
                                } else {
-                                       emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
+                                       emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
                                        emit_and(r_A, r_TMP, r_A);
                                }
                                break;
@@ -630,15 +637,19 @@ void bpf_jit_compile(struct bpf_prog *fp)
                                emit_loadimm(K, r_X);
                                break;
                        case BPF_LD | BPF_MEM:
+                               seen |= SEEN_MEM;
                                emit_ldmem(K * 4, r_A);
                                break;
                        case BPF_LDX | BPF_MEM:
+                               seen |= SEEN_MEM | SEEN_XREG;
                                emit_ldmem(K * 4, r_X);
                                break;
                        case BPF_ST:
+                               seen |= SEEN_MEM;
                                emit_stmem(K * 4, r_A);
                                break;
                        case BPF_STX:
+                               seen |= SEEN_MEM | SEEN_XREG;
                                emit_stmem(K * 4, r_X);
                                break;
 
index 7a801a3..0fcd913 100644 (file)
@@ -33,8 +33,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
 ifeq ($(CONFIG_EFI_STUB), y)
-       VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
-                               $(objtree)/drivers/firmware/efi/libstub/lib.a
+       VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
 endif
 
 $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE
index fc6091a..d39189b 100644 (file)
@@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 static bool mem_avoid_overlap(struct mem_vector *img)
 {
        int i;
+       struct setup_data *ptr;
 
        for (i = 0; i < MEM_AVOID_MAX; i++) {
                if (mem_overlaps(img, &mem_avoid[i]))
                        return true;
        }
 
+       /* Avoid all entries in the setup_data linked list. */
+       ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
+       while (ptr) {
+               struct mem_vector avoid;
+
+               avoid.start = (u64)ptr;
+               avoid.size = sizeof(*ptr) + ptr->len;
+
+               if (mem_overlaps(img, &avoid))
+                       return true;
+
+               ptr = (struct setup_data *)(unsigned long)ptr->next;
+       }
+
        return false;
 }
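
A self-contained sketch of the walk this hunk adds, with the kernel structures reduced to minimal stand-ins (field names kept, everything else simplified and hypothetical): each setup_data node, header plus payload, becomes one more region the randomized kernel image must avoid.

#include <stdbool.h>
#include <stdint.h>

struct setup_data {                       /* reduced stand-in */
	uint64_t next;                    /* physical address of the next node, 0 ends the list */
	uint32_t type;
	uint32_t len;                     /* payload length following this header */
	uint8_t  data[];
};

struct mem_vector {                       /* reduced stand-in */
	uint64_t start;
	uint64_t size;
};

static bool mem_overlaps(const struct mem_vector *a, const struct mem_vector *b)
{
	return a->start < b->start + b->size && b->start < a->start + a->size;
}

/* Walk the singly linked list exactly as the patch does. */
bool overlaps_setup_data(const struct mem_vector *img, uint64_t list_head)
{
	const struct setup_data *ptr = (const struct setup_data *)(uintptr_t)list_head;

	while (ptr) {
		struct mem_vector avoid = {
			.start = (uint64_t)(uintptr_t)ptr,
			.size  = sizeof(*ptr) + ptr->len,
		};

		if (mem_overlaps(img, &avoid))
			return true;

		ptr = (const struct setup_data *)(uintptr_t)ptr->next;
	}
	return false;
}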
 
index dca9842..de8eebd 100644 (file)
 
 static efi_system_table_t *sys_table;
 
-struct efi_config *efi_early;
+static struct efi_config *efi_early;
+
+#define efi_call_early(f, ...)                                         \
+       efi_early->call(efi_early->f, __VA_ARGS__);
 
 #define BOOT_SERVICES(bits)                                            \
 static void setup_boot_services##bits(struct efi_config *c)            \
@@ -265,21 +268,25 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 
                offset = offsetof(typeof(*out), output_string);
                output_string = efi_early->text_output + offset;
+               out = (typeof(out))(unsigned long)efi_early->text_output;
                func = (u64 *)output_string;
 
-               efi_early->call(*func, efi_early->text_output, str);
+               efi_early->call(*func, out, str);
        } else {
                struct efi_simple_text_output_protocol_32 *out;
                u32 *func;
 
                offset = offsetof(typeof(*out), output_string);
                output_string = efi_early->text_output + offset;
+               out = (typeof(out))(unsigned long)efi_early->text_output;
                func = (u32 *)output_string;
 
-               efi_early->call(*func, efi_early->text_output, str);
+               efi_early->call(*func, out, str);
        }
 }
 
+#include "../../../../drivers/firmware/efi/libstub/efi-stub-helper.c"
+
 static void find_bits(unsigned long mask, u8 *pos, u8 *size)
 {
        u8 first, len;
@@ -360,7 +367,7 @@ free_struct:
        return status;
 }
 
-static efi_status_t
+static void
 setup_efi_pci32(struct boot_params *params, void **pci_handle,
                unsigned long size)
 {
@@ -403,8 +410,6 @@ setup_efi_pci32(struct boot_params *params, void **pci_handle,
                data = (struct setup_data *)rom;
 
        }
-
-       return status;
 }
 
 static efi_status_t
@@ -463,7 +468,7 @@ free_struct:
 
 }
 
-static efi_status_t
+static void
 setup_efi_pci64(struct boot_params *params, void **pci_handle,
                unsigned long size)
 {
@@ -506,11 +511,18 @@ setup_efi_pci64(struct boot_params *params, void **pci_handle,
                data = (struct setup_data *)rom;
 
        }
-
-       return status;
 }
 
-static efi_status_t setup_efi_pci(struct boot_params *params)
+/*
+ * There's no way to return an informative status from this function,
+ * because any analysis (and printing of error messages) needs to be
+ * done directly at the EFI function call-site.
+ *
+ * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we
+ * just didn't find any PCI devices, but there's no way to tell outside
+ * the context of the call.
+ */
+static void setup_efi_pci(struct boot_params *params)
 {
        efi_status_t status;
        void **pci_handle = NULL;
@@ -527,7 +539,7 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                                        size, (void **)&pci_handle);
 
                if (status != EFI_SUCCESS)
-                       return status;
+                       return;
 
                status = efi_call_early(locate_handle,
                                        EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -538,13 +550,12 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                goto free_handle;
 
        if (efi_early->is64)
-               status = setup_efi_pci64(params, pci_handle, size);
+               setup_efi_pci64(params, pci_handle, size);
        else
-               status = setup_efi_pci32(params, pci_handle, size);
+               setup_efi_pci32(params, pci_handle, size);
 
 free_handle:
        efi_call_early(free_pool, pci_handle);
-       return status;
 }
 
 static void
@@ -1380,10 +1391,7 @@ struct boot_params *efi_main(struct efi_config *c,
 
        setup_graphics(boot_params);
 
-       status = setup_efi_pci(boot_params);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "setup_efi_pci() failed!\n");
-       }
+       setup_efi_pci(boot_params);
 
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                sizeof(*gdt), (void **)&gdt);
index d487e72..c88c31e 100644 (file)
@@ -103,4 +103,20 @@ struct efi_uga_draw_protocol {
        void *blt;
 };
 
+struct efi_config {
+       u64 image_handle;
+       u64 table;
+       u64 allocate_pool;
+       u64 allocate_pages;
+       u64 get_memory_map;
+       u64 free_pool;
+       u64 free_pages;
+       u64 locate_handle;
+       u64 handle_protocol;
+       u64 exit_boot_services;
+       u64 text_output;
+       efi_status_t (*call)(unsigned long, ...);
+       bool is64;
+} __packed;
+
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index d6b8aa4..cbed140 100644 (file)
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>
 
-/*
- * Adjust our own GOT
- *
- * The relocation base must be in %ebx
- *
- * It is safe to call this macro more than once, because in some of the
- * code paths multiple invocations are inevitable, e.g. via the efi*
- * entry points.
- *
- * Relocation is only performed the first time.
- */
-.macro FIXUP_GOT
-       cmpb    $1, got_fixed(%ebx)
-       je      2f
-
-       leal    _got(%ebx), %edx
-       leal    _egot(%ebx), %ecx
-1:
-       cmpl    %ecx, %edx
-       jae     2f
-       addl    %ebx, (%edx)
-       addl    $4, %edx
-       jmp     1b
-2:
-       movb    $1, got_fixed(%ebx)
-.endm
-
        __HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
@@ -83,9 +56,6 @@ ENTRY(efi_pe_entry)
        add     %esi, 88(%eax)
        pushl   %eax
 
-       movl    %esi, %ebx
-       FIXUP_GOT
-
        call    make_boot_params
        cmpl    $0, %eax
        je      fail
@@ -111,10 +81,6 @@ ENTRY(efi32_stub_entry)
        leal    efi32_config(%esi), %eax
        add     %esi, 88(%eax)
        pushl   %eax
-
-       movl    %esi, %ebx
-       FIXUP_GOT
-
 2:
        call    efi_main
        cmpl    $0, %eax
@@ -224,7 +190,19 @@ relocated:
        shrl    $2, %ecx
        rep     stosl
 
-       FIXUP_GOT
+/*
+ * Adjust our own GOT
+ */
+       leal    _got(%ebx), %edx
+       leal    _egot(%ebx), %ecx
+1:
+       cmpl    %ecx, %edx
+       jae     2f
+       addl    %ebx, (%edx)
+       addl    $4, %edx
+       jmp     1b
+2:
+
 /*
  * Do the decompression, and jump to the new kernel..
  */
@@ -247,12 +225,8 @@ relocated:
        xorl    %ebx, %ebx
        jmp     *%eax
 
-       .data
-/* Have we relocated the GOT? */
-got_fixed:
-       .byte 0
-
 #ifdef CONFIG_EFI_STUB
+       .data
 efi32_config:
        .fill 11,8,0
        .long efi_call_phys
index 50f69c7..2884e0c 100644 (file)
 #include <asm/processor-flags.h>
 #include <asm/asm-offsets.h>
 
-/*
- * Adjust our own GOT
- *
- * The relocation base must be in %rbx
- *
- * It is safe to call this macro more than once, because in some of the
- * code paths multiple invocations are inevitable, e.g. via the efi*
- * entry points.
- *
- * Relocation is only performed the first time.
- */
-.macro FIXUP_GOT
-       cmpb    $1, got_fixed(%rip)
-       je      2f
-
-       leaq    _got(%rip), %rdx
-       leaq    _egot(%rip), %rcx
-1:
-       cmpq    %rcx, %rdx
-       jae     2f
-       addq    %rbx, (%rdx)
-       addq    $8, %rdx
-       jmp     1b
-2:
-       movb    $1, got_fixed(%rip)
-.endm
-
        __HEAD
        .code32
 ENTRY(startup_32)
@@ -279,13 +252,10 @@ ENTRY(efi_pe_entry)
        subq    $1b, %rbp
 
        /*
-        * Relocate efi_config->call() and the GOT entries.
+        * Relocate efi_config->call().
         */
        addq    %rbp, efi64_config+88(%rip)
 
-       movq    %rbp, %rbx
-       FIXUP_GOT
-
        movq    %rax, %rdi
        call    make_boot_params
        cmpq    $0,%rax
@@ -301,13 +271,10 @@ handover_entry:
        subq    $1b, %rbp
 
        /*
-        * Relocate efi_config->call() and the GOT entries.
+        * Relocate efi_config->call().
         */
        movq    efi_config(%rip), %rax
        addq    %rbp, 88(%rax)
-
-       movq    %rbp, %rbx
-       FIXUP_GOT
 2:
        movq    efi_config(%rip), %rdi
        call    efi_main
@@ -418,8 +385,19 @@ relocated:
        shrq    $3, %rcx
        rep     stosq
 
-       FIXUP_GOT
-
+/*
+ * Adjust our own GOT
+ */
+       leaq    _got(%rip), %rdx
+       leaq    _egot(%rip), %rcx
+1:
+       cmpq    %rcx, %rdx
+       jae     2f
+       addq    %rbx, (%rdx)
+       addq    $8, %rdx
+       jmp     1b
+2:
+       
 /*
  * Do the decompression, and jump to the new kernel..
  */
@@ -459,10 +437,6 @@ gdt:
        .quad   0x0000000000000000      /* TS continued */
 gdt_end:
 
-/* Have we relocated the GOT? */
-got_fixed:
-       .byte   0
-
 #ifdef CONFIG_EFI_STUB
 efi_config:
        .quad   0
index 888950f..a7ccd57 100644 (file)
@@ -481,7 +481,7 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
+#if 0  /* temporarily disabled due to failing crypto tests */
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
 {
@@ -1522,7 +1522,7 @@ static int __init aesni_init(void)
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
+#if 0  /* temporarily disabled due to failing crypto tests */
        if (cpu_has_avx) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
index 044a2fd..0ec241e 100644 (file)
@@ -159,30 +159,6 @@ static inline efi_status_t efi_thunk_set_virtual_address_map(
 }
 #endif /* CONFIG_EFI_MIXED */
 
-
-/* arch specific definitions used by the stub code */
-
-struct efi_config {
-       u64 image_handle;
-       u64 table;
-       u64 allocate_pool;
-       u64 allocate_pages;
-       u64 get_memory_map;
-       u64 free_pool;
-       u64 free_pages;
-       u64 locate_handle;
-       u64 handle_protocol;
-       u64 exit_boot_services;
-       u64 text_output;
-       efi_status_t (*call)(unsigned long, ...);
-       bool is64;
-} __packed;
-
-extern struct efi_config *efi_early;
-
-#define efi_call_early(f, ...)                                         \
-       efi_early->call(efi_early->f, __VA_ARGS__);
-
 extern bool efi_reboot_required(void);
 
 #else
index b0910f9..ffb1733 100644 (file)
@@ -106,14 +106,14 @@ enum fixed_addresses {
        __end_of_permanent_fixed_addresses,
 
        /*
-        * 256 temporary boot-time mappings, used by early_ioremap(),
+        * 512 temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         *
-        * If necessary we round it up to the next 256 pages boundary so
+        * If necessary we round it up to the next 512 pages boundary so
         * that we can have a single pgd entry and a single pte table:
         */
 #define NR_FIX_BTMAPS          64
-#define FIX_BTMAPS_SLOTS       4
+#define FIX_BTMAPS_SLOTS       8
 #define TOTAL_FIX_BTMAPS       (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
        FIX_BTMAP_END =
         (__end_of_permanent_fixed_addresses ^
index 2d872e0..42a2dca 100644 (file)
@@ -1284,6 +1284,9 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, cpu_sibling_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+       for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
+               cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
+       cpumask_clear(cpu_llc_shared_mask(cpu));
        cpumask_clear(cpu_sibling_mask(cpu));
        cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
index f4d27b1..9924725 100644 (file)
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        bool is_pm_resume;
 
        WARN_ON(irqs_disabled());
+       WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
        rq->rq_disk = bd_disk;
        rq->end_io = done;
index 383ea0c..df8e1e0 100644 (file)
@@ -119,7 +119,16 @@ void blk_mq_freeze_queue(struct request_queue *q)
        spin_unlock_irq(q->queue_lock);
 
        if (freeze) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               /*
+                * XXX: Temporary kludge to work around SCSI blk-mq stall.
+                * SCSI synchronously creates and destroys many queues
+                * back-to-back during probe leading to lengthy stalls.
+                * This will be fixed by keeping ->mq_usage_counter in
+                * atomic mode until genhd registration, but, for now,
+                * let's work around using expedited synchronization.
+                */
+               __percpu_ref_kill_expedited(&q->mq_usage_counter);
+
                blk_mq_run_queues(q, false);
        }
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
@@ -203,7 +212,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];
 
-               rq->cmd_flags = 0;
                if (blk_mq_tag_busy(data->hctx)) {
                        rq->cmd_flags = REQ_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
@@ -258,6 +266,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
+       rq->cmd_flags = 0;
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -392,6 +401,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 
        blk_add_timer(rq);
 
+       /*
+        * Ensure that ->deadline is visible before setting the started
+        * flag and clearing the completed flag.
+        */
+       smp_mb__before_atomic();
+
        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
@@ -473,7 +488,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
                blk_mq_insert_request(rq, false, false, false);
        }
 
-       blk_mq_run_queues(q, false);
+       /*
+        * Use the start variant of queue running here, so that running
+        * the requeue work will kick stopped queues.
+        */
+       blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -957,14 +976,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-           !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-               blk_insert_flush(rq);
-       } else {
-               spin_lock(&ctx->lock);
-               __blk_mq_insert_request(hctx, rq, at_head);
-               spin_unlock(&ctx->lock);
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, at_head);
+       spin_unlock(&ctx->lock);
 
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
@@ -1404,6 +1418,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
+                       tags->rqs[i]->atomic_flags = 0;
+                       tags->rqs[i]->cmd_flags = 0;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
@@ -1956,7 +1972,6 @@ out_unwind:
        while (--i >= 0)
                blk_mq_free_rq_map(set, set->tags[i], i);
 
-       set->tags = NULL;
        return -ENOMEM;
 }
 
index 09da5e4..e6723bd 100644 (file)
@@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
  */
 void blk_free_devt(dev_t devt)
 {
-       might_sleep();
-
        if (devt == MKDEV(0, 0))
                return;
 
index fddc1e8..b0ea767 100644 (file)
@@ -419,7 +419,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev);
        if (!IS_ERR_OR_NULL(pdev)) {
-               device_enable_async_suspend(&pdev->dev);
                return 1;
        }
 
index 1f9aba5..2747279 100644 (file)
@@ -254,6 +254,7 @@ struct acpi_create_field_info {
        u32 field_bit_position;
        u32 field_bit_length;
        u16 resource_length;
+       u16 pin_number_index;
        u8 field_flags;
        u8 attribute;
        u8 field_type;
index 22fb644..8abb393 100644 (file)
@@ -264,6 +264,7 @@ struct acpi_object_region_field {
        ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
        union acpi_operand_object *region_obj;  /* Containing op_region object */
        u8 *resource_buffer;    /* resource_template for serial regions/fields */
+       u16 pin_number_index;   /* Index relative to previous Connection/Template */
 };
 
 struct acpi_object_bank_field {
index 3661c8e..c576661 100644 (file)
@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                         */
                        info->resource_buffer = NULL;
                        info->connection_node = NULL;
+                       info->pin_number_index = 0;
 
                        /*
                         * A Connection() is either an actual resource descriptor (buffer)
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                        }
 
                        info->field_bit_position += info->field_bit_length;
+                       info->pin_number_index++;       /* Index relative to previous Connection() */
                        break;
 
                default:
index 9957297..8eb8575 100644 (file)
@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        union acpi_operand_object *region_obj2;
        void *region_context = NULL;
        struct acpi_connection_info *context;
+       acpi_physical_address address;
 
        ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
 
@@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        /* We have everything we need, we can invoke the address space handler */
 
        handler = handler_desc->address_space.handler;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-                         "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
-                         &region_obj->region.handler->address_space, handler,
-                         ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
-                                                 region_offset),
-                         acpi_ut_get_region_name(region_obj->region.
-                                                 space_id)));
+       address = (region_obj->region.address + region_offset);
 
        /*
         * Special handling for generic_serial_bus and general_purpose_io:
         * There are three extra parameters that must be passed to the
         * handler via the context:
-        *   1) Connection buffer, a resource template from Connection() op.
-        *   2) Length of the above buffer.
-        *   3) Actual access length from the access_as() op.
+        *   1) Connection buffer, a resource template from Connection() op
+        *   2) Length of the above buffer
+        *   3) Actual access length from the access_as() op
+        *
+        * In addition, for general_purpose_io, the Address and bit_width fields
+        * are defined as follows:
+        *   1) Address is the pin number index of the field (bit offset from
+        *      the previous Connection)
+        *   2) bit_width is the actual bit length of the field (number of pins)
         */
-       if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
-            (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+       if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
            context && field_obj) {
 
                /* Get the Connection (resource_template) buffer */
@@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                context->length = field_obj->field.resource_length;
                context->access_length = field_obj->field.access_length;
        }
+       if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+           context && field_obj) {
+
+               /* Get the Connection (resource_template) buffer */
+
+               context->connection = field_obj->field.resource_buffer;
+               context->length = field_obj->field.resource_length;
+               context->access_length = field_obj->field.access_length;
+               address = field_obj->field.pin_number_index;
+               bit_width = field_obj->field.bit_length;
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+                         "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+                         &region_obj->region.handler->address_space, handler,
+                         ACPI_FORMAT_NATIVE_UINT(address),
+                         acpi_ut_get_region_name(region_obj->region.
+                                                 space_id)));
 
        if (!(handler_desc->address_space.handler_flags &
              ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
@@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 
        /* Call the handler */
 
-       status = handler(function,
-                        (region_obj->region.address + region_offset),
-                        bit_width, value, context,
+       status = handler(function, address, bit_width, value, context,
                         region_obj2->extra.region_context);
 
        if (ACPI_FAILURE(status)) {
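
Under the convention described in the comment above, a GeneralPurposeIo (ACPI_ADR_SPACE_GPIO) handler now receives the pin index in 'address' and the number of pins in 'bit_width'. A hypothetical handler skeleton, using the standard address-space handler signature but with made-up demo_* pin accessors standing in for a driver's real GPIO plumbing:

#include <linux/acpi.h>

/* Hypothetical pin accessors - placeholders, not a real kernel API. */
int demo_read_pin(u32 pin);
void demo_write_pin(u32 pin, int level);

static acpi_status demo_gpio_region_handler(u32 function,
					    acpi_physical_address address,
					    u32 bit_width, u64 *value,
					    void *handler_context,
					    void *region_context)
{
	u32 first_pin = (u32)address;     /* offset from the preceding Connection() */
	u32 i;

	if (function == ACPI_READ)
		*value = 0;

	for (i = 0; i < bit_width; i++) { /* bit_width == number of pins in the field */
		u32 pin = first_pin + i;

		if (function == ACPI_WRITE)
			demo_write_pin(pin, (*value >> i) & 1);
		else
			*value |= (u64)demo_read_pin(pin) << i;
	}

	return AE_OK;
}

Such a handler would be registered for ACPI_ADR_SPACE_GPIO with acpi_install_address_space_handler(); the in-tree consumer of this convention is acpi_gpio_adr_space_handler() in drivers/gpio/gpiolib-acpi.c, patched later in this merge.
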
index 6907ce0..b994845 100644 (file)
@@ -253,6 +253,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
                buffer = &buffer_desc->integer.value;
        }
 
+       if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+           (obj_desc->field.region_obj->region.space_id ==
+            ACPI_ADR_SPACE_GPIO)) {
+               /*
+                * For GPIO (general_purpose_io), the Address will be the bit offset
+                * from the previous Connection() operator, making it effectively a
+                * pin number index. The bit_length is the length of the field, which
+                * is thus the number of pins.
+                */
+               ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+                                 "GPIO FieldRead [FROM]:  Pin %u Bits %u\n",
+                                 obj_desc->field.pin_number_index,
+                                 obj_desc->field.bit_length));
+
+               /* Lock entire transaction if requested */
+
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+               /* Perform the read */
+
+               status = acpi_ex_access_region(obj_desc, 0,
+                                              (u64 *)buffer, ACPI_READ);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+               if (ACPI_FAILURE(status)) {
+                       acpi_ut_remove_reference(buffer_desc);
+               } else {
+                       *ret_buffer_desc = buffer_desc;
+               }
+               return_ACPI_STATUS(status);
+       }
+
        ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
                          "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
                          obj_desc, obj_desc->common.type, buffer,
@@ -413,6 +444,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
                *result_desc = buffer_desc;
                return_ACPI_STATUS(status);
+       } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+                  (obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_GPIO)) {
+               /*
+                * For GPIO (general_purpose_io), we will bypass the entire field
+                * mechanism and hand off the bit address and bit width directly to
+                * the handler. The Address will be the bit offset
+                * from the previous Connection() operator, making it effectively a
+                * pin number index. The bit_length is the length of the field, which
+                * is thus the number of pins.
+                */
+               if (source_desc->common.type != ACPI_TYPE_INTEGER) {
+                       return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+               }
+
+               ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+                                 "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]:  Pin %u Bits %u\n",
+                                 acpi_ut_get_type_name(source_desc->common.
+                                                       type),
+                                 source_desc->common.type,
+                                 (u32)source_desc->integer.value,
+                                 obj_desc->field.pin_number_index,
+                                 obj_desc->field.bit_length));
+
+               buffer = &source_desc->integer.value;
+
+               /* Lock entire transaction if requested */
+
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+               /* Perform the write */
+
+               status = acpi_ex_access_region(obj_desc, 0,
+                                              (u64 *)buffer, ACPI_WRITE);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+               return_ACPI_STATUS(status);
        }
 
        /* Get a pointer to the data to be written */
index ee3f872..118e942 100644 (file)
@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                        obj_desc->field.resource_length = info->resource_length;
                }
 
+               obj_desc->field.pin_number_index = info->pin_number_index;
+
                /* Allow full data read from EC address space */
 
                if ((obj_desc->field.region_obj->region.space_id ==
index 76f7cff..c8ead9f 100644 (file)
@@ -99,6 +99,13 @@ static void container_device_detach(struct acpi_device *adev)
                device_unregister(dev);
 }
 
+static void container_device_online(struct acpi_device *adev)
+{
+       struct device *dev = acpi_driver_data(adev);
+
+       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
 static struct acpi_scan_handler container_handler = {
        .ids = container_device_ids,
        .attach = container_device_attach,
@@ -106,6 +113,7 @@ static struct acpi_scan_handler container_handler = {
        .hotplug = {
                .enabled = true,
                .demand_offline = true,
+               .notify_online = container_device_online,
        },
 };
 
index 3bf7764..ae44d86 100644 (file)
@@ -130,7 +130,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
        list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
                count = snprintf(&modalias[len], size, "%s:", id->id);
                if (count < 0)
-                       return EINVAL;
+                       return -EINVAL;
                if (count >= size)
                        return -ENOMEM;
                len += count;
@@ -2189,6 +2189,9 @@ static void acpi_bus_attach(struct acpi_device *device)
  ok:
        list_for_each_entry(child, &device->children, node)
                acpi_bus_attach(child);
+
+       if (device->handler && device->handler->hotplug.notify_online)
+               device->handler->hotplug.notify_online(device);
 }
 
 /**
index fcbda10..8e7e185 100644 (file)
@@ -750,6 +750,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
                },
        },
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "ThinkPad X201s",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
+               },
+       },
 
        /* The native backlight controls do not work on some older machines */
        {
index 20da3ad..7b270a2 100644 (file)
@@ -1211,6 +1211,9 @@ void device_del(struct device *dev)
         */
        if (platform_notify_remove)
                platform_notify_remove(dev);
+       if (dev->bus)
+               blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+                                            BUS_NOTIFY_REMOVED_DEVICE, dev);
        kobject_uevent(&dev->kobj, KOBJ_REMOVE);
        cleanup_device_parent(dev);
        kobject_del(&dev->kobj);
index 551e010..9525458 100644 (file)
@@ -188,31 +188,31 @@ static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
 };
 
 static struct l3_masters_data omap_l3_masters[] = {
-       { 0x0 , "MPU"},
-       { 0x10, "CS_ADP"},
-       { 0x14, "xxx"},
-       { 0x20, "DSP"},
-       { 0x30, "IVAHD"},
-       { 0x40, "ISS"},
-       { 0x44, "DucatiM3"},
-       { 0x48, "FaceDetect"},
-       { 0x50, "SDMA_Rd"},
-       { 0x54, "SDMA_Wr"},
-       { 0x58, "xxx"},
-       { 0x5C, "xxx"},
-       { 0x60, "SGX"},
-       { 0x70, "DSS"},
-       { 0x80, "C2C"},
-       { 0x88, "xxx"},
-       { 0x8C, "xxx"},
-       { 0x90, "HSI"},
-       { 0xA0, "MMC1"},
-       { 0xA4, "MMC2"},
-       { 0xA8, "MMC6"},
-       { 0xB0, "UNIPRO1"},
-       { 0xC0, "USBHOSTHS"},
-       { 0xC4, "USBOTGHS"},
-       { 0xC8, "USBHOSTFS"}
+       { 0x00, "MPU"},
+       { 0x04, "CS_ADP"},
+       { 0x05, "xxx"},
+       { 0x08, "DSP"},
+       { 0x0C, "IVAHD"},
+       { 0x10, "ISS"},
+       { 0x11, "DucatiM3"},
+       { 0x12, "FaceDetect"},
+       { 0x14, "SDMA_Rd"},
+       { 0x15, "SDMA_Wr"},
+       { 0x16, "xxx"},
+       { 0x17, "xxx"},
+       { 0x18, "SGX"},
+       { 0x1C, "DSS"},
+       { 0x20, "C2C"},
+       { 0x22, "xxx"},
+       { 0x23, "xxx"},
+       { 0x24, "HSI"},
+       { 0x28, "MMC1"},
+       { 0x29, "MMC2"},
+       { 0x2A, "MMC6"},
+       { 0x2C, "UNIPRO1"},
+       { 0x30, "USBHOSTHS"},
+       { 0x31, "USBOTGHS"},
+       { 0x32, "USBHOSTFS"}
 };
 
 static struct l3_flagmux_data *omap_l3_flagmux[] = {
index 0300c46..32f7c1b 100644 (file)
@@ -447,7 +447,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
        int i;
 
        num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
-       if (num_parents <= 0 || num_parents > 1)
+       if (num_parents != 2)
                return;
 
        for (i = 0; i < num_parents; ++i) {
index bac2ddf..73a8d0f 100644 (file)
@@ -22,7 +22,7 @@ static struct clk_onecell_data clk_data = {
        .clk_num = ARRAY_SIZE(clk),
 };
 
-static int __init efm32gg_cmu_init(struct device_node *np)
+static void __init efm32gg_cmu_init(struct device_node *np)
 {
        int i;
        void __iomem *base;
@@ -33,7 +33,7 @@ static int __init efm32gg_cmu_init(struct device_node *np)
        base = of_iomap(np, 0);
        if (!base) {
                pr_warn("Failed to map address range for efm32gg,cmu node\n");
-               return -EADDRNOTAVAIL;
+               return;
        }
 
        clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
@@ -76,6 +76,6 @@ static int __init efm32gg_cmu_init(struct device_node *np)
        clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0",
                        "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
 
-       return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 }
 CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
index b76fa69..bacc06f 100644 (file)
@@ -1467,6 +1467,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
 static void clk_change_rate(struct clk *clk)
 {
        struct clk *child;
+       struct hlist_node *tmp;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
@@ -1502,7 +1503,11 @@ static void clk_change_rate(struct clk *clk)
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-       hlist_for_each_entry(child, &clk->children, child_node) {
+       /*
+        * Use safe iteration, as change_rate can actually swap parents
+        * for certain clock types.
+        */
+       hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
                /* Skip children who will be reparented to another clock */
                if (child->new_parent && child->new_parent != clk)
                        continue;
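
The hunk above switches to the _safe list walker because clk_change_rate() can re-parent a child mid-walk. A generic illustration of the pattern with a hypothetical struct and helper; the second argument caches the next node before the loop body runs, so unlinking the current entry does not derail the iteration:

#include <linux/list.h>

struct demo_child {
	struct hlist_node child_node;
	bool needs_move;
};

/* Hypothetical: unlinks 'c' from its current list and adds it to 'other'. */
void demo_reparent(struct demo_child *c, struct hlist_head *other);

static void demo_walk_children(struct hlist_head *children,
			       struct hlist_head *other)
{
	struct demo_child *child;
	struct hlist_node *tmp;

	/* 'tmp' already holds child->child_node.next when the body runs. */
	hlist_for_each_entry_safe(child, tmp, children, child_node) {
		if (child->needs_move)
			demo_reparent(child, other);
	}
}
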
index 4032e51..3b83b7d 100644 (file)
@@ -1095,7 +1095,7 @@ static struct clk_branch prng_clk = {
 };
 
 static const struct freq_tbl clk_tbl_sdc[] = {
-       {    144000, P_PXO,   5, 18,625 },
+       {    200000, P_PXO,   2, 2, 125 },
        {    400000, P_PLL8,  4, 1, 240 },
        {  16000000, P_PLL8,  4, 1,   6 },
        {  17070000, P_PLL8,  1, 2,  45 },
index 0d8c6c5..b22a2d2 100644 (file)
@@ -545,7 +545,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS),
        GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
        GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
-       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
+       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
        GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
        GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
        GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
@@ -603,7 +603,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS),
        GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS),
        GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS),
-       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
        GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS),
        GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS),
        GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS),
index 4a65b41..af29359 100644 (file)
@@ -139,9 +139,13 @@ static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                            unsigned long parent_rate)
 {
-       struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+       struct dra7_atl_desc *cdesc;
        u32 divider;
 
+       if (!hw || !rate)
+               return -EINVAL;
+
+       cdesc = to_atl_desc(hw);
        divider = ((parent_rate + rate / 2) / rate) - 1;
        if (divider > DRA7_ATL_DIVIDER_MASK)
                divider = DRA7_ATL_DIVIDER_MASK;
index e6aa10d..a837f70 100644 (file)
@@ -211,11 +211,16 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
 static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long parent_rate)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
+       struct clk_divider *divider;
        unsigned int div, value;
        unsigned long flags = 0;
        u32 val;
 
+       if (!hw || !rate)
+               return -EINVAL;
+
+       divider = to_clk_divider(hw);
+
        div = DIV_ROUND_UP(parent_rate, rate);
        value = _get_val(divider, div);
 
index d9fdedd..6e93e7f 100644 (file)
@@ -1289,6 +1289,8 @@ err_get_freq:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       up_write(&policy->rwsem);
+
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
@@ -1656,6 +1658,8 @@ void cpufreq_suspend(void)
        if (!cpufreq_driver)
                return;
 
+       cpufreq_suspended = true;
+
        if (!has_target())
                return;
 
@@ -1670,8 +1674,6 @@ void cpufreq_suspend(void)
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }
-
-       cpufreq_suspended = true;
 }
 
 /**
@@ -1687,13 +1689,13 @@ void cpufreq_resume(void)
        if (!cpufreq_driver)
                return;
 
+       cpufreq_suspended = false;
+
        if (!has_target())
                return;
 
        pr_debug("%s: Resuming Governors\n", __func__);
 
-       cpufreq_suspended = false;
-
        list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
index 20dc848..4d4e016 100644 (file)
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void)
 {
        int ret;
 
+       ret = ccp_present();
+       if (ret)
+               return ret;
+
        spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
index a7d1106..c6e6171 100644 (file)
@@ -54,6 +54,20 @@ static inline void ccp_del_device(struct ccp_device *ccp)
        ccp_dev = NULL;
 }
 
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+       if (ccp_get_device())
+               return 0;
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
index b707f29..65dd1ff 100644 (file)
@@ -66,7 +66,7 @@
 #define ADF_DH895XCC_ETR_MAX_BANKS 32
 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
 #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
 #define ADF_DH895XCC_SMIA1_MASK 0x1
 /* Error detection and correction */
 #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
index 4cf7d9a..bbea824 100644 (file)
@@ -1017,6 +1017,11 @@ static int omap_dma_resume(struct omap_chan *c)
                return -EINVAL;
 
        if (c->paused) {
+               mb();
+
+               /* Restore channel link register */
+               omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);
+
                omap_dma_start(c, c->desc);
                c->paused = false;
        }
index d8be608..aef6a95 100644 (file)
@@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE)           += efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)                        += cper.o
 obj-$(CONFIG_EFI_RUNTIME_MAP)          += runtime-map.o
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)     += runtime-wrappers.o
-obj-$(CONFIG_EFI_STUB)                 += libstub/
+obj-$(CONFIG_EFI_ARM_STUB)             += libstub/
index d62eaaa..687476f 100644 (file)
@@ -377,8 +377,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
        struct gpio_chip *chip = achip->chip;
        struct acpi_resource_gpio *agpio;
        struct acpi_resource *ares;
+       int pin_index = (int)address;
        acpi_status status;
        bool pull_up;
+       int length;
        int i;
 
        status = acpi_buffer_to_resource(achip->conn_info.connection,
@@ -400,7 +402,8 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                return AE_BAD_PARAMETER;
        }
 
-       for (i = 0; i < agpio->pin_table_length; i++) {
+       length = min(agpio->pin_table_length, (u16)(pin_index + bits));
+       for (i = pin_index; i < length; ++i) {
                unsigned pin = agpio->pin_table[i];
                struct acpi_gpio_connection *conn;
                struct gpio_desc *desc;
index 15cc0bb..c68d037 100644 (file)
@@ -413,12 +413,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
                return;
        }
 
-       irq_set_chained_handler(parent_irq, parent_handler);
        /*
         * The parent irqchip is already using the chip_data for this
         * irqchip, so our callbacks simply use the handler_data.
         */
        irq_set_handler_data(parent_irq, gpiochip);
+       irq_set_chained_handler(parent_irq, parent_handler);
 }
 EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
 
@@ -1674,7 +1674,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
                set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 
        /* No particular flag request, return here... */
-       if (flags & GPIOD_FLAGS_BIT_DIR_SET)
+       if (!(flags & GPIOD_FLAGS_BIT_DIR_SET))
                return desc;
 
        /* Process flags */
index dea99d9..4b7ed52 100644 (file)
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
        BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
        BUG_ON(!validate_regs_sorted(ring));
 
-       ret = init_hash_table(ring, cmd_tables, cmd_table_count);
-       if (ret) {
-               DRM_ERROR("CMD: cmd_parser_init failed!\n");
-               fini_hash_table(ring);
-               return ret;
+       if (hash_empty(ring->cmd_hash)) {
+               ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+               if (ret) {
+                       DRM_ERROR("CMD: cmd_parser_init failed!\n");
+                       fini_hash_table(ring);
+                       return ret;
+               }
        }
 
        ring->needs_cmd_parser = true;
index ca34de7..5a9de21 100644 (file)
@@ -732,7 +732,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_hdmi_sink = true;
 
-       if (tmp & HDMI_MODE_SELECT_HDMI)
+       if (tmp & SDVO_AUDIO_ENABLE)
                pipe_config->has_audio = true;
 
        if (!HAS_PCH_SPLIT(dev) &&
index fa95659..3d546c6 100644 (file)
@@ -4803,7 +4803,7 @@ struct bonaire_mqd
  */
 static int cik_cp_compute_resume(struct radeon_device *rdev)
 {
-       int r, i, idx;
+       int r, i, j, idx;
        u32 tmp;
        bool use_doorbell = true;
        u64 hqd_gpu_addr;
@@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                mqd->queue_state.cp_hqd_pq_wptr= 0;
                if (RREG32(CP_HQD_ACTIVE) & 1) {
                        WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
-                       for (i = 0; i < rdev->usec_timeout; i++) {
+                       for (j = 0; j < rdev->usec_timeout; j++) {
                                if (!(RREG32(CP_HQD_ACTIVE) & 1))
                                        break;
                                udelay(1);
@@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -8251,6 +8251,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -8259,7 +8260,6 @@ restart_ih:
        if (queue_thermal)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index dbca60c..e50807c 100644 (file)
@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -5137,6 +5137,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -5145,7 +5146,6 @@ restart_ih:
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index 3cfb500..ea5c9af 100644 (file)
@@ -3792,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -4048,6 +4048,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -4056,7 +4057,6 @@ restart_ih:
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index 5f05b4c..3247bfd 100644 (file)
@@ -106,6 +106,7 @@ extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
 extern int radeon_bapm;
+extern int radeon_backlight;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
index 75223dd..12c8329 100644 (file)
@@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
         * https://bugzilla.kernel.org/show_bug.cgi?id=51381
         */
        { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+       /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+        * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+        */
+       { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
        /* macbook pro 8.2 */
        { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
        { 0, 0, 0, 0, 0 },
index 4126fd0..f9d17b2 100644 (file)
@@ -181,6 +181,7 @@ int radeon_vm_block_size = -1;
 int radeon_deep_color = 0;
 int radeon_use_pflipirq = 2;
 int radeon_bapm = -1;
+int radeon_backlight = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -263,6 +264,9 @@ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
 MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(bapm, radeon_bapm, int, 0444);
 
+MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(backlight, radeon_backlight, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
index 3c2094c..15edf23 100644 (file)
@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
        return ret;
 }
 
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+                                        struct drm_connector *connector)
+{
+       struct drm_device *dev = radeon_encoder->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
+       bool use_bl = false;
+
+       if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+               return;
+
+       if (radeon_backlight == 0) {
+               return;
+       } else if (radeon_backlight == 1) {
+               use_bl = true;
+       } else if (radeon_backlight == -1) {
+               /* Quirks */
+               /* Amilo Xi 2550 only works with acpi bl */
+               if ((rdev->pdev->device == 0x9583) &&
+                   (rdev->pdev->subsystem_vendor == 0x1734) &&
+                   (rdev->pdev->subsystem_device == 0x1107))
+                       use_bl = false;
+               else
+                       use_bl = true;
+       }
+
+       if (use_bl) {
+               if (rdev->is_atom_bios)
+                       radeon_atom_backlight_init(radeon_encoder, connector);
+               else
+                       radeon_legacy_backlight_init(radeon_encoder, connector);
+               rdev->mode_info.bl_encoder = radeon_encoder;
+       }
+}
+
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
-       struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct drm_encoder *encoder;
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev)
                        radeon_encoder = to_radeon_encoder(encoder);
                        if (radeon_encoder->devices & radeon_connector->devices) {
                                drm_mode_connector_attach_encoder(connector, encoder);
-                               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                                       if (rdev->is_atom_bios)
-                                               radeon_atom_backlight_init(radeon_encoder, connector);
-                                       else
-                                               radeon_legacy_backlight_init(radeon_encoder, connector);
-                                       rdev->mode_info.bl_encoder = radeon_encoder;
-                               }
+                               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                                       radeon_encoder_add_backlight(radeon_encoder, connector);
                        }
                }
        }
index 6bce408..3a0b973 100644 (file)
@@ -6316,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                       wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -6664,13 +6664,13 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);
 
        /* make sure wptr hasn't changed while processing */
index 4a7cbfa..fcdbde4 100644 (file)
@@ -93,13 +93,29 @@ static ssize_t show_power_crit(struct device *dev,
 }
 static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
 
+static umode_t fam15h_power_is_visible(struct kobject *kobj,
+                                      struct attribute *attr,
+                                      int index)
+{
+       /* power1_input is only reported for Fam15h, Models 00h-0fh */
+       if (attr == &dev_attr_power1_input.attr &&
+          (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf))
+               return 0;
+
+       return attr->mode;
+}
+
 static struct attribute *fam15h_power_attrs[] = {
        &dev_attr_power1_input.attr,
        &dev_attr_power1_crit.attr,
        NULL
 };
 
-ATTRIBUTE_GROUPS(fam15h_power);
+static const struct attribute_group fam15h_power_group = {
+       .attrs = fam15h_power_attrs,
+       .is_visible = fam15h_power_is_visible,
+};
+__ATTRIBUTE_GROUPS(fam15h_power);
 
 static bool fam15h_power_is_internal_node0(struct pci_dev *f4)
 {
@@ -216,7 +232,9 @@ static int fam15h_power_probe(struct pci_dev *pdev,
 
 static const struct pci_device_id fam15h_power_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
index e42964f..ad571ec 100644 (file)
@@ -145,7 +145,7 @@ static int tmp103_probe(struct i2c_client *client,
        }
 
        i2c_set_clientdata(client, regmap);
-       hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                      regmap, tmp103_groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
 }
index e0228b2..1722f50 100644 (file)
@@ -2,11 +2,8 @@
 # Makefile for the i2c core.
 #
 
-i2ccore-y := i2c-core.o
-i2ccore-$(CONFIG_ACPI)         += i2c-acpi.o
-
 obj-$(CONFIG_I2C_BOARDINFO)    += i2c-boardinfo.o
-obj-$(CONFIG_I2C)              += i2ccore.o
+obj-$(CONFIG_I2C)              += i2c-core.o
 obj-$(CONFIG_I2C_SMBUS)                += i2c-smbus.o
 obj-$(CONFIG_I2C_CHARDEV)      += i2c-dev.o
 obj-$(CONFIG_I2C_MUX)          += i2c-mux.o
index 9844925..d9ee43c 100644 (file)
@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
                        desc->wr_len_cmd = dma_size;
                        desc->control |= ISMT_DESC_BLK;
                        priv->dma_buffer[0] = command;
-                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
                } else {
                        /* Block Read */
                        dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA:  READ\n");
@@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
                        desc->wr_len_cmd = dma_size;
                        desc->control |= ISMT_DESC_I2C;
                        priv->dma_buffer[0] = command;
-                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+                       memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
                } else {
                        /* i2c Block Read */
                        dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA:  READ\n");
index 7170fc8..65a21fe 100644 (file)
@@ -429,7 +429,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
                ret = mxs_i2c_pio_wait_xfer_end(i2c);
                if (ret) {
                        dev_err(i2c->dev,
-                               "PIO: Failed to send SELECT command!\n");
+                               "PIO: Failed to send READ command!\n");
                        goto cleanup;
                }
 
index 1cc146c..e506fcd 100644 (file)
@@ -76,8 +76,8 @@
 #define RCAR_IRQ_RECV  (MNR | MAL | MST | MAT | MDR)
 #define RCAR_IRQ_STOP  (MST)
 
-#define RCAR_IRQ_ACK_SEND      (~(MAT | MDE))
-#define RCAR_IRQ_ACK_RECV      (~(MAT | MDR))
+#define RCAR_IRQ_ACK_SEND      (~(MAT | MDE) & 0xFF)
+#define RCAR_IRQ_ACK_RECV      (~(MAT | MDR) & 0xFF)
 
 #define ID_LAST_MSG    (1 << 0)
 #define ID_IOERROR     (1 << 1)
index e637c32..93cfc83 100644 (file)
@@ -433,12 +433,11 @@ static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
        unsigned long i2c_rate = clk_get_rate(i2c->clk);
        unsigned int div;
 
-       /* SCL rate = (clk rate) / (8 * DIV) */
-       div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
-
-       /* The lower and upper half of the CLKDIV reg describe the length of
-        * SCL low & high periods. */
-       div = DIV_ROUND_UP(div, 2);
+       /* set DIV = DIVH = DIVL
+        * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1))
+        *          = (clk rate) / (16 * (DIV + 1))
+        */
+       div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1;
 
        i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
 }
index 87d0371..efba1eb 100644 (file)
@@ -380,34 +380,33 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
 {
        int ret;
        if (!i2c_dev->hw->has_single_clk_source) {
-               ret = clk_prepare_enable(i2c_dev->fast_clk);
+               ret = clk_enable(i2c_dev->fast_clk);
                if (ret < 0) {
                        dev_err(i2c_dev->dev,
                                "Enabling fast clk failed, err %d\n", ret);
                        return ret;
                }
        }
-       ret = clk_prepare_enable(i2c_dev->div_clk);
+       ret = clk_enable(i2c_dev->div_clk);
        if (ret < 0) {
                dev_err(i2c_dev->dev,
                        "Enabling div clk failed, err %d\n", ret);
-               clk_disable_unprepare(i2c_dev->fast_clk);
+               clk_disable(i2c_dev->fast_clk);
        }
        return ret;
 }
 
 static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
 {
-       clk_disable_unprepare(i2c_dev->div_clk);
+       clk_disable(i2c_dev->div_clk);
        if (!i2c_dev->hw->has_single_clk_source)
-               clk_disable_unprepare(i2c_dev->fast_clk);
+               clk_disable(i2c_dev->fast_clk);
 }
 
 static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
 {
        u32 val;
        int err = 0;
-       int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
        u32 clk_divisor;
 
        err = tegra_i2c_clock_enable(i2c_dev);
@@ -428,9 +427,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
        i2c_writel(i2c_dev, val, I2C_CNFG);
        i2c_writel(i2c_dev, 0, I2C_INT_MASK);
 
-       clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
-       clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier);
-
        /* Make sure clock divisor programmed correctly */
        clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
        clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
@@ -712,6 +708,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        void __iomem *base;
        int irq;
        int ret = 0;
+       int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
@@ -777,17 +774,39 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, i2c_dev);
 
+       if (!i2c_dev->hw->has_single_clk_source) {
+               ret = clk_prepare(i2c_dev->fast_clk);
+               if (ret < 0) {
+                       dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+                       return ret;
+               }
+       }
+
+       clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+       ret = clk_set_rate(i2c_dev->div_clk,
+                          i2c_dev->bus_clk_rate * clk_multiplier);
+       if (ret) {
+               dev_err(i2c_dev->dev, "Clock rate change failed %d\n", ret);
+               goto unprepare_fast_clk;
+       }
+
+       ret = clk_prepare(i2c_dev->div_clk);
+       if (ret < 0) {
+               dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+               goto unprepare_fast_clk;
+       }
+
        ret = tegra_i2c_init(i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize i2c controller");
-               return ret;
+               goto unprepare_div_clk;
        }
 
        ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
                        tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
-               return ret;
+               goto unprepare_div_clk;
        }
 
        i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
@@ -803,16 +822,30 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
        if (ret) {
                dev_err(&pdev->dev, "Failed to add I2C adapter\n");
-               return ret;
+               goto unprepare_div_clk;
        }
 
        return 0;
+
+unprepare_div_clk:
+       clk_unprepare(i2c_dev->div_clk);
+
+unprepare_fast_clk:
+       if (!i2c_dev->hw->has_single_clk_source)
+               clk_unprepare(i2c_dev->fast_clk);
+
+       return ret;
 }
 
 static int tegra_i2c_remove(struct platform_device *pdev)
 {
        struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
        i2c_del_adapter(&i2c_dev->adapter);
+
+       clk_unprepare(i2c_dev->div_clk);
+       if (!i2c_dev->hw->has_single_clk_source)
+               clk_unprepare(i2c_dev->fast_clk);
+
        return 0;
 }
 
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c
deleted file mode 100644 (file)
index 0dbc18c..0000000
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * I2C ACPI code
- *
- * Copyright (C) 2014 Intel Corp
- *
- * Author: Lan Tianyu <tianyu.lan@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- */
-#define pr_fmt(fmt) "I2C/ACPI : " fmt
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
-
-struct acpi_i2c_handler_data {
-       struct acpi_connection_info info;
-       struct i2c_adapter *adapter;
-};
-
-struct gsb_buffer {
-       u8      status;
-       u8      len;
-       union {
-               u16     wdata;
-               u8      bdata;
-               u8      data[0];
-       };
-} __packed;
-
-static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
-{
-       struct i2c_board_info *info = data;
-
-       if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
-               struct acpi_resource_i2c_serialbus *sb;
-
-               sb = &ares->data.i2c_serial_bus;
-               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
-                       info->addr = sb->slave_address;
-                       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
-                               info->flags |= I2C_CLIENT_TEN;
-               }
-       } else if (info->irq < 0) {
-               struct resource r;
-
-               if (acpi_dev_resource_interrupt(ares, 0, &r))
-                       info->irq = r.start;
-       }
-
-       /* Tell the ACPI core to skip this resource */
-       return 1;
-}
-
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
-                                      void *data, void **return_value)
-{
-       struct i2c_adapter *adapter = data;
-       struct list_head resource_list;
-       struct i2c_board_info info;
-       struct acpi_device *adev;
-       int ret;
-
-       if (acpi_bus_get_device(handle, &adev))
-               return AE_OK;
-       if (acpi_bus_get_status(adev) || !adev->status.present)
-               return AE_OK;
-
-       memset(&info, 0, sizeof(info));
-       info.acpi_node.companion = adev;
-       info.irq = -1;
-
-       INIT_LIST_HEAD(&resource_list);
-       ret = acpi_dev_get_resources(adev, &resource_list,
-                                    acpi_i2c_add_resource, &info);
-       acpi_dev_free_resource_list(&resource_list);
-
-       if (ret < 0 || !info.addr)
-               return AE_OK;
-
-       adev->power.flags.ignore_parent = true;
-       strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
-       if (!i2c_new_device(adapter, &info)) {
-               adev->power.flags.ignore_parent = false;
-               dev_err(&adapter->dev,
-                       "failed to add I2C device %s from ACPI\n",
-                       dev_name(&adev->dev));
-       }
-
-       return AE_OK;
-}
-
-/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
- * @adap: pointer to adapter
- *
- * Enumerate all I2C slave devices behind this adapter by walking the ACPI
- * namespace. When a device is found it will be added to the Linux device
- * model and bound to the corresponding ACPI handle.
- */
-void acpi_i2c_register_devices(struct i2c_adapter *adap)
-{
-       acpi_handle handle;
-       acpi_status status;
-
-       if (!adap->dev.parent)
-               return;
-
-       handle = ACPI_HANDLE(adap->dev.parent);
-       if (!handle)
-               return;
-
-       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
-                                    acpi_i2c_add_device, NULL,
-                                    adap, NULL);
-       if (ACPI_FAILURE(status))
-               dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
-}
-
-#ifdef CONFIG_ACPI_I2C_OPREGION
-static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
-               u8 cmd, u8 *data, u8 data_len)
-{
-
-       struct i2c_msg msgs[2];
-       int ret;
-       u8 *buffer;
-
-       buffer = kzalloc(data_len, GFP_KERNEL);
-       if (!buffer)
-               return AE_NO_MEMORY;
-
-       msgs[0].addr = client->addr;
-       msgs[0].flags = client->flags;
-       msgs[0].len = 1;
-       msgs[0].buf = &cmd;
-
-       msgs[1].addr = client->addr;
-       msgs[1].flags = client->flags | I2C_M_RD;
-       msgs[1].len = data_len;
-       msgs[1].buf = buffer;
-
-       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret < 0)
-               dev_err(&client->adapter->dev, "i2c read failed\n");
-       else
-               memcpy(data, buffer, data_len);
-
-       kfree(buffer);
-       return ret;
-}
-
-static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
-               u8 cmd, u8 *data, u8 data_len)
-{
-
-       struct i2c_msg msgs[1];
-       u8 *buffer;
-       int ret = AE_OK;
-
-       buffer = kzalloc(data_len + 1, GFP_KERNEL);
-       if (!buffer)
-               return AE_NO_MEMORY;
-
-       buffer[0] = cmd;
-       memcpy(buffer + 1, data, data_len);
-
-       msgs[0].addr = client->addr;
-       msgs[0].flags = client->flags;
-       msgs[0].len = data_len + 1;
-       msgs[0].buf = buffer;
-
-       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret < 0)
-               dev_err(&client->adapter->dev, "i2c write failed\n");
-
-       kfree(buffer);
-       return ret;
-}
-
-static acpi_status
-acpi_i2c_space_handler(u32 function, acpi_physical_address command,
-                       u32 bits, u64 *value64,
-                       void *handler_context, void *region_context)
-{
-       struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
-       struct acpi_i2c_handler_data *data = handler_context;
-       struct acpi_connection_info *info = &data->info;
-       struct acpi_resource_i2c_serialbus *sb;
-       struct i2c_adapter *adapter = data->adapter;
-       struct i2c_client client;
-       struct acpi_resource *ares;
-       u32 accessor_type = function >> 16;
-       u8 action = function & ACPI_IO_MASK;
-       acpi_status ret = AE_OK;
-       int status;
-
-       ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
-       if (ACPI_FAILURE(ret))
-               return ret;
-
-       if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
-               ret = AE_BAD_PARAMETER;
-               goto err;
-       }
-
-       sb = &ares->data.i2c_serial_bus;
-       if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
-               ret = AE_BAD_PARAMETER;
-               goto err;
-       }
-
-       memset(&client, 0, sizeof(client));
-       client.adapter = adapter;
-       client.addr = sb->slave_address;
-       client.flags = 0;
-
-       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
-               client.flags |= I2C_CLIENT_TEN;
-
-       switch (accessor_type) {
-       case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_byte(&client);
-                       if (status >= 0) {
-                               gsb->bdata = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_byte(&client, gsb->bdata);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_BYTE:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_byte_data(&client, command);
-                       if (status >= 0) {
-                               gsb->bdata = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_byte_data(&client, command,
-                                       gsb->bdata);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_WORD:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_word_data(&client, command);
-                       if (status >= 0) {
-                               gsb->wdata = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_word_data(&client, command,
-                                       gsb->wdata);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
-               if (action == ACPI_READ) {
-                       status = i2c_smbus_read_block_data(&client, command,
-                                       gsb->data);
-                       if (status >= 0) {
-                               gsb->len = status;
-                               status = 0;
-                       }
-               } else {
-                       status = i2c_smbus_write_block_data(&client, command,
-                                       gsb->len, gsb->data);
-               }
-               break;
-
-       case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
-               if (action == ACPI_READ) {
-                       status = acpi_gsb_i2c_read_bytes(&client, command,
-                                       gsb->data, info->access_length);
-                       if (status > 0)
-                               status = 0;
-               } else {
-                       status = acpi_gsb_i2c_write_bytes(&client, command,
-                                       gsb->data, info->access_length);
-               }
-               break;
-
-       default:
-               pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
-               ret = AE_BAD_PARAMETER;
-               goto err;
-       }
-
-       gsb->status = status;
-
- err:
-       ACPI_FREE(ares);
-       return ret;
-}
-
-
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
-{
-       acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
-       struct acpi_i2c_handler_data *data;
-       acpi_status status;
-
-       if (!handle)
-               return -ENODEV;
-
-       data = kzalloc(sizeof(struct acpi_i2c_handler_data),
-                           GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       data->adapter = adapter;
-       status = acpi_bus_attach_private_data(handle, (void *)data);
-       if (ACPI_FAILURE(status)) {
-               kfree(data);
-               return -ENOMEM;
-       }
-
-       status = acpi_install_address_space_handler(handle,
-                               ACPI_ADR_SPACE_GSBUS,
-                               &acpi_i2c_space_handler,
-                               NULL,
-                               data);
-       if (ACPI_FAILURE(status)) {
-               dev_err(&adapter->dev, "Error installing i2c space handler\n");
-               acpi_bus_detach_private_data(handle);
-               kfree(data);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
-{
-       acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
-       struct acpi_i2c_handler_data *data;
-       acpi_status status;
-
-       if (!handle)
-               return;
-
-       acpi_remove_address_space_handler(handle,
-                               ACPI_ADR_SPACE_GSBUS,
-                               &acpi_i2c_space_handler);
-
-       status = acpi_bus_get_private_data(handle, (void **)&data);
-       if (ACPI_SUCCESS(status))
-               kfree(data);
-
-       acpi_bus_detach_private_data(handle);
-}
-#endif
index 632057a..ccfbbab 100644 (file)
@@ -27,6 +27,8 @@
    OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
    (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and
    (c) 2013  Wolfram Sang <wsa@the-dreams.de>
+   I2C ACPI code Copyright (C) 2014 Intel Corp
+   Author: Lan Tianyu <tianyu.lan@intel.com>
  */
 
 #include <linux/module.h>
@@ -78,6 +80,368 @@ void i2c_transfer_trace_unreg(void)
        static_key_slow_dec(&i2c_trace_msg);
 }
 
+#if defined(CONFIG_ACPI)
+struct acpi_i2c_handler_data {
+       struct acpi_connection_info info;
+       struct i2c_adapter *adapter;
+};
+
+struct gsb_buffer {
+       u8      status;
+       u8      len;
+       union {
+               u16     wdata;
+               u8      bdata;
+               u8      data[0];
+       };
+} __packed;
+
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+       struct i2c_board_info *info = data;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+               struct acpi_resource_i2c_serialbus *sb;
+
+               sb = &ares->data.i2c_serial_bus;
+               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+                       info->addr = sb->slave_address;
+                       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+                               info->flags |= I2C_CLIENT_TEN;
+               }
+       } else if (info->irq < 0) {
+               struct resource r;
+
+               if (acpi_dev_resource_interrupt(ares, 0, &r))
+                       info->irq = r.start;
+       }
+
+       /* Tell the ACPI core to skip this resource */
+       return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+                                      void *data, void **return_value)
+{
+       struct i2c_adapter *adapter = data;
+       struct list_head resource_list;
+       struct i2c_board_info info;
+       struct acpi_device *adev;
+       int ret;
+
+       if (acpi_bus_get_device(handle, &adev))
+               return AE_OK;
+       if (acpi_bus_get_status(adev) || !adev->status.present)
+               return AE_OK;
+
+       memset(&info, 0, sizeof(info));
+       info.acpi_node.companion = adev;
+       info.irq = -1;
+
+       INIT_LIST_HEAD(&resource_list);
+       ret = acpi_dev_get_resources(adev, &resource_list,
+                                    acpi_i2c_add_resource, &info);
+       acpi_dev_free_resource_list(&resource_list);
+
+       if (ret < 0 || !info.addr)
+               return AE_OK;
+
+       adev->power.flags.ignore_parent = true;
+       strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+       if (!i2c_new_device(adapter, &info)) {
+               adev->power.flags.ignore_parent = false;
+               dev_err(&adapter->dev,
+                       "failed to add I2C device %s from ACPI\n",
+                       dev_name(&adev->dev));
+       }
+
+       return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adap: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+{
+       acpi_handle handle;
+       acpi_status status;
+
+       if (!adap->dev.parent)
+               return;
+
+       handle = ACPI_HANDLE(adap->dev.parent);
+       if (!handle)
+               return;
+
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+                                    acpi_i2c_add_device, NULL,
+                                    adap, NULL);
+       if (ACPI_FAILURE(status))
+               dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+}
+
+#else /* CONFIG_ACPI */
+static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_I2C_OPREGION
+static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
+               u8 cmd, u8 *data, u8 data_len)
+{
+
+       struct i2c_msg msgs[2];
+       int ret;
+       u8 *buffer;
+
+       buffer = kzalloc(data_len, GFP_KERNEL);
+       if (!buffer)
+               return AE_NO_MEMORY;
+
+       msgs[0].addr = client->addr;
+       msgs[0].flags = client->flags;
+       msgs[0].len = 1;
+       msgs[0].buf = &cmd;
+
+       msgs[1].addr = client->addr;
+       msgs[1].flags = client->flags | I2C_M_RD;
+       msgs[1].len = data_len;
+       msgs[1].buf = buffer;
+
+       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (ret < 0)
+               dev_err(&client->adapter->dev, "i2c read failed\n");
+       else
+               memcpy(data, buffer, data_len);
+
+       kfree(buffer);
+       return ret;
+}
+
+static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+               u8 cmd, u8 *data, u8 data_len)
+{
+
+       struct i2c_msg msgs[1];
+       u8 *buffer;
+       int ret = AE_OK;
+
+       buffer = kzalloc(data_len + 1, GFP_KERNEL);
+       if (!buffer)
+               return AE_NO_MEMORY;
+
+       buffer[0] = cmd;
+       memcpy(buffer + 1, data, data_len);
+
+       msgs[0].addr = client->addr;
+       msgs[0].flags = client->flags;
+       msgs[0].len = data_len + 1;
+       msgs[0].buf = buffer;
+
+       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (ret < 0)
+               dev_err(&client->adapter->dev, "i2c write failed\n");
+
+       kfree(buffer);
+       return ret;
+}
+
+static acpi_status
+acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+                       u32 bits, u64 *value64,
+                       void *handler_context, void *region_context)
+{
+       struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
+       struct acpi_i2c_handler_data *data = handler_context;
+       struct acpi_connection_info *info = &data->info;
+       struct acpi_resource_i2c_serialbus *sb;
+       struct i2c_adapter *adapter = data->adapter;
+       struct i2c_client client;
+       struct acpi_resource *ares;
+       u32 accessor_type = function >> 16;
+       u8 action = function & ACPI_IO_MASK;
+       acpi_status ret = AE_OK;
+       int status;
+
+       ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
+       if (ACPI_FAILURE(ret))
+               return ret;
+
+       if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+               ret = AE_BAD_PARAMETER;
+               goto err;
+       }
+
+       sb = &ares->data.i2c_serial_bus;
+       if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+               ret = AE_BAD_PARAMETER;
+               goto err;
+       }
+
+       memset(&client, 0, sizeof(client));
+       client.adapter = adapter;
+       client.addr = sb->slave_address;
+       client.flags = 0;
+
+       if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+               client.flags |= I2C_CLIENT_TEN;
+
+       switch (accessor_type) {
+       case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_byte(&client);
+                       if (status >= 0) {
+                               gsb->bdata = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_byte(&client, gsb->bdata);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_BYTE:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_byte_data(&client, command);
+                       if (status >= 0) {
+                               gsb->bdata = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_byte_data(&client, command,
+                                       gsb->bdata);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_WORD:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_word_data(&client, command);
+                       if (status >= 0) {
+                               gsb->wdata = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_word_data(&client, command,
+                                       gsb->wdata);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
+               if (action == ACPI_READ) {
+                       status = i2c_smbus_read_block_data(&client, command,
+                                       gsb->data);
+                       if (status >= 0) {
+                               gsb->len = status;
+                               status = 0;
+                       }
+               } else {
+                       status = i2c_smbus_write_block_data(&client, command,
+                                       gsb->len, gsb->data);
+               }
+               break;
+
+       case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
+               if (action == ACPI_READ) {
+                       status = acpi_gsb_i2c_read_bytes(&client, command,
+                                       gsb->data, info->access_length);
+                       if (status > 0)
+                               status = 0;
+               } else {
+                       status = acpi_gsb_i2c_write_bytes(&client, command,
+                                       gsb->data, info->access_length);
+               }
+               break;
+
+       default:
+               pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
+               ret = AE_BAD_PARAMETER;
+               goto err;
+       }
+
+       gsb->status = status;
+
+ err:
+       ACPI_FREE(ares);
+       return ret;
+}
+
+
+static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{
+       acpi_handle handle;
+       struct acpi_i2c_handler_data *data;
+       acpi_status status;
+
+       if (!adapter->dev.parent)
+               return -ENODEV;
+
+       handle = ACPI_HANDLE(adapter->dev.parent);
+
+       if (!handle)
+               return -ENODEV;
+
+       data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->adapter = adapter;
+       status = acpi_bus_attach_private_data(handle, (void *)data);
+       if (ACPI_FAILURE(status)) {
+               kfree(data);
+               return -ENOMEM;
+       }
+
+       status = acpi_install_address_space_handler(handle,
+                               ACPI_ADR_SPACE_GSBUS,
+                               &acpi_i2c_space_handler,
+                               NULL,
+                               data);
+       if (ACPI_FAILURE(status)) {
+               dev_err(&adapter->dev, "Error installing i2c space handler\n");
+               acpi_bus_detach_private_data(handle);
+               kfree(data);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{
+       acpi_handle handle;
+       struct acpi_i2c_handler_data *data;
+       acpi_status status;
+
+       if (!adapter->dev.parent)
+               return;
+
+       handle = ACPI_HANDLE(adapter->dev.parent);
+
+       if (!handle)
+               return;
+
+       acpi_remove_address_space_handler(handle,
+                               ACPI_ADR_SPACE_GSBUS,
+                               &acpi_i2c_space_handler);
+
+       status = acpi_bus_get_private_data(handle, (void **)&data);
+       if (ACPI_SUCCESS(status))
+               kfree(data);
+
+       acpi_bus_detach_private_data(handle);
+}
+#else /* CONFIG_ACPI_I2C_OPREGION */
+static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{ }
+
+static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{ return 0; }
+#endif /* CONFIG_ACPI_I2C_OPREGION */
+
 /* ------------------------------------------------------------------------- */
 
 static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
index a3a2e9c..df0c4f6 100644 (file)
@@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
+       umem->pid       = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
@@ -198,6 +199,7 @@ out:
        if (ret < 0) {
                if (need_release)
                        __ib_umem_release(context->device, umem, 0);
+               put_pid(umem->pid);
                kfree(umem);
        } else
                current->mm->pinned_vm = locked;
@@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem)
 {
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
+       struct task_struct *task;
        unsigned long diff;
 
        __ib_umem_release(umem->context->device, umem, 1);
 
-       mm = get_task_mm(current);
-       if (!mm) {
-               kfree(umem);
-               return;
-       }
+       task = get_pid_task(umem->pid, PIDTYPE_PID);
+       put_pid(umem->pid);
+       if (!task)
+               goto out;
+       mm = get_task_mm(task);
+       put_task_struct(task);
+       if (!mm)
+               goto out;
 
        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
@@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem)
        } else
                down_write(&mm->mmap_sem);
 
-       current->mm->pinned_vm -= diff;
+       mm->pinned_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
+out:
        kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
index e7bee46..abd9724 100644 (file)
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
        dst->packet_life_time   = src->packet_life_time;
        dst->preference         = src->preference;
        dst->packet_life_time_selector = src->packet_life_time_selector;
+
+       memset(dst->smac, 0, sizeof(dst->smac));
+       memset(dst->dmac, 0, sizeof(dst->dmac));
+       dst->vlan_id = 0xffff;
 }
 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
index dc66c45..1da1252 100644 (file)
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                                 struct page **p, struct vm_area_struct **vma)
+                                 struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
-                                    p + got, vma);
+                                    p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+       ret = __ipath_get_user_pages(start_page, num_pages, p);
 
        up_write(&current->mm->mmap_sem);
 
index af82563..bda5994 100644 (file)
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
        return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+       int ib_ports = 0;
+       int i;
+
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+               ib_ports++;
+
+       return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
+       int have_ib_ports;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
        memset(props, 0, sizeof *props);
 
+       have_ib_ports = num_ib_ports(dev->dev);
+
        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-       if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+       if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-       if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+       if (dev->dev->caps.max_gso_sz &&
+           (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+           (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (!ndev)
                goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
 out_unlock:
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
        if (!mqp->port)
                return 0;
 
-       spin_lock(&mdev->iboe.lock);
+       spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
-       spin_unlock(&mdev->iboe.lock);
+       spin_unlock_bh(&mdev->iboe.lock);
 
        if (ndev) {
                ret = 1;
@@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
-               spin_lock(&mdev->iboe.lock);
+               spin_lock_bh(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
-               spin_unlock(&mdev->iboe.lock);
+               spin_unlock_bh(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
@@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
 
+       if (!gw->dev->ib_active)
+               return;
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
 
+       if (!gw->dev->ib_active)
+               return;
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                pr_warn("reset gid table failed\n");
@@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                return 0;
 
        iboe = &ibdev->iboe;
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
 
        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
                if ((netif_is_bond_master(real_dev) &&
@@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                        update_gid_table(ibdev, port, gid,
                                         event == NETDEV_DOWN, 0);
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
        return 0;
 
 }
@@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
        new_smac = mlx4_mac_to_u64(dev->dev_addr);
        read_unlock(&dev_base_lock);
 
+       atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+       /* no need for update QP1 and mac registration in non-SRIOV */
+       if (!mlx4_is_mfunc(ibdev->dev))
+               return;
+
        mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
        qp = ibdev->qp1_proxy[port - 1];
        if (qp) {
                int new_smac_index;
-               u64 old_smac = qp->pri.smac;
+               u64 old_smac;
                struct mlx4_update_qp_params update_params;
 
+               mutex_lock(&qp->mutex);
+               old_smac = qp->pri.smac;
                if (new_smac == old_smac)
                        goto unlock;
 
@@ -1680,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
                        goto unlock;
 
                update_params.smac_index = new_smac_index;
-               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+               if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
                                   &update_params)) {
                        release_mac = new_smac;
                        goto unlock;
                }
-
+               /* if old port was zero, no mac was yet registered for this QP */
+               if (qp->pri.smac_port)
+                       release_mac = old_smac;
                qp->pri.smac = new_smac;
+               qp->pri.smac_port = port;
                qp->pri.smac_index = new_smac_index;
-
-               release_mac = old_smac;
        }
 
 unlock:
-       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
        if (release_mac != MLX4_IB_INVALID_MAC)
                mlx4_unregister_mac(ibdev->dev, port, release_mac);
+       if (qp)
+               mutex_unlock(&qp->mutex);
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
        struct inet6_dev *in6_dev;
        union ib_gid  *pgid;
        struct inet6_ifaddr *ifp;
+       union ib_gid default_gid;
 #endif
        union ib_gid gid;
 
@@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
                in_dev_put(in_dev);
        }
 #if IS_ENABLED(CONFIG_IPV6)
+       mlx4_make_default_gid(dev, &default_gid);
        /* IPv6 gids */
        in6_dev = in6_dev_get(dev);
        if (in6_dev) {
                read_lock_bh(&in6_dev->lock);
                list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                        pgid = (union ib_gid *)&ifp->addr;
+                       if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+                               continue;
                        update_gid_table(ibdev, port, pgid, 0, 0);
                }
                read_unlock_bh(&in6_dev->lock);
@@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        struct  net_device *dev;
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        int i;
+       int err = 0;
 
-       for (i = 1; i <= ibdev->num_ports; ++i)
-               if (reset_gid_table(ibdev, i))
-                       return -1;
+       for (i = 1; i <= ibdev->num_ports; ++i) {
+               if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+                   IB_LINK_LAYER_ETHERNET) {
+                       err = reset_gid_table(ibdev, i);
+                       if (err)
+                               goto out;
+               }
+       }
 
        read_lock(&dev_base_lock);
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
 
        for_each_netdev(&init_net, dev) {
                u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-               if (port)
+               /* port will be non-zero only for ETH ports */
+               if (port) {
+                       mlx4_ib_set_default_gid(ibdev, dev, port);
                        mlx4_ib_get_dev_addr(dev, ibdev, port);
+               }
        }
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
        read_unlock(&dev_base_lock);
-
-       return 0;
+out:
+       return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
        iboe = &ibdev->iboe;
 
-       spin_lock(&iboe->lock);
+       spin_lock_bh(&iboe->lock);
        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
                enum ib_port_state      port_state = IB_PORT_NOP;
                struct net_device *old_master = iboe->masters[port - 1];
@@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
                        mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               } else {
-                       reset_gid_table(ibdev, port);
-               }
-               /* if using bonding/team and a slave port is down, we don't the bond IP
-                * based gids in the table since flows that select port by gid may get
-                * the down port.
-                */
-               if (curr_master && (port_state == IB_PORT_DOWN)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-               }
-               /* if bonding is used it is possible that we add it to masters
-                * only after IP address is assigned to the net bonding
-                * interface.
-               */
-               if (curr_master && (old_master != curr_master)) {
-                       reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-               }
+                       if (curr_master) {
+                               /* if using bonding/team and a slave port is down, we
+                                * don't want the bond IP based gids in the table since
+                                * flows that select port by gid may get the down port.
+                               */
+                               if (port_state == IB_PORT_DOWN) {
+                                       reset_gid_table(ibdev, port);
+                                       mlx4_ib_set_default_gid(ibdev,
+                                                               curr_netdev,
+                                                               port);
+                               } else {
+                                       /* gids from the upper dev (bond/team)
+                                        * should appear in port's gid table
+                                       */
+                                       mlx4_ib_get_dev_addr(curr_master,
+                                                            ibdev, port);
+                               }
+                       }
+                       /* if bonding is used it is possible that we add it to
+                        * masters only after IP address is assigned to the
+                        * net bonding interface.
+                       */
+                       if (curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+                       }
 
-               if (!curr_master && (old_master != curr_master)) {
+                       if (!curr_master && (old_master != curr_master)) {
+                               reset_gid_table(ibdev, port);
+                               mlx4_ib_set_default_gid(ibdev,
+                                                       curr_netdev, port);
+                               mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+                       }
+               } else {
                        reset_gid_table(ibdev, port);
-                       mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-                       mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
                }
        }
 
-       spin_unlock(&iboe->lock);
+       spin_unlock_bh(&iboe->lock);
 
        if (update_qps_port > 0)
                mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        goto err_steer_free_bitmap;
        }
 
+       for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+               atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
        if (ib_register_device(&ibdev->ib_dev, NULL))
                goto err_steer_free_bitmap;
 
@@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        }
                }
 #endif
-               for (i = 1 ; i <= ibdev->num_ports ; ++i)
-                       reset_gid_table(ibdev, i);
-               rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-               rtnl_unlock();
-               mlx4_ib_init_gid_table(ibdev);
+               if (mlx4_ib_init_gid_table(ibdev))
+                       goto err_notif;
        }
 
        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;
 
+       ibdev->ib_active = false;
+       flush_workqueue(wq);
+
        mlx4_ib_close_sriov(ibdev);
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
index e8cad39..6eb743f 100644 (file)
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
        spinlock_t              lock;
        struct net_device      *netdevs[MLX4_MAX_PORTS];
        struct net_device      *masters[MLX4_MAX_PORTS];
+       atomic64_t              mac[MLX4_MAX_PORTS];
        struct notifier_block   nb;
        struct notifier_block   nb_inet;
        struct notifier_block   nb_inet6;
index 9b0e80e..8f9325c 100644 (file)
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
+                       /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = ilog2(mmr->umem->page_size);
 
-               mmr->mmr.iova       = virt_addr;
-               mmr->mmr.size       = length;
                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
+               mmr->mmr.iova       = virt_addr;
+               mmr->mmr.size       = length;
 
                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+       if (!err && flags & IB_MR_REREG_ACCESS)
+               mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
index efb9eff..9c5150c 100644 (file)
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                                   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
                        pr_warn("modify QP %06x to RESET failed.\n",
                               qp->mqp.qpn);
-               if (qp->pri.smac) {
+               if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
+                       qp->pri.smac_port = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                 * If one was already assigned, but the new mac differs,
                 * unregister the old one and register the new one.
                */
-               if (!smac_info->smac || smac_info->smac != smac) {
+               if ((!smac_info->smac && !smac_info->smac_port) ||
+                   smac_info->smac != smac) {
                        /* register candidate now, unreg if needed, after success */
                        smac_index = mlx4_register_mac(dev->dev, port, smac);
                        if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
                                    struct mlx4_qp_context *context)
 {
-       struct net_device *ndev;
        u64 u64_mac;
        int smac_index;
 
-
-       ndev = dev->iboe.netdevs[qp->port - 1];
-       if (ndev) {
-               smac = ndev->dev_addr;
-               u64_mac = mlx4_mac_to_u64(smac);
-       } else {
-               u64_mac = dev->dev->caps.def_mac[qp->port];
-       }
+       u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
        context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-       if (!qp->pri.smac) {
+       if (!qp->pri.smac && !qp->pri.smac_port) {
                smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
                if (smac_index >= 0) {
                        qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        int steer_qp = 0;
        int err = -EINVAL;
 
+       /* APM is not supported under RoCE */
+       if (attr_mask & IB_QP_ALT_PATH &&
+           rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+           IB_LINK_LAYER_ETHERNET)
+               return -ENOTSUPP;
+
        context = kzalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return -ENOMEM;
@@ -1682,7 +1682,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                        MLX4_IB_LINK_TYPE_ETH;
                if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
                        /* set QP to receive both tunneled & non-tunneled packets */
-                       if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+                       if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
                                context->srqn = cpu_to_be32(7 << 28);
                }
        }
@@ -1786,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                        if (qp->flags & MLX4_IB_QP_NETIF)
                                mlx4_ib_steer_qp_reg(dev, qp, 0);
                }
-               if (qp->pri.smac) {
+               if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
                        mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = 0;
+                       qp->pri.smac_port = 0;
                }
                if (qp->alt.smac) {
                        mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1812,11 +1813,12 @@ out:
        if (err && steer_qp)
                mlx4_ib_steer_qp_reg(dev, qp, 0);
        kfree(context);
-       if (qp->pri.candidate_smac) {
+       if (qp->pri.candidate_smac ||
+           (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
                if (err) {
                        mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
                } else {
-                       if (qp->pri.smac)
+                       if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
                                mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
                        qp->pri.smac = qp->pri.candidate_smac;
                        qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2089,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
        return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+       int i;
+
+       for (i = ETH_ALEN; i; i--) {
+               dst_mac[i - 1] = src_mac & 0xff;
+               src_mac >>= 8;
+       }
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                            void *wqe, unsigned *mlx_seg_len)
 {
@@ -2203,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        }
 
        if (is_eth) {
-               u8 *smac;
                struct in6_addr in6;
 
                u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2216,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
                memcpy(&in6, sgid.raw, sizeof(in6));
 
-               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-                       smac = to_mdev(sqp->qp.ibqp.device)->
-                               iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-               else    /* use the src mac of the tunnel */
-                       smac = ah->av.eth.s_mac;
-               memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                       u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+                       u8 smac[ETH_ALEN];
+
+                       mlx4_u64_to_smac(smac, mac);
+                       memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+               } else {
+                       /* use the src mac of the tunnel */
+                       memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+               }
+
                if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                        mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
                if (!is_vlan) {
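
The qp.c hunks above stop dereferencing the netdev on the send path: each port's MAC is cached as a u64 (updated from netdev events, read with atomic64_read()) and unpacked with mlx4_u64_to_smac(). Below is a minimal userspace sketch of that unpack step; the cached value is a hard-coded assumption and the helper name is illustrative.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Same byte-order logic as mlx4_u64_to_smac() in the hunk above: unpack a
 * MAC held in the low 48 bits of a u64 into a 6-byte array, most
 * significant byte first. */
static void u64_to_mac(uint8_t *dst, uint64_t src)
{
	int i;

	for (i = ETH_ALEN; i; i--) {
		dst[i - 1] = src & 0xff;
		src >>= 8;
	}
}

int main(void)
{
	/* Hypothetical cached value, as it would come from atomic64_read() */
	uint64_t cached = 0x001122334455ULL;
	uint8_t mac[ETH_ALEN];

	u64_to_mac(mac, cached);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

Running it prints 00:11:22:33:44:55, i.e. the most-significant-byte-first layout the driver copies into the UD header.
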
index 40f8536..ac02ce4 100644 (file)
@@ -38,7 +38,7 @@
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-                               struct ib_ah_attr *attr, int pdid)
+                       struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
 {
        int status = 0;
        u16 vlan_tag; bool vlan_enabled = false;
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        memset(&eth, 0, sizeof(eth));
        memset(&grh, 0, sizeof(grh));
 
-       ah->sgid_index = attr->grh.sgid_index;
-
+       /* VLAN */
        vlan_tag = attr->vlan_id;
        if (!vlan_tag || (vlan_tag > 0xFFF))
                vlan_tag = dev->pvid;
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                eth_sz = sizeof(struct ocrdma_eth_basic);
        }
+       /* MAC */
        memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-       memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
        status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
        if (status)
                return status;
-       status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
-                       (union ib_gid *)&grh.sgid[0]);
-       if (status)
-               return status;
+       ah->sgid_index = attr->grh.sgid_index;
+       memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
 
        grh.tclass_flow = cpu_to_be32((6 << 28) |
                        (attr->grh.traffic_class << 24) |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        /* 0x1b is next header value in GRH */
        grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
                        (0x1b << 8) | attr->grh.hop_limit);
-
-       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+       /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
        memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
        if (vlan_enabled)
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+       union ib_gid sgid;
+       u8 zmac[ETH_ALEN];
 
        if (!(attr->ah_flags & IB_AH_GRH))
                return ERR_PTR(-EINVAL);
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        status = ocrdma_alloc_av(dev, ah);
        if (status)
                goto av_err;
-       status = set_av_attr(dev, ah, attr, pd->id);
+
+       status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+       if (status) {
+               pr_err("%s(): Failed to query sgid, status = %d\n",
+                     __func__, status);
+               goto av_conf_err;
+       }
+
+       memset(&zmac, 0, ETH_ALEN);
+       if (pd->uctx &&
+           memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+               status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+                                        attr->dmac, &attr->vlan_id);
+               if (status) {
+                       pr_err("%s(): Failed to resolve dmac from gid. "
+                              "status = %d\n", __func__, status);
+                       goto av_conf_err;
+               }
+       }
+
+       status = set_av_attr(dev, ah, attr, &sgid, pd->id);
        if (status)
                goto av_conf_err;
 
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
        struct ocrdma_av *av = ah->av;
        struct ocrdma_grh *grh;
        attr->ah_flags |= IB_AH_GRH;
-       if (ah->av->valid & Bit(1)) {
+       if (ah->av->valid & OCRDMA_AV_VALID) {
                grh = (struct ocrdma_grh *)((u8 *)ah->av +
                                sizeof(struct ocrdma_eth_vlan));
                attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
index acb434d..8f5f257 100644 (file)
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
-       attr->max_fast_reg_page_list_len = 0;
+       attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
 }
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
        if (cq->first_arm) {
                ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
                cq->first_arm = false;
-               goto skip_defer;
        }
-       cq->deferred_arm = true;
 
-skip_defer:
+       cq->deferred_arm = true;
        cq->deferred_sol = sol_needed;
        spin_unlock_irqrestore(&cq->cq_lock, flags);
 
index 799a0c3..6abd3ed 100644 (file)
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
        struct qib_qp_iter *iter;
        loff_t n = *pos;
 
+       rcu_read_lock();
        iter = qib_qp_iter_init(s->private);
        if (!iter)
                return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
 {
-       /* nothing for now */
+       rcu_read_unlock();
 }
 
 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
index 7fcc150..6ddc026 100644 (file)
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
        struct qib_qp *pqp = iter->qp;
        struct qib_qp *qp;
 
-       rcu_read_lock();
        for (; n < dev->qp_table_size; n++) {
                if (pqp)
                        qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
                        qp = rcu_dereference(dev->qp_table[n]);
                pqp = qp;
                if (qp) {
-                       if (iter->qp)
-                               atomic_dec(&iter->qp->refcount);
-                       atomic_inc(&qp->refcount);
-                       rcu_read_unlock();
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
-       rcu_read_unlock();
-       if (iter->qp)
-               atomic_dec(&iter->qp->refcount);
        return ret;
 }
 
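
Taken together, the qib hunks above move rcu_read_lock()/rcu_read_unlock() into the seq_file ->start()/->stop() callbacks so ->next() can walk the QP table without per-entry reference counting. A toy userspace sketch of that "hold the read-side lock across the whole traversal" shape, with a pthread rwlock standing in for RCU and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

/* Take the read-side lock in start() and drop it in stop(), so next() can
 * walk shared state without per-step reference counting. */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

static int *iter_start(void)
{
	pthread_rwlock_rdlock(&table_lock);
	return &table[0];
}

static int *iter_next(int *pos)
{
	return (pos + 1 < table + 4) ? pos + 1 : NULL;
}

static void iter_stop(void)
{
	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	for (int *p = iter_start(); p; p = iter_next(p))
		printf("%d\n", *p);
	iter_stop();
	return 0;
}
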
index 2bc1d2b..74f90b2 100644 (file)
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
  * Call with current->mm->mmap_sem held.
  */
 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-                               struct page **p, struct vm_area_struct **vma)
+                               struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
-                                    p + got, vma);
+                                    p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+       ret = __qib_get_user_pages(start_page, num_pages, p);
 
        up_write(&current->mm->mmap_sem);
 
index 3edce61..d7562be 100644 (file)
@@ -131,6 +131,12 @@ struct ipoib_cb {
        u8                      hwaddr[INFINIBAND_ALEN];
 };
 
+static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+       return (struct ipoib_cb *)skb->cb;
+}
+
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
        struct ib_sa_mcmember_rec mcmember;
index 1310acf..13e6e04 100644 (file)
@@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
-       struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+       struct ipoib_cb *cb = ipoib_skb_cb(skb);
        struct ipoib_header *header;
        unsigned long flags;
 
@@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
                             const void *daddr, const void *saddr, unsigned len)
 {
        struct ipoib_header *header;
-       struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+       struct ipoib_cb *cb = ipoib_skb_cb(skb);
 
        header = (struct ipoib_header *) skb_push(skb, sizeof *header);
 
index d4e0057..ffb83b5 100644 (file)
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
                          port_attr.state);
                return;
        }
+       priv->local_lid = port_attr.lid;
 
        if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
                ipoib_warn(priv, "ib_query_gid() failed\n");
        else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-       {
-               struct ib_port_attr attr;
-
-               if (!ib_query_port(priv->ca, priv->port, &attr))
-                       priv->local_lid = attr.lid;
-               else
-                       ipoib_warn(priv, "ib_query_port failed\n");
-       }
-
        if (!priv->broadcast) {
                struct ipoib_mcast *broadcast;
 
index 61ee91d..93ce62f 100644 (file)
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
                     int is_leading)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
-       struct iscsi_session *session;
        struct iser_conn *ib_conn;
        struct iscsi_endpoint *ep;
        int error;
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        }
        ib_conn = ep->dd_data;
 
-       session = conn->session;
-       if (iser_alloc_rx_descriptors(ib_conn, session))
-               return -ENOMEM;
+       mutex_lock(&ib_conn->state_mutex);
+       if (ib_conn->state != ISER_CONN_UP) {
+               error = -EINVAL;
+               iser_err("iser_conn %p state is %d, teardown started\n",
+                        ib_conn, ib_conn->state);
+               goto out;
+       }
+
+       error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+       if (error)
+               goto out;
 
        /* binds the iSER connection retrieved from the previously
         * connected ep_handle to the iSCSI layer connection. exchanges
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        conn->dd_data = ib_conn;
        ib_conn->iscsi_conn = conn;
 
-       return 0;
+out:
+       mutex_unlock(&ib_conn->state_mutex);
+       return error;
 }
 
 static int
index c877dad..9f0e0e3 100644 (file)
@@ -69,7 +69,7 @@
 
 #define DRV_NAME       "iser"
 #define PFX            DRV_NAME ": "
-#define DRV_VER                "1.4"
+#define DRV_VER                "1.4.1"
 
 #define iser_dbg(fmt, arg...)                          \
        do {                                            \
index 3ef167f..3bfec4b 100644 (file)
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
        struct iser_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr = &device->dev_attr;
-       int ret, i, j;
+       int ret, i;
 
        ret = ib_query_device(device->ib_device, dev_attr);
        if (ret) {
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device)
                                          iser_cq_event_callback,
                                          (void *)&cq_desc[i],
                                          ISER_MAX_RX_CQ_LEN, i);
-               if (IS_ERR(device->rx_cq[i]))
+               if (IS_ERR(device->rx_cq[i])) {
+                       device->rx_cq[i] = NULL;
                        goto cq_err;
+               }
 
                device->tx_cq[i] = ib_create_cq(device->ib_device,
                                          NULL, iser_cq_event_callback,
                                          (void *)&cq_desc[i],
                                          ISER_MAX_TX_CQ_LEN, i);
 
-               if (IS_ERR(device->tx_cq[i]))
+               if (IS_ERR(device->tx_cq[i])) {
+                       device->tx_cq[i] = NULL;
                        goto cq_err;
+               }
 
                if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
                        goto cq_err;
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
        ib_dereg_mr(device->mr);
 dma_mr_err:
-       for (j = 0; j < device->cqs_used; j++)
-               tasklet_kill(&device->cq_tasklet[j]);
+       for (i = 0; i < device->cqs_used; i++)
+               tasklet_kill(&device->cq_tasklet[i]);
 cq_err:
-       for (j = 0; j < i; j++) {
-               if (device->tx_cq[j])
-                       ib_destroy_cq(device->tx_cq[j]);
-               if (device->rx_cq[j])
-                       ib_destroy_cq(device->rx_cq[j]);
+       for (i = 0; i < device->cqs_used; i++) {
+               if (device->tx_cq[i])
+                       ib_destroy_cq(device->tx_cq[i]);
+               if (device->rx_cq[i])
+                       ib_destroy_cq(device->rx_cq[i]);
        }
        ib_dealloc_pd(device->pd);
 pd_err:
index 713e3dd..40b7d6c 100644 (file)
@@ -465,6 +465,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
                },
        },
+       {
+               /* Asus X450LCP */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+               },
+       },
        {
                /* Avatar AVIU-145A6 */
                .matches = {
index 2f4f45d..505a9ad 100644 (file)
@@ -87,6 +87,27 @@ int amd_iommu_max_glx_val = -1;
 
 static struct dma_map_ops amd_iommu_dma_ops;
 
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+       struct list_head list;            /* For domain->dev_list */
+       struct list_head dev_data_list;   /* For global dev_data_list */
+       struct list_head alias_list;      /* Link alias-groups together */
+       struct iommu_dev_data *alias_data;/* The alias dev_data */
+       struct protection_domain *domain; /* Domain the device is bound to */
+       u16 devid;                        /* PCI Device ID */
+       bool iommu_v2;                    /* Device can make use of IOMMUv2 */
+       bool passthrough;                 /* Default for device is pt_domain */
+       struct {
+               bool enabled;
+               int qdep;
+       } ats;                            /* ATS state */
+       bool pri_tlp;                     /* PASID TLB required for
+                                            PPR completions */
+       u32 errata;                       /* Bitmap for errata to apply */
+};
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
        if (!dev_data)
                return NULL;
 
+       INIT_LIST_HEAD(&dev_data->alias_list);
+
        dev_data->devid = devid;
-       atomic_set(&dev_data->bind, 0);
 
        spin_lock_irqsave(&dev_data_list_lock, flags);
        list_add_tail(&dev_data->dev_data_list, &dev_data_list);
@@ -357,6 +379,9 @@ static int iommu_init_device(struct device *dev)
                        return -ENOTSUPP;
                }
                dev_data->alias_data = alias_data;
+
+               /* Add device to the alias_list */
+               list_add(&dev_data->alias_list, &alias_data->alias_list);
        }
 
        if (pci_iommuv2_capable(pdev)) {
@@ -1366,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom,
        count     = PAGE_SIZE_PTE_COUNT(page_size);
        pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
 
+       if (!pte)
+               return -ENOMEM;
+
        for (i = 0; i < count; ++i)
                if (IOMMU_PTE_PRESENT(pte[i]))
                        return -EBUSY;
@@ -2120,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data)
 static int __attach_device(struct iommu_dev_data *dev_data,
                           struct protection_domain *domain)
 {
+       struct iommu_dev_data *head, *entry;
        int ret;
 
        /* lock domain */
        spin_lock(&domain->lock);
 
-       if (dev_data->alias_data != NULL) {
-               struct iommu_dev_data *alias_data = dev_data->alias_data;
-
-               /* Some sanity checks */
-               ret = -EBUSY;
-               if (alias_data->domain != NULL &&
-                               alias_data->domain != domain)
-                       goto out_unlock;
+       head = dev_data;
 
-               if (dev_data->domain != NULL &&
-                               dev_data->domain != domain)
-                       goto out_unlock;
+       if (head->alias_data != NULL)
+               head = head->alias_data;
 
-               /* Do real assignment */
-               if (alias_data->domain == NULL)
-                       do_attach(alias_data, domain);
+       /* Now we have the root of the alias group, if any */
 
-               atomic_inc(&alias_data->bind);
-       }
+       ret = -EBUSY;
+       if (head->domain != NULL)
+               goto out_unlock;
 
-       if (dev_data->domain == NULL)
-               do_attach(dev_data, domain);
+       /* Attach alias group root */
+       do_attach(head, domain);
 
-       atomic_inc(&dev_data->bind);
+       /* Attach other devices in the alias group */
+       list_for_each_entry(entry, &head->alias_list, alias_list)
+               do_attach(entry, domain);
 
        ret = 0;
 
@@ -2296,6 +2318,7 @@ static int attach_device(struct device *dev,
  */
 static void __detach_device(struct iommu_dev_data *dev_data)
 {
+       struct iommu_dev_data *head, *entry;
        struct protection_domain *domain;
        unsigned long flags;
 
@@ -2305,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 
        spin_lock_irqsave(&domain->lock, flags);
 
-       if (dev_data->alias_data != NULL) {
-               struct iommu_dev_data *alias_data = dev_data->alias_data;
+       head = dev_data;
+       if (head->alias_data != NULL)
+               head = head->alias_data;
 
-               if (atomic_dec_and_test(&alias_data->bind))
-                       do_detach(alias_data);
-       }
+       list_for_each_entry(entry, &head->alias_list, alias_list)
+               do_detach(entry);
 
-       if (atomic_dec_and_test(&dev_data->bind))
-               do_detach(dev_data);
+       do_detach(head);
 
        spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -3157,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain)
                entry = list_first_entry(&domain->dev_list,
                                         struct iommu_dev_data, list);
                __detach_device(entry);
-               atomic_set(&entry->bind, 0);
        }
 
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
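
The amd_iommu.c rework above drops the per-device bind refcount and instead links every dev_data of a PCI alias group onto the root's alias_list, so attach and detach walk the whole group at once. A simplified userspace sketch of that "resolve the group root, then touch every member" pattern; the structures and the singly linked list are hypothetical stand-ins for the kernel's list_head machinery:

#include <stdio.h>
#include <stddef.h>

/* Each device either is the group root or points at it via alias_root, and
 * the root keeps a list of the other group members (names hypothetical). */
struct dev_data {
	const char      *name;
	struct dev_data *alias_root;   /* NULL if this device is the root */
	struct dev_data *next_member;  /* next device in the root's group */
	int              domain;       /* 0 = detached */
};

static void attach_group(struct dev_data *dev, int domain)
{
	struct dev_data *root = dev->alias_root ? dev->alias_root : dev;
	struct dev_data *m;

	root->domain = domain;                 /* attach the group root   */
	for (m = root->next_member; m; m = m->next_member)
		m->domain = domain;            /* then every other member */
}

int main(void)
{
	struct dev_data gpu   = { "00:02.0", NULL, NULL, 0 };
	struct dev_data alias = { "00:02.1", &gpu, NULL, 0 };

	gpu.next_member = &alias;
	attach_group(&alias, 7);
	printf("%s -> %d, %s -> %d\n", gpu.name, gpu.domain,
	       alias.name, alias.domain);
	return 0;
}
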
index 3783e0b..b0522f1 100644 (file)
@@ -712,7 +712,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
        set_iommu_for_device(iommu, devid);
 }
 
-static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
+static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 {
        struct devid_map *entry;
        struct list_head *list;
@@ -731,6 +731,8 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
                pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
 
+               *devid = entry->devid;
+
                return 0;
        }
 
@@ -739,7 +741,7 @@ static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
                return -ENOMEM;
 
        entry->id       = id;
-       entry->devid    = devid;
+       entry->devid    = *devid;
        entry->cmd_line = cmd_line;
 
        list_add_tail(&entry->list, list);
@@ -754,7 +756,7 @@ static int __init add_early_maps(void)
        for (i = 0; i < early_ioapic_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
                                         early_ioapic_map[i].id,
-                                        early_ioapic_map[i].devid,
+                                        &early_ioapic_map[i].devid,
                                         early_ioapic_map[i].cmd_line);
                if (ret)
                        return ret;
@@ -763,7 +765,7 @@ static int __init add_early_maps(void)
        for (i = 0; i < early_hpet_map_size; ++i) {
                ret = add_special_device(IVHD_SPECIAL_HPET,
                                         early_hpet_map[i].id,
-                                        early_hpet_map[i].devid,
+                                        &early_hpet_map[i].devid,
                                         early_hpet_map[i].cmd_line);
                if (ret)
                        return ret;
@@ -978,10 +980,17 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));
 
-                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-                       ret = add_special_device(type, handle, devid, false);
+                       ret = add_special_device(type, handle, &devid, false);
                        if (ret)
                                return ret;
+
+                       /*
+                        * add_special_device might update the devid in case a
+                        * command-line override is present. So call
+                        * set_dev_entry_from_acpi after add_special_device.
+                        */
+                       set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
                        break;
                }
                default:
index 8e43b7c..cec51a8 100644 (file)
@@ -417,27 +417,6 @@ struct protection_domain {
 
 };
 
-/*
- * This struct contains device specific data for the IOMMU
- */
-struct iommu_dev_data {
-       struct list_head list;            /* For domain->dev_list */
-       struct list_head dev_data_list;   /* For global dev_data_list */
-       struct iommu_dev_data *alias_data;/* The alias dev_data */
-       struct protection_domain *domain; /* Domain the device is bound to */
-       atomic_t bind;                    /* Domain attach reference count */
-       u16 devid;                        /* PCI Device ID */
-       bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-       bool passthrough;                 /* Default for device is pt_domain */
-       struct {
-               bool enabled;
-               int qdep;
-       } ats;                            /* ATS state */
-       bool pri_tlp;                     /* PASID TLB required for
-                                            PPR completions */
-       u32 errata;                       /* Bitmap for errata to apply */
-};
-
 /*
  * For dynamic growth the aperture size is split into ranges of 128MB of
  * DMA address space each. This struct represents one such range.
index ce39b12..60558f7 100644 (file)
@@ -24,7 +24,7 @@
  *     - v7/v8 long-descriptor format
  *     - Non-secure access to the SMMU
  *     - 4k and 64k pages, with contiguous pte hints.
- *     - Up to 42-bit addressing (dependent on VA_BITS)
+ *     - Up to 48-bit addressing (dependent on VA_BITS)
  *     - Context fault reporting
  */
 
@@ -59,7 +59,7 @@
 
 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)             ((smmu)->base)
-#define ARM_SMMU_GR1(smmu)             ((smmu)->base + (smmu)->pagesize)
+#define ARM_SMMU_GR1(smmu)             ((smmu)->base + (1 << (smmu)->pgshift))
 
 /*
  * SMMU global address space with conditional offset to access secure
 
 /* Translation context bank */
 #define ARM_SMMU_CB_BASE(smmu)         ((smmu)->base + ((smmu)->size >> 1))
-#define ARM_SMMU_CB(smmu, n)           ((n) * (smmu)->pagesize)
+#define ARM_SMMU_CB(smmu, n)           ((n) * (1 << (smmu)->pgshift))
 
 #define ARM_SMMU_CB_SCTLR              0x0
 #define ARM_SMMU_CB_RESUME             0x8
 
 #define FSYNR0_WNR                     (1 << 4)
 
+static int force_stage;
+module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(force_stage,
+       "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+
+enum arm_smmu_arch_version {
+       ARM_SMMU_V1 = 1,
+       ARM_SMMU_V2,
+};
+
 struct arm_smmu_smr {
        u8                              idx;
        u16                             mask;
@@ -349,7 +359,7 @@ struct arm_smmu_device {
 
        void __iomem                    *base;
        unsigned long                   size;
-       unsigned long                   pagesize;
+       unsigned long                   pgshift;
 
 #define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
 #define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
@@ -360,7 +370,7 @@ struct arm_smmu_device {
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
        u32                             options;
-       int                             version;
+       enum arm_smmu_arch_version      version;
 
        u32                             num_context_banks;
        u32                             num_s2_context_banks;
@@ -370,8 +380,9 @@ struct arm_smmu_device {
        u32                             num_mapping_groups;
        DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
 
-       unsigned long                   input_size;
+       unsigned long                   s1_input_size;
        unsigned long                   s1_output_size;
+       unsigned long                   s2_input_size;
        unsigned long                   s2_output_size;
 
        u32                             num_global_irqs;
@@ -426,17 +437,17 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
        } while (arm_smmu_options[++i].opt);
 }
 
-static struct device *dev_get_master_dev(struct device *dev)
+static struct device_node *dev_get_dev_node(struct device *dev)
 {
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
 
                while (!pci_is_root_bus(bus))
                        bus = bus->parent;
-               return bus->bridge->parent;
+               return bus->bridge->parent->of_node;
        }
 
-       return dev;
+       return dev->of_node;
 }
 
 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
@@ -461,15 +472,17 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
 }
 
 static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev)
+find_smmu_master_cfg(struct device *dev)
 {
-       struct arm_smmu_master *master;
+       struct arm_smmu_master_cfg *cfg = NULL;
+       struct iommu_group *group = iommu_group_get(dev);
 
-       if (dev_is_pci(dev))
-               return dev->archdata.iommu;
+       if (group) {
+               cfg = iommu_group_get_iommudata(group);
+               iommu_group_put(group);
+       }
 
-       master = find_smmu_master(smmu, dev->of_node);
-       return master ? &master->cfg : NULL;
+       return cfg;
 }
 
 static int insert_smmu_master(struct arm_smmu_device *smmu,
@@ -545,7 +558,7 @@ static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
 {
        struct arm_smmu_device *smmu;
        struct arm_smmu_master *master = NULL;
-       struct device_node *dev_node = dev_get_master_dev(dev)->of_node;
+       struct device_node *dev_node = dev_get_dev_node(dev);
 
        spin_lock(&arm_smmu_devices_lock);
        list_for_each_entry(smmu, &arm_smmu_devices, list) {
@@ -729,7 +742,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 
        /* CBAR */
        reg = cfg->cbar;
-       if (smmu->version == 1)
+       if (smmu->version == ARM_SMMU_V1)
                reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
        /*
@@ -744,7 +757,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
-       if (smmu->version > 1) {
+       if (smmu->version > ARM_SMMU_V1) {
                /* CBA2R */
 #ifdef CONFIG_64BIT
                reg = CBA2R_RW64_64BIT;
@@ -755,7 +768,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                               gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 
                /* TTBCR2 */
-               switch (smmu->input_size) {
+               switch (smmu->s1_input_size) {
                case 32:
                        reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
                        break;
@@ -817,14 +830,14 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
         * TTBCR
         * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
         */
-       if (smmu->version > 1) {
+       if (smmu->version > ARM_SMMU_V1) {
                if (PAGE_SIZE == SZ_4K)
                        reg = TTBCR_TG0_4K;
                else
                        reg = TTBCR_TG0_64K;
 
                if (!stage1) {
-                       reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+                       reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
 
                        switch (smmu->s2_output_size) {
                        case 32:
@@ -847,7 +860,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                                break;
                        }
                } else {
-                       reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT;
+                       reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
                }
        } else {
                reg = 0;
@@ -914,7 +927,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                goto out_unlock;
 
        cfg->cbndx = ret;
-       if (smmu->version == 1) {
+       if (smmu->version == ARM_SMMU_V1) {
                cfg->irptndx = atomic_inc_return(&smmu->irptndx);
                cfg->irptndx %= smmu->num_context_irqs;
        } else {
@@ -1151,9 +1164,10 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
+       /* Devices in an IOMMU group may already be configured */
        ret = arm_smmu_master_configure_smrs(smmu, cfg);
        if (ret)
-               return ret;
+               return ret == -EEXIST ? 0 : ret;
 
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx, s2cr;
@@ -1174,6 +1188,10 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
+       /* An IOMMU group is torn down by the first device to be removed */
+       if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
+               return;
+
        /*
         * We *must* clear the S2CR first, because freeing the SMR means
         * that it can be re-allocated immediately.
@@ -1195,12 +1213,17 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        struct arm_smmu_device *smmu, *dom_smmu;
        struct arm_smmu_master_cfg *cfg;
 
-       smmu = dev_get_master_dev(dev)->archdata.iommu;
+       smmu = find_smmu_for_device(dev);
        if (!smmu) {
                dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
                return -ENXIO;
        }
 
+       if (dev->archdata.iommu) {
+               dev_err(dev, "already attached to IOMMU domain\n");
+               return -EEXIST;
+       }
+
        /*
         * Sanity check the domain. We don't support domains across
         * different SMMUs.
@@ -1223,11 +1246,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        }
 
        /* Looks ok, so add the device to the domain */
-       cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
+       cfg = find_smmu_master_cfg(dev);
        if (!cfg)
                return -ENODEV;
 
-       return arm_smmu_domain_add_master(smmu_domain, cfg);
+       ret = arm_smmu_domain_add_master(smmu_domain, cfg);
+       if (!ret)
+               dev->archdata.iommu = domain;
+       return ret;
 }
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1235,9 +1261,12 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
        struct arm_smmu_domain *smmu_domain = domain->priv;
        struct arm_smmu_master_cfg *cfg;
 
-       cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
-       if (cfg)
-               arm_smmu_domain_remove_master(smmu_domain, cfg);
+       cfg = find_smmu_master_cfg(dev);
+       if (!cfg)
+               return;
+
+       dev->archdata.iommu = NULL;
+       arm_smmu_domain_remove_master(smmu_domain, cfg);
 }
 
 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
@@ -1379,6 +1408,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
                ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
                                              prot, stage);
                phys += next - addr;
+               pfn = __phys_to_pfn(phys);
        } while (pmd++, addr = next, addr < end);
 
        return ret;
@@ -1431,9 +1461,11 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 
        if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
                stage = 2;
+               input_mask = (1ULL << smmu->s2_input_size) - 1;
                output_mask = (1ULL << smmu->s2_output_size) - 1;
        } else {
                stage = 1;
+               input_mask = (1ULL << smmu->s1_input_size) - 1;
                output_mask = (1ULL << smmu->s1_output_size) - 1;
        }
 
@@ -1443,7 +1475,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
        if (size & ~PAGE_MASK)
                return -EINVAL;
 
-       input_mask = (1ULL << smmu->input_size) - 1;
        if ((phys_addr_t)iova & ~input_mask)
                return -ERANGE;
 
@@ -1548,17 +1579,19 @@ static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
        return 0; /* Continue walking */
 }
 
+static void __arm_smmu_release_pci_iommudata(void *data)
+{
+       kfree(data);
+}
+
 static int arm_smmu_add_device(struct device *dev)
 {
        struct arm_smmu_device *smmu;
+       struct arm_smmu_master_cfg *cfg;
        struct iommu_group *group;
+       void (*releasefn)(void *) = NULL;
        int ret;
 
-       if (dev->archdata.iommu) {
-               dev_warn(dev, "IOMMU driver already assigned to device\n");
-               return -EINVAL;
-       }
-
        smmu = find_smmu_for_device(dev);
        if (!smmu)
                return -ENODEV;
@@ -1570,7 +1603,6 @@ static int arm_smmu_add_device(struct device *dev)
        }
 
        if (dev_is_pci(dev)) {
-               struct arm_smmu_master_cfg *cfg;
                struct pci_dev *pdev = to_pci_dev(dev);
 
                cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
@@ -1586,11 +1618,20 @@ static int arm_smmu_add_device(struct device *dev)
                 */
                pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
                                       &cfg->streamids[0]);
-               dev->archdata.iommu = cfg;
+               releasefn = __arm_smmu_release_pci_iommudata;
        } else {
-               dev->archdata.iommu = smmu;
+               struct arm_smmu_master *master;
+
+               master = find_smmu_master(smmu, dev->of_node);
+               if (!master) {
+                       ret = -ENODEV;
+                       goto out_put_group;
+               }
+
+               cfg = &master->cfg;
        }
 
+       iommu_group_set_iommudata(group, cfg, releasefn);
        ret = iommu_group_add_device(group, dev);
 
 out_put_group:
@@ -1600,10 +1641,6 @@ out_put_group:
 
 static void arm_smmu_remove_device(struct device *dev)
 {
-       if (dev_is_pci(dev))
-               kfree(dev->archdata.iommu);
-
-       dev->archdata.iommu = NULL;
        iommu_group_remove_device(dev);
 }
 
@@ -1701,10 +1738,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        u32 id;
 
        dev_notice(smmu->dev, "probing hardware configuration...\n");
-
-       /* Primecell ID */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
-       smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
        dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
 
        /* ID0 */
@@ -1715,6 +1748,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENODEV;
        }
 #endif
+
+       /* Restrict available stages based on module parameter */
+       if (force_stage == 1)
+               id &= ~(ID0_S2TS | ID0_NTS);
+       else if (force_stage == 2)
+               id &= ~(ID0_S1TS | ID0_NTS);
+
        if (id & ID0_S1TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
                dev_notice(smmu->dev, "\tstage 1 translation\n");
@@ -1731,8 +1771,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        }
 
        if (!(smmu->features &
-               (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
-                ARM_SMMU_FEAT_TRANS_NESTED))) {
+               (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
                dev_err(smmu->dev, "\tno translation support!\n");
                return -ENODEV;
        }
@@ -1778,12 +1817,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
        /* ID1 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
-       smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+       smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
 
        /* Check for size mismatch of SMMU address space from mapped region */
        size = 1 <<
                (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
-       size *= (smmu->pagesize << 1);
+       size *= 2 << smmu->pgshift;
        if (smmu->size != size)
                dev_warn(smmu->dev,
                        "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
@@ -1802,28 +1841,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        /* ID2 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
        size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+       smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
 
-       /*
-        * Stage-1 output limited by stage-2 input size due to pgd
-        * allocation (PTRS_PER_PGD).
-        */
-       if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+       /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
 #ifdef CONFIG_64BIT
-               smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+       smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
 #else
-               smmu->s1_output_size = min(32UL, size);
+       smmu->s2_input_size = min(32UL, size);
 #endif
-       } else {
-               smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
-                                            size);
-       }
 
        /* The stage-2 output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
        smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
 
-       if (smmu->version == 1) {
-               smmu->input_size = 32;
+       if (smmu->version == ARM_SMMU_V1) {
+               smmu->s1_input_size = 32;
        } else {
 #ifdef CONFIG_64BIT
                size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
@@ -1831,7 +1863,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 #else
                size = 32;
 #endif
-               smmu->input_size = size;
+               smmu->s1_input_size = size;
 
                if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
                    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
@@ -1842,15 +1874,30 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                }
        }
 
-       dev_notice(smmu->dev,
-                  "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
-                  smmu->input_size, smmu->s1_output_size,
-                  smmu->s2_output_size);
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+               dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+                          smmu->s1_input_size, smmu->s1_output_size);
+
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
+               dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+                          smmu->s2_input_size, smmu->s2_output_size);
+
        return 0;
 }
 
+static const struct of_device_id arm_smmu_of_match[] = {
+       { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
+       { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
+       { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
+       { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
+       { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+       { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+
 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *of_id;
        struct resource *res;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
@@ -1865,6 +1912,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        }
        smmu->dev = dev;
 
+       of_id = of_match_node(arm_smmu_of_match, dev->of_node);
+       smmu->version = (enum arm_smmu_arch_version)of_id->data;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
@@ -1929,7 +1979,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 
        parse_driver_options(smmu);
 
-       if (smmu->version > 1 &&
+       if (smmu->version > ARM_SMMU_V1 &&
            smmu->num_context_banks != smmu->num_context_irqs) {
                dev_err(dev,
                        "found only %d context interrupt(s) but %d required\n",
@@ -2010,17 +2060,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id arm_smmu_of_match[] = {
-       { .compatible = "arm,smmu-v1", },
-       { .compatible = "arm,smmu-v2", },
-       { .compatible = "arm,mmu-400", },
-       { .compatible = "arm,mmu-500", },
-       { },
-};
-MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-#endif
-
 static struct platform_driver arm_smmu_driver = {
        .driver = {
                .owner          = THIS_MODULE,
index 06d268a..c5c61ca 100644 (file)
@@ -155,6 +155,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
+                       info->path[level].bus = tmp->bus->number;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
@@ -177,17 +178,33 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
        int i;
 
        if (info->bus != bus)
-               return false;
+               goto fallback;
        if (info->level != count)
-               return false;
+               goto fallback;
 
        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
-                       return false;
+                       goto fallback;
        }
 
        return true;
+
+fallback:
+
+       if (count != 1)
+               return false;
+
+       i = info->level - 1;
+       if (bus              == info->path[i].bus &&
+           path[0].device   == info->path[i].device &&
+           path[0].function == info->path[i].function) {
+               pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
+                       bus, path[0].device, path[0].function);
+               return true;
+       }
+
+       return false;
 }
 
 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
@@ -247,7 +264,7 @@ int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
 
        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
-                       rcu_assign_pointer(devices[index].dev, NULL);
+                       RCU_INIT_POINTER(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
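
The dmar_match_pci_path() fallback above accepts a single-step device scope when it matches the last hop of the recorded path, working around firmware that describes an RMRR endpoint without its full bridge path. A toy userspace sketch of that exact-match-then-fallback comparison; structures and sample values are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct path_step { int bus, device, function; };

/* If an exact path match fails and the scope describes a single step,
 * compare that step against the last hop of the stored path. */
static bool match_with_fallback(const struct path_step *stored, int stored_len,
				const struct path_step *scope, int scope_len)
{
	if (scope_len == stored_len) {
		bool ok = true;

		for (int i = 0; i < scope_len; i++)
			if (scope[i].bus != stored[i].bus ||
			    scope[i].device != stored[i].device ||
			    scope[i].function != stored[i].function)
				ok = false;
		if (ok)
			return true;
	}

	if (scope_len != 1)
		return false;

	const struct path_step *last = &stored[stored_len - 1];

	return scope[0].bus == last->bus &&
	       scope[0].device == last->device &&
	       scope[0].function == last->function;
}

int main(void)
{
	struct path_step stored[] = { { 0, 28, 0 }, { 2, 0, 0 } };
	struct path_step scope[]  = { { 2, 0, 0 } };

	printf("%s\n", match_with_fallback(stored, 2, scope, 1) ?
	       "matched via fallback" : "no match");
	return 0;
}
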
index d037e87..7423318 100644 (file)
@@ -32,7 +32,7 @@
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
 
-/* We does not consider super section mapping (16MB) */
+/* We do not consider super section mapping (16MB) */
 #define SECT_ORDER 20
 #define LPAGE_ORDER 16
 #define SPAGE_ORDER 12
@@ -307,7 +307,7 @@ static void show_fault_information(const char *name,
 
 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 {
-       /* SYSMMU is in blocked when interrupt occurred. */
+       /* SYSMMU is in blocked state when interrupt occurred. */
        struct sysmmu_drvdata *data = dev_id;
        enum exynos_sysmmu_inttype itype;
        sysmmu_iova_t addr = -1;
@@ -567,8 +567,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
                /*
                 * L2TLB invalidation required
                 * 4KB page: 1 invalidation
-                * 64KB page: 16 invalidation
-                * 1MB page: 64 invalidation
+                * 64KB page: 16 invalidations
+                * 1MB page: 64 invalidations
                 * because it is set-associative TLB
                 * with 8-way and 64 sets.
                 * 1MB page can be cached in one of all sets.
@@ -714,7 +714,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
        if (!priv->lv2entcnt)
                goto err_counter;
 
-       /* w/a of System MMU v3.3 to prevent caching 1MiB mapping */
+       /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                priv->pgtable[i + 0] = ZERO_LV2LINK;
                priv->pgtable[i + 1] = ZERO_LV2LINK;
@@ -861,14 +861,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
                pgtable_flush(sent, sent + 1);
 
                /*
-                * If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache
-                * may caches the address of zero_l2_table. This function
-                * replaces the zero_l2_table with new L2 page table to write
-                * valid mappings.
+                * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
+                * FLPD cache may cache the address of zero_l2_table. This
+                * function replaces the zero_l2_table with new L2 page table
+                * to write valid mappings.
                 * Accessing the valid area may cause page fault since FLPD
-                * cache may still caches zero_l2_table for the valid area
-                * instead of new L2 page table that have the mapping
-                * information of the valid area
+                * cache may still cache zero_l2_table for the valid area
+                * instead of new L2 page table that has the mapping
+                * information of the valid area.
                 * Thus any replacement of zero_l2_table with other valid L2
                 * page table must involve FLPD cache invalidation for System
                 * MMU v3.3.
@@ -963,27 +963,27 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 /*
  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
  *
- * System MMU v3.x have an advanced logic to improve address translation
+ * System MMU v3.x has advanced logic to improve address translation
  * performance with caching more page table entries by a page table walk.
- * However, the logic has a bug that caching fault page table entries and System
- * MMU reports page fault if the cached fault entry is hit even though the fault
- * entry is updated to a valid entry after the entry is cached.
- * To prevent caching fault page table entries which may be updated to valid
- * entries later, the virtual memory manager should care about the w/a about the
- * problem. The followings describe w/a.
+ * However, the logic has a bug that while caching faulty page table entries,
+ * System MMU reports page fault if the cached fault entry is hit even though
+ * the fault entry is updated to a valid entry after the entry is cached.
+ * To prevent caching faulty page table entries which may be updated to valid
+ * entries later, the virtual memory manager should care about the workaround
+ * for the problem. The following describes the workaround.
  *
  * Any two consecutive I/O virtual address regions must have a hole of 128KiB
- * in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug)
+ * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
  *
- * Precisely, any start address of I/O virtual region must be aligned by
+ * Precisely, any start address of I/O virtual region must be aligned with
  * the following sizes for System MMU v3.1 and v3.2.
  * System MMU v3.1: 128KiB
  * System MMU v3.2: 256KiB
  *
  * Because System MMU v3.3 caches page table entries more aggressively, it needs
- * more w/a.
- * - Any two consecutive I/O virtual regions must be have a hole of larger size
- *   than or equal size to 128KiB.
+ * more workarounds.
+ * - Any two consecutive I/O virtual regions must have a hole of size larger
+ *   than or equal to 128KiB.
  * - Start address of an I/O virtual region must be aligned by 128KiB.
  */
 static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
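
A hedged sketch of how an IOVA allocator sitting on top of this driver could honour the System MMU v3.3 constraints spelled out above (hypothetical helper, not part of the driver; v3.1/v3.2 would use their own 128KiB/256KiB start alignment instead):

#include <linux/kernel.h>
#include <linux/sizes.h>

#define EXYNOS_IOVA_ALIGN       SZ_128K         /* start alignment, v3.3 */
#define EXYNOS_IOVA_GUARD       SZ_128K         /* minimum hole between regions */

/* Pick the start of the next I/O virtual region given where the previous
 * one ended: leave the guard hole, then round up to the alignment.
 */
static unsigned long exynos_next_region_start(unsigned long prev_end)
{
        return ALIGN(prev_end + EXYNOS_IOVA_GUARD, EXYNOS_IOVA_ALIGN);
}
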
@@ -1061,7 +1061,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                        goto err;
                }
 
-               *ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */
+               /* workaround for h/w bug in System MMU v3.3 */
+               *ent = ZERO_LV2LINK;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
index bc1a203..a27d6cb 100644 (file)
@@ -3865,8 +3865,7 @@ static int device_notifier(struct notifier_block *nb,
        if (iommu_dummy(dev))
                return 0;
 
-       if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
-           action != BUS_NOTIFY_DEL_DEVICE)
+       if (action != BUS_NOTIFY_REMOVED_DEVICE)
                return 0;
 
        /*
index ef5e5dd..7c80661 100644 (file)
@@ -438,8 +438,7 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
 
        /* Set interrupt-remapping table pointer */
-       iommu->gcmd |= DMA_GCMD_SIRTP;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+       writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
 
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
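
The point of OR-ing DMA_GCMD_SIRTP into a single register write, rather than into iommu->gcmd, seems to be that the cached gcmd value is replayed by every later command write; a one-shot request bit folded into it would be re-issued each time, for example when translation is enabled afterwards. A hedged sketch of such a later write (real register names, illustrative flow):

        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);   /* must not carry SIRTP */
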
index 47517cf..3627887 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 #include <asm/cacheflush.h>
 
@@ -892,19 +893,11 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
                goto err_enable;
        flush_iotlb_all(obj);
 
-       if (!try_module_get(obj->owner)) {
-               err = -ENODEV;
-               goto err_module;
-       }
-
        spin_unlock(&obj->iommu_lock);
 
        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
        return obj;
 
-err_module:
-       if (obj->refcount == 1)
-               iommu_disable(obj);
 err_enable:
        obj->refcount--;
        spin_unlock(&obj->iommu_lock);
@@ -925,8 +918,6 @@ static void omap_iommu_detach(struct omap_iommu *obj)
        if (--obj->refcount == 0)
                iommu_disable(obj);
 
-       module_put(obj->owner);
-
        obj->iopgd = NULL;
 
        spin_unlock(&obj->iommu_lock);
@@ -1091,6 +1082,11 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
        int ret = 0;
 
+       if (!arch_data || !arch_data->name) {
+               dev_err(dev, "device doesn't have an associated iommu\n");
+               return -EINVAL;
+       }
+
        spin_lock(&omap_domain->lock);
 
        /* only a single device is supported per domain for now */
@@ -1239,6 +1235,7 @@ static int omap_iommu_add_device(struct device *dev)
 {
        struct omap_iommu_arch_data *arch_data;
        struct device_node *np;
+       struct platform_device *pdev;
 
        /*
         * Allocate the archdata iommu structure for DT-based devices.
@@ -1253,13 +1250,19 @@ static int omap_iommu_add_device(struct device *dev)
        if (!np)
                return 0;
 
+       pdev = of_find_device_by_node(np);
+       if (WARN_ON(!pdev)) {
+               of_node_put(np);
+               return -EINVAL;
+       }
+
        arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
        if (!arch_data) {
                of_node_put(np);
                return -ENOMEM;
        }
 
-       arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL);
+       arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
        dev->archdata.iommu = arch_data;
 
        of_node_put(np);
index 1275a82..4f1b68c 100644 (file)
@@ -28,7 +28,6 @@ struct iotlb_entry {
 
 struct omap_iommu {
        const char      *name;
-       struct module   *owner;
        void __iomem    *regbase;
        struct device   *dev;
        void            *isr_priv;
index d7690f8..55de4f6 100644 (file)
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
        has_nonrot_disk = 0;
        choose_next_idle = 0;
 
-       if (conf->mddev->recovery_cp < MaxSector &&
-           (this_sector + sectors >= conf->next_resync))
-               choose_first = 1;
-       else
-               choose_first = 0;
+       choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
 
        for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
@@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf)
  *    there is no normal IO happening.  It must arrange to call
  *    lower_barrier when the particular background IO completes.
  */
-static void raise_barrier(struct r1conf *conf)
+static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 {
        spin_lock_irq(&conf->resync_lock);
 
@@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf)
 
        /* block any new IO from starting */
        conf->barrier++;
+       conf->next_resync = sector_nr;
 
        /* For these conditions we must wait:
         * A: while the array is in frozen state
@@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf)
         * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
         *    next resync will reach to the window which normal bios are
         *    handling.
+        * D: while there are any active requests in the current window.
         */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen &&
                            conf->barrier < RESYNC_DEPTH &&
+                           conf->current_window_requests == 0 &&
                            (conf->start_next_window >=
                             conf->next_resync + RESYNC_SECTORS),
                            conf->resync_lock);
 
+       conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf)
        BUG_ON(conf->barrier <= 0);
        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->barrier--;
+       conf->nr_pending--;
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
 }
@@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
        if (conf->array_frozen || !bio)
                wait = true;
        else if (conf->barrier && bio_data_dir(bio) == WRITE) {
-               if (conf->next_resync < RESYNC_WINDOW_SECTORS)
-                       wait = true;
-               else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
-                               >= bio_end_sector(bio)) ||
-                        (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                               <= bio->bi_iter.bi_sector))
+               if ((conf->mddev->curr_resync_completed
+                    >= bio_end_sector(bio)) ||
+                   (conf->next_resync + NEXT_NORMALIO_DISTANCE
+                    <= bio->bi_iter.bi_sector))
                        wait = false;
                else
                        wait = true;
@@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
        }
 
        if (bio && bio_data_dir(bio) == WRITE) {
-               if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                   <= bio->bi_iter.bi_sector) {
+               if (bio->bi_iter.bi_sector >=
+                   conf->mddev->curr_resync_completed) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
@@ -1186,6 +1185,7 @@ read_again:
                                   atomic_read(&bitmap->behind_writes) == 0);
                }
                r1_bio->read_disk = rdisk;
+               r1_bio->start_next_window = 0;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf)
        mempool_destroy(conf->r1buf_pool);
        conf->r1buf_pool = NULL;
 
+       spin_lock_irq(&conf->resync_lock);
        conf->next_resync = 0;
        conf->start_next_window = MaxSector;
+       conf->current_window_requests +=
+               conf->next_window_requests;
+       conf->next_window_requests = 0;
+       spin_unlock_irq(&conf->resync_lock);
 }
 
 static int raid1_spare_active(struct mddev *mddev)
@@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                        d--;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags))
+                           !test_bit(Faulty, &rdev->flags))
                                r1_sync_page_io(rdev, sect, s,
                                                conf->tmppage, WRITE);
                }
@@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                        d--;
                        rdev = conf->mirrors[d].rdev;
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags)) {
+                           !test_bit(Faulty, &rdev->flags)) {
                                if (r1_sync_page_io(rdev, sect, s,
                                                    conf->tmppage, READ)) {
                                        atomic_add(s, &rdev->corrected_errors);
@@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
        r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
-       raise_barrier(conf);
 
-       conf->next_resync = sector_nr;
+       raise_barrier(conf, sector_nr);
 
        rcu_read_lock();
        /*
index 103ef6b..be76315 100644 (file)
@@ -1490,6 +1490,7 @@ static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
 {
        struct v4l2_ctrl_config cfg;
 
+       memset(&cfg, 0, sizeof(cfg));
        cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags);
        cfg.ops = &cx2341x_ops;
        cfg.id = id;
index 72fb583..7975c66 100644 (file)
@@ -1095,6 +1095,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
                sizeof(state->tuner_i2c_adapter.name));
        state->tuner_i2c_adapter.algo      = &cx24123_tuner_i2c_algo;
        state->tuner_i2c_adapter.algo_data = NULL;
+       state->tuner_i2c_adapter.dev.parent = i2c->dev.parent;
        i2c_set_adapdata(&state->tuner_i2c_adapter, state);
        if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) {
                err("tuner i2c bus could not be initialized\n");
index d4fa213..de88b98 100644 (file)
@@ -2325,7 +2325,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
        v4l2_info(sd, "HDCP keys read: %s%s\n",
                        (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
                        (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
-       if (!is_hdmi(sd)) {
+       if (is_hdmi(sd)) {
                bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
                bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
                bool audio_mute = io_read(sd, 0x65) & 0x40;
index 998919e..7b35e63 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/videodev2.h>
 #include <linux/kthread.h>
index 90dec29..29abc37 100644 (file)
@@ -1342,7 +1342,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
        struct em28xx *dev = video_drvdata(file);
        struct em28xx_v4l2 *v4l2 = dev->v4l2;
 
-       if (v4l2->streaming_users > 0)
+       if (vb2_is_busy(&v4l2->vb_vidq))
                return -EBUSY;
 
        vidioc_try_fmt_vid_cap(file, priv, f);
@@ -1883,8 +1883,9 @@ static int em28xx_v4l2_open(struct file *filp)
                return -EINVAL;
        }
 
-       em28xx_videodbg("open dev=%s type=%s\n",
-                       video_device_node_name(vdev), v4l2_type_names[fh_type]);
+       em28xx_videodbg("open dev=%s type=%s users=%d\n",
+                       video_device_node_name(vdev), v4l2_type_names[fh_type],
+                       v4l2->users);
 
        if (mutex_lock_interruptible(&dev->lock))
                return -ERESTARTSYS;
@@ -1897,9 +1898,7 @@ static int em28xx_v4l2_open(struct file *filp)
                return ret;
        }
 
-       if (v4l2_fh_is_singular_file(filp)) {
-               em28xx_videodbg("first opened filehandle, initializing device\n");
-
+       if (v4l2->users == 0) {
                em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
 
                if (vdev->vfl_type != VFL_TYPE_RADIO)
@@ -1910,8 +1909,6 @@ static int em28xx_v4l2_open(struct file *filp)
                 * of some i2c devices
                 */
                em28xx_wake_i2c(dev);
-       } else {
-               em28xx_videodbg("further filehandles are already opened\n");
        }
 
        if (vdev->vfl_type == VFL_TYPE_RADIO) {
@@ -1921,6 +1918,7 @@ static int em28xx_v4l2_open(struct file *filp)
 
        kref_get(&dev->ref);
        kref_get(&v4l2->ref);
+       v4l2->users++;
 
        mutex_unlock(&dev->lock);
 
@@ -2027,11 +2025,12 @@ static int em28xx_v4l2_close(struct file *filp)
        struct em28xx_v4l2    *v4l2 = dev->v4l2;
        int              errCode;
 
-       mutex_lock(&dev->lock);
+       em28xx_videodbg("users=%d\n", v4l2->users);
 
-       if (v4l2_fh_is_singular_file(filp)) {
-               em28xx_videodbg("last opened filehandle, shutting down device\n");
+       vb2_fop_release(filp);
+       mutex_lock(&dev->lock);
 
+       if (v4l2->users == 1) {
                /* No sense to try to write to the device */
                if (dev->disconnected)
                        goto exit;
@@ -2050,12 +2049,10 @@ static int em28xx_v4l2_close(struct file *filp)
                        em28xx_errdev("cannot change alternate number to "
                                        "0 (error=%i)\n", errCode);
                }
-       } else {
-               em28xx_videodbg("further opened filehandles left\n");
        }
 
 exit:
-       vb2_fop_release(filp);
+       v4l2->users--;
        kref_put(&v4l2->ref, em28xx_free_v4l2);
        mutex_unlock(&dev->lock);
        kref_put(&dev->ref, em28xx_free_device);
index 84ef8ef..4360338 100644 (file)
@@ -524,6 +524,7 @@ struct em28xx_v4l2 {
        int sensor_yres;
        int sensor_xtal;
 
+       int users;              /* user count for exclusive use */
        int streaming_users;    /* number of actively streaming users */
 
        u32 frequency;          /* selected tuner frequency */
index c359006..25d3ae2 100644 (file)
@@ -971,6 +971,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
         * to the userspace.
         */
        req->count = allocated_buffers;
+       q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
        return 0;
 }
@@ -1018,6 +1019,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
                memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
                memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
                q->memory = create->memory;
+               q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
        }
 
        num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
@@ -1130,7 +1132,7 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
  */
 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
 {
-       if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+       if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
                return NULL;
 
        return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
@@ -1165,13 +1167,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
                return;
 
-       if (!q->start_streaming_called) {
-               if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
-                       state = VB2_BUF_STATE_QUEUED;
-       } else if (WARN_ON(state != VB2_BUF_STATE_DONE &&
-                          state != VB2_BUF_STATE_ERROR)) {
-                       state = VB2_BUF_STATE_ERROR;
-       }
+       if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+                   state != VB2_BUF_STATE_ERROR &&
+                   state != VB2_BUF_STATE_QUEUED))
+               state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        /*
@@ -1762,6 +1761,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
        q->start_streaming_called = 0;
 
        dprintk(1, "driver refused to start streaming\n");
+       /*
+        * If you see this warning, then the driver isn't cleaning up properly
+        * after a failed start_streaming(). See the start_streaming()
+        * documentation in videobuf2-core.h for more information on how buffers
+        * should be returned to vb2 in start_streaming().
+        */
        if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
                unsigned i;
 
@@ -1777,6 +1782,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
                /* Must be zero now */
                WARN_ON(atomic_read(&q->owned_by_drv_count));
        }
+       /*
+        * If done_list is not empty, then start_streaming() didn't call
+        * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
+        * STATE_DONE.
+        */
+       WARN_ON(!list_empty(&q->done_list));
        return ret;
 }
 
@@ -1812,6 +1823,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
         */
        list_add_tail(&vb->queued_entry, &q->queued_list);
        q->queued_count++;
+       q->waiting_for_buffers = false;
        vb->state = VB2_BUF_STATE_QUEUED;
        if (V4L2_TYPE_IS_OUTPUT(q->type)) {
                /*
@@ -2123,6 +2135,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
        if (q->start_streaming_called)
                call_void_qop(q, stop_streaming, q);
 
+       /*
+        * If you see this warning, then the driver isn't cleaning up properly
+        * in stop_streaming(). See the stop_streaming() documentation in
+        * videobuf2-core.h for more information on how buffers should be returned
+        * to vb2 in stop_streaming().
+        */
        if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
                for (i = 0; i < q->num_buffers; ++i)
                        if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
@@ -2272,6 +2290,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
         * their normal dequeued state.
         */
        __vb2_queue_cancel(q);
+       q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
 
        dprintk(3, "successful\n");
        return 0;
@@ -2590,10 +2609,17 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
        }
 
        /*
-        * There is nothing to wait for if no buffer has been queued and the
-        * queue isn't streaming, or if the error flag is set.
+        * There is nothing to wait for if the queue isn't streaming, or if the
+        * error flag is set.
+        */
+       if (!vb2_is_streaming(q) || q->error)
+               return res | POLLERR;
+       /*
+        * For compatibility with vb1: if QBUF hasn't been called yet, then
+        * return POLLERR as well. This only affects capture queues; output
+        * queues will always initialize waiting_for_buffers to false.
         */
-       if ((list_empty(&q->queued_list) && !vb2_is_streaming(q)) || q->error)
+       if (q->waiting_for_buffers)
                return res | POLLERR;
 
        /*
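
With waiting_for_buffers, poll() on a capture queue now reports POLLERR until the application has actually queued buffers, matching the old videobuf behaviour. A minimal userspace sketch of the expected sequence (MMAP capture, error handling trimmed; illustrative only):

#include <poll.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int start_capture(int fd)
{
        struct v4l2_requestbuffers req = {
                .count  = 4,
                .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                .memory = V4L2_MEMORY_MMAP,
        };
        enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        unsigned int i;

        if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
                return -1;

        /* poll() at this point would return POLLERR: nothing queued yet */

        for (i = 0; i < req.count; i++) {
                struct v4l2_buffer buf = {
                        .index  = i,
                        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                        .memory = V4L2_MEMORY_MMAP,
                };

                if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)   /* clears waiting_for_buffers */
                        return -1;
        }

        if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
                return -1;

        return poll(&pfd, 1, -1);       /* now blocks until a frame is ready */
}
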
index adefc31..9b163a4 100644 (file)
@@ -113,7 +113,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
                goto fail_pages_alloc;
 
        ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
-                       buf->num_pages, 0, size, gfp_flags);
+                       buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;
 
index a34a11d..63ca984 100644 (file)
@@ -29,7 +29,7 @@ config FUSION_SPI
 config FUSION_FC
        tristate "Fusion MPT ScsiHost drivers for FC"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        ---help---
          SCSI HOST support for Fiber Channel host adapters.
 
index f0f5eab..798ae69 100644 (file)
@@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
                                "the same MAC; 0 for none (default), "
                                "1 for active, 2 for follow");
 module_param(all_slaves_active, int, 0);
-MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
+MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
                                     "by setting active flag for all slaves; "
                                     "0 for never (default), 1 for always.");
 module_param(resend_igmp, int, 0);
@@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
                else
                        bond_xmit_slave_id(bond, skb, 0);
        } else {
-               slave_id = bond_rr_gen_slave_id(bond);
-               bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
+               int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+
+               if (likely(slave_cnt)) {
+                       slave_id = bond_rr_gen_slave_id(bond);
+                       bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+               } else {
+                       dev_kfree_skb_any(skb);
+               }
        }
 
        return NETDEV_TX_OK;
@@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
 
-       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
+       if (likely(slave_cnt))
+               bond_xmit_slave_id(bond, skb,
+                                  bond_xmit_hash(bond, skb) % slave_cnt);
+       else
+               dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
index f07fa89..05e1aa0 100644 (file)
@@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev)
        struct at91_priv *priv = netdev_priv(dev);
        int err;
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
 
        /* check or determine and set bittime */
        err = open_candev(dev);
@@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev)
  out_close:
        close_candev(dev);
  out:
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return err;
 }
@@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev)
        at91_chip_stop(dev, CAN_STATE_STOPPED);
 
        free_irq(dev->irq, dev);
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        close_candev(dev);
 
index 109cb44..fb279d6 100644 (file)
@@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
        ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
        writel(ctrl, priv->raminit_ctrlreg);
        ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
-       c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+       c_can_hw_raminit_wait_ti(priv, mask, ctrl);
 
        if (enable) {
                /* Set start bit and wait for the done bit. */
                ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
                writel(ctrl, priv->raminit_ctrlreg);
                ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
-               c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+               c_can_hw_raminit_wait_ti(priv, mask, ctrl);
        }
        spin_unlock(&raminit_lock);
 }
index 944aa5d..6586309 100644 (file)
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC                        BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x1f)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x7f)
 #define FLEXCAN_MCR_IDAM_A             (0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (1 << 8)
 #define FLEXCAN_MCR_IDAM_C             (2 << 8)
         FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
-#define FLEXCAN_TX_BUF_ID              8
+/* Errata ERR005829 step7: Reserve first valid MB */
+#define FLEXCAN_TX_BUF_RESERVED                8
+#define FLEXCAN_TX_BUF_ID              9
 #define FLEXCAN_IFLAG_BUF(x)           BIT(x)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
 
 /* FLEXCAN message buffers */
 #define FLEXCAN_MB_CNT_CODE(x)         (((x) & 0xf) << 24)
+#define FLEXCAN_MB_CODE_RX_INACTIVE    (0x0 << 24)
+#define FLEXCAN_MB_CODE_RX_EMPTY       (0x4 << 24)
+#define FLEXCAN_MB_CODE_RX_FULL                (0x2 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRRUN    (0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_RANSWER     (0xa << 24)
+
+#define FLEXCAN_MB_CODE_TX_INACTIVE    (0x8 << 24)
+#define FLEXCAN_MB_CODE_TX_ABORT       (0x9 << 24)
+#define FLEXCAN_MB_CODE_TX_DATA                (0xc << 24)
+#define FLEXCAN_MB_CODE_TX_TANSWER     (0xe << 24)
+
 #define FLEXCAN_MB_CNT_SRR             BIT(22)
 #define FLEXCAN_MB_CNT_IDE             BIT(21)
 #define FLEXCAN_MB_CNT_RTR             BIT(20)
@@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
                return -ETIMEDOUT;
@@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
                return -ETIMEDOUT;
@@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-               usleep_range(100, 200);
+               udelay(100);
 
        if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
                return -ETIMEDOUT;
@@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
        flexcan_write(reg, &regs->mcr);
 
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
                return -ETIMEDOUT;
@@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 
        flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
        while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
-               usleep_range(10, 20);
+               udelay(10);
 
        if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
                return -ETIMEDOUT;
@@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
        flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
        flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
+       /* Errata ERR005829 step8:
+        * Write the INACTIVE (0x8) code twice to the first MB.
+        */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
        return NETDEV_TX_OK;
 }
 
@@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                stats->tx_bytes += can_get_echo_skb(dev, 0);
                stats->tx_packets++;
                can_led_event(dev, CAN_LED_EVENT_TX);
+               /* after sending an RTR frame the mailbox is in RX mode */
+               flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                             &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
                flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
                netif_wake_queue(dev);
        }
@@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev)
        struct flexcan_regs __iomem *regs = priv->base;
        int err;
        u32 reg_mcr, reg_ctrl;
+       int i;
 
        /* enable module */
        err = flexcan_chip_enable(priv);
@@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
-       /* Abort any pending TX, mark Mailbox as INACTIVE */
-       flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+       /* clear and invalidate all mailboxes first */
+       for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) {
+               flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
+                             &regs->cantxfg[i].can_ctrl);
+       }
+
+       /* Errata ERR005829: mark first TX mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+                     &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
+       /* mark TX mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
                      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
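
For orientation, a hedged summary of the mailbox usage that the errata handling above results in (mailbox count and FIFO occupancy depend on the SoC; treat the numbers as illustrative):

/*
 * MB 0..7  - claimed by the RX FIFO engine while FEN is set
 * MB 8     - FLEXCAN_TX_BUF_RESERVED: kept TX-INACTIVE and written twice
 *            after every transmit (ERR005829 steps 7 and 8)
 * MB 9     - FLEXCAN_TX_BUF_ID: the mailbox actually used for transmission
 * MB 10..  - left RX-INACTIVE by the init loop above
 */
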
index 7a85590..e5fac36 100644 (file)
@@ -70,6 +70,8 @@ struct peak_pci_chan {
 #define PEAK_PC_104P_DEVICE_ID 0x0006  /* PCAN-PC/104+ cards */
 #define PEAK_PCI_104E_DEVICE_ID        0x0007  /* PCAN-PCI/104 Express cards */
 #define PEAK_MPCIE_DEVICE_ID   0x0008  /* The miniPCIe slot cards */
+#define PEAK_PCIE_OEM_ID       0x0009  /* PCAN-PCI Express OEM */
+#define PEAK_PCIEC34_DEVICE_ID 0x000A  /* PCAN-PCI Express 34 (one channel) */
 
 #define PEAK_PCI_CHAN_MAX      4
 
@@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
        {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
        {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+       {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #endif
        {0,}
 };
@@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 * This must be done *before* register_sja1000dev() but
                 * *after* devices linkage
                 */
-               if (pdev->device == PEAK_PCIEC_DEVICE_ID) {
+               if (pdev->device == PEAK_PCIEC_DEVICE_ID ||
+                   pdev->device == PEAK_PCIEC34_DEVICE_ID) {
                        err = peak_pciec_probe(pdev, dev);
                        if (err) {
                                dev_err(&pdev->dev,
index 3fe45c7..8ca49f0 100644 (file)
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int entry = vp->cur_tx % TX_RING_SIZE;
        struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
        unsigned long flags;
+       dma_addr_t dma_addr;
 
        if (vortex_debug > 6) {
                pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
        if (!skb_shinfo(skb)->nr_frags) {
-               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-                                                                               skb->len, PCI_DMA_TODEVICE));
+               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+                                         PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                       goto out_dma_err;
+
+               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
                vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
        } else {
                int i;
 
-               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-                                                                               skb_headlen(skb), PCI_DMA_TODEVICE));
+               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+                                         skb_headlen(skb), PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                       goto out_dma_err;
+
+               vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
                vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+                       dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+                                                   0,
+                                                   frag->size,
+                                                   DMA_TO_DEVICE);
+                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+                               for(i = i-1; i >= 0; i--)
+                                       dma_unmap_page(&VORTEX_PCI(vp)->dev,
+                                                      le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+                                                      le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+                                                      DMA_TO_DEVICE);
+
+                               pci_unmap_single(VORTEX_PCI(vp),
+                                                le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+                                                le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+                                                PCI_DMA_TODEVICE);
+
+                               goto out_dma_err;
+                       }
+
                        vp->tx_ring[entry].frag[i+1].addr =
-                                       cpu_to_le32(skb_frag_dma_map(
-                                               &VORTEX_PCI(vp)->dev,
-                                               frag,
-                                               frag->page_offset, frag->size, DMA_TO_DEVICE));
+                                               cpu_to_le32(dma_addr);
 
                        if (i == skb_shinfo(skb)->nr_frags-1)
                                        vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 #else
-       vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+       dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+               goto out_dma_err;
+       vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
        vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
        iowrite16(DownUnstall, ioaddr + EL3_CMD);
        spin_unlock_irqrestore(&vp->lock, flags);
+out:
        return NETDEV_TX_OK;
+out_dma_err:
+       dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+       goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
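
The added error handling follows the usual map-check-unwind shape: every DMA mapping is verified immediately, and a failure releases all mappings already made for the skb before the packet is dropped. A hedged, generic sketch of that shape (hypothetical helper, kernel headers assumed):

/* Map the linear part and all fragments of an skb for TX, or undo
 * everything mapped so far and fail.
 */
static int map_skb_or_unwind(struct pci_dev *pdev, struct sk_buff *skb,
                             dma_addr_t *addrs)
{
        int i, n = skb_shinfo(skb)->nr_frags;

        addrs[0] = pci_map_single(pdev, skb->data, skb_headlen(skb),
                                  PCI_DMA_TODEVICE);
        if (dma_mapping_error(&pdev->dev, addrs[0]))
                return -ENOMEM;

        for (i = 0; i < n; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                addrs[i + 1] = skb_frag_dma_map(&pdev->dev, frag, 0,
                                                skb_frag_size(frag),
                                                DMA_TO_DEVICE);
                if (!dma_mapping_error(&pdev->dev, addrs[i + 1]))
                        continue;

                /* Unwind the fragments mapped so far, then the linear part. */
                while (--i >= 0)
                        dma_unmap_page(&pdev->dev, addrs[i + 1],
                                       skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                       DMA_TO_DEVICE);
                pci_unmap_single(pdev, addrs[0], skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                return -ENOMEM;
        }

        return 0;
}
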
index fe5cfea..5919394 100644 (file)
 #define DRV_NAME       "arc_emac"
 #define DRV_VERSION    "1.0"
 
+/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+       return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
 /**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:      Pointer to the net_device structure.
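
A hedged worked example of the availability formula above. TX_BD_NUM is taken as 128 purely for illustration; substitute the driver's real ring size:

enum { EX_TX_BD_NUM = 128 };            /* illustrative ring size */

static inline unsigned int ex_tx_avail(unsigned int dirty, unsigned int curr)
{
        return (dirty + EX_TX_BD_NUM - curr - 1) % EX_TX_BD_NUM;
}

/* ex_tx_avail(2, 5) == 124: descriptors 2..4 are still in flight and one
 * slot is deliberately kept empty so "full" and "empty" stay distinguishable.
 */
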
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
                txbd->info = 0;
 
                *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-               if (netif_queue_stopped(ndev))
-                       netif_wake_queue(ndev);
        }
+
+       /* Ensure that txbd_dirty is visible to tx() before checking
+        * for queue stopped.
+        */
+       smp_mb();
+
+       if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+               netif_wake_queue(ndev);
 }
 
 /**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
                napi_complete(napi);
-               arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+               arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
        return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
        /* Reset all flags except "MDIO complete" */
        arc_reg_set(priv, R_STATUS, status);
 
-       if (status & RXINT_MASK) {
+       if (status & (RXINT_MASK | TXINT_MASK)) {
                if (likely(napi_schedule_prep(&priv->napi))) {
-                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
                        __napi_schedule(&priv->napi);
                }
        }
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
        arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
        /* Enable interrupts */
-       arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
        /* Set CONTROL */
        arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
        netif_stop_queue(ndev);
 
        /* Disable interrupts */
-       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
        /* Disable EMAC */
        arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
        len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-       /* EMAC still holds this buffer in its possession.
-        * CPU must not modify this buffer descriptor
-        */
-       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+       if (unlikely(!arc_emac_tx_avail(priv))) {
                netif_stop_queue(ndev);
+               netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        /* Increment index to point to the next BD */
        *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-       /* Get "info" of the next BD */
-       info = &priv->txbd[*txbd_curr].info;
+       /* Ensure that tx_clean() sees the new txbd_curr before
+        * checking the queue status. This prevents an unneeded wake
+        * of the queue in tx_clean().
+        */
+       smp_mb();
 
-       /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+       if (!arc_emac_tx_avail(priv)) {
                netif_stop_queue(ndev);
+               /* Refresh tx_dirty */
+               smp_mb();
+               if (arc_emac_tx_avail(priv))
+                       netif_start_queue(ndev);
+       }
 
        arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
index 4a7028d..d588136 100644 (file)
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
                                     hwstat->tx_underruns +
                                     hwstat->tx_excessive_cols +
                                     hwstat->tx_late_cols);
-               nstat->multicast  = hwstat->tx_multicast_pkts;
+               nstat->multicast  = hwstat->rx_multicast_pkts;
                nstat->collisions = hwstat->tx_total_cols;
 
                nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
index 6f4e186..d9b9170 100644 (file)
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = cb->skb;
+
+               processed++;
+               priv->rx_read_ptr++;
+
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
+
+               /* We do not have a backing SKB, so we do not have a corresponding
+                * DMA mapping for this incoming packet since
+                * bcm_sysport_rx_refill always either has both skb and mapping
+                * or none.
+                */
+               if (unlikely(!skb)) {
+                       netif_err(priv, rx_err, ndev, "out of memory!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       goto refill;
+               }
+
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
                          DESC_STATUS_MASK;
 
-               processed++;
-               priv->rx_read_ptr++;
-               if (priv->rx_read_ptr == priv->num_rx_bds)
-                       priv->rx_read_ptr = 0;
-
                netif_dbg(priv, rx_status, ndev,
                          "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);
 
-               if (unlikely(!skb)) {
-                       netif_err(priv, rx_err, ndev, "out of memory!\n");
-                       ndev->stats.rx_dropped++;
-                       ndev->stats.rx_errors++;
-                       goto refill;
-               }
-
                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
index 3f9d4de..5cc9cae 100644 (file)
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
+       unsigned int bds_compl;
        unsigned int c_index;
 
        /* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        /* Reclaim transmitted buffers */
        while (last_tx_cn-- > 0) {
                tx_cb_ptr = ring->cbs + last_c_index;
+               bds_compl = 0;
                if (tx_cb_ptr->skb) {
+                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
                dev->stats.tx_packets++;
-               ring->free_bds += 1;
+               ring->free_bds += bds_compl;
 
                last_c_index++;
                last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
+               cb = &priv->rx_cbs[priv->rx_read_ptr];
+               skb = cb->skb;
+
+               rxpktprocessed++;
+
+               priv->rx_read_ptr++;
+               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+               /* We do not have a backing SKB, so we do not have a
+                * corresponding DMA mapping for this incoming packet since
+                * bcmgenet_rx_refill always either has both skb and mapping or
+                * none.
+                */
+               if (unlikely(!skb)) {
+                       dev->stats.rx_dropped++;
+                       dev->stats.rx_errors++;
+                       goto refill;
+               }
+
                /* Unmap the packet contents such that we can use the
                 * RSV from the 64 bytes descriptor when enabled and save
                 * a 32-bits register read
                 */
-               cb = &priv->rx_cbs[priv->rx_read_ptr];
-               skb = cb->skb;
                dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
                                 priv->rx_buf_len, DMA_FROM_DEVICE);
 
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                          __func__, p_index, priv->rx_c_index,
                          priv->rx_read_ptr, dma_length_status);
 
-               rxpktprocessed++;
-
-               priv->rx_read_ptr++;
-               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
-               /* out of memory, just drop packets at the hardware level */
-               if (unlikely(!skb)) {
-                       dev->stats.rx_dropped++;
-                       dev->stats.rx_errors++;
-                       goto refill;
-               }
-
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+       int ret = 0;
+       int timeout = 0;
+       u32 reg;
+
+       /* Disable TDMA to stop add more frames in TX DMA */
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+       /* Check TDMA status register to confirm TDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       /* Wait 10ms for packet drain in both tx and rx dma */
+       usleep_range(10000, 20000);
+
+       /* Disable RDMA */
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       timeout = 0;
+       /* Check RDMA status register to confirm RDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
        /* disable DMA */
-       bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
-       bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+       bcmgenet_dma_teardown(priv);
 
        for (i = 0; i < priv->num_tx_bds; i++) {
                if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
        return ret;
 }
 
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
-       int ret = 0;
-       int timeout = 0;
-       u32 reg;
-
-       /* Disable TDMA to stop add more frames in TX DMA */
-       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-       reg &= ~DMA_EN;
-       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
-       /* Check TDMA status register to confirm TDMA is disabled */
-       while (timeout++ < DMA_TIMEOUT_VAL) {
-               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
-               if (reg & DMA_DISABLED)
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
-               ret = -ETIMEDOUT;
-       }
-
-       /* Wait 10ms for packet drain in both tx and rx dma */
-       usleep_range(10000, 20000);
-
-       /* Disable RDMA */
-       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
-       reg &= ~DMA_EN;
-       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
-       timeout = 0;
-       /* Check RDMA status register to confirm RDMA is disabled */
-       while (timeout++ < DMA_TIMEOUT_VAL) {
-               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
-               if (reg & DMA_DISABLED)
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
-               ret = -ETIMEDOUT;
-       }
-
-       return ret;
-}
-
 static void bcmgenet_netif_stop(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
index cb77ae9..e7d3a62 100644 (file)
@@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        entry = tnapi->tx_prod;
        base_flags = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               base_flags |= TXD_FLAG_TCPUDP_CSUM;
 
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
@@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+               /* HW/FW can not correctly segment packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD))
+                       return tg3_tso_bug(tp, tnapi, txq, skb);
+
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                            tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                base_flags |= tsflags << 12;
                        }
                }
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* HW/FW can not correctly checksum packets that have been
+                * vlan encapsulated.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q) ||
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (skb_checksum_help(skb))
+                               goto drop;
+               } else  {
+                       base_flags |= TXD_FLAG_TCPUDP_CSUM;
+               }
        }
 
        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
index 8c34811..e5be511 100644 (file)
@@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct port_info *pi;
        bool highdma = false;
        struct adapter *adapter = NULL;
+       void __iomem *regs;
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_release_regions;
        }
 
+       regs = pci_ioremap_bar(pdev, 0);
+       if (!regs) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -ENOMEM;
+               goto out_disable_device;
+       }
+
+       /* We control everything through one PF */
+       func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+       if (func != ent->driver_data) {
+               iounmap(regs);
+               pci_disable_device(pdev);
+               pci_save_state(pdev);        /* to restore SR-IOV later */
+               goto sriov;
+       }
+
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
-                       goto out_disable_device;
+                       goto out_unmap_bar0;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
-                       goto out_disable_device;
+                       goto out_unmap_bar0;
                }
        }
 
@@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
-               goto out_disable_device;
+               goto out_unmap_bar0;
        }
 
        adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
 
-       adapter->regs = pci_ioremap_bar(pdev, 0);
-       if (!adapter->regs) {
-               dev_err(&pdev->dev, "cannot map device registers\n");
-               err = -ENOMEM;
-               goto out_free_adapter;
-       }
-
-       /* We control everything through one PF */
-       func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
-       if (func != ent->driver_data) {
-               pci_save_state(pdev);        /* to restore SR-IOV later */
-               goto sriov;
-       }
-
+       adapter->regs = regs;
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
@@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = t4_prep_adapter(adapter);
        if (err)
-               goto out_unmap_bar0;
+               goto out_free_adapter;
+
 
        if (!is_t4(adapter->params.chip)) {
                s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        dev_err(&pdev->dev,
                                "Incorrect number of egress queues per page\n");
                        err = -EINVAL;
-                       goto out_unmap_bar0;
+                       goto out_free_adapter;
                }
                adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
                pci_resource_len(pdev, 2));
                if (!adapter->bar2) {
                        dev_err(&pdev->dev, "cannot map device bar2 region\n");
                        err = -ENOMEM;
-                       goto out_unmap_bar0;
+                       goto out_free_adapter;
                }
        }
 
@@ -6722,13 +6727,13 @@ sriov:
  out_unmap_bar:
        if (!is_t4(adapter->params.chip))
                iounmap(adapter->bar2);
- out_unmap_bar0:
-       iounmap(adapter->regs);
  out_free_adapter:
        if (adapter->workq)
                destroy_workqueue(adapter->workq);
 
        kfree(adapter);
+ out_unmap_bar0:
+       iounmap(regs);
  out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
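The cxgb4 hunks reorder init_one() so BAR0 is mapped and the PF number read before the adapter structure is allocated; a non-master PF can then bail out before anything else is acquired, and the error labels are reshuffled so each one unwinds exactly what was acquired before it. A hedged sketch of that acquire-early / unwind-in-reverse shape, with malloc()/free() standing in for pci_ioremap_bar()/iounmap() and the real PCI calls:

#include <stdio.h>
#include <stdlib.h>

/* malloc()/free() stand in for register mapping and the adapter
 * allocation; the point is the ordering: map registers first, decide
 * whether this PF is the master, and only then allocate, so the error
 * labels can unwind in exact reverse order of acquisition. */
static int probe_one(int pf, int master_pf)
{
	int err = 0;
	void *regs, *adapter;

	regs = malloc(64);              /* "map" BAR0 before anything else */
	if (!regs)
		return -1;

	if (pf != master_pf) {          /* not the master PF */
		free(regs);             /* release the only thing held */
		return 0;
	}

	adapter = malloc(128);
	if (!adapter) {
		err = -1;
		goto out_unmap_bar0;    /* regs is the only cleanup needed */
	}

	printf("PF %d fully initialised\n", pf);
	free(adapter);
out_unmap_bar0:
	free(regs);
	return err;
}

int main(void)
{
	probe_one(4, 4);   /* master PF: full init path     */
	probe_one(0, 4);   /* other PF: early, minimal exit */
	return 0;
}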
index 9b33057..70089c2 100644 (file)
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
        const void *mac_addr;
 
        if (!IS_ENABLED(CONFIG_OF) || !np)
-               return NULL;
+               return ERR_PTR(-ENXIO);
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
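The dm9000 change returns ERR_PTR(-ENXIO) instead of NULL when there is no usable device-tree node, so the caller can distinguish "no DT data, fall back to platform data" from a genuine failure. The sketch below re-implements simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers purely for illustration; the real ones live in include/linux/err.h:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified copies of the kernel helpers: small negative errno values
 * are encoded at the very top of the address space, which no valid
 * pointer ever occupies. */
#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct plat_data { int dummy; };

static struct plat_data *parse_dt(int have_of_node)
{
	if (!have_of_node)
		return ERR_PTR(-ENXIO);   /* "no DT node": distinguishable */

	struct plat_data *p = malloc(sizeof(*p));
	if (!p)
		return ERR_PTR(-ENOMEM);  /* real failure */
	return p;
}

int main(void)
{
	struct plat_data *p = parse_dt(0);

	if (IS_ERR(p))
		printf("parse_dt failed: %ld\n", PTR_ERR(p));
	else
		free(p);
	return 0;
}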
index 65a4a0f..02a2e90 100644 (file)
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+       int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+                       + 1;
+       int max_port = min_port +
+               bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+       if (port < min_port)
+               port = min_port;
+       else if (port >= max_port)
+               port = max_port - 1;
+
+       return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->mac = mac;
        mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
        if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
        struct mlx4_priv *priv;
 
        priv = mlx4_priv(dev);
+       port = mlx4_slaves_closest_port(dev, slave, port);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->spoofchk = setting;
 
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
        if (slave < 0)
                return -EINVAL;
 
+       port = mlx4_slaves_closest_port(dev, slave, port);
        switch (link_state) {
        case IFLA_VF_LINK_STATE_AUTO:
                /* get current link state */
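mlx4_slaves_closest_port(), added above, clamps a caller-supplied port into the range of ports actually assigned to the VF before it is used to index per-port state in the set_vf_* helpers. The clamp itself, pulled out as a standalone sketch with invented example values:

#include <stdio.h>

/* Clamp 'port' into [min_port, max_port - 1]; the mlx4 helper derives
 * min_port and max_port from the slave's active-port bitmap. */
static int clamp_port(int port, int min_port, int max_port)
{
	if (port < min_port)
		return min_port;
	if (port >= max_port)
		return max_port - 1;
	return port;
}

int main(void)
{
	/* e.g. a VF whose active ports are 2..3 (min=2, max=4) */
	printf("%d %d %d\n",
	       clamp_port(1, 2, 4),   /* -> 2 */
	       clamp_port(3, 2, 4),   /* -> 3 */
	       clamp_port(7, 2, 4));  /* -> 3 */
	return 0;
}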
index e22f24f..35ff292 100644 (file)
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
 
+       if (pause->autoneg)
+               return -EINVAL;
+
        priv->prof->tx_pause = pause->tx_pause != 0;
        priv->prof->rx_pause = pause->rx_pause != 0;
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
index 7d717ec..193a6ad 100644 (file)
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
                            MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
        int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
        struct mlx4_cmd_mailbox *mailbox = NULL;
 
-       /* Make sure that at this point we have single-threaded access only */
-
        if (mmr->enabled != MLX4_MPT_EN_HW)
                return -EINVAL;
 
        err = mlx4_HW2SW_MPT(dev, NULL, key);
-
        if (err) {
                mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
                mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                                   0, MLX4_CMD_QUERY_MPT,
                                   MLX4_CMD_TIME_CLASS_B,
                                   MLX4_CMD_WRAPPED);
-
                if (err)
                        goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }
 
-       mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-       if (!err)
+       if (!err) {
+               mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
                mmr->enabled = MLX4_MPT_EN_HW;
+       }
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
                         u32 pdn)
 {
-       u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+       u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
        /* The wrapper function will put the slave's id here */
        if (mlx4_is_mfunc(dev))
                pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-       mpt_entry->pd_flags = cpu_to_be32((pd_flags &  ~MLX4_MPT_PD_MASK) |
+
+       mpt_entry->pd_flags = cpu_to_be32(pd_flags |
                                          (pdn & MLX4_MPT_PD_MASK)
                                          | MLX4_MPT_PD_FLAG_EN_INV);
        return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
        int err;
 
-       mpt_entry->start       = cpu_to_be64(mr->iova);
-       mpt_entry->length      = cpu_to_be64(mr->size);
-       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+       mpt_entry->start       = cpu_to_be64(iova);
+       mpt_entry->length      = cpu_to_be64(size);
+       mpt_entry->entity_size = cpu_to_be32(page_shift);
 
        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;
 
+       mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+                                          MLX4_MPT_PD_FLAG_EN_INV);
+       mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+                                          MLX4_MPT_FLAG_SW_OWNS);
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
                if (mr->mtt.page_shift == 0)
                        mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        }
+       if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+               /* fast register MR in free state */
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+               mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+                                                  MLX4_MPT_PD_FLAG_RAE);
+       } else {
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+       }
        mr->enabled = MLX4_MPT_EN_SW;
 
        return 0;
index 9ba0c1c..94eeb2c 100644 (file)
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
        int i;
 
        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if ((mac & MLX4_MAC_MASK) ==
+               if (table->refs[i] &&
+                   (MLX4_MAC_MASK & mac) ==
                    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
                        return i;
        }
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 
        mutex_lock(&table->mutex);
        for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (free < 0 && !table->entries[i]) {
-                       free = i;
+               if (!table->refs[i]) {
+                       if (free < 0)
+                               free = i;
                        continue;
                }
 
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+               if ((MLX4_MAC_MASK & mac) ==
+                    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
                        /* MAC already registered, increment ref count */
                        err = i;
                        ++table->refs[i];
index 0dc31d8..2301365 100644 (file)
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params)
 {
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_update_qp_context *cmd;
        u64 pri_addr_path_mask = 0;
+       u64 qp_mask = 0;
        int err = 0;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
                cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
        }
 
+       if (attr & MLX4_UPDATE_QP_VSD) {
+               qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+               if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+                       cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+       }
+
        cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+       cmd->qp_mask = cpu_to_be64(qp_mask);
 
-       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+       err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
 
index 1089367..5d2498d 100644 (file)
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
+       u32 qp_type;
        int port;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+       qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;
 
-               /* force strip vlan by clear vsd */
-               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+               /* force vlan stripping by clearing vsd; MLX QP type means Raw Ethernet here */
+               if (qp_type == MLX4_QP_ST_UD ||
+                   (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+                       if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+                               *(__be32 *)inbox->buf =
+                                       cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+                                       MLX4_QP_OPTPAR_VLAN_STRIPPING);
+                               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+                       } else {
+                               struct mlx4_update_qp_params params = {.flags = 0};
+
+                               mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                       }
+               }
 
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
        }
 
        port = (rqp->sched_queue >> 6 & 1) + 1;
-       smac_index = cmd->qp_context.pri_path.grh_mylmc;
-       err = mac_find_smac_ix_in_slave(dev, slave, port,
-                                       smac_index, &mac);
-       if (err) {
-               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-                        qpn, smac_index);
-               goto err_mac;
+
+       if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+               smac_index = cmd->qp_context.pri_path.grh_mylmc;
+               err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                               smac_index, &mac);
+
+               if (err) {
+                       mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                                qpn, smac_index);
+                       goto err_mac;
+               }
        }
 
        err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
        upd_context = mailbox->buf;
-       upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+       upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
index 979c698..a422930 100644 (file)
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
+                       u64 ns;
+
                        memset(&ts, 0, sizeof(ts));
                        /* Read the timestamp */
-                       u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+                       ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
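The octeon_mgmt hunk only hoists the u64 ns declaration above the memset(); kernel C is built with -Wdeclaration-after-statement, so declarations must precede the first statement of a block. A tiny standalone illustration of the fixed shape (types and names here are placeholders):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tstamp { uint64_t ns; };

/* With -Wdeclaration-after-statement (the kernel's C90-style rule),
 * every declaration must precede the first statement of the block, so
 * 'ns' is declared up front and only assigned after the memset(). */
static void read_timestamp(struct tstamp *ts, uint64_t csr_value)
{
	uint64_t ns;                 /* declaration first ...         */

	memset(ts, 0, sizeof(*ts));  /* ... statements afterwards     */
	ns = csr_value;              /* was: "u64 ns = read_csr(...)" */
	ts->ns = ns;
}

int main(void)
{
	struct tstamp ts;

	read_timestamp(&ts, 123456789ULL);
	printf("%llu\n", (unsigned long long)ts.ns);
	return 0;
}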
index 44c8be1..5f7a352 100644 (file)
@@ -7,6 +7,7 @@ config PCH_GBE
        depends on PCI && (X86_32 || COMPILE_TEST)
        select MII
        select PTP_1588_CLOCK_PCH
+       select NET_PTP_CLASSIFY
        ---help---
          This is a gigabit ethernet driver for EG20T PCH.
          EG20T PCH is the platform controller hub that is used in Intel's
index 91652e7..0921302 100644 (file)
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
                                   netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       netdev_features_t changed = features ^ dev->features;
        void __iomem *ioaddr = tp->mmio_addr;
+       u32 rx_config;
 
-       if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
-                        NETIF_F_HW_VLAN_CTAG_RX)))
-               return;
+       rx_config = RTL_R32(RxConfig);
+       if (features & NETIF_F_RXALL)
+               rx_config |= (AcceptErr | AcceptRunt);
+       else
+               rx_config &= ~(AcceptErr | AcceptRunt);
 
-       if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
-               if (features & NETIF_F_RXCSUM)
-                       tp->cp_cmd |= RxChkSum;
-               else
-                       tp->cp_cmd &= ~RxChkSum;
+       RTL_W32(RxConfig, rx_config);
 
-               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-                       tp->cp_cmd |= RxVlan;
-               else
-                       tp->cp_cmd &= ~RxVlan;
+       if (features & NETIF_F_RXCSUM)
+               tp->cp_cmd |= RxChkSum;
+       else
+               tp->cp_cmd &= ~RxChkSum;
 
-               RTL_W16(CPlusCmd, tp->cp_cmd);
-               RTL_R16(CPlusCmd);
-       }
-       if (changed & NETIF_F_RXALL) {
-               int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
-               if (features & NETIF_F_RXALL)
-                       tmp |= (AcceptErr | AcceptRunt);
-               RTL_W32(RxConfig, tmp);
-       }
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               tp->cp_cmd |= RxVlan;
+       else
+               tp->cp_cmd &= ~RxVlan;
+
+       tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+       RTL_W16(CPlusCmd, tp->cp_cmd);
+       RTL_R16(CPlusCmd);
 }
 
 static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
+       features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
        rtl_lock_work(tp);
-       __rtl8169_set_features(dev, features);
+       if (features ^ dev->features)
+               __rtl8169_set_features(dev, features);
        rtl_unlock_work(tp);
 
        return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
        }
 }
 
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
        const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_mwi_2;
        }
 
-       tp->cp_cmd = RxChkSum;
+       tp->cp_cmd = 0;
 
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_master(pdev);
 
-       /*
-        * Pretend we are using VLANs; This bypasses a nasty bug where
-        * Interrupts stop flowing on high load on 8110SCd controllers.
-        */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               tp->cp_cmd |= RxVlan;
-
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
        rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
 
+       tp->cp_cmd |= RxChkSum | RxVlan;
+
+       /*
+        * Pretend we are using VLANs; This bypasses a nasty bug where
+        * Interrupts stop flowing on high load on 8110SCd controllers.
+        */
        if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+               /* Disallow toggling */
                dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
        if (tp->txd_version == RTL_TD_0)
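The rtl8169 rework masks the requested features down to the three bits __rtl8169_set_features() actually handles, performs the changed-bits test once in rtl8169_set_features(), and then programs RxConfig/CPlusCmd from the requested state unconditionally. A small userspace sketch of that mask-then-compare-then-apply pattern; the flag values and the "register" print-out are made up:

#include <stdint.h>
#include <stdio.h>

/* Illustrative feature bits -- not the kernel's NETIF_F_* values. */
#define F_RXALL   (1u << 0)
#define F_RXCSUM  (1u << 1)
#define F_VLAN_RX (1u << 2)
#define HANDLED   (F_RXALL | F_RXCSUM | F_VLAN_RX)

static uint32_t current_features;

static void apply_features(uint32_t features)
{
	/* Program "hardware" from 'features' unconditionally; the caller
	 * has already decided whether anything relevant changed. */
	printf("programming hw: rxall=%d rxcsum=%d vlan=%d\n",
	       !!(features & F_RXALL),
	       !!(features & F_RXCSUM),
	       !!(features & F_VLAN_RX));
	current_features = features;
}

static void set_features(uint32_t requested)
{
	uint32_t features = requested & HANDLED;      /* ignore unrelated bits */

	if (features ^ (current_features & HANDLED))  /* only touch hw on change */
		apply_features(features);
}

int main(void)
{
	set_features(F_RXCSUM | F_VLAN_RX);  /* programs hardware  */
	set_features(F_RXCSUM | F_VLAN_RX);  /* no change: skipped */
	return 0;
}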
index 0537381..6859437 100644 (file)
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
        u32 crc;
        int bit;
 
+       if (!efx_dev_registered(efx))
+               return;
+
        netif_addr_lock_bh(net_dev);
 
        efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
index 23c89ab..f675396 100644 (file)
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        if (IS_ERR(desc))
                return PTR_ERR(desc);
 
+       if (desc->hdr.state != VIO_DESC_READY)
+               return 1;
+
+       rmb();
+
        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
               desc->size, desc->ncookies,
               desc->cookies[0].cookie_addr,
               desc->cookies[0].cookie_size);
 
-       if (desc->hdr.state != VIO_DESC_READY)
-               return 1;
        err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
        if (err == -ECONNRESET)
                return err;
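The sunvnet hunk checks desc->hdr.state for VIO_DESC_READY before anything else and only then issues rmb(), so the size/cookie fields are guaranteed to be read after the ready flag was observed. The sketch below shows the same check-then-acquire ordering using C11 atomics in place of the kernel's rmb(); the descriptor layout is invented:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_READY 1

/* Invented descriptor: the producer fills in size/cookie first and
 * writes 'state' last. */
struct desc {
	_Atomic uint8_t state;
	uint32_t size;
	uint64_t cookie;
};

static int consume_one(struct desc *d)
{
	/* 1. Check ownership first. */
	if (atomic_load_explicit(&d->state, memory_order_relaxed) != DESC_READY)
		return 1;        /* not ours yet */

	/* 2. Acquire fence (the kernel uses rmb() here) so the loads of
	 *    size/cookie below cannot be satisfied with values observed
	 *    before the state check. */
	atomic_thread_fence(memory_order_acquire);

	printf("size=%u cookie=%llu\n", d->size,
	       (unsigned long long)d->cookie);
	return 0;
}

int main(void)
{
	struct desc d = { .size = 64, .cookie = 42 };

	atomic_store_explicit(&d.state, DESC_READY, memory_order_release);
	return consume_one(&d);
}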
index 999fb72..e2a0028 100644 (file)
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
        cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
 
        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+               bool ndev_status = false;
+               struct cpsw_slave *slave = priv->slaves;
+               int n;
+
+               if (priv->data.dual_emac) {
+                       /* In dual emac mode check for all interfaces */
+                       for (n = priv->data.slaves; n; n--, slave++)
+                               if (netif_running(slave->ndev))
+                                       ndev_status = true;
+               }
+
+               if (ndev_status && (status >= 0)) {
+                       /* The packet received is for the interface which
+                       /* The packet received is for an interface which is
+                        * already down while the other interface is up and
+                        * running. Instead of freeing it, which would reduce
+                        * the number of rx descriptors available to the DMA
+                        * engine, requeue the skb back to cpdma.
+                        */
+                       goto requeue;
+               }
+
                /* the interface is going down, skbs are purged */
                dev_kfree_skb_any(skb);
                return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
                new_skb = skb;
        }
 
+requeue:
        ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
                        skb_tailroom(new_skb), 0);
        if (WARN_ON(ret < 0))
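The cpsw hunk above adds a requeue path: in dual-EMAC mode a frame may arrive for a slave whose netdev is down while the other slave is still running, and freeing it would permanently shrink the RX descriptor pool shared with the DMA engine. A sketch of the resulting three-way decision, with invented types and no cpdma details:

#include <stdbool.h>
#include <stdio.h>

enum rx_action { RX_PROCESS, RX_REQUEUE, RX_FREE };

/* status < 0 means the DMA layer is tearing the channel down; any_if_up
 * says whether at least one of the dual-EMAC interfaces is running. */
static enum rx_action classify_rx(int status, bool this_if_up, bool any_if_up)
{
	if (status >= 0 && this_if_up)
		return RX_PROCESS;              /* normal path               */
	if (status >= 0 && any_if_up)
		return RX_REQUEUE;              /* keep the descriptor pool  */
	return RX_FREE;                         /* whole channel going down  */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_rx(0, true,  true),     /* RX_PROCESS */
	       classify_rx(0, false, true),     /* RX_REQUEUE */
	       classify_rx(-1, false, false));  /* RX_FREE    */
	return 0;
}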
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
        struct net_device       *ndev = platform_get_drvdata(pdev);
        struct cpsw_priv        *priv = netdev_priv(ndev);
 
-       if (netif_running(ndev))
-               cpsw_ndo_stop(ndev);
+       if (priv->data.dual_emac) {
+               int i;
 
-       for_each_slave(priv, soft_reset_slave);
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (netif_running(priv->slaves[i].ndev))
+                               cpsw_ndo_stop(priv->slaves[i].ndev);
+                       soft_reset_slave(priv->slaves + i);
+               }
+       } else {
+               if (netif_running(ndev))
+                       cpsw_ndo_stop(ndev);
+               for_each_slave(priv, soft_reset_slave);
+       }
 
        pm_runtime_put_sync(&pdev->dev);
 
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
        struct net_device       *ndev = platform_get_drvdata(pdev);
+       struct cpsw_priv        *priv = netdev_priv(ndev);
 
        pm_runtime_get_sync(&pdev->dev);
 
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (netif_running(ndev))
-               cpsw_ndo_open(ndev);
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (netif_running(priv->slaves[i].ndev))
+                               cpsw_ndo_open(priv->slaves[i].ndev);
+               }
+       } else {
+               if (netif_running(ndev))
+                       cpsw_ndo_open(ndev);
+       }
        return 0;
 }
 
index a969555..726edab 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/netpoll.h>
 
 #define MACVLAN_HASH_SIZE      (1 << BITS_PER_BYTE)
+#define MACVLAN_BC_QUEUE_LEN   1000
 
 struct macvlan_port {
        struct net_device       *dev;
@@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
                goto err;
 
        spin_lock(&port->bc_queue.lock);
-       if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+       if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
                __skb_queue_tail(&port->bc_queue, nskb);
                err = 0;
        }
@@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
                                             features,
                                             mask);
        features |= ALWAYS_ON_FEATURES;
+       features &= ~NETIF_F_NETNS_LOCAL;
 
        return features;
 }
index fd0ea7c..011dbda 100644 (file)
@@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9031,
        .phy_id_mask    = 0x00fffff0,
        .name           = "Micrel KSZ9031 Gigabit PHY",
-       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
-                               | SUPPORTED_Asym_Pause),
+       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
index 87f7104..74760e8 100644 (file)
@@ -2019,7 +2019,7 @@ static int rtl8153_enable(struct r8152 *tp)
        return rtl_enable(tp);
 }
 
-static void rtl8152_disable(struct r8152 *tp)
+static void rtl_disable(struct r8152 *tp)
 {
        u32 ocp_data;
        int i;
@@ -2232,6 +2232,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
                                            LINKENA | DIS_SDSAVE);
 }
 
+static void rtl8152_disable(struct r8152 *tp)
+{
+       r8152b_disable_aldps(tp);
+       rtl_disable(tp);
+       r8152b_enable_aldps(tp);
+}
+
 static void r8152b_hw_phy_cfg(struct r8152 *tp)
 {
        u16 data;
@@ -2242,11 +2249,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
                r8152_mdio_write(tp, MII_BMCR, data);
        }
 
-       r8152b_disable_aldps(tp);
-
        rtl_clear_bp(tp);
 
-       r8152b_enable_aldps(tp);
        set_bit(PHY_RESET, &tp->flags);
 }
 
@@ -2255,9 +2259,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
-               return;
-
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
        ocp_data &= ~RCR_ACPT_ALL;
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2347,7 +2348,7 @@ static void r8152b_enter_oob(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
 
-       rtl8152_disable(tp);
+       rtl_disable(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2485,9 +2486,6 @@ static void r8153_first_init(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
-               return;
-
        rxdy_gated_en(tp, true);
        r8153_teredo_off(tp);
 
@@ -2560,7 +2558,7 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_data &= ~NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
-       rtl8152_disable(tp);
+       rtl_disable(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2624,6 +2622,13 @@ static void r8153_enable_aldps(struct r8152 *tp)
        ocp_reg_write(tp, OCP_POWER_CFG, data);
 }
 
+static void rtl8153_disable(struct r8152 *tp)
+{
+       r8153_disable_aldps(tp);
+       rtl_disable(tp);
+       r8153_enable_aldps(tp);
+}
+
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 {
        u16 bmcr, anar, gbcr;
@@ -2714,6 +2719,16 @@ out:
        return ret;
 }
 
+static void rtl8152_up(struct r8152 *tp)
+{
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       r8152b_disable_aldps(tp);
+       r8152b_exit_oob(tp);
+       r8152b_enable_aldps(tp);
+}
+
 static void rtl8152_down(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2727,6 +2742,16 @@ static void rtl8152_down(struct r8152 *tp)
        r8152b_enable_aldps(tp);
 }
 
+static void rtl8153_up(struct r8152 *tp)
+{
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       r8153_disable_aldps(tp);
+       r8153_first_init(tp);
+       r8153_enable_aldps(tp);
+}
+
 static void rtl8153_down(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2946,6 +2971,8 @@ static void r8152b_init(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8152b_disable_aldps(tp);
+
        if (tp->version == RTL_VER_01) {
                ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
                ocp_data &= ~LED_MODE_MASK;
@@ -2984,6 +3011,7 @@ static void r8153_init(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8153_disable_aldps(tp);
        r8153_u1u2en(tp, false);
 
        for (i = 0; i < 500; i++) {
@@ -3392,7 +3420,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                        ops->init               = r8152b_init;
                        ops->enable             = rtl8152_enable;
                        ops->disable            = rtl8152_disable;
-                       ops->up                 = r8152b_exit_oob;
+                       ops->up                 = rtl8152_up;
                        ops->down               = rtl8152_down;
                        ops->unload             = rtl8152_unload;
                        ret = 0;
@@ -3400,8 +3428,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                case PRODUCT_ID_RTL8153:
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
-                       ops->disable            = rtl8152_disable;
-                       ops->up                 = r8153_first_init;
+                       ops->disable            = rtl8153_disable;
+                       ops->up                 = rtl8153_up;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
@@ -3416,8 +3444,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                case PRODUCT_ID_SAMSUNG:
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
-                       ops->disable            = rtl8152_disable;
-                       ops->up                 = r8153_first_init;
+                       ops->disable            = rtl8153_disable;
+                       ops->up                 = rtl8153_up;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
index 733be51..6ad4447 100644 (file)
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
                                 struct ath9k_beacon_state *bs)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       int dtim_intval, sleepduration;
+       int dtim_intval;
        u64 tsf;
 
        /* No need to configure beacon if we are not associated */
@@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         * last beacon we received (which may be none).
         */
        dtim_intval = conf->intval * conf->dtim_period;
-       sleepduration = ah->hw->conf.listen_interval * conf->intval;
 
        /*
         * Pull nexttbtt forward to reflect the current
@@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         */
 
        bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-                                                 sleepduration));
+                                                 conf->intval));
        if (bs->bs_sleepduration > bs->bs_dtimperiod)
                bs->bs_sleepduration = bs->bs_dtimperiod;
 
index bb86eb2..f0484b1 100644 (file)
@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        struct ath_hw *ah = common->ah;
        struct ath_htc_rx_status *rxstatus;
        struct ath_rx_status rx_stats;
-       bool decrypt_error;
+       bool decrypt_error = false;
 
        if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
                ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
index e6ac8d2..4b148bb 100644 (file)
@@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev)
         * touch anything. Note this can happen early
         * on if the IRQ is shared.
         */
-       if (test_bit(ATH_OP_INVALID, &common->op_flags))
+       if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
                return IRQ_NONE;
 
        /* shared irq, not for us */
index b8e2561..fe3dc12 100644 (file)
@@ -27,10 +27,17 @@ config BRCMFMAC
          one of the bus interface support. If you choose to build a module,
          it'll be called brcmfmac.ko.
 
+config BRCMFMAC_PROTO_BCDC
+       bool
+
+config BRCMFMAC_PROTO_MSGBUF
+       bool
+
 config BRCMFMAC_SDIO
        bool "SDIO bus interface support for FullMAC driver"
        depends on (MMC = y || MMC = BRCMFMAC)
        depends on BRCMFMAC
+       select BRCMFMAC_PROTO_BCDC
        select FW_LOADER
        default y
        ---help---
@@ -42,6 +49,7 @@ config BRCMFMAC_USB
        bool "USB bus interface support for FullMAC driver"
        depends on (USB = y || USB = BRCMFMAC)
        depends on BRCMFMAC
+       select BRCMFMAC_PROTO_BCDC
        select FW_LOADER
        ---help---
          This option enables the USB bus interface support for Broadcom
@@ -52,6 +60,8 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
+       depends on HAS_DMA
+       select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
          This option enables the PCIE bus interface support for Broadcom
index c35adf4..90a977f 100644 (file)
@@ -30,16 +30,18 @@ brcmfmac-objs += \
                fwsignal.o \
                p2p.o \
                proto.o \
-               bcdc.o \
-               commonring.o \
-               flowring.o \
-               msgbuf.o \
                dhd_common.o \
                dhd_linux.o \
                firmware.o \
                feature.o \
                btcoex.o \
                vendor.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
+               bcdc.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
+               commonring.o \
+               flowring.o \
+               msgbuf.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
                dhd_sdio.o \
                bcmsdh.o
index 17e8c03..6003179 100644 (file)
 #ifndef BRCMFMAC_BCDC_H
 #define BRCMFMAC_BCDC_H
 
-
+#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
 void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
-
+#else
+static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_BCDC_H */
index 4f1daab..44fc85f 100644 (file)
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                  ifevent->action, ifevent->ifidx, ifevent->bssidx,
                  ifevent->flags, ifevent->role);
 
-       if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+       /* The P2P Device interface event must not be ignored
+        * contrary to what firmware tells us. The only way to
+        * distinguish the P2P Device is by looking at the ifidx
+        * and bssidx received.
+        */
+       if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
+           (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
                brcmf_dbg(EVENT, "event can be ignored\n");
                return;
        }
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                                return;
        }
 
-       if (ifevent->action == BRCMF_E_IF_CHANGE)
+       if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
                brcmf_fws_reset_interface(ifp);
 
        err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
-       if (ifevent->action == BRCMF_E_IF_DEL) {
+       if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
                brcmf_fws_del_interface(ifp);
                brcmf_del_if(drvr, ifevent->bssidx);
        }
index dd20b18..cbf033f 100644 (file)
@@ -172,6 +172,8 @@ enum brcmf_fweh_event_code {
 #define BRCMF_E_IF_ROLE_STA                    0
 #define BRCMF_E_IF_ROLE_AP                     1
 #define BRCMF_E_IF_ROLE_WDS                    2
+#define BRCMF_E_IF_ROLE_P2P_GO                 3
+#define BRCMF_E_IF_ROLE_P2P_CLIENT             4
 
 /**
  * definitions for event packet validation.
index f901ae5..77a51b8 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef BRCMFMAC_MSGBUF_H
 #define BRCMFMAC_MSGBUF_H
 
+#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM      20
 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM       256
 
 
 int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
-
+#else
+static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+       return 0;
+}
+static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_MSGBUF_H */
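The brcmfmac Kconfig/Makefile/header hunks make the BCDC and MSGBUF protocol layers independently selectable, and the two headers gain the usual kernel pattern for that: when the option is off, the attach/detach entry points collapse into static inline stubs so common code compiles without #ifdefs at the call sites. A condensed standalone rendering of that pattern; the config macro, struct and function names below are placeholders, not brcmfmac's:

#include <stdio.h>

/* Toggle this define (normally done by Kconfig and the build system). */
#define CONFIG_PROTO_FOO 1

struct pub { int attached; };

#ifdef CONFIG_PROTO_FOO
/* Real implementation, normally in its own .c file compiled only when
 * the option is enabled. */
static int proto_foo_attach(struct pub *p)
{
	p->attached = 1;
	return 0;
}
static void proto_foo_detach(struct pub *p)
{
	p->attached = 0;
}
#else
/* Stub versions: same signatures, zero cost, so common code never
 * needs an #ifdef around the calls. */
static inline int proto_foo_attach(struct pub *p) { (void)p; return 0; }
static inline void proto_foo_detach(struct pub *p) { (void)p; }
#endif

int main(void)
{
	struct pub p = { 0 };

	proto_foo_attach(&p);
	printf("attached=%d\n", p.attached);
	proto_foo_detach(&p);
	return 0;
}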
index 02fe706..16a246b 100644 (file)
@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
 static void
 brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
 {
-       struct net_device *ndev = wdev->netdev;
-       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_cfg80211_vif *vif;
+       struct brcmf_if *ifp;
+
+       vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+       ifp = vif->ifp;
 
        if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
            (wdev->iftype == NL80211_IFTYPE_AP) ||
@@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
        struct brcmu_chan ch;
        int i;
 
-       for (i = 0; i <= total; i++) {
+       for (i = 0; i < total; i++) {
                ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
                cfg->d11inf.decchspec(&ch);
 
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
                ch.band = BRCMU_CHAN_BAND_2G;
                ch.bw = BRCMU_CHAN_BW_40;
+               ch.sb = BRCMU_CHAN_SB_NONE;
                ch.chnum = 0;
                cfg->d11inf.encchspec(&ch);
 
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
                        brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
                }
+               kfree(pbuf);
        }
        return err;
 }
index 760c45c..1513dbc 100644 (file)
@@ -40,7 +40,7 @@
 #include "commands.h"
 #include "power.h"
 
-static bool force_cam;
+static bool force_cam = true;
 module_param(force_cam, bool, 0644);
 MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
 
index d67a37a..d53adc2 100644 (file)
@@ -83,6 +83,8 @@
 #define IWL7260_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL3160_NVM_VERSION            0x709
 #define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL3165_NVM_VERSION            0x709
+#define IWL3165_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265_NVM_VERSION            0x0a1d
 #define IWL7265_TX_POWER_VERSION       0xffff /* meaningless */
 
@@ -92,6 +94,9 @@
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL3165_FW_PRE "iwlwifi-3165-"
+#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
+
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
        {0},
 };
 
+const struct iwl_cfg iwl3165_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 3165",
+       .fw_name_pre = IWL3165_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL3165_NVM_VERSION,
+       .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+       .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
 const struct iwl_cfg iwl7265_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 7265",
        .fw_name_pre = IWL7265_FW_PRE,
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 8da596d..3d7cc37 100644 (file)
@@ -120,6 +120,8 @@ enum iwl_led_mode {
 #define IWL_LONG_WD_TIMEOUT    10000
 #define IWL_MAX_WD_TIMEOUT     120000
 
+#define IWL_DEFAULT_MAX_TX_POWER 22
+
 /* Antenna presence definitions */
 #define        ANT_NONE        0x0
 #define        ANT_A           BIT(0)
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
 extern const struct iwl_cfg iwl3160_2ac_cfg;
 extern const struct iwl_cfg iwl3160_2n_cfg;
 extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl3165_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2n_cfg;
 extern const struct iwl_cfg iwl7265_n_cfg;
index 018af29..354255f 100644 (file)
@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = {
 #define LAST_2GHZ_HT_PLUS              9
 #define LAST_5GHZ_HT                   161
 
-#define DEFAULT_MAX_TX_POWER 16
-
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
        { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                 * Default value - highest tx power value.  max_power
                 * is not used in mvm, and is used for backwards compatibility
                 */
-               channel->max_power = DEFAULT_MAX_TX_POWER;
+               channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
                IWL_DEBUG_EEPROM(dev,
                                 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
index 2291bbc..ce71625 100644 (file)
@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-               u32 mode;
-
                switch (mvm->bt_force_ant_mode) {
                case BT_FORCE_ANT_BT:
                        mode = BT_COEX_BT;
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        struct iwl_bt_iterator_data *data = _data;
        struct iwl_mvm *mvm = data->mvm;
        struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_smps_mode smps_mode;
+       /* default smps_mode is AUTOMATIC - only used for client modes */
+       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
        u32 bt_activity_grading;
        int ave_rssi;
 
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               /* default smps_mode for BSS / P2P client is AUTOMATIC */
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
                break;
        case NL80211_IFTYPE_AP:
                if (!mvmvif->ap_ibss_active)
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        else if (bt_activity_grading >= BT_LOW_TRAFFIC)
                smps_mode = IEEE80211_SMPS_DYNAMIC;
 
-       /* relax SMPS contraints for next association */
+       /* relax SMPS constraints for next association */
        if (!vif->bss_conf.assoc)
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
index 2e90ff7..87e517b 100644 (file)
@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
 
        switch (param) {
        case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-               struct ieee80211_hw *hw = mvm->hw;
-               int dtimper = hw->conf.ps_dtim_period ?: 1;
+               int dtimper = vif->bss_conf.dtim_period ?: 1;
                int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
                IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
index 95f5b32..9a922f3 100644 (file)
@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario {
 
 /**
  * Smart Fifo configuration command.
- * @state: smart fifo state, types listed in iwl_sf_sate.
+ * @state: smart fifo state, types listed in enum %iwl_sf_state.
  * @watermark: Minimum allowed availabe free space in RXF for transient state.
  * @long_delay_timeouts: aging and idle timer values for each scenario
  * in long delay state.
  * @full_on_timeouts: timer values for each scenario in full on state.
  */
 struct iwl_sf_cfg_cmd {
-       enum iwl_sf_state state;
+       __le32 state;
        __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
        __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
        __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
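The fw-api hunk turns the state member of iwl_sf_cfg_cmd from an enum into __le32 because the struct is handed to the firmware: an enum's storage size is implementation-defined, while __le32 pins both the width and the byte order, and the caller now converts explicitly with cpu_to_le32() (see the sf.c hunk further below). A standalone sketch of the same host-enum / fixed-width-wire-field split, using htole32() from glibc's <endian.h> in place of cpu_to_le32(); the enumerators are illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

enum sf_state { SF_LONG_DELAY_ON = 0, SF_FULL_ON, SF_UNINIT, SF_INIT_OFF };

/* Wire format: fixed width, fixed (little-endian) byte order. The enum
 * is only used on the host side and converted at the boundary. */
struct sf_cfg_cmd {
	uint32_t state_le32;
};

static void build_cmd(struct sf_cfg_cmd *cmd, enum sf_state state)
{
	cmd->state_le32 = htole32((uint32_t)state);  /* kernel: cpu_to_le32() */
}

int main(void)
{
	struct sf_cfg_cmd cmd;

	build_cmd(&cmd, SF_FULL_ON);
	printf("wire value: 0x%08x\n", cmd.state_le32);
	return 0;
}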
index 0e523e2..8242e68 100644 (file)
@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
            !force_assoc_off) {
                u32 dtim_offs;
 
-               /* Allow beacons to pass through as long as we are not
-                * associated, or we do not have dtim period information.
-                */
-               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-
                /*
                 * The DTIM count counts down, so when it is N that means N
                 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
                ctxt_sta->is_assoc = cpu_to_le32(1);
        } else {
                ctxt_sta->is_assoc = cpu_to_le32(0);
+
+               /* Allow beacons to pass through as long as we are not
+                * associated, or we do not have dtim period information.
+                */
+               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
        }
 
        ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
index 7c87965..cdc272d 100644 (file)
@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       /* TODO: enable that only for firmwares that don't crash */
-       /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */
-       hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-       hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-       /* we create the 802.11 header and zero length SSID IE. */
-       hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
+               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+               /* we create the 802.11 header and zero length SSID IE. */
+               hw->wiphy->max_sched_scan_ie_len =
+                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       }
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
-       } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
-                             BSS_CHANGED_QOS)) {
-               ret = iwl_mvm_power_update_mac(mvm);
-               if (ret)
-                       IWL_ERR(mvm, "failed to update power mode\n");
        }
 
        if (changes & BSS_CHANGED_BEACON_INFO) {
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
        }
 
+       if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+               ret = iwl_mvm_power_update_mac(mvm);
+               if (ret)
+                       IWL_ERR(mvm, "failed to update power mode\n");
+       }
+
        if (changes & BSS_CHANGED_TXPOWER) {
                IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
                                bss_conf->txpower);
index 2b2d108..d9769a2 100644 (file)
@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
 {
-       struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
        int dtimper, dtimper_msec;
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                            mvmvif->color));
-       dtimper = hw->conf.ps_dtim_period ?: 1;
+       dtimper = vif->bss_conf.dtim_period;
 
        /*
         * Regardless of power management state the driver must set
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        if (enable) {
                /* configure skip over dtim up to 300 msec */
-               int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+               int dtimper = vif->bss_conf.dtim_period ?: 1;
                int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
                if (WARN_ON(!dtimper_msec))
index 4b98987..bf5cd8c 100644 (file)
@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
            le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
        energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_A_POS;
-       energy_a = energy_a ? -energy_a : -256;
+       energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_B_POS;
-       energy_b = energy_b ? -energy_b : -256;
+       energy_b = energy_b ? -energy_b : S8_MIN;
        energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
                                                IWL_RX_INFO_ENERGY_ANT_C_POS;
-       energy_c = energy_c ? -energy_c : -256;
+       energy_c = energy_c ? -energy_c : S8_MIN;
        max_energy = max(energy_a, energy_b);
        max_energy = max(max_energy, energy_c);
 
index 7edfd15..e843b67 100644 (file)
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                             enum iwl_sf_state new_state)
 {
        struct iwl_sf_cfg_cmd sf_cmd = {
-               .state = new_state,
+               .state = cpu_to_le32(new_state),
        };
        struct ieee80211_sta *sta;
        int ret = 0;
index dbc8707..9ee410b 100644 (file)
@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 
        /*
         * for data packets, rate info comes from the table inside the fw. This
-        * table is controlled by LINK_QUALITY commands
+        * table is controlled by LINK_QUALITY commands. Exclude control port
+        * frames such as EAPOLs, which should be treated as mgmt frames: this
+        * avoids sending them at high initial rates, which improves the
+        * chances of completing the 4-Way handshake.
         */
 
-       if (ieee80211_is_data(fc) && sta) {
+       if (ieee80211_is_data(fc) && sta &&
+           !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
index f0e722c..073a68b 100644 (file)
@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
 
+/* 3165 Series */
+       {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
index f868333..963a4a5 100644 (file)
@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
                targets->sens_res =
                         be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
                targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
-               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
-                      skb->data[MICROREAD_EMCF_A_LEN]);
                targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
+               if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+                       r = -EINVAL;
+                       goto exit_free;
+               }
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+                      targets->nfcid1_len);
                break;
        case MICROREAD_GATE_ID_MREAD_ISO_A_3:
                targets->supported_protocols =
@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
                targets->sens_res =
                         be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
                targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
-               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
-                      skb->data[MICROREAD_EMCF_A3_LEN]);
                targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
+               if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
+                       r = -EINVAL;
+                       goto exit_free;
+               }
+               memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+                      targets->nfcid1_len);
                break;
        case MICROREAD_GATE_ID_MREAD_ISO_B:
                targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
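
The fix above validates the device-reported UID length against the destination buffer before copying instead of trusting it blindly. A minimal sketch of the same check-then-copy pattern; the struct, buffer size and offsets are illustrative, not the real NFC core definitions:

    #include <errno.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct target_info {
            uint8_t nfcid1[10];
            uint8_t nfcid1_len;
    };

    static int copy_uid(struct target_info *t, const uint8_t *frame,
                        size_t uid_off, size_t uid_len)
    {
            /* never trust a length field taken from the wire */
            if (uid_len > sizeof(t->nfcid1))
                    return -EINVAL;

            memcpy(t->nfcid1, frame + uid_off, uid_len);
            t->nfcid1_len = (uint8_t)uid_len;
            return 0;
    }

    int main(void)
    {
            static const uint8_t frame[] = { 0x04, 0xc3, 0x5a, 0x12, 0x34, 0x56, 0x78 };
            struct target_info t;

            return copy_uid(&t, frame, 0, sizeof(frame)) ? 1 : 0;
    }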
index db7a38a..7d688f9 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for ST21NFCA HCI based NFC driver
 #
 
-st21nfca_i2c-objs  = i2c.o
+st21nfca_hci-objs = st21nfca.o st21nfca_dep.o
+obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca_hci.o
 
-obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca.o st21nfca_dep.o
+st21nfca_i2c-objs  = i2c.o
 obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
index 13d9f03..f4d835d 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for ST21NFCB NCI based NFC driver
 #
 
-st21nfcb_i2c-objs  = i2c.o
+st21nfcb_nci-objs = ndlc.o st21nfcb.o
+obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb_nci.o
 
-obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb.o ndlc.o
+st21nfcb_i2c-objs = i2c.o
 obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o
index d8574ad..293ed4b 100644 (file)
@@ -138,6 +138,9 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
        /* Important: Don't leak passwords */
        bool secure = strncmp(pp->name, "security-", 9) == 0;
 
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return 0;
+
        if (!of_kset || !of_node_is_attached(np))
                return 0;
 
@@ -158,6 +161,9 @@ int __of_attach_node_sysfs(struct device_node *np)
        struct property *pp;
        int rc;
 
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return 0;
+
        if (!of_kset)
                return 0;
 
@@ -1713,6 +1719,9 @@ int __of_remove_property(struct device_node *np, struct property *prop)
 
 void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
 {
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return;
+
        /* at early boot, bail here and defer setup to of_init() */
        if (of_kset && of_node_is_attached(np))
                sysfs_remove_bin_file(&np->kobj, &prop->attr);
@@ -1777,6 +1786,9 @@ int __of_update_property(struct device_node *np, struct property *newprop,
 void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
                struct property *oldprop)
 {
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return;
+
        /* At early boot, bail out and defer setup to of_init() */
        if (!of_kset)
                return;
@@ -1847,6 +1859,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
 {
        struct property *pp;
 
+       of_aliases = of_find_node_by_path("/aliases");
        of_chosen = of_find_node_by_path("/chosen");
        if (of_chosen == NULL)
                of_chosen = of_find_node_by_path("/chosen@0");
@@ -1862,7 +1875,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
                        of_stdout = of_find_node_by_path(name);
        }
 
-       of_aliases = of_find_node_by_path("/aliases");
        if (!of_aliases)
                return;
 
@@ -1986,7 +1998,7 @@ bool of_console_check(struct device_node *dn, char *name, int index)
 {
        if (!dn || dn != of_stdout || console_set_on_cmdline)
                return false;
-       return add_preferred_console(name, index, NULL);
+       return !add_preferred_console(name, index, NULL);
 }
 EXPORT_SYMBOL_GPL(of_console_check);
 
index 54fecc4..f297891 100644 (file)
@@ -45,6 +45,9 @@ void __of_detach_node_sysfs(struct device_node *np)
 {
        struct property *pp;
 
+       if (!IS_ENABLED(CONFIG_SYSFS))
+               return;
+
        BUG_ON(!of_node_is_initialized(np));
        if (!of_kset)
                return;
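
These hunks make the OF sysfs helpers no-ops when CONFIG_SYSFS is disabled; IS_ENABLED() folds to a compile-time constant, so the guarded early return is free and the remainder of each function is eliminated as dead code. A rough user-space approximation of the idiom, with a made-up CONFIG_DEMO_FEATURE symbol and helper:

    #include <stdio.h>

    #define CONFIG_DEMO_FEATURE 1            /* drop this line to simulate =n */

    #ifdef CONFIG_DEMO_FEATURE
    #define IS_ENABLED_DEMO()   1
    #else
    #define IS_ENABLED_DEMO()   0
    #endif

    static int register_demo_entry(const char *name)
    {
            if (!IS_ENABLED_DEMO())
                    return 0;                /* silently succeed, as the OF helpers now do */

            printf("registering %s\n", name);
            return 0;
    }

    int main(void)
    {
            return register_demo_entry("example");
    }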
index 79cb831..d1ffca8 100644 (file)
@@ -928,7 +928,11 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
        const u64 phys_offset = __pa(PAGE_OFFSET);
-       base &= PAGE_MASK;
+
+       if (!PAGE_ALIGNED(base)) {
+               size -= PAGE_SIZE - (base & ~PAGE_MASK);
+               base = PAGE_ALIGN(base);
+       }
        size &= PAGE_MASK;
 
        if (base > MAX_PHYS_ADDR) {
@@ -937,10 +941,10 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
                return;
        }
 
-       if (base + size > MAX_PHYS_ADDR) {
-               pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
-                               ULONG_MAX, base + size);
-               size = MAX_PHYS_ADDR - base;
+       if (base + size - 1 > MAX_PHYS_ADDR) {
+               pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+                               ((u64)MAX_PHYS_ADDR) + 1, base + size);
+               size = MAX_PHYS_ADDR - base + 1;
        }
 
        if (base + size < phys_offset) {
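
Rather than rounding a misaligned start address down (and so claiming memory below the reported range), the new code rounds the base up and shrinks the size by the bytes skipped; the end-of-range test also becomes inclusive to avoid an off-by-one at MAX_PHYS_ADDR. A standalone sketch of the alignment arithmetic with illustrative constants:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE       4096ULL
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define PAGE_ALIGNED(x) (((x) & ~PAGE_MASK) == 0)
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            uint64_t base = 0x80000100, size = 0x10000;

            if (!PAGE_ALIGNED(base)) {
                    size -= PAGE_SIZE - (base & ~PAGE_MASK);
                    base  = PAGE_ALIGN(base);
            }
            size &= PAGE_MASK;

            printf("base=%#llx size=%#llx\n",
                   (unsigned long long)base, (unsigned long long)size);
            /* prints: base=0x80001000 size=0xf000 */
            return 0;
    }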
index 0f54ab6..3651c38 100644 (file)
@@ -278,7 +278,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
 {
        struct hardware_path hwpath;
        unsigned short i;
-       char in[count+1], *temp;
+       char in[64], *temp;
        struct device *dev;
        int ret;
 
@@ -286,8 +286,9 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
        
        /* Let's clean up the target. 0xff is a blank pattern */
        memset(&hwpath, 0xff, sizeof(hwpath));
@@ -393,14 +394,15 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
 {
        unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */
        unsigned short i;
-       char in[count+1], *temp;
+       char in[64], *temp;
 
        if (!entry || !buf || !count)
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
        
        /* Let's clean up the target. 0 is a blank pattern */
        memset(&layers, 0, sizeof(layers));
@@ -755,7 +757,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
 {
        struct pdcspath_entry *pathentry;
        unsigned char flags;
-       char in[count+1], *temp;
+       char in[8], *temp;
        char c;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -765,8 +767,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
                return -EINVAL;
 
        /* We'll use a local copy of buf */
-       memset(in, 0, count+1);
+       count = min_t(size_t, count, sizeof(in)-1);
        strncpy(in, buf, count);
+       in[count] = '\0';
 
        /* Current flags are stored in primary boot path entry */
        pathentry = &pdcspath_entry_primary;
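
All three store handlers drop the variable-length `char in[count+1]` stack buffer, whose size was caller controlled, in favour of a fixed buffer plus a clamped, explicitly terminated copy. A sketch of that clamp-and-terminate pattern; the buffer size and input string are illustrative:

    #include <stdio.h>
    #include <string.h>

    static void store_value(const char *buf, size_t count)
    {
            char in[64];

            /* bound the copy by the local buffer, not by the caller's count */
            if (count > sizeof(in) - 1)
                    count = sizeof(in) - 1;
            strncpy(in, buf, count);
            in[count] = '\0';

            printf("parsed: '%s'\n", in);
    }

    int main(void)
    {
            const char *buf = "2/0/1.6.0";
            store_value(buf, strlen(buf));
            return 0;
    }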
index 5e01ae3..2a412fa 100644 (file)
@@ -160,7 +160,7 @@ static void pcie_wait_cmd(struct controller *ctrl)
            ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
                rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
        else
-               rc = pcie_poll_cmd(ctrl, timeout);
+               rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
 
        /*
         * Controllers with errata like Intel CF118 don't generate
index e3cf8a2..4170113 100644 (file)
@@ -775,7 +775,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
        /* Check if setup is sensible at all */
        if (!pass &&
            (primary != bus->number || secondary <= bus->number ||
-            secondary > subordinate || subordinate > bus->busn_res.end)) {
+            secondary > subordinate)) {
                dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
                         secondary, subordinate);
                broken = 1;
@@ -838,23 +838,18 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
                        goto out;
                }
 
-               if (max >= bus->busn_res.end) {
-                       dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
-                                max, &bus->busn_res);
-                       goto out;
-               }
-
                /* Clear errors */
                pci_write_config_word(dev, PCI_STATUS, 0xffff);
 
-               /* The bus will already exist if we are rescanning */
+               /* Prevent assigning a bus number that already exists.
+                * This can happen when a bridge is hot-plugged, so in
+                * this case we only re-scan this bus. */
                child = pci_find_bus(pci_domain_nr(bus), max+1);
                if (!child) {
                        child = pci_add_new_bus(bus, dev, max+1);
                        if (!child)
                                goto out;
-                       pci_bus_insert_busn_res(child, max+1,
-                                               bus->busn_res.end);
+                       pci_bus_insert_busn_res(child, max+1, 0xff);
                }
                max++;
                buses = (buses & 0xff000000)
@@ -913,11 +908,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
                /*
                 * Set the subordinate bus number to its real value.
                 */
-               if (max > bus->busn_res.end) {
-                       dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
-                                max, &bus->busn_res);
-                       max = bus->busn_res.end;
-               }
                pci_bus_update_busn_res_end(child, max);
                pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
        }
index 8225b89..c384fec 100644 (file)
@@ -232,6 +232,7 @@ static struct platform_driver efi_rtc_driver = {
 
 module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe);
 
+MODULE_ALIAS("platform:rtc-efi");
 MODULE_AUTHOR("dann frazier <dannf@hp.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("EFI RTC driver");
index 18a3358..bd85fb4 100644 (file)
@@ -43,7 +43,7 @@ config SCSI_DMA
 config SCSI_NETLINK
        bool
        default n
-       select NET
+       depends on NET
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
@@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS
 
 config SCSI_FC_ATTRS
        tristate "FiberChannel Transport Attributes"
-       depends on SCSI
+       depends on SCSI && NET
        select SCSI_NETLINK
        help
          If you wish to export transport-specific information about
@@ -585,28 +585,28 @@ config HYPERV_STORAGE
 
 config LIBFC
        tristate "LibFC module"
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select CRC32
        ---help---
          Fibre Channel library module
 
 config LIBFCOE
        tristate "LibFCoE module"
-       select LIBFC
+       depends on LIBFC
        ---help---
          Library for Fibre Channel over Ethernet module
 
 config FCOE
        tristate "FCoE module"
        depends on PCI
-       select LIBFCOE
+       depends on LIBFCOE
        ---help---
          Fibre Channel over Ethernet module
 
 config FCOE_FNIC
        tristate "Cisco FNIC Driver"
        depends on PCI && X86
-       select LIBFCOE
+       depends on LIBFCOE
        help
          This is support for the Cisco PCI-Express FCoE HBA.
 
@@ -816,7 +816,7 @@ config SCSI_IBMVSCSI
 config SCSI_IBMVFC
        tristate "IBM Virtual FC support"
        depends on PPC_PSERIES && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
          This is the IBM POWER Virtual FC Client
 
@@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig"
 config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
@@ -1676,7 +1676,7 @@ config SCSI_SUNESP
 config ZFCP
        tristate "FCP host bus adapter driver for IBM eServer zSeries"
        depends on S390 && QDIO && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
           If you want to access SCSI devices attached to your IBM eServer
           zSeries by means of Fibre Channel interfaces say Y.
@@ -1704,7 +1704,7 @@ config SCSI_PM8001
 config SCSI_BFA_FC
        tristate "Brocade BFA Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        help
          This bfa driver supports all Brocade PCIe FC/FCOE host adapters.
 
index f245d54..0978828 100644 (file)
@@ -1,11 +1,12 @@
 config SCSI_BNX2X_FCOE
        tristate "QLogic NetXtreme II FCoE support"
        depends on PCI
+       depends on (IPV6 || IPV6=n)
+       depends on LIBFC
+       depends on LIBFCOE
        select NETDEVICES
        select ETHERNET
        select NET_VENDOR_BROADCOM
-       select LIBFC
-       select LIBFCOE
        select CNIC
        ---help---
        This driver supports FCoE offload for the QLogic NetXtreme II
index 44ce54e..ba30ff8 100644 (file)
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
        tristate "QLogic NetXtreme II iSCSI support"
        depends on NET
        depends on PCI
+       depends on (IPV6 || IPV6=n)
        select SCSI_ISCSI_ATTRS
        select NETDEVICES
        select ETHERNET
index 4d03b03..7c7e508 100644 (file)
@@ -1,7 +1,7 @@
 config SCSI_CHELSIO_FCOE
        tristate "Chelsio Communications FCoE support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select FW_LOADER
        help
          This driver supports FCoE Offload functionality over
index 23d6072..113e6c9 100644 (file)
@@ -1,7 +1,7 @@
 config SCSI_QLA_FC
        tristate "QLogic QLA2XXX Fibre Channel Support"
        depends on PCI && SCSI
-       select SCSI_FC_ATTRS
+       depends on SCSI_FC_ATTRS
        select FW_LOADER
        ---help---
        This qla2xxx driver supports all QLogic Fibre Channel
@@ -31,7 +31,7 @@ config SCSI_QLA_FC
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
        depends on SCSI_QLA_FC && TARGET_CORE
-       select LIBFC
+       depends on LIBFC
        select BTREE
        default n
        ---help---
index 447458e..7e1f120 100644 (file)
 #define GSBI_CTRL_REG          0x0000
 #define GSBI_PROTOCOL_SHIFT    4
 
+struct gsbi_info {
+       struct clk *hclk;
+       u32 mode;
+       u32 crci;
+};
+
 static int gsbi_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct resource *res;
        void __iomem *base;
-       struct clk *hclk;
-       u32 mode, crci = 0;
+       struct gsbi_info *gsbi;
+
+       gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+       if (!gsbi)
+               return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
-       if (of_property_read_u32(node, "qcom,mode", &mode)) {
+       if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
                dev_err(&pdev->dev, "missing mode configuration\n");
                return -EINVAL;
        }
 
        /* not required, so default to 0 if not present */
-       of_property_read_u32(node, "qcom,crci", &crci);
+       of_property_read_u32(node, "qcom,crci", &gsbi->crci);
 
-       dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci);
+       dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+                gsbi->mode, gsbi->crci);
+       gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+       if (IS_ERR(gsbi->hclk))
+               return PTR_ERR(gsbi->hclk);
 
-       hclk = devm_clk_get(&pdev->dev, "iface");
-       if (IS_ERR(hclk))
-               return PTR_ERR(hclk);
+       clk_prepare_enable(gsbi->hclk);
 
-       clk_prepare_enable(hclk);
-
-       writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci,
+       writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
                                base + GSBI_CTRL_REG);
 
        /* make sure the gsbi control write is not reordered */
        wmb();
 
-       clk_disable_unprepare(hclk);
+       platform_set_drvdata(pdev, gsbi);
+
+       return of_platform_populate(node, NULL, NULL, &pdev->dev);
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+       struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(gsbi->hclk);
 
-       return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+       return 0;
 }
 
 static const struct of_device_id gsbi_dt_match[] = {
@@ -76,6 +95,7 @@ static struct platform_driver gsbi_driver = {
                .of_match_table = gsbi_dt_match,
        },
        .probe = gsbi_probe,
+       .remove = gsbi_remove,
 };
 
 module_platform_driver(gsbi_driver);
index 8f05111..3588a80 100644 (file)
@@ -1022,7 +1022,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
-                                               index << sizebits, size);
+                                               (sector_t)index << sizebits,
+                                               size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
@@ -1043,7 +1044,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       end_block = init_page_buffers(page, bdev, index << sizebits, size);
+       end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
+                       size);
        spin_unlock(&inode->i_mapping->private_lock);
 done:
        ret = (block < end_block) ? 1 : -ENXIO;
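
The casts matter on 32-bit kernels, where the page index is an unsigned long of 32 bits: shifting it before widening to sector_t can silently wrap on large devices. A standalone illustration, with uint32_t and uint64_t standing in for the kernel types and an arbitrary large index:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t index = 0x00400000;    /* page index on a large device */
            unsigned int sizebits = 10;

            uint64_t wrong = index << sizebits;             /* 32-bit shift wraps to 0 here */
            uint64_t right = (uint64_t)index << sizebits;   /* widen first, then shift */

            printf("wrong=%llu right=%llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }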
index d749731..fbb08e9 100644 (file)
@@ -50,18 +50,18 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
               cache->brun_percent  < 100);
 
        if (*args) {
-               pr_err("'bind' command doesn't take an argument");
+               pr_err("'bind' command doesn't take an argument\n");
                return -EINVAL;
        }
 
        if (!cache->rootdirname) {
-               pr_err("No cache directory specified");
+               pr_err("No cache directory specified\n");
                return -EINVAL;
        }
 
        /* don't permit already bound caches to be re-bound */
        if (test_bit(CACHEFILES_READY, &cache->flags)) {
-               pr_err("Cache already bound");
+               pr_err("Cache already bound\n");
                return -EBUSY;
        }
 
@@ -248,7 +248,7 @@ error_open_root:
        kmem_cache_free(cachefiles_object_jar, fsdef);
 error_root_object:
        cachefiles_end_secure(cache, saved_cred);
-       pr_err("Failed to register: %d", ret);
+       pr_err("Failed to register: %d\n", ret);
        return ret;
 }
 
index b078d30..ce1b115 100644 (file)
@@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_poll(struct file *file,
 static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
                                         char *args)
 {
-       pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%");
+       pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
 
        return -EINVAL;
 }
@@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
        _enter(",%s", args);
 
        if (!*args) {
-               pr_err("Empty directory specified");
+               pr_err("Empty directory specified\n");
                return -EINVAL;
        }
 
        if (cache->rootdirname) {
-               pr_err("Second cache directory specified");
+               pr_err("Second cache directory specified\n");
                return -EEXIST;
        }
 
@@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
        _enter(",%s", args);
 
        if (!*args) {
-               pr_err("Empty security context specified");
+               pr_err("Empty security context specified\n");
                return -EINVAL;
        }
 
        if (cache->secctx) {
-               pr_err("Second security context specified");
+               pr_err("Second security context specified\n");
                return -EINVAL;
        }
 
@@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
        _enter(",%s", args);
 
        if (!*args) {
-               pr_err("Empty tag specified");
+               pr_err("Empty tag specified\n");
                return -EINVAL;
        }
 
@@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
                goto inval;
 
        if (!test_bit(CACHEFILES_READY, &cache->flags)) {
-               pr_err("cull applied to unready cache");
+               pr_err("cull applied to unready cache\n");
                return -EIO;
        }
 
        if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
-               pr_err("cull applied to dead cache");
+               pr_err("cull applied to dead cache\n");
                return -EIO;
        }
 
@@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
 
 notdir:
        path_put(&path);
-       pr_err("cull command requires dirfd to be a directory");
+       pr_err("cull command requires dirfd to be a directory\n");
        return -ENOTDIR;
 
 inval:
-       pr_err("cull command requires dirfd and filename");
+       pr_err("cull command requires dirfd and filename\n");
        return -EINVAL;
 }
 
@@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
        return 0;
 
 inval:
-       pr_err("debug command requires mask");
+       pr_err("debug command requires mask\n");
        return -EINVAL;
 }
 
@@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
                goto inval;
 
        if (!test_bit(CACHEFILES_READY, &cache->flags)) {
-               pr_err("inuse applied to unready cache");
+               pr_err("inuse applied to unready cache\n");
                return -EIO;
        }
 
        if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
-               pr_err("inuse applied to dead cache");
+               pr_err("inuse applied to dead cache\n");
                return -EIO;
        }
 
@@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
 
 notdir:
        path_put(&path);
-       pr_err("inuse command requires dirfd to be a directory");
+       pr_err("inuse command requires dirfd to be a directory\n");
        return -ENOTDIR;
 
 inval:
-       pr_err("inuse command requires dirfd and filename");
+       pr_err("inuse command requires dirfd and filename\n");
        return -EINVAL;
 }
 
index 3d50998..8c52472 100644 (file)
@@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
 
 #define cachefiles_io_error(___cache, FMT, ...)                \
 do {                                                   \
-       pr_err("I/O Error: " FMT, ##__VA_ARGS__);       \
+       pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);   \
        fscache_io_error(&(___cache)->cache);           \
        set_bit(CACHEFILES_DEAD, &(___cache)->flags);   \
 } while (0)
index 180edfb..711f13d 100644 (file)
@@ -84,7 +84,7 @@ error_proc:
 error_object_jar:
        misc_deregister(&cachefiles_dev);
 error_dev:
-       pr_err("failed to register: %d", ret);
+       pr_err("failed to register: %d\n", ret);
        return ret;
 }
 
index 5bf2b41..dad7d95 100644 (file)
@@ -543,7 +543,7 @@ lookup_again:
                               next, next->d_inode, next->d_inode->i_ino);
 
                } else if (!S_ISDIR(next->d_inode->i_mode)) {
-                       pr_err("inode %lu is not a directory",
+                       pr_err("inode %lu is not a directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
                        goto error;
@@ -574,7 +574,7 @@ lookup_again:
                } else if (!S_ISDIR(next->d_inode->i_mode) &&
                           !S_ISREG(next->d_inode->i_mode)
                           ) {
-                       pr_err("inode %lu is not a file or directory",
+                       pr_err("inode %lu is not a file or directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
                        goto error;
@@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
        ASSERT(subdir->d_inode);
 
        if (!S_ISDIR(subdir->d_inode->i_mode)) {
-               pr_err("%s is not a directory", dirname);
+               pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
        }
@@ -779,7 +779,8 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
            !subdir->d_inode->i_op->lookup ||
            !subdir->d_inode->i_op->mkdir ||
            !subdir->d_inode->i_op->create ||
-           !subdir->d_inode->i_op->rename ||
+           (!subdir->d_inode->i_op->rename &&
+            !subdir->d_inode->i_op->rename2) ||
            !subdir->d_inode->i_op->rmdir ||
            !subdir->d_inode->i_op->unlink)
                goto check_error;
@@ -795,13 +796,13 @@ check_error:
 mkdir_error:
        mutex_unlock(&dir->d_inode->i_mutex);
        dput(subdir);
-       pr_err("mkdir %s failed with error %d", dirname, ret);
+       pr_err("mkdir %s failed with error %d\n", dirname, ret);
        return ERR_PTR(ret);
 
 lookup_error:
        mutex_unlock(&dir->d_inode->i_mutex);
        ret = PTR_ERR(subdir);
-       pr_err("Lookup %s failed with error %d", dirname, ret);
+       pr_err("Lookup %s failed with error %d\n", dirname, ret);
        return ERR_PTR(ret);
 
 nomem_d_alloc:
@@ -891,7 +892,7 @@ lookup_error:
        if (ret == -EIO) {
                cachefiles_io_error(cache, "Lookup failed");
        } else if (ret != -ENOMEM) {
-               pr_err("Internal error: %d", ret);
+               pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }
 
@@ -950,7 +951,7 @@ error:
        }
 
        if (ret != -ENOMEM) {
-               pr_err("Internal error: %d", ret);
+               pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }
 
index 4b1fb5c..25e745b 100644 (file)
@@ -151,7 +151,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
-       struct pagevec pagevec;
        int error, max;
 
        op = container_of(_op, struct fscache_retrieval, op);
@@ -160,8 +159,6 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 
        _enter("{ino=%lu}", object->backer->d_inode->i_ino);
 
-       pagevec_init(&pagevec, 0);
-
        max = 8;
        spin_lock_irq(&object->work_lock);
 
@@ -396,7 +393,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 {
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
-       struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
@@ -427,8 +423,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;
 
-       pagevec_init(&pagevec, 0);
-
        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
index 1ad51ff..acbc1f0 100644 (file)
@@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
        }
 
        if (ret != -EEXIST) {
-               pr_err("Can't set xattr on %*.*s [%lu] (err %d)",
+               pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n",
                       dentry->d_name.len, dentry->d_name.len,
                       dentry->d_name.name, dentry->d_inode->i_ino,
                       -ret);
@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
                if (ret == -ERANGE)
                        goto bad_type_length;
 
-               pr_err("Can't read xattr on %*.*s [%lu] (err %d)",
+               pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n",
                       dentry->d_name.len, dentry->d_name.len,
                       dentry->d_name.name, dentry->d_inode->i_ino,
                       -ret);
@@ -85,14 +85,14 @@ error:
        return ret;
 
 bad_type_length:
-       pr_err("Cache object %lu type xattr length incorrect",
+       pr_err("Cache object %lu type xattr length incorrect\n",
               dentry->d_inode->i_ino);
        ret = -EIO;
        goto error;
 
 bad_type:
        xtype[2] = 0;
-       pr_err("Cache object %*.*s [%lu] type %s not %s",
+       pr_err("Cache object %*.*s [%lu] type %s not %s\n",
               dentry->d_name.len, dentry->d_name.len,
               dentry->d_name.name, dentry->d_inode->i_ino,
               xtype, type);
@@ -293,7 +293,7 @@ error:
        return ret;
 
 bad_type_length:
-       pr_err("Cache object %lu xattr length incorrect",
+       pr_err("Cache object %lu xattr length incorrect\n",
               dentry->d_inode->i_ino);
        ret = -EIO;
        goto error;
index 7a5b514..cb25a1a 100644 (file)
@@ -2372,7 +2372,8 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
 }
 EXPORT_SYMBOL(dentry_update_name_case);
 
-static void switch_names(struct dentry *dentry, struct dentry *target)
+static void switch_names(struct dentry *dentry, struct dentry *target,
+                        bool exchange)
 {
        if (dname_external(target)) {
                if (dname_external(dentry)) {
@@ -2406,13 +2407,19 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
                         */
                        unsigned int i;
                        BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
+                       if (!exchange) {
+                               memcpy(dentry->d_iname, target->d_name.name,
+                                               target->d_name.len + 1);
+                               dentry->d_name.hash_len = target->d_name.hash_len;
+                               return;
+                       }
                        for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
                                swap(((long *) &dentry->d_iname)[i],
                                     ((long *) &target->d_iname)[i]);
                        }
                }
        }
-       swap(dentry->d_name.len, target->d_name.len);
+       swap(dentry->d_name.hash_len, target->d_name.hash_len);
 }
 
 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
@@ -2442,25 +2449,29 @@ static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
        }
 }
 
-static void dentry_unlock_parents_for_move(struct dentry *dentry,
-                                       struct dentry *target)
+static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
 {
        if (target->d_parent != dentry->d_parent)
                spin_unlock(&dentry->d_parent->d_lock);
        if (target->d_parent != target)
                spin_unlock(&target->d_parent->d_lock);
+       spin_unlock(&target->d_lock);
+       spin_unlock(&dentry->d_lock);
 }
 
 /*
  * When switching names, the actual string doesn't strictly have to
  * be preserved in the target - because we're dropping the target
  * anyway. As such, we can just do a simple memcpy() to copy over
- * the new name before we switch.
- *
- * Note that we have to be a lot more careful about getting the hash
- * switched - we have to switch the hash value properly even if it
- * then no longer matches the actual (corrupted) string of the target.
- * The hash value has to match the hash queue that the dentry is on..
+ * the new name before we switch, unless we are going to rehash
+ * it.  Note that if we *do* unhash the target, we are not allowed
+ * to rehash it without giving it a new name/hash key - whether
+ * we swap or overwrite the names here, the resulting name won't match
+ * the reality in the filesystem; it's only there for d_path() purposes.
+ * Note that all of this is happening under rename_lock, so any
+ * hash lookup seeing it in the middle of manipulations will
+ * be discarded anyway.  So we do not care what happens to the hash
+ * key in that case.
  */
 /*
  * __d_move - move a dentry
@@ -2506,36 +2517,30 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
                           d_hash(dentry->d_parent, dentry->d_name.hash));
        }
 
-       list_del(&dentry->d_u.d_child);
-       list_del(&target->d_u.d_child);
-
        /* Switch the names.. */
-       switch_names(dentry, target);
-       swap(dentry->d_name.hash, target->d_name.hash);
+       switch_names(dentry, target, exchange);
 
-       /* ... and switch the parents */
+       /* ... and switch them in the tree */
        if (IS_ROOT(dentry)) {
+               /* splicing a tree */
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
-               INIT_LIST_HEAD(&target->d_u.d_child);
+               list_del_init(&target->d_u.d_child);
+               list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
        } else {
+               /* swapping two dentries */
                swap(dentry->d_parent, target->d_parent);
-
-               /* And add them back to the (new) parent lists */
-               list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
+               list_move(&target->d_u.d_child, &target->d_parent->d_subdirs);
+               list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+               if (exchange)
+                       fsnotify_d_move(target);
+               fsnotify_d_move(dentry);
        }
 
-       list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
-
        write_seqcount_end(&target->d_seq);
        write_seqcount_end(&dentry->d_seq);
 
-       dentry_unlock_parents_for_move(dentry, target);
-       if (exchange)
-               fsnotify_d_move(target);
-       spin_unlock(&target->d_lock);
-       fsnotify_d_move(dentry);
-       spin_unlock(&dentry->d_lock);
+       dentry_unlock_for_move(dentry, target);
 }
 
 /*
@@ -2633,45 +2638,6 @@ out_err:
        return ret;
 }
 
-/*
- * Prepare an anonymous dentry for life in the superblock's dentry tree as a
- * named dentry in place of the dentry to be replaced.
- * returns with anon->d_lock held!
- */
-static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
-{
-       struct dentry *dparent;
-
-       dentry_lock_for_move(anon, dentry);
-
-       write_seqcount_begin(&dentry->d_seq);
-       write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
-
-       dparent = dentry->d_parent;
-
-       switch_names(dentry, anon);
-       swap(dentry->d_name.hash, anon->d_name.hash);
-
-       dentry->d_parent = dentry;
-       list_del_init(&dentry->d_u.d_child);
-       anon->d_parent = dparent;
-       if (likely(!d_unhashed(anon))) {
-               hlist_bl_lock(&anon->d_sb->s_anon);
-               __hlist_bl_del(&anon->d_hash);
-               anon->d_hash.pprev = NULL;
-               hlist_bl_unlock(&anon->d_sb->s_anon);
-       }
-       list_move(&anon->d_u.d_child, &dparent->d_subdirs);
-
-       write_seqcount_end(&dentry->d_seq);
-       write_seqcount_end(&anon->d_seq);
-
-       dentry_unlock_parents_for_move(anon, dentry);
-       spin_unlock(&dentry->d_lock);
-
-       /* anon->d_lock still locked, returns locked */
-}
-
 /**
  * d_splice_alias - splice a disconnected dentry into the tree if one exists
  * @inode:  the inode which may have a disconnected dentry
@@ -2717,10 +2683,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                                return ERR_PTR(-EIO);
                        }
                        write_seqlock(&rename_lock);
-                       __d_materialise_dentry(dentry, new);
+                       __d_move(new, dentry, false);
                        write_sequnlock(&rename_lock);
-                       _d_rehash(new);
-                       spin_unlock(&new->d_lock);
                        spin_unlock(&inode->i_lock);
                        security_d_instantiate(new, inode);
                        iput(inode);
@@ -2780,7 +2744,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                        } else if (IS_ROOT(alias)) {
                                /* Is this an anonymous mountpoint that we
                                 * could splice into our tree? */
-                               __d_materialise_dentry(dentry, alias);
+                               __d_move(alias, dentry, false);
                                write_sequnlock(&rename_lock);
                                goto found;
                        } else {
@@ -2807,13 +2771,9 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
        actual = __d_instantiate_unique(dentry, inode);
        if (!actual)
                actual = dentry;
-       else
-               BUG_ON(!d_unhashed(actual));
 
-       spin_lock(&actual->d_lock);
+       d_rehash(actual);
 found:
-       _d_rehash(actual);
-       spin_unlock(&actual->d_lock);
        spin_unlock(&inode->i_lock);
 out_nolock:
        if (actual == dentry) {
index c311640..e181b6b 100644 (file)
@@ -158,7 +158,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 {
        ssize_t ret;
 
-       ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES,
+       ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
                                &sdio->from);
 
        if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
index d3b4539..da032da 100644 (file)
@@ -982,6 +982,7 @@ nomem:
 submit_op_failed:
        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        spin_unlock(&cookie->lock);
+       fscache_unuse_cookie(object);
        kfree(op);
        _leave(" [EIO]");
        return transit_to(KILL_OBJECT);
index 85332b9..de33b3f 100644 (file)
@@ -43,6 +43,19 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
 }
 EXPORT_SYMBOL(__fscache_wait_on_page_write);
 
+/*
+ * wait for a page to finish being written to the cache. Put a timeout here
+ * since we might be called recursively via the parent fs.
+ */
+static
+bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
+
+       return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
+                                 HZ);
+}
+
 /*
  * decide whether a page can be released, possibly by cancelling a store to it
  * - we're allowed to sleep if __GFP_WAIT is flagged
@@ -115,7 +128,10 @@ page_busy:
        }
 
        fscache_stat(&fscache_n_store_vmscan_wait);
-       __fscache_wait_on_page_write(cookie, page);
+       if (!release_page_wait_timeout(cookie, page))
+               _debug("fscache writeout timeout page: %p{%lx}",
+                       page, page->index);
+
        gfp &= ~__GFP_WAIT;
        goto try_again;
 }
@@ -182,7 +198,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
-       bool wake_cookie;
+       bool wake_cookie = false;
 
        _enter("%p", cookie);
 
@@ -212,15 +228,16 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 
        __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
-               goto nobufs;
+               goto nobufs_dec;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;
 
-nobufs:
+nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
+nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
index 912061a..caa8d95 100644 (file)
@@ -1305,6 +1305,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
                size_t start;
                ssize_t ret = iov_iter_get_pages(ii,
                                        &req->pages[req->num_pages],
+                                       *nbytesp - nbytes,
                                        req->max_pages - req->num_pages,
                                        &start);
                if (ret < 0)
index 6252b17..d071e7f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/buffer_head.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
+#include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/aio.h>
 #include "nilfs.h"
@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int nilfs_set_page_dirty(struct page *page)
 {
+       struct inode *inode = page->mapping->host;
        int ret = __set_page_dirty_nobuffers(page);
 
        if (page_has_buffers(page)) {
-               struct inode *inode = page->mapping->host;
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;
 
@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page)
 
                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
+       } else if (ret) {
+               unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+               nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
 }
index 3ec906e..e3cfa02 100644 (file)
@@ -655,12 +655,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
        clear_bit(bit, res->refmap);
 }
 
-
-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
 {
-       assert_spin_locked(&res->spinlock);
-
        res->inflight_locks++;
 
        mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
@@ -668,6 +665,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
             __builtin_return_address(0));
 }
 
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       __dlm_lockres_grab_inflight_ref(dlm, res);
+}
+
 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
 {
@@ -894,10 +898,8 @@ lookup:
        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
 
-       /* Grab inflight ref to pin the resource */
-       spin_lock(&res->spinlock);
-       dlm_lockres_grab_inflight_ref(dlm, res);
-       spin_unlock(&res->spinlock);
+       /* since this lockres is new it does not require the spinlock */
+       __dlm_lockres_grab_inflight_ref(dlm, res);
 
        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
index ddb662b..4142546 100644 (file)
@@ -2532,6 +2532,7 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
        kfree(osb->journal);
        kfree(osb->local_alloc_copy);
        kfree(osb->uuid_str);
+       kfree(osb->vol_label);
        ocfs2_put_dlm_debug(osb->osb_dlm_debug);
        memset(osb, 0, sizeof(struct ocfs2_super));
 }
index dfc791c..c341568 100644 (file)
@@ -931,23 +931,32 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
        while (addr < end) {
                struct vm_area_struct *vma = find_vma(walk->mm, addr);
                pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-               unsigned long vm_end;
+               /* End of address space hole, which we mark as non-present. */
+               unsigned long hole_end;
 
-               if (!vma) {
-                       vm_end = end;
-               } else {
-                       vm_end = min(end, vma->vm_end);
-                       if (vma->vm_flags & VM_SOFTDIRTY)
-                               pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+               if (vma)
+                       hole_end = min(end, vma->vm_start);
+               else
+                       hole_end = end;
+
+               for (; addr < hole_end; addr += PAGE_SIZE) {
+                       err = add_to_pagemap(addr, &pme, pm);
+                       if (err)
+                               goto out;
                }
 
-               for (; addr < vm_end; addr += PAGE_SIZE) {
+               if (!vma)
+                       break;
+
+               /* Addresses in the VMA. */
+               if (vma->vm_flags & VM_SOFTDIRTY)
+                       pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+               for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }
        }
-
 out:
        return err;
 }
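
The rewritten walker handles the hole before the next mapping and the mapping itself in two separate loops, so per-VMA state such as the soft-dirty flag is only applied to addresses that actually fall inside the VMA. A user-space sketch of just the control flow; the vma lookup is faked and only the loop structure mirrors the kernel code:

    #include <stdio.h>

    struct fake_vma { unsigned long vm_start, vm_end; };

    /* pretend there is a single mapping at [0x4000, 0x8000) */
    static struct fake_vma *find_vma_stub(unsigned long addr)
    {
            static struct fake_vma vma = { 0x4000, 0x8000 };
            return addr < vma.vm_end ? &vma : NULL;
    }

    int main(void)
    {
            unsigned long addr = 0, end = 0xa000;
            const unsigned long page = 0x1000;

            while (addr < end) {
                    struct fake_vma *vma = find_vma_stub(addr);
                    unsigned long hole_end = end, map_end;

                    if (vma && vma->vm_start < hole_end)
                            hole_end = vma->vm_start;
                    for (; addr < hole_end; addr += page)
                            printf("%#lx: hole (not present)\n", addr);

                    if (!vma)
                            break;

                    map_end = vma->vm_end < end ? vma->vm_end : end;
                    for (; addr < map_end; addr += page)
                            printf("%#lx: mapped\n", addr);
            }
            return 0;
    }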
index a9cc75f..7caa016 100644 (file)
@@ -298,7 +298,10 @@ cg_found:
        ufsi->i_oeftflag = 0;
        ufsi->i_dir_start_lookup = 0;
        memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
-       insert_inode_hash(inode);
+       if (insert_inode_locked(inode) < 0) {
+               err = -EIO;
+               goto failed;
+       }
        mark_inode_dirty(inode);
 
        if (uspi->fs_magic == UFS2_MAGIC) {
@@ -337,6 +340,7 @@ cg_found:
 fail_remove_inode:
        unlock_ufs(sb);
        clear_nlink(inode);
+       unlock_new_inode(inode);
        iput(inode);
        UFSD("EXIT (FAILED): err %d\n", err);
        return ERR_PTR(err);
index 2df62a7..fd65deb 100644 (file)
@@ -38,10 +38,12 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
 {
        int err = ufs_add_link(dentry, inode);
        if (!err) {
+               unlock_new_inode(inode);
                d_instantiate(dentry, inode);
                return 0;
        }
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        return err;
 }
@@ -155,6 +157,7 @@ out_notlocked:
 
 out_fail:
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -210,6 +213,7 @@ out:
 out_fail:
        inode_dec_link_count(inode);
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput (inode);
        inode_dec_link_count(dir);
        unlock_ufs(dir->i_sb);
index d91e59b..57ee052 100644 (file)
@@ -118,6 +118,7 @@ struct acpi_device;
 struct acpi_hotplug_profile {
        struct kobject kobj;
        int (*scan_dependent)(struct acpi_device *adev);
+       void (*notify_online)(struct acpi_device *adev);
        bool enabled:1;
        bool demand_offline:1;
 };
index ebcc9d1..7f43703 100644 (file)
@@ -26,6 +26,13 @@ struct ccp_cmd;
 #if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
        defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
 
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
 #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
 
+static inline int ccp_present(void)
+{
+       return -ENODEV;
+}
+
 static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
        return -ENODEV;
index ade2390..6e39c9b 100644 (file)
@@ -93,12 +93,12 @@ extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
-       return current->flags & PF_SPREAD_PAGE;
+       return task_spread_page(current);
 }
 
 static inline int cpuset_do_slab_mem_spread(void)
 {
-       return current->flags & PF_SPREAD_SLAB;
+       return task_spread_slab(current);
 }
 
 extern int current_cpuset_is_being_rebound(void);
index 43d183a..d0d5c5d 100644 (file)
@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus,
  * with the device lock held in the core, so be careful.
  */
 #define BUS_NOTIFY_ADD_DEVICE          0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE          0x00000002 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER         0x00000003 /* driver about to be
+#define BUS_NOTIFY_DEL_DEVICE          0x00000002 /* device to be removed */
+#define BUS_NOTIFY_REMOVED_DEVICE      0x00000003 /* device removed */
+#define BUS_NOTIFY_BIND_DRIVER         0x00000004 /* driver about to be
                                                      bound */
-#define BUS_NOTIFY_BOUND_DRIVER                0x00000004 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER       0x00000005 /* driver about to be
+#define BUS_NOTIFY_BOUND_DRIVER                0x00000005 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER       0x00000006 /* driver about to be
                                                      unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER      0x00000006 /* driver is unbound
+#define BUS_NOTIFY_UNBOUND_DRIVER      0x00000007 /* driver is unbound
                                                      from the device */
 
 extern struct kset *bus_get_kset(struct bus_type *bus);
index 1deece4..593fff9 100644 (file)
@@ -56,13 +56,19 @@ struct dmar_drhd_unit {
        struct intel_iommu *iommu;
 };
 
+struct dmar_pci_path {
+       u8 bus;
+       u8 device;
+       u8 function;
+};
+
 struct dmar_pci_notify_info {
        struct pci_dev                  *dev;
        unsigned long                   event;
        int                             bus;
        u16                             seg;
        u16                             level;
-       struct acpi_dmar_pci_path       path[];
+       struct dmar_pci_path            path[];
 }  __attribute__((packed));
 
 extern struct rw_semaphore dmar_global_lock;
index a95efeb..b556e0a 100644 (file)
@@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
 }
 #endif /* CONFIG_OF */
 
-#ifdef CONFIG_ACPI
-void acpi_i2c_register_devices(struct i2c_adapter *adap);
-#else
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
-#endif /* CONFIG_ACPI */
-
-#ifdef CONFIG_ACPI_I2C_OPREGION
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
-#else
-static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
-{ }
-static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
-{ return 0; }
-#endif /* CONFIG_ACPI_I2C_OPREGION */
-
 #endif /* _LINUX_I2C_H */
index 511c6e0..a5b7d7c 100644 (file)
@@ -209,6 +209,7 @@ enum {
        MLX4_BMME_FLAG_TYPE_2_WIN       = 1 <<  9,
        MLX4_BMME_FLAG_RESERVED_LKEY    = 1 << 10,
        MLX4_BMME_FLAG_FAST_REG_WR      = 1 << 11,
+       MLX4_BMME_FLAG_VSD_INIT2RTR     = 1 << 28,
 };
 
 enum mlx4_event {
index 7040dc9..5f4e36c 100644 (file)
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
        MLX4_QP_OPTPAR_RNR_RETRY                = 1 << 13,
        MLX4_QP_OPTPAR_ACK_TIMEOUT              = 1 << 14,
        MLX4_QP_OPTPAR_SCHED_QUEUE              = 1 << 16,
-       MLX4_QP_OPTPAR_COUNTER_INDEX            = 1 << 20
+       MLX4_QP_OPTPAR_COUNTER_INDEX            = 1 << 20,
+       MLX4_QP_OPTPAR_VLAN_STRIPPING           = 1 << 21,
 };
 
 enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
+       MLX4_UPDATE_QP_VSD              = 1 << 2,
+       MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 2) - 1
+};
+
+enum mlx4_update_qp_params_flags {
+       MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE          = 1 << 0,
 };
 
 struct mlx4_update_qp_params {
        u8      smac_index;
+       u32     flags;
 };
 
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
index 3dfbf23..ef5894c 100644 (file)
@@ -71,6 +71,7 @@ void percpu_ref_reinit(struct percpu_ref *ref);
 void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
+void __percpu_ref_kill_expedited(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
index 5c2c885..b867a4d 100644 (file)
@@ -1903,8 +1903,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_KTHREAD     0x00200000      /* I am a kernel thread */
 #define PF_RANDOMIZE   0x00400000      /* randomize virtual address space */
 #define PF_SWAPWRITE   0x00800000      /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x01000000      /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x02000000      /* Spread some slab caches over cpuset */
 #define PF_NO_SETAFFINITY 0x04000000   /* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MUTEX_TESTER        0x20000000      /* Thread belongs to the rt mutex tester */
@@ -1957,17 +1955,31 @@ static inline void memalloc_noio_restore(unsigned int flags)
 }
 
 /* Per-process atomic flags. */
-#define PFA_NO_NEW_PRIVS 0x00000001    /* May not gain new privileges. */
+#define PFA_NO_NEW_PRIVS 0     /* May not gain new privileges. */
+#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
+#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
 
-static inline bool task_no_new_privs(struct task_struct *p)
-{
-       return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
 
-static inline void task_set_no_new_privs(struct task_struct *p)
-{
-       set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
-}
+#define TASK_PFA_TEST(name, func)                                      \
+       static inline bool task_##func(struct task_struct *p)           \
+       { return test_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_SET(name, func)                                       \
+       static inline void task_set_##func(struct task_struct *p)       \
+       { set_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_CLEAR(name, func)                                     \
+       static inline void task_clear_##func(struct task_struct *p)     \
+       { clear_bit(PFA_##name, &p->atomic_flags); }
+
+TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
+TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
+
+TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+TASK_PFA_SET(SPREAD_PAGE, spread_page)
+TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
 
 /*
  * task->jobctl flags
@@ -2608,9 +2620,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
        task_thread_info(p)->task = p;
 }
 
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
 static inline unsigned long *end_of_stack(struct task_struct *p)
 {
+#ifdef CONFIG_STACK_GROWSUP
+       return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
        return (unsigned long *)(task_thread_info(p) + 1);
+#endif
 }
 
 #endif
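
With the cpuset spreading bits moved from PF_* flags to per-process atomic flags, callers use the helpers generated by TASK_PFA_TEST/SET/CLEAR instead of manipulating task->flags directly. A minimal sketch, where the helper function and the pr_debug message are illustrative:

    #include <linux/sched.h>
    #include <linux/printk.h>

    /* Illustrative: toggle the cpuset spreading state for a task using the
     * generated task_{set_,clear_,}spread_page/slab() accessors.
     */
    static void example_set_spread(struct task_struct *tsk, bool spread)
    {
            if (spread) {
                    task_set_spread_page(tsk);
                    task_set_spread_slab(tsk);
            } else {
                    task_clear_spread_page(tsk);
                    task_clear_spread_slab(tsk);
            }

            if (task_spread_page(tsk))
                    pr_debug("%s spreads its page cache over the cpuset\n",
                             tsk->comm);
    }
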
index 48d64e6..290fbf0 100644 (file)
@@ -84,7 +84,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
 ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
-                       unsigned maxpages, size_t *start);
+                       size_t maxsize, unsigned maxpages, size_t *start);
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
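
iov_iter_get_pages() gains an explicit maxsize argument in front of maxpages, so callers can cap the number of bytes as well as the number of pages that get pinned. A minimal sketch of the new calling convention; the one-page cap is illustrative:

    #include <linux/uio.h>
    #include <linux/mm.h>

    /* Illustrative: pin at most one page's worth of data from the iterator.
     * 'start' receives the offset of the data within the first page.
     */
    static ssize_t example_pin_one_page(struct iov_iter *i, struct page **pages)
    {
            size_t start;

            return iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &start);
    }
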
index a0cc2e9..b996e6c 100644 (file)
@@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
                        1, (name))
 #define create_singlethread_workqueue(name)                            \
-       alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+       alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
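
create_singlethread_workqueue() is now defined in terms of alloc_ordered_workqueue(), so existing callers transparently get an ordered workqueue with WQ_MEM_RECLAIM; a minimal sketch with an illustrative queue name:

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq_create(void)
    {
            /* Now expands to alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "example") */
            return create_singlethread_workqueue("example");
    }
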
 
index fc910a6..2fefcf4 100644 (file)
@@ -295,7 +295,7 @@ struct vb2_buffer {
  *                     can return an error if hardware fails, in that case all
  *                     buffers that have been already given by the @buf_queue
  *                     callback are to be returned by the driver by calling
- *                     @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED).
+ *                     @vb2_buffer_done(VB2_BUF_STATE_QUEUED).
  *                     If you need a minimum number of buffers before you can
  *                     start streaming, then set @min_buffers_needed in the
  *                     vb2_queue structure. If that is non-zero then
@@ -380,6 +380,9 @@ struct v4l2_fh;
  * @start_streaming_called: start_streaming() was called successfully and we
  *             started streaming.
  * @error:     a fatal error occurred on the queue
+ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
+ *             buffers. Only set for capture queues while qbuf has not yet
+ *             been called, since poll() needs to return POLLERR in that
+ *             situation.
  * @fileio:    file io emulator internal data, used only if emulator is active
  * @threadio:  thread io internal data, used only if thread is active
  */
@@ -417,6 +420,7 @@ struct vb2_queue {
        unsigned int                    streaming:1;
        unsigned int                    start_streaming_called:1;
        unsigned int                    error:1;
+       unsigned int                    waiting_for_buffers:1;
 
        struct vb2_fileio_data          *fileio;
        struct vb2_threadio_data        *threadio;
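
The new waiting_for_buffers flag lets poll() report POLLERR on a capture queue that has no buffers queued yet. A rough sketch of the check described in the comment above; this is illustrative, not the actual videobuf2-core poll implementation:

    #include <linux/poll.h>
    #include <media/videobuf2-core.h>

    /* Illustrative: what a poll() path might do with the new flag. */
    static unsigned int example_vb2_poll_check(struct vb2_queue *q)
    {
            if (q->waiting_for_buffers)
                    return POLLERR;

            return 0;
    }
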
index f679877..ec51e67 100644 (file)
@@ -204,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk);
 
 int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
 int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+void ipv6_ac_destroy_dev(struct inet6_dev *idev);
 bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr);
 bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
index 71c60f4..a8ae4e7 100644 (file)
@@ -480,6 +480,7 @@ void dst_init(void);
 /* Flags for xfrm_lookup flags argument. */
 enum {
        XFRM_LOOKUP_ICMP = 1 << 0,
+       XFRM_LOOKUP_QUEUE = 1 << 1,
 };
 
 struct flowi;
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
                                            int flags)
 {
        return dst_orig;
-} 
+}
+
+static inline struct dst_entry *xfrm_lookup_route(struct net *net,
+                                                 struct dst_entry *dst_orig,
+                                                 const struct flowi *fl,
+                                                 struct sock *sk,
+                                                 int flags)
+{
+       return dst_orig;
+}
 
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 {
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                              const struct flowi *fl, struct sock *sk,
                              int flags);
 
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+                                   const struct flowi *fl, struct sock *sk,
+                                   int flags);
+
 /* skb attached with this dst needs transformation if dst->xfrm is valid */
 static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 {
index 93695f0..af10c2c 100644 (file)
@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
        return netlink_set_err(net->genl_sock, portid, group, code);
 }
 
+static inline int genl_has_listeners(struct genl_family *family,
+                                    struct sock *sk, unsigned int group)
+{
+       if (WARN_ON_ONCE(group >= family->n_mcgrps))
+               return -EINVAL;
+       group = family->mcgrp_offset + group;
+       return netlink_has_listeners(sk, group);
+}
 #endif /* __NET_GENERIC_NETLINK_H */
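
genl_has_listeners() translates a family-relative multicast group index into the family's global group offset before querying netlink; openvswitch further down is the in-tree caller. A standalone use would look like the sketch below, where my_genl_family is a hypothetical family defined elsewhere:

    #include <net/genetlink.h>

    /* Hypothetical generic netlink family registered elsewhere. */
    extern struct genl_family my_genl_family;

    static bool example_anyone_listening(struct net *net)
    {
            /* Group 0 is the first multicast group declared by the family. */
            return genl_has_listeners(&my_genl_family, net->genl_sock, 0) > 0;
    }
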
index a3cfb8e..620e086 100644 (file)
@@ -231,7 +231,8 @@ struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
-       unsigned char           data[24];
+#define QDISC_CB_PRIV_LEN 20
+       unsigned char           data[QDISC_CB_PRIV_LEN];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
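
With the private area of qdisc_skb_cb now sized by QDISC_CB_PRIV_LEN, a qdisc that stashes per-packet state can size its own cb structure against that constant and validate it, as sch_choke does further down. A minimal sketch with illustrative struct names:

    #include <net/sch_generic.h>

    /* Illustrative per-packet state kept in the qdisc-private cb area. */
    struct example_skb_cb {
            u16 classid;
            u8  scratch[QDISC_CB_PRIV_LEN - sizeof(u16)];
    };

    static inline struct example_skb_cb *example_skb_cb(const struct sk_buff *skb)
    {
            qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
            return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
    }
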
index 1ea0b65..a2bf41e 100644 (file)
@@ -47,6 +47,7 @@ struct ib_umem {
        int                     writable;
        int                     hugetlb;
        struct work_struct      work;
+       struct pid             *pid;
        struct mm_struct       *mm;
        unsigned long           diff;
        struct sg_table sg_head;
index 940aced..3a73f99 100644 (file)
@@ -3985,7 +3985,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 
        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
-               mutex_unlock(&cgrp->pidlist_mutex);
                pidlist_free(array);
                return -ENOMEM;
        }
index 22874d7..52cb04c 100644 (file)
@@ -365,13 +365,14 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                        struct task_struct *tsk)
 {
        if (is_spread_page(cs))
-               tsk->flags |= PF_SPREAD_PAGE;
+               task_set_spread_page(tsk);
        else
-               tsk->flags &= ~PF_SPREAD_PAGE;
+               task_clear_spread_page(tsk);
+
        if (is_spread_slab(cs))
-               tsk->flags |= PF_SPREAD_SLAB;
+               task_set_spread_slab(tsk);
        else
-               tsk->flags &= ~PF_SPREAD_SLAB;
+               task_clear_spread_slab(tsk);
 }
 
 /*
index c4b8093..f1604d8 100644 (file)
@@ -725,14 +725,6 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
        clear_bit(bit, addr);
 }
 
-static void memory_bm_clear_current(struct memory_bitmap *bm)
-{
-       int bit;
-
-       bit = max(bm->cur.node_bit - 1, 0);
-       clear_bit(bit, bm->cur.node->data);
-}
-
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
        void *addr;
@@ -1341,35 +1333,23 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-       unsigned long fb_pfn, fr_pfn;
-
-       memory_bm_position_reset(forbidden_pages_map);
-       memory_bm_position_reset(free_pages_map);
-
-loop:
-       fr_pfn = memory_bm_next_pfn(free_pages_map);
-       fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
-
-       /*
-        * Find the next bit set in both bitmaps. This is guaranteed to
-        * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
-        */
-       do {
-               if (fb_pfn < fr_pfn)
-                       fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
-               if (fr_pfn < fb_pfn)
-                       fr_pfn = memory_bm_next_pfn(free_pages_map);
-       } while (fb_pfn != fr_pfn);
-
-       if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
-               struct page *page = pfn_to_page(fr_pfn);
+       struct zone *zone;
+       unsigned long pfn, max_zone_pfn;
 
-               memory_bm_clear_current(forbidden_pages_map);
-               memory_bm_clear_current(free_pages_map);
-               __free_page(page);
-               goto loop;
+       for_each_populated_zone(zone) {
+               max_zone_pfn = zone_end_pfn(zone);
+               for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+                       if (pfn_valid(pfn)) {
+                               struct page *page = pfn_to_page(pfn);
+
+                               if (swsusp_page_is_forbidden(page) &&
+                                   swsusp_page_is_free(page)) {
+                                       swsusp_unset_page_forbidden(page);
+                                       swsusp_unset_page_free(page);
+                                       __free_page(page);
+                               }
+                       }
        }
-
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
index bdb9a45..38d2db8 100644 (file)
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        if (!np_pool)
                return NULL;
        pdev = of_find_device_by_node(np_pool);
+       of_node_put(np_pool);
        if (!pdev)
                return NULL;
        return dev_get_gen_pool(&pdev->dev);
index fe5a334..a89cf09 100644 (file)
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall.  Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue().  Will be removed during v3.18
+ * devel cycle.  Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+       WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+                 "percpu_ref_kill() called more than once on %pf!",
+                 ref->release);
+
+       ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+       synchronize_sched_expedited();
+       percpu_ref_kill_rcu(&ref->rcu);
+}
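
As the comment says, __percpu_ref_kill_expedited() exists only for blk-mq's queue freeze path; the general shape of such a user, heavily simplified and with the drain wait elided, might look like the sketch below. This is not the real blk-mq code.

    #include <linux/percpu-refcount.h>

    /* Illustrative only: expedited kill, drain, then reinit, mirroring the
     * freeze/unfreeze pairing hinted at above.  The wait for outstanding
     * references is elided.
     */
    static void example_freeze_unfreeze(struct percpu_ref *ref)
    {
            __percpu_ref_kill_expedited(ref);
            /* ... wait until all outstanding references are dropped ... */
            percpu_ref_reinit(ref);
    }
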
index a2c7881..7b36e4d 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/hash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
-#include <linux/log2.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4UL
index ab88dc0..9a09f20 100644 (file)
@@ -310,7 +310,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 EXPORT_SYMBOL(iov_iter_init);
 
 static ssize_t get_pages_iovec(struct iov_iter *i,
-                  struct page **pages, unsigned maxpages,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
 {
        size_t offset = i->iov_offset;
@@ -323,6 +323,8 @@ static ssize_t get_pages_iovec(struct iov_iter *i,
        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
+       if (len > maxsize)
+               len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        if (len > maxpages * PAGE_SIZE)
@@ -588,13 +590,15 @@ static unsigned long alignment_bvec(const struct iov_iter *i)
 }
 
 static ssize_t get_pages_bvec(struct iov_iter *i,
-                  struct page **pages, unsigned maxpages,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
 {
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
+       if (len > maxsize)
+               len = maxsize;
        /* can't be more than PAGE_SIZE */
        *start = bvec->bv_offset + i->iov_offset;
 
@@ -711,13 +715,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 EXPORT_SYMBOL(iov_iter_alignment);
 
 ssize_t iov_iter_get_pages(struct iov_iter *i,
-                  struct page **pages, unsigned maxpages,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
 {
        if (i->type & ITER_BVEC)
-               return get_pages_bvec(i, pages, maxpages, start);
+               return get_pages_bvec(i, pages, maxsize, maxpages, start);
        else
-               return get_pages_iovec(i, pages, maxpages, start);
+               return get_pages_iovec(i, pages, maxsize, maxpages, start);
 }
 EXPORT_SYMBOL(iov_iter_get_pages);
 
index adeac30..e229970 100644 (file)
@@ -118,6 +118,8 @@ __setup("norandmaps", disable_randmaps);
 unsigned long zero_pfn __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 
+EXPORT_SYMBOL(zero_pfn);
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
@@ -1125,7 +1127,7 @@ again:
                                                addr) != page->index) {
                                pte_t ptfile = pgoff_to_pte(page->index);
                                if (pte_soft_dirty(ptent))
-                                       pte_file_mksoft_dirty(ptfile);
+                                       ptfile = pte_file_mksoft_dirty(ptfile);
                                set_pte_at(mm, addr, pte, ptfile);
                        }
                        if (PageAnon(page))
index 0e5fb22..469f90d 100644 (file)
@@ -2367,8 +2367,10 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc
 
        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
-               if (they_are_dirs)
+               if (they_are_dirs) {
+                       drop_nlink(new_dentry->d_inode);
                        drop_nlink(old_dir);
+               }
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
index a467b30..7c52b38 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2124,7 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-       size_t left_over, freelist_size, ralign;
+       size_t left_over, freelist_size;
+       size_t ralign = BYTES_PER_WORD;
        gfp_t gfp;
        int err;
        size_t size = cachep->size;
@@ -2157,14 +2158,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                size &= ~(BYTES_PER_WORD - 1);
        }
 
-       /*
-        * Redzoning and user store require word alignment or possibly larger.
-        * Note this will be overridden by architecture or caller mandated
-        * alignment if either is greater than BYTES_PER_WORD.
-        */
-       if (flags & SLAB_STORE_USER)
-               ralign = BYTES_PER_WORD;
-
        if (flags & SLAB_RED_ZONE) {
                ralign = REDZONE_ALIGN;
                /* If redzoning, ensure that the second redzone is suitably
@@ -2994,7 +2987,7 @@ out:
 
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set.
+ * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3226,7 +3219,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
        void *objp;
 
-       if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
+       if (current->mempolicy || cpuset_do_slab_mem_spread()) {
                objp = alternate_node_alloc(cache, flags);
                if (objp)
                        goto out;
index 62a7fa2..b6c04cb 100644 (file)
@@ -309,6 +309,9 @@ struct br_input_skb_cb {
        int igmp;
        int mrouters_only;
 #endif
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       bool vlan_filtered;
+#endif
 };
 
 #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
index e1bcd65..3ba57fc 100644 (file)
@@ -27,9 +27,13 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
 {
        if (flags & BRIDGE_VLAN_INFO_PVID)
                __vlan_add_pvid(v, vid);
+       else
+               __vlan_delete_pvid(v, vid);
 
        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
                set_bit(vid, v->untagged_bitmap);
+       else
+               clear_bit(vid, v->untagged_bitmap);
 }
 
 static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
@@ -125,7 +129,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 {
        u16 vid;
 
-       if (!br->vlan_enabled)
+       /* If this packet was not filtered at input, let it pass */
+       if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                goto out;
 
        /* Vlan filter table must be configured at this point.  The
@@ -164,8 +169,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
-       if (!br->vlan_enabled)
+       if (!br->vlan_enabled) {
+               BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
                return true;
+       }
 
        /* If there are no vlan in the permitted list, all packets are
         * rejected.
@@ -173,6 +180,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                goto drop;
 
+       BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
        proto = br->vlan_proto;
 
        /* If vlan tx offload is disabled on bridge device and frame was
@@ -251,7 +259,8 @@ bool br_allowed_egress(struct net_bridge *br,
 {
        u16 vid;
 
-       if (!br->vlan_enabled)
+       /* If this packet was not filtered at input, let it pass */
+       if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                return true;
 
        if (!v)
@@ -270,6 +279,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
        struct net_bridge *br = p->br;
        struct net_port_vlans *v;
 
+       /* If filtering was disabled at input, let it pass. */
        if (!br->vlan_enabled)
                return true;
 
index ab9a165..cf8a95f 100644 (file)
@@ -4809,9 +4809,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
        sysfs_remove_link(&(dev->dev.kobj), linkname);
 }
 
-#define netdev_adjacent_is_neigh_list(dev, dev_list) \
-               (dev_list == &dev->adj_list.upper || \
-                dev_list == &dev->adj_list.lower)
+static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
+                                                struct net_device *adj_dev,
+                                                struct list_head *dev_list)
+{
+       return (dev_list == &dev->adj_list.upper ||
+               dev_list == &dev->adj_list.lower) &&
+               net_eq(dev_net(dev), dev_net(adj_dev));
+}
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
@@ -4841,7 +4846,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        pr_debug("dev_hold for %s, because of link added from %s to %s\n",
                 adj_dev->name, dev->name, adj_dev->name);
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
                ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
                if (ret)
                        goto free_adj;
@@ -4862,7 +4867,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        return 0;
 
 remove_symlinks:
-       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 free_adj:
        kfree(adj);
@@ -4895,8 +4900,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
        if (adj->master)
                sysfs_remove_link(&(dev->dev.kobj), "master");
 
-       if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
-           net_eq(dev_net(dev),dev_net(adj_dev)))
+       if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
 
        list_del_rcu(&adj->list);
index d372b4b..9c3f823 100644 (file)
@@ -1866,7 +1866,7 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
  * skb_page_frag_refill - check that a page_frag contains enough room
  * @sz: minimum size of the fragment we want to get
  * @pfrag: pointer to page_frag
- * @prio: priority for memory allocation
+ * @gfp: priority for memory allocation
  *
  * Note: While this allocator tries to use high order pages, there is
  * no guarantee that allocations succeed. Therefore, @sz MUST be
index afed1aa..bd41dd1 100644 (file)
@@ -79,10 +79,10 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
        idst->saddr = saddr;
 }
 
-static void tunnel_dst_set(struct ip_tunnel *t,
+static noinline void tunnel_dst_set(struct ip_tunnel *t,
                           struct dst_entry *dst, __be32 saddr)
 {
-       __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
+       __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
 }
 
 static void tunnel_dst_reset(struct ip_tunnel *t)
@@ -106,7 +106,7 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
        struct dst_entry *dst;
 
        rcu_read_lock();
-       idst = this_cpu_ptr(t->dst_cache);
+       idst = raw_cpu_ptr(t->dst_cache);
        dst = rcu_dereference(idst->dst);
        if (dst && !atomic_inc_not_zero(&dst->__refcnt))
                dst = NULL;
index eaa4b00..173e7ea 100644 (file)
@@ -2265,9 +2265,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
                return rt;
 
        if (flp4->flowi4_proto)
-               rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-                                                  flowi4_to_flowi(flp4),
-                                                  sk, 0);
+               rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+                                                       flowi4_to_flowi(flp4),
+                                                       sk, 0);
 
        return rt;
 }
index fc1fac2..3342ee6 100644 (file)
@@ -3094,11 +3094,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        write_unlock_bh(&idev->lock);
 
-       /* Step 5: Discard multicast list */
-       if (how)
+       /* Step 5: Discard anycast and multicast list */
+       if (how) {
+               ipv6_ac_destroy_dev(idev);
                ipv6_mc_destroy_dev(idev);
-       else
+       } else {
                ipv6_mc_down(idev);
+       }
 
        idev->tstamp = jiffies;
 
index ff2de7d..9a38684 100644 (file)
@@ -351,6 +351,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
        return __ipv6_dev_ac_dec(idev, addr);
 }
 
+void ipv6_ac_destroy_dev(struct inet6_dev *idev)
+{
+       struct ifacaddr6 *aca;
+
+       write_lock_bh(&idev->lock);
+       while ((aca = idev->ac_list) != NULL) {
+               idev->ac_list = aca->aca_next;
+               write_unlock_bh(&idev->lock);
+
+               addrconf_leave_solict(idev, &aca->aca_addr);
+
+               dst_hold(&aca->aca_rt->dst);
+               ip6_del_rt(aca->aca_rt);
+
+               aca_put(aca);
+
+               write_lock_bh(&idev->lock);
+       }
+       write_unlock_bh(&idev->lock);
+}
+
 /*
  *     check if the interface has this anycast address
  *     called with rcu_read_lock()
index 315a55d..0a3448b 100644 (file)
@@ -1009,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
 
-       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
 
@@ -1041,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (final_dst)
                fl6->daddr = *final_dst;
 
-       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
index 441875f..a1e433b 100644 (file)
@@ -1822,7 +1822,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
        if (sdata->vif.bss_conf.use_short_slot)
                sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
-       sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
+       sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
        sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
 
        sinfo->sta_flags.set = 0;
index 91d66b7..64dc864 100644 (file)
@@ -78,11 +78,12 @@ static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
 
 /* Check if need to build a reply message.
  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
-static bool ovs_must_notify(struct genl_info *info,
-                           const struct genl_multicast_group *grp)
+static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
+                           unsigned int group)
 {
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-               netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
+              genl_has_listeners(family, genl_info_net(info)->genl_sock,
+                                 group);
 }
 
 static void ovs_notify(struct genl_family *family,
@@ -763,7 +764,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
 {
        struct sk_buff *skb;
 
-       if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+       if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
                return NULL;
 
        skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
index 02a86a2..0f62326 100644 (file)
@@ -54,7 +54,7 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
        if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
                clk_disable(rfkill->clk);
 
-       rfkill->clk_enabled = blocked;
+       rfkill->clk_enabled = !blocked;
 
        return 0;
 }
@@ -163,6 +163,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
        { "LNV4752", RFKILL_TYPE_GPS },
        { },
 };
+MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
 #endif
 
 static struct platform_driver rfkill_gpio_driver = {
index b45d080..1b24191 100644 (file)
@@ -1143,7 +1143,7 @@ static long rxrpc_read(const struct key *key,
                if (copy_to_user(xdr, (s), _l) != 0)                    \
                        goto fault;                                     \
                if (_l & 3 &&                                           \
-                   copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
+                   copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
                        goto fault;                                     \
                xdr += (_l + 3) >> 2;                                   \
        } while(0)
index ed30e43..fb666d1 100644 (file)
@@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
        --sch->q.qlen;
 }
 
+/* private part of skb->cb[] that a qdisc is allowed to use
+ * is limited to QDISC_CB_PRIV_LEN bytes.
+ * As a flow key might be too large, we store only part of it.
+ */
+#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
+
 struct choke_skb_cb {
        u16                     classid;
        u8                      keys_valid;
-       struct flow_keys        keys;
+       u8                      keys[QDISC_CB_PRIV_LEN - 3];
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb)
 static bool choke_match_flow(struct sk_buff *skb1,
                             struct sk_buff *skb2)
 {
+       struct flow_keys temp;
+
        if (skb1->protocol != skb2->protocol)
                return false;
 
        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
-               skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+               skb_flow_dissect(skb1, &temp);
+               memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
        }
 
        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
-               skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+               skb_flow_dissect(skb2, &temp);
+               memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
        }
 
        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
-                      sizeof(struct flow_keys));
+                      CHOKE_K_LEN);
 }
 
 /*
index 2e2586e..4cdbc10 100644 (file)
@@ -1996,6 +1996,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
        if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
                return -EFAULT;
 
+       if (kmsg->msg_name == NULL)
+               kmsg->msg_namelen = 0;
+
        if (kmsg->msg_namelen < 0)
                return -EINVAL;
 
index df7b133..7257164 100644 (file)
@@ -6969,6 +6969,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
        struct nlattr *data = ((void **)skb->cb)[2];
        enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        nla_nest_end(skb, data);
        genlmsg_end(skb, hdr);
 
@@ -9294,6 +9297,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
        void *hdr = ((void **)skb->cb)[1];
        struct nlattr *data = ((void **)skb->cb)[2];
 
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
        if (WARN_ON(!rdev->cur_cmd_info)) {
                kfree_skb(skb);
                return -EINVAL;
index beeed60..fdde51f 100644 (file)
 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
 #define XFRM_MAX_QUEUE_LEN     100
 
+struct xfrm_flo {
+       struct dst_entry *dst_orig;
+       u8 flags;
+};
+
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
                                                __read_mostly;
@@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
 }
 
 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
-                                                struct dst_entry *dst,
+                                                struct xfrm_flo *xflo,
                                                 const struct flowi *fl,
                                                 int num_xfrms,
                                                 u16 family)
 {
        int err;
        struct net_device *dev;
+       struct dst_entry *dst;
        struct dst_entry *dst1;
        struct xfrm_dst *xdst;
 
@@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
        if (IS_ERR(xdst))
                return xdst;
 
-       if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
+       if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
+           net->xfrm.sysctl_larval_drop ||
+           num_xfrms <= 0)
                return xdst;
 
+       dst = xflo->dst_orig;
        dst1 = &xdst->u.dst;
        dst_hold(dst);
        xdst->route = dst;
@@ -1935,7 +1944,7 @@ static struct flow_cache_object *
 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
 {
-       struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+       struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct xfrm_dst *xdst, *new_xdst;
        int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
@@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                        goto make_dummy_bundle;
        }
 
-       new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+       new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
+                                                 xflo->dst_orig);
        if (IS_ERR(new_xdst)) {
                err = PTR_ERR(new_xdst);
                if (err != -EAGAIN)
@@ -2010,7 +2020,7 @@ make_dummy_bundle:
        /* We found policies, but there's no bundles to instantiate:
         * either because the policy blocks, has no transformations or
         * we could not build template (no xfrm_states).*/
-       xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
+       xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
        if (IS_ERR(xdst)) {
                xfrm_pols_put(pols, num_pols);
                return ERR_CAST(xdst);
@@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
        }
 
        if (xdst == NULL) {
+               struct xfrm_flo xflo;
+
+               xflo.dst_orig = dst_orig;
+               xflo.flags = flags;
+
                /* To accelerate a bit...  */
                if ((dst_orig->flags & DST_NOXFRM) ||
                    !net->xfrm.policy_count[XFRM_POLICY_OUT])
                        goto nopol;
 
                flo = flow_cache_lookup(net, fl, family, dir,
-                                       xfrm_bundle_lookup, dst_orig);
+                                       xfrm_bundle_lookup, &xflo);
                if (flo == NULL)
                        goto nopol;
                if (IS_ERR(flo)) {
@@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
 
-                       return make_blackhole(net, family, dst_orig);
+                       return ERR_PTR(-EREMOTE);
                }
 
                err = -EAGAIN;
@@ -2195,6 +2210,23 @@ dropdst:
 }
 EXPORT_SYMBOL(xfrm_lookup);
 
+/* Callers of xfrm_lookup_route() must ensure that dst_output() is eventually
+ * called; otherwise we may send out blackholed packets.
+ */
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+                                   const struct flowi *fl,
+                                   struct sock *sk, int flags)
+{
+       struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
+                                           flags | XFRM_LOOKUP_QUEUE);
+
+       if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
+               return make_blackhole(net, dst_orig->ops->family, dst_orig);
+
+       return dst;
+}
+EXPORT_SYMBOL(xfrm_lookup_route);
+
 static inline int
 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 {
@@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 
        skb_dst_force(skb);
 
-       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
                res = 0;
                dst = NULL;
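
xfrm_lookup_route() turns the -EREMOTE result back into a blackhole route, so routing-path callers (ip_route_output_flow() and ip6_dst_lookup_flow() above) keep getting a dst that drops packets at dst_output() time instead of failing the lookup. A minimal sketch of such a caller; the surrounding flow setup is elided and the helper is illustrative:

    #include <net/dst.h>
    #include <net/xfrm.h>

    /* Illustrative: resolve IPsec policy for an already-routed dst.  The
     * caller must still hand packets using the result to dst_output(), which
     * is where a blackhole dst finally drops them.
     */
    static struct dst_entry *example_lookup(struct net *net,
                                            struct dst_entry *route_dst,
                                            const struct flowi *fl,
                                            struct sock *sk)
    {
            return xfrm_lookup_route(net, route_dst, fl, sk, 0);
    }
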
index cbfd269..293828b 100755 (executable)
@@ -197,6 +197,9 @@ exuberant()
        --regex-c++='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/'          \
        --regex-c++='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/'      \
        --regex-c++='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \
+       --regex-c++='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/'       \
+       --regex-c++='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/'    \
+       --regex-c++='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'\
        --regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \
        --regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \
        --regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/'     \
@@ -260,6 +263,9 @@ emacs()
        --regex='/SETPCGFLAG\(([^,)]*).*/SetPageCgroup\1/'      \
        --regex='/CLEARPCGFLAG\(([^,)]*).*/ClearPageCgroup\1/'  \
        --regex='/TESTCLEARPCGFLAG\(([^,)]*).*/TestClearPageCgroup\1/' \
+       --regex='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/'           \
+       --regex='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/'        \
+       --regex='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'    \
        --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'           \
        --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \
        --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\
index 9acc77e..0032278 100644 (file)
@@ -1782,14 +1782,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
 {
        struct snd_pcm_hw_params *params = arg;
        snd_pcm_format_t format;
-       int channels, width;
+       int channels;
+       ssize_t frame_size;
 
        params->fifo_size = substream->runtime->hw.fifo_size;
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
                format = params_format(params);
                channels = params_channels(params);
-               width = snd_pcm_format_physical_width(format);
-               params->fifo_size /= width * channels;
+               frame_size = snd_pcm_format_size(format, channels);
+               if (frame_size > 0)
+                       params->fifo_size /= (unsigned)frame_size;
        }
        return 0;
 }
index 6e5d0cb..47ccb8f 100644 (file)
@@ -777,6 +777,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" },
        { .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" },
        { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
+       { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
        {}
 };
index f65fc09..b7a7c80 100644 (file)
@@ -100,15 +100,19 @@ static int control_put(struct snd_kcontrol *kcontrol,
        struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card);
        int pos = kcontrol->private_value;
        int v = ucontrol->value.integer.value[0];
-       unsigned char cmd = EP1_CMD_WRITE_IO;
+       unsigned char cmd;
 
-       if (cdev->chip.usb_id ==
-               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1))
-               cmd = EP1_CMD_DIMM_LEDS;
-
-       if (cdev->chip.usb_id ==
-               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER))
+       switch (cdev->chip.usb_id) {
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
                cmd = EP1_CMD_DIMM_LEDS;
+               break;
+       default:
+               cmd = EP1_CMD_WRITE_IO;
+               break;
+       }
 
        if (pos & CNT_INTVAL) {
                int i = pos & ~CNT_INTVAL;
index 01124ef..416baed 100644 (file)
@@ -71,7 +71,7 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                  struct vgic_lr lr_desc)
 {
        if (!(lr_desc.state & LR_STATE_MASK))
-               set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+               __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
index 33712fb..95519bc 100644 (file)
@@ -110,7 +110,7 @@ static bool largepages_enabled = true;
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
-               return PageReserved(pfn_to_page(pfn));
+               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -1725,7 +1725,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
        rcu_read_lock();
        pid = rcu_dereference(target->pid);
        if (pid)
-               task = get_pid_task(target->pid, PIDTYPE_PID);
+               task = get_pid_task(pid, PIDTYPE_PID);
        rcu_read_unlock();
        if (!task)
                return ret;