Merge branch 'stable-3.2' into pandora-3.2
author     Grazvydas Ignotas <notasas@gmail.com>
           Sat, 22 Sep 2012 21:32:18 +0000 (00:32 +0300)
committer  Grazvydas Ignotas <notasas@gmail.com>
           Sat, 22 Sep 2012 21:32:18 +0000 (00:32 +0300)
Conflicts:
drivers/mtd/ubi/vtbl.c

342 files changed:
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/i2c/busses/i2c-i801
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/stable_kernel_rules.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/atomic.h
arch/alpha/include/asm/socket.h
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/configs/mxs_defconfig
arch/arm/include/asm/mutex.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/process.c
arch/arm/kernel/traps.c
arch/arm/mach-dove/common.c
arch/arm/mach-imx/hotplug.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-mv78xx0/common.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-pxa/raumfeld.c
arch/arm/mm/flush.c
arch/arm/mm/tlb-v7.S
arch/arm/plat-omap/dmtimer.c
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm/plat-s3c24xx/dma.c
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
arch/ia64/include/asm/atomic.h
arch/m68k/include/asm/entry.h
arch/m68k/kernel/sys_m68k.c
arch/parisc/include/asm/atomic.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/dbell.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/traps.c
arch/powerpc/sysdev/xics/icp-hv.c
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/processor.h
arch/s390/kernel/compat_linux.c
arch/s390/kernel/compat_wrapper.S
arch/s390/mm/fault.c
arch/s390/mm/mmap.c
arch/s390/mm/pgtable.c
arch/x86/kernel/alternative.c
arch/x86/kvm/vmx.c
arch/x86/mm/hugetlbpage.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
drivers/acpi/acpica/tbxface.c
drivers/ata/ahci.c
drivers/base/power/runtime.c
drivers/block/cciss_scsi.c
drivers/block/floppy.c
drivers/block/virtio_blk.c
drivers/char/mspec.c
drivers/char/random.c
drivers/firmware/dmi_scan.c
drivers/firmware/pcdp.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/Kconfig
drivers/hid/hid-chicony.c
drivers/hid/hid-core.c
drivers/hid/hid-cypress.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/twl4030-madc-hwmon.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-i801.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/eeti_ts.c
drivers/isdn/isdnloop/isdnloop.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/raid1.c
drivers/media/dvb/siano/smsusb.c
drivers/media/rc/ene_ir.c
drivers/media/video/gspca/spca506.c
drivers/mfd/ab3100-core.c
drivers/mfd/ezx-pcap.c
drivers/mfd/wm831x-otp.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/card/block.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/sdhci-esdhc.h
drivers/mtd/ubi/vtbl.c
drivers/net/caif/caif_serial.c
drivers/net/can/mcp251x.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/tx.c
drivers/net/netconsole.c
drivers/net/ppp/pptp.c
drivers/net/tun.c
drivers/net/usb/kaweth.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2400pci.h
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2500usb.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt61pci.h
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rt2x00/rt73usb.h
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/pci/pci-driver.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/rapidio/devices/tsi721.c
drivers/rtc/rtc-rs5c348.c
drivers/rtc/rtc-wm831x.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_scan.c
drivers/staging/comedi/drivers/das08.c
drivers/staging/media/lirc/lirc_sir.c
drivers/staging/rtl8712/recv_linux.c
drivers/staging/speakup/main.c
drivers/staging/vt6656/dpc.c
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/rxtx.c
drivers/staging/winbond/wbusb.c
drivers/staging/zcache/zcache-main.c
drivers/target/target_core_transport.c
drivers/tty/serial/imx.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/pmac_zilog.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/ehci-q.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/emi62.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/video/console/fbcon.c
drivers/video/smscufx.c
drivers/xen/swiotlb-xen.c
fs/buffer.c
fs/cifs/file.c
fs/compat.c
fs/ecryptfs/inode.c
fs/exofs/ore.c
fs/ext3/inode.c
fs/ext4/super.c
fs/fuse/dev.c
fs/fuse/file.c
fs/hfsplus/wrapper.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/extents.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/nfs4xdr.c
fs/nilfs2/ioctl.c
fs/nilfs2/super.c
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
fs/open.c
fs/squashfs/super.c
fs/stat.c
fs/udf/file.c
include/asm-generic/mutex-xchg.h
include/drm/drm_mode.h
include/linux/hugetlb.h
include/linux/init_task.h
include/linux/input/eeti_ts.h
include/linux/kobject.h
include/linux/ktime.h
include/linux/mfd/ezx-pcap.h
include/linux/mmc/card.h
include/linux/mv643xx_eth.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/random.h
include/linux/sched.h
include/linux/sunrpc/xprt.h
include/linux/time.h
include/linux/usb.h
include/net/scm.h
include/net/sock.h
kernel/audit_tree.c
kernel/events/core.c
kernel/futex.c
kernel/irq/handle.c
kernel/sched.c
kernel/time/timekeeping.c
kernel/workqueue.c
lib/vsprintf.c
mm/hugetlb.c
mm/internal.h
mm/memory.c
mm/mempolicy.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/sparse.c
mm/vmscan.c
net/atm/common.c
net/atm/pvc.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/caif/caif_dev.c
net/core/dev.c
net/core/drop_monitor.c
net/core/rtnetlink.c
net/core/sock.c
net/dccp/ccid.h
net/dccp/ccids/ccid3.c
net/ipv4/cipso_ipv4.c
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/llc/af_llc.c
net/mac80211/mesh.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/sched/act_gact.c
net/sched/sch_sfb.c
net/sctp/input.c
net/sctp/socket.c
net/socket.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/sched.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
net/wanrouter/wanmain.c
net/wireless/core.c
net/wireless/core.h
net/wireless/util.c
sound/drivers/mpu401/mpu401_uart.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_proc.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/ice1712/prodigy_hifi.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm9712.c
sound/usb/clock.c

diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index ab22fe6..e39a0c0 100644
@@ -10,8 +10,8 @@ Required properties:
 
 Optional properties:
 - fsl,card-wired : Indicate the card is wired to host permanently
-- fsl,cd-internal : Indicate to use controller internal card detection
-- fsl,wp-internal : Indicate to use controller internal write protection
+- fsl,cd-controller : Indicate to use controller internal card detection
+- fsl,wp-controller : Indicate to use controller internal write protection
 - cd-gpios : Specify GPIOs for card detection
 - wp-gpios : Specify GPIOs for write protection
 
@@ -21,8 +21,8 @@ esdhc@70004000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70004000 0x4000>;
        interrupts = <1>;
-       fsl,cd-internal;
-       fsl,wp-internal;
+       fsl,cd-controller;
+       fsl,wp-controller;
 };
 
 esdhc@70008000 {
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 2871fd5..99d4e44 100644
@@ -20,6 +20,8 @@ Supported adapters:
   * Intel Patsburg (PCH)
   * Intel DH89xxCC (PCH)
   * Intel Panther Point (PCH)
+  * Intel Lynx Point (PCH)
+  * Intel Lynx Point-LP (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index edad99a..69820b2 100644
@@ -60,10 +60,11 @@ ALC267/268
 ==========
   N/A
 
-ALC269
+ALC269/270/275/276/280/282
 ======
   laptop-amic  Laptops with analog-mic input
   laptop-dmic  Laptops with digital-mic input
+  lenovo-dock   Enables docking station I/O for some Lenovos
 
 ALC662/663/272
 ==============
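
The new lenovo-dock entry above names a model for the snd-hda-intel driver; like the other strings listed in this file, it would typically be selected through the driver's model parameter, for example from modprobe configuration (an illustrative snippet, not part of this commit):

    # /etc/modprobe.d/alsa.conf
    options snd-hda-intel model=lenovo-dock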
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index e1f856b..22bf11b 100644
@@ -1,4 +1,4 @@
-Everything you ever wanted to know about Linux 2.6 -stable releases.
+Everything you ever wanted to know about Linux -stable releases.
 
 Rules on what kind of patches are accepted, and which ones are not, into the
 "-stable" tree:
@@ -41,10 +41,10 @@ Procedure for submitting patches to the -stable tree:
    cherry-picked than this can be specified in the following format in
    the sign-off area:
 
-     Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
-     Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
-     Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
-     Cc: <stable@vger.kernel.org> # .32.x
+     Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+     Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+     Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
+     Cc: <stable@vger.kernel.org> # 3.3.x
     Signed-off-by: Ingo Molnar <mingo@elte.hu>
 
    The tag sequence has the meaning of:
@@ -78,6 +78,15 @@ Review cycle:
    security kernel team, and not go through the normal review cycle.
    Contact the kernel security team for more details on this procedure.
 
+Trees:
+
+ - The queues of patches, for both completed versions and in progress
+   versions can be found at:
+       http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
+ - The finalized and tagged releases of all stable kernels can be found
+   in separate branches per version at:
+       http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
+
 
 Review committee:
 
diff --git a/MAINTAINERS b/MAINTAINERS
index f986e7d..82d7fa6 100644
@@ -5452,7 +5452,7 @@ F:        Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
 RANDOM NUMBER DRIVER
-M:     Matt Mackall <mpm@selenic.com>
+M:     Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
 F:     drivers/char/random.c
 
diff --git a/Makefile b/Makefile
index fa5acc8..9fd7e60 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 2
-SUBLEVEL = 26
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 640f909..6f1aca7 100644
@@ -14,8 +14,8 @@
  */
 
 
-#define ATOMIC_INIT(i)         ( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i)       ( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i)         { (i) }
+#define ATOMIC64_INIT(i)       { (i) }
 
 #define atomic_read(v)         (*(volatile int *)&(v)->counter)
 #define atomic64_read(v)       (*(volatile long *)&(v)->counter)
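
The Alpha hunk above (like the ia64 and parisc hunks later in this diff) drops the compound-literal cast from ATOMIC_INIT/ATOMIC64_INIT. One practical property of the plain braced form, shown in the standalone sketch below (stand-in types, not the kernel's headers), is that it can appear anywhere a brace-enclosed initializer is expected, including nested inside other static initializers, where a compound literal is not a constant expression in standard C:

    /* Userspace sketch with a stand-in type (assumption: this is not the
     * kernel's atomic_t, just an illustration of the two initializer forms). */
    #include <stdio.h>

    typedef struct { int counter; } atomic_t;

    #define ATOMIC_INIT(i)  { (i) }   /* new form: plain initializer list */
    /* old form: #define ATOMIC_INIT(i) ((atomic_t) { (i) })  -- compound literal */

    static atomic_t simple = ATOMIC_INIT(1);

    /* The braced form also nests inside other static aggregate initializers,
     * where a compound literal would not be a constant expression. */
    struct holder { atomic_t a; };
    static struct holder h = { .a = ATOMIC_INIT(2) };

    int main(void)
    {
            printf("%d %d\n", simple.counter, h.a.counter);
            return 0;
    }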
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 06edfef..3eeb47c 100644
 
 #define SO_RXQ_OVFL             40
 
+#ifdef __KERNEL__
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
 #define SOCK_NONBLOCK  0x40000000
+#endif /* __KERNEL__ */
 
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7868714..9a9f971 100644
@@ -2070,6 +2070,7 @@ source "drivers/cpufreq/Kconfig"
 config CPU_FREQ_IMX
        tristate "CPUfreq driver for i.MX CPUs"
        depends on ARCH_MXC && CPU_FREQ
+       select CPU_FREQ_TABLE
        help
          This enables the CPUfreq driver for i.MX CPUs.
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index dfcf3b0..362c7ca 100644
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index f8766af..4790df2 100644
@@ -29,8 +29,8 @@
                aips@70000000 { /* aips-1 */
                        spba@70000000 {
                                esdhc@70004000 { /* ESDHC1 */
-                                       fsl,cd-internal;
-                                       fsl,wp-internal;
+                                       fsl,cd-controller;
+                                       fsl,wp-controller;
                                        status = "okay";
                                };
 
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 6ee781b..3ee3e84 100644
@@ -32,7 +32,6 @@ CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_AEABI=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 CONFIG_AUTO_ZRELADDR=y
 CONFIG_FPE_NWFPE=y
 CONFIG_NET=y
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf..b1479fd 100644
  */
 #ifndef _ASM_MUTEX_H
 #define _ASM_MUTEX_H
-
-#if __LINUX_ARM_ARCH__ < 6
-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
-# include <asm-generic/mutex-xchg.h>
-#else
-
 /*
- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
- * atomic decrement (it is not a reliable atomic decrement but it satisfies
- * the defined semantics for our purpose, while being smaller and faster
- * than a real atomic decrement or atomic swap.  The idea is to attempt
- * decrementing the lock value only once.  If once decremented it isn't zero,
- * or if its store-back fails due to a dispute on the exclusive store, we
- * simply bail out immediately through the slow path where the lock will be
- * reattempted until it succeeds.
+ * On pre-ARMv6 hardware this results in a swp-based implementation,
+ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
+ * accesses instead.
  */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
-       int __ex_flag, __res;
-
-       __asm__ (
-
-               "ldrex  %0, [%2]        \n\t"
-               "sub    %0, %0, #1      \n\t"
-               "strex  %1, %0, [%2]    "
-
-               : "=&r" (__res), "=&r" (__ex_flag)
-               : "r" (&(count)->counter)
-               : "cc","memory" );
-
-       __res |= __ex_flag;
-       if (unlikely(__res != 0))
-               fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
-       int __ex_flag, __res;
-
-       __asm__ (
-
-               "ldrex  %0, [%2]        \n\t"
-               "sub    %0, %0, #1      \n\t"
-               "strex  %1, %0, [%2]    "
-
-               : "=&r" (__res), "=&r" (__ex_flag)
-               : "r" (&(count)->counter)
-               : "cc","memory" );
-
-       __res |= __ex_flag;
-       if (unlikely(__res != 0))
-               __res = fail_fn(count);
-       return __res;
-}
-
-/*
- * Same trick is used for the unlock fast path. However the original value,
- * rather than the result, is used to test for success in order to have
- * better generated assembly.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
-       int __ex_flag, __res, __orig;
-
-       __asm__ (
-
-               "ldrex  %0, [%3]        \n\t"
-               "add    %1, %0, #1      \n\t"
-               "strex  %2, %1, [%3]    "
-
-               : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
-               : "r" (&(count)->counter)
-               : "cc","memory" );
-
-       __orig |= __ex_flag;
-       if (unlikely(__orig != 0))
-               fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock()     1
-
-/*
- * For __mutex_fastpath_trylock we use another construct which could be
- * described as a "single value cmpxchg".
- *
- * This provides the needed trylock semantics like cmpxchg would, but it is
- * lighter and less generic than a true cmpxchg implementation.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
-       int __ex_flag, __res, __orig;
-
-       __asm__ (
-
-               "1: ldrex       %0, [%3]        \n\t"
-               "subs           %1, %0, #1      \n\t"
-               "strexeq        %2, %1, [%3]    \n\t"
-               "movlt          %0, #0          \n\t"
-               "cmpeq          %2, #0          \n\t"
-               "bgt            1b              "
-
-               : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
-               : "r" (&count->counter)
-               : "cc", "memory" );
-
-       return __orig;
-}
-
-#endif
+#include <asm-generic/mutex-xchg.h>
 #endif
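
The hunk above removes ARM's hand-rolled ldrex/strex mutex fastpaths in favour of the generic asm-generic/mutex-xchg.h implementation. As a rough userspace-only sketch of the xchg-based idea (simplified; the count convention of 1 = unlocked, 0 = locked, negative = locked with waiters is assumed here, and this is not the kernel's code):

    #include <stdatomic.h>
    #include <stdio.h>

    struct sketch_mutex { atomic_int count; };

    static void slowpath_lock(struct sketch_mutex *m)   { (void)m; /* would block; elided */ }
    static void slowpath_unlock(struct sketch_mutex *m) { (void)m; /* would wake a waiter; elided */ }

    static void fastpath_lock(struct sketch_mutex *m)
    {
            /* Unconditionally swap in "locked"; if the old value was not 1,
             * someone already holds the lock and we take the slow path. */
            if (atomic_exchange(&m->count, 0) != 1)
                    slowpath_lock(m);
    }

    static void fastpath_unlock(struct sketch_mutex *m)
    {
            /* Swap in "unlocked"; a non-zero old value here means waiters
             * may need waking, so fall back to the slow path. */
            if (atomic_exchange(&m->count, 1) != 0)
                    slowpath_unlock(m);
    }

    int main(void)
    {
            struct sketch_mutex m;

            atomic_init(&m.count, 1);
            fastpath_lock(&m);
            fastpath_unlock(&m);
            printf("count = %d\n", atomic_load(&m.count));
            return 0;
    }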
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index afe31a3..97d705f 100644
@@ -235,6 +235,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 #define pte_clear(mm,addr,ptep)        set_pte_ext(ptep, __pte(0), 0)
 
+#define pte_none(pte)          (!pte_val(pte))
+#define pte_present(pte)       (pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte)         (!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte)         (pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)         (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)          (!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte)       (0)
+
+#define pte_present_user(pte) \
+       ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+        (L_PTE_PRESENT | L_PTE_USER))
+
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
 {
@@ -246,25 +258,15 @@ extern void __sync_icache_dcache(pte_t pteval);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
 {
-       if (addr >= TASK_SIZE)
-               set_pte_ext(ptep, pteval, 0);
-       else {
+       unsigned long ext = 0;
+
+       if (addr < TASK_SIZE && pte_present_user(pteval)) {
                __sync_icache_dcache(pteval);
-               set_pte_ext(ptep, pteval, PTE_EXT_NG);
+               ext |= PTE_EXT_NG;
        }
-}
 
-#define pte_none(pte)          (!pte_val(pte))
-#define pte_present(pte)       (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)         (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte)         (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)         (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)          (!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte)       (0)
-
-#define pte_present_user(pte) \
-       ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-        (L_PTE_PRESENT | L_PTE_USER))
+       set_pte_ext(ptep, pteval, ext);
+}
 
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -291,13 +293,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <- type --> 0 0 0
+ *   <--------------- offset ----------------------> < type -> 0 0 0
  *
- * This gives us up to 63 swap files and 32GB per swap file.  Note that
+ * This gives us up to 31 swap files and 64GB per swap file.  Note that
  * the offset field is always non-zero.
  */
 #define __SWP_TYPE_SHIFT       3
-#define __SWP_TYPE_BITS                6
+#define __SWP_TYPE_BITS                5
 #define __SWP_TYPE_MASK                ((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT     (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index b145f16..ece0996 100644
@@ -242,6 +242,19 @@ svc_preempt:
        b       1b
 #endif
 
+__und_fault:
+       @ Correct the PC such that it is pointing at the instruction
+       @ which caused the fault.  If the faulting instruction was ARM
+       @ the PC will be pointing at the next instruction, and have to
+       @ subtract 4.  Otherwise, it is Thumb, and the PC will be
+       @ pointing at the second half of the Thumb instruction.  We
+       @ have to subtract 2.
+       ldr     r2, [r0, #S_PC]
+       sub     r2, r2, r1
+       str     r2, [r0, #S_PC]
+       b       do_undefinstr
+ENDPROC(__und_fault)
+
        .align  5
 __und_svc:
 #ifdef CONFIG_KPROBES
@@ -259,25 +272,32 @@ __und_svc:
        @
        @  r0 - instruction
        @
-#ifndef        CONFIG_THUMB2_KERNEL
+#ifndef CONFIG_THUMB2_KERNEL
        ldr     r0, [r4, #-4]
 #else
+       mov     r1, #2
        ldrh    r0, [r4, #-2]                   @ Thumb instruction at LR - 2
        cmp     r0, #0xe800                     @ 32-bit instruction if xx >= 0
-       ldrhhs  r9, [r4]                        @ bottom 16 bits
-       orrhs   r0, r9, r0, lsl #16
+       blo     __und_svc_fault
+       ldrh    r9, [r4]                        @ bottom 16 bits
+       add     r4, r4, #2
+       str     r4, [sp, #S_PC]
+       orr     r0, r9, r0, lsl #16
 #endif
-       adr     r9, BSYM(1f)
+       adr     r9, BSYM(__und_svc_finish)
        mov     r2, r4
        bl      call_fpe
 
+       mov     r1, #4                          @ PC correction to apply
+__und_svc_fault:
        mov     r0, sp                          @ struct pt_regs *regs
-       bl      do_undefinstr
+       bl      __und_fault
 
        @
        @ IRQs off again before pulling preserved data off the stack
        @
-1:     disable_irq_notrace
+__und_svc_finish:
+       disable_irq_notrace
 
        @
        @ restore SPSR and restart the instruction
@@ -421,25 +441,33 @@ __und_usr:
        mov     r2, r4
        mov     r3, r5
 
+       @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+       @      faulting instruction depending on Thumb mode.
+       @ r3 = regs->ARM_cpsr
        @
-       @ fall through to the emulation code, which returns using r9 if
-       @ it has emulated the instruction, or the more conventional lr
-       @ if we are to treat this as a real undefined instruction
-       @
-       @  r0 - instruction
+       @ The emulation code returns using r9 if it has emulated the
+       @ instruction, or the more conventional lr if we are to treat
+       @ this as a real undefined instruction
        @
        adr     r9, BSYM(ret_from_exception)
-       adr     lr, BSYM(__und_usr_unknown)
+
        tst     r3, #PSR_T_BIT                  @ Thumb mode?
-       itet    eq                              @ explicit IT needed for the 1f label
-       subeq   r4, r2, #4                      @ ARM instr at LR - 4
-       subne   r4, r2, #2                      @ Thumb instr at LR - 2
-1:     ldreqt  r0, [r4]
+       bne     __und_usr_thumb
+       sub     r4, r2, #4                      @ ARM instr at LR - 4
+1:     ldrt    r0, [r4]
 #ifdef CONFIG_CPU_ENDIAN_BE8
-       reveq   r0, r0                          @ little endian instruction
+       rev     r0, r0                          @ little endian instruction
 #endif
-       beq     call_fpe
+       @ r0 = 32-bit ARM instruction which caused the exception
+       @ r2 = PC value for the following instruction (:= regs->ARM_pc)
+       @ r4 = PC value for the faulting instruction
+       @ lr = 32-bit undefined instruction function
+       adr     lr, BSYM(__und_usr_fault_32)
+       b       call_fpe
+
+__und_usr_thumb:
        @ Thumb instruction
+       sub     r4, r2, #2                      @ First half of thumb instr at LR - 2
 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 /*
  * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
@@ -453,7 +481,7 @@ __und_usr:
        ldr     r5, .LCcpu_architecture
        ldr     r5, [r5]
        cmp     r5, #CPU_ARCH_ARMv7
-       blo     __und_usr_unknown
+       blo     __und_usr_fault_16              @ 16bit undefined instruction
 /*
  * The following code won't get run unless the running CPU really is v7, so
  * coding round the lack of ldrht on older arches is pointless.  Temporarily
@@ -461,15 +489,18 @@ __und_usr:
  */
        .arch   armv6t2
 #endif
-2:
- ARM(  ldrht   r5, [r4], #2    )
- THUMB(        ldrht   r5, [r4]        )
- THUMB(        add     r4, r4, #2      )
+2:     ldrht   r5, [r4]
        cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
-       blo     __und_usr_unknown
-3:     ldrht   r0, [r4]
+       blo     __und_usr_fault_16              @ 16bit undefined instruction
+3:     ldrht   r0, [r2]
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
+       str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
        orr     r0, r0, r5, lsl #16
+       adr     lr, BSYM(__und_usr_fault_32)
+       @ r0 = the two 16-bit Thumb instructions which caused the exception
+       @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+       @ r4 = PC value for the first 16-bit Thumb instruction
+       @ lr = 32bit undefined instruction function
 
 #if __LINUX_ARM_ARCH__ < 7
 /* If the target arch was overridden, change it back: */
@@ -480,17 +511,13 @@ __und_usr:
 #endif
 #endif /* __LINUX_ARM_ARCH__ < 7 */
 #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
-       b       __und_usr_unknown
+       b       __und_usr_fault_16
 #endif
- UNWIND(.fnend         )
+ UNWIND(.fnend)
 ENDPROC(__und_usr)
 
-       @
-       @ fallthrough to call_fpe
-       @
-
 /*
- * The out of line fixup for the ldrt above.
+ * The out of line fixup for the ldrt instructions above.
  */
        .pushsection .fixup, "ax"
 4:     mov     pc, r9
@@ -521,11 +548,12 @@ ENDPROC(__und_usr)
  * NEON handler code.
  *
  * Emulators may wish to make use of the following registers:
- *  r0  = instruction opcode.
- *  r2  = PC+4
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r2  = PC value to resume execution after successful emulation
  *  r9  = normal "successful" return address
- *  r10 = this threads thread_info structure.
+ *  r10 = this threads thread_info structure
  *  lr  = unrecognised instruction return address
+ * IRQs disabled, FIQs enabled.
  */
        @
        @ Fall-through from Thumb-2 __und_usr
@@ -660,12 +688,17 @@ ENTRY(no_fp)
        mov     pc, lr
 ENDPROC(no_fp)
 
-__und_usr_unknown:
-       enable_irq
+__und_usr_fault_32:
+       mov     r1, #4
+       b       1f
+__und_usr_fault_16:
+       mov     r1, #2
+1:     enable_irq
        mov     r0, sp
        adr     lr, BSYM(ret_from_exception)
-       b       do_undefinstr
-ENDPROC(__und_usr_unknown)
+       b       __und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)
 
        .align  5
 __pabt_usr:
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 814a52a..2bc1a8e 100644
@@ -160,6 +160,12 @@ static int debug_arch_supported(void)
                arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+       return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -620,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        info->address &= ~alignment_mask;
        info->ctrl.len <<= offset;
 
-       /*
-        * Currently we rely on an overflow handler to take
-        * care of single-stepping the breakpoint when it fires.
-        * In the case of userspace breakpoints on a core with V7 debug,
-        * we can use the mismatch feature as a poor-man's hardware
-        * single-step, but this only works for per-task breakpoints.
-        */
-       if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-           !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-               pr_warning("overflow handler required but none found\n");
-               ret = -EINVAL;
+       if (!bp->overflow_handler) {
+               /*
+                * Mismatch breakpoints are required for single-stepping
+                * breakpoints.
+                */
+               if (!core_has_mismatch_brps())
+                       return -EINVAL;
+
+               /* We don't allow mismatch breakpoints in kernel space. */
+               if (arch_check_bp_in_kernelspace(bp))
+                       return -EPERM;
+
+               /*
+                * Per-cpu breakpoints are not supported by our stepping
+                * mechanism.
+                */
+               if (!bp->hw.bp_target)
+                       return -EINVAL;
+
+               /*
+                * We only support specific access types if the fsr
+                * reports them.
+                */
+               if (!debug_exception_updates_fsr() &&
+                   (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+                    info->ctrl.type == ARM_BREAKPOINT_STORE))
+                       return -EINVAL;
        }
+
 out:
        return ret;
 }
@@ -707,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                                goto unlock;
 
                        /* Check that the access type matches. */
-                       access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-                                HW_BREAKPOINT_R;
-                       if (!(access & hw_breakpoint_type(wp)))
-                               goto unlock;
+                       if (debug_exception_updates_fsr()) {
+                               access = (fsr & ARM_FSR_ACCESS_MASK) ?
+                                         HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+                               if (!(access & hw_breakpoint_type(wp)))
+                                       goto unlock;
+                       }
 
                        /* We have a winner. */
                        info->trigger = addr;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3d0c6fb..e68d251 100644
@@ -125,6 +125,7 @@ void arm_machine_restart(char mode, const char *cmd)
         */
        mdelay(1000);
        printk("Reboot failed -- System halted\n");
+       local_irq_disable();
        while (1);
 }
 
@@ -240,6 +241,7 @@ void machine_shutdown(void)
 void machine_halt(void)
 {
        machine_shutdown();
+       local_irq_disable();
        while (1);
 }
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 160cb16..7ac5dfd 100644
@@ -362,18 +362,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 
 asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 {
-       unsigned int correction = thumb_mode(regs) ? 2 : 4;
        unsigned int instr;
        siginfo_t info;
        void __user *pc;
 
-       /*
-        * According to the ARM ARM, PC is 2 or 4 bytes ahead,
-        * depending whether we're in Thumb mode or not.
-        * Correct this offset.
-        */
-       regs->ARM_pc -= correction;
-
        pc = (void __user *)instruction_pointer(regs);
 
        if (processor_mode(regs) == SVC_MODE) {
@@ -388,20 +380,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
                        instr = *(u32 *) pc;
        } else if (thumb_mode(regs)) {
-               get_user(instr, (u16 __user *)pc);
+               if (get_user(instr, (u16 __user *)pc))
+                       goto die_sig;
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
-                       get_user(instr2, (u16 __user *)pc+1);
+                       if (get_user(instr2, (u16 __user *)pc+1))
+                               goto die_sig;
                        instr <<= 16;
                        instr |= instr2;
                }
-       } else {
-               get_user(instr, (u32 __user *)pc);
+       } else if (get_user(instr, (u32 __user *)pc)) {
+               goto die_sig;
        }
 
        if (call_undef_hook(regs, instr) == 0)
                return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
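
Both the entry-armv.S and traps.c hunks above rely on the same Thumb-2 property: a 16-bit halfword whose value is 0xe800 or higher is the first half of a 32-bit instruction (hence the `cmp r0, #0xe800` and `is_wide_instruction()` checks, and the `orr r0, r9, r0, lsl #16` that reassembles the full opcode). A small standalone sketch of that decode step (illustrative only, not the kernel's code):

    #include <stdint.h>
    #include <stdio.h>

    /* First halfword of a 32-bit Thumb-2 instruction is >= 0xe800
     * (top bits 0b11101, 0b11110 or 0b11111). */
    static int is_wide_instruction(uint16_t hw)
    {
            return hw >= 0xe800;
    }

    int main(void)
    {
            uint16_t first = 0xf000, second = 0xf800;  /* illustrative halfword pair */
            uint32_t instr = first;

            if (is_wide_instruction(first))
                    instr = ((uint32_t)first << 16) | second;  /* as in the orr above */

            printf("%s instruction: %#x\n",
                   is_wide_instruction(first) ? "32-bit" : "16-bit", (unsigned)instr);
            return 0;
    }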
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 1620b15..cb105bf 100644
@@ -92,7 +92,7 @@ void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data, &dove_mbus_dram_info,
                        DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
-                       0, get_tclk());
+                       0, get_tclk(), 1600);
 }
 
 /*****************************************************************************
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d5..f8f7437 100644
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
          : "cc");
 }
 
-static inline void cpu_leave_lowpower(void)
-{
-       unsigned int v;
-
-       asm volatile(
-               "mrc    p15, 0, %0, c1, c0, 0\n"
-       "       orr     %0, %0, %1\n"
-       "       mcr     p15, 0, %0, c1, c0, 0\n"
-       "       mrc     p15, 0, %0, c1, c0, 1\n"
-       "       orr     %0, %0, %2\n"
-       "       mcr     p15, 0, %0, c1, c0, 1\n"
-         : "=&r" (v)
-         : "Ir" (CR_C), "Ir" (0x40)
-         : "cc");
-}
-
 /*
  * platform-specific code to shutdown a CPU
  *
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
 {
        cpu_enter_lowpower();
        imx_enable_cpu(cpu, false);
-       cpu_do_idle();
-       cpu_leave_lowpower();
 
-       /* We should never return from idle */
-       panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+       /* spin here until hardware takes it down */
+       while (1)
+               ;
 }
 
 int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index c5dbbb3..06faa97 100644
@@ -88,7 +88,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 
        orion_ge00_init(eth_data, &kirkwood_mbus_dram_info,
                        GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
-                       IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
+                       IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk, 1600);
 }
 
 
@@ -102,7 +102,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 
        orion_ge01_init(eth_data, &kirkwood_mbus_dram_info,
                        GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
-                       IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
+                       IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk, 1600);
 }
 
 
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index d90e244..570ee4d 100644
@@ -202,7 +202,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data, &mv78xx0_mbus_dram_info,
                        GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
-                       IRQ_MV78XX0_GE_ERR, get_tclk());
+                       IRQ_MV78XX0_GE_ERR, get_tclk(),
+                       MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
@@ -213,7 +214,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge01_init(eth_data, &mv78xx0_mbus_dram_info,
                        GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
-                       NO_IRQ, get_tclk());
+                       NO_IRQ, get_tclk(),
+                       MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 53b68b8..20260db 100644
@@ -95,7 +95,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data, &orion5x_mbus_dram_info,
                        ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
-                       IRQ_ORION5X_ETH_ERR, orion5x_tclk);
+                       IRQ_ORION5X_ETH_ERR, orion5x_tclk,
+                       MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index f0c05f4..ae7786d 100644
@@ -951,12 +951,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
 
 static struct eeti_ts_platform_data eeti_ts_pdata = {
        .irq_active_high = 1,
+       .irq_gpio = GPIO_TOUCH_IRQ,
 };
 
 static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
        .type   = "eeti_ts",
        .addr   = 0x0a,
-       .irq    = gpio_to_irq(GPIO_TOUCH_IRQ),
        .platform_data = &eeti_ts_pdata,
 };
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1a8d4aa..8fda9f7 100644
@@ -236,8 +236,6 @@ void __sync_icache_dcache(pte_t pteval)
        struct page *page;
        struct address_space *mapping;
 
-       if (!pte_present_user(pteval))
-               return;
        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 845f461..ea94765 100644
@@ -39,10 +39,18 @@ ENTRY(v7wbi_flush_user_tlb_range)
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        asid    r3, r3                          @ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+       ALT_SMP(W(mov)  r3, #0  )
+       ALT_UP(W(nop)           )
+#endif
        orr     r0, r3, r0, lsl #PAGE_SHIFT     @ Create initial MVA
        mov     r1, r1, lsl #PAGE_SHIFT
 1:
+#ifdef CONFIG_ARM_ERRATA_720789
+       ALT_SMP(mcr     p15, 0, r0, c8, c3, 3)  @ TLB invalidate U MVA all ASID (shareable)
+#else
        ALT_SMP(mcr     p15, 0, r0, c8, c3, 1)  @ TLB invalidate U MVA (shareable)
+#endif
        ALT_UP(mcr      p15, 0, r0, c8, c7, 1)  @ TLB invalidate U MVA
 
        add     r0, r0, #PAGE_SZ
@@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
        mov     r0, r0, lsl #PAGE_SHIFT
        mov     r1, r1, lsl #PAGE_SHIFT
 1:
+#ifdef CONFIG_ARM_ERRATA_720789
+       ALT_SMP(mcr     p15, 0, r0, c8, c3, 3)  @ TLB invalidate U MVA all ASID (shareable)
+#else
        ALT_SMP(mcr     p15, 0, r0, c8, c3, 1)  @ TLB invalidate U MVA (shareable)
+#endif
        ALT_UP(mcr      p15, 0, r0, c8, c7, 1)  @ TLB invalidate U MVA
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index af3b92b..f9adbbb 100644
@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
 
 void omap_dm_timer_disable(struct omap_dm_timer *timer)
 {
-       pm_runtime_put(&timer->pdev->dev);
+       pm_runtime_put_sync(&timer->pdev->dev);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
 
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 11dce87..8a6886a 100644
@@ -263,10 +263,12 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
                            unsigned long irq_err,
-                           int tclk)
+                           int tclk,
+                           unsigned int tx_csum_limit)
 {
        fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
+       orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
        ge_complete(&orion_ge00_shared_data, mbus_dram_info, tclk,
                    orion_ge00_resources, irq, &orion_ge00_shared,
                    eth_data, &orion_ge00);
@@ -317,10 +319,12 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
                            unsigned long irq_err,
-                           int tclk)
+                           int tclk,
+                           unsigned int tx_csum_limit)
 {
        fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
+       orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
        ge_complete(&orion_ge01_shared_data, mbus_dram_info, tclk,
                    orion_ge01_resources, irq, &orion_ge01_shared,
                    eth_data, &orion_ge01);
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index a2c0e31..b637dae 100644
@@ -41,14 +41,16 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
                            unsigned long irq_err,
-                           int tclk);
+                           int tclk,
+                           unsigned int tx_csum_limit);
 
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
                            struct mbus_dram_target_info *mbus_dram_info,
                            unsigned long mapbase,
                            unsigned long irq,
                            unsigned long irq_err,
-                           int tclk);
+                           int tclk,
+                           unsigned int tx_csum_limit);
 
 void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
                            struct mbus_dram_target_info *mbus_dram_info,
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 8a90b6a..1eedf8d 100644
@@ -431,7 +431,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
  * when necessary.
 */
 
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                        dma_addr_t data, int size)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903..cc926c9 100644
@@ -7,18 +7,20 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Basic entry code, called from the kernel's undefined instruction trap.
- *  r0  = faulted instruction
- *  r5  = faulted PC+4
- *  r9  = successful return
- *  r10 = thread_info structure
- *  lr  = failure return
  */
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include "../kernel/entry-header.S"
 
+@ VFP entry point.
+@
+@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@  r2  = PC value to resume execution after successful emulation
+@  r9  = normal "successful" return address
+@  r10 = this threads thread_info structure
+@  lr  = unrecognised instruction return address
+@  IRQs disabled.
+@
 ENTRY(do_vfp)
 #ifdef CONFIG_PREEMPT
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 2d30c7f..3a0efaa 100644
 
 @ VFP hardware support entry point.
 @
-@  r0  = faulted instruction
-@  r2  = faulted PC+4
-@  r9  = successful return
+@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@  r2  = PC value to resume execution after successful emulation
+@  r9  = normal "successful" return address
 @  r10 = vfp_state union
 @  r11 = CPU number
-@  lr  = failure return
-
+@  lr  = unrecognised instruction return address
+@  IRQs enabled.
 ENTRY(vfp_support_entry)
        DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
 
@@ -161,9 +161,12 @@ vfp_hw_state_valid:
                                        @ exception before retrying branch
                                        @ out before setting an FPEXC that
                                        @ stops us reading stuff
-       VFPFMXR FPEXC, r1               @ restore FPEXC last
-       sub     r2, r2, #4
-       str     r2, [sp, #S_PC]         @ retry the instruction
+       VFPFMXR FPEXC, r1               @ Restore FPEXC last
+       sub     r2, r2, #4              @ Retry current instruction - if Thumb
+       str     r2, [sp, #S_PC]         @ mode it's two 16-bit instructions,
+                                       @ else it's one 32-bit instruction, so
+                                       @ always subtract 4 from the following
+                                       @ instruction address.
 #ifdef CONFIG_PREEMPT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 8ea07e4..f0702f3 100644
@@ -453,10 +453,16 @@ static int vfp_pm_suspend(void)
 
                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       } else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
+               fmxr(FPEXC, fpexc | FPEXC_EN);
+               vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+               fmxr(FPEXC, fpexc);
+#endif
        }
 
        /* clear any information we had about last context state */
-       memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
+       vfp_current_hw_state[ti->cpu] = NULL;
 
        return 0;
 }
@@ -622,8 +628,10 @@ static int __init vfp_init(void)
                        if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
                                elf_hwcap |= HWCAP_NEON;
 #endif
+#ifdef CONFIG_VFPv3
                        if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
                                elf_hwcap |= HWCAP_VFPv4;
+#endif
                }
        }
        return 0;
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 3fad89e..2fc214b 100644
@@ -18,8 +18,8 @@
 #include <asm/system.h>
 
 
-#define ATOMIC_INIT(i)         ((atomic_t) { (i) })
-#define ATOMIC64_INIT(i)       ((atomic64_t) { (i) })
+#define ATOMIC_INIT(i)         { (i) }
+#define ATOMIC64_INIT(i)       { (i) }
 
 #define atomic_read(v)         (*(volatile int *)&(v)->counter)
 #define atomic64_read(v)       (*(volatile long *)&(v)->counter)
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index c3c5a86..8798ebc 100644
@@ -33,8 +33,8 @@
 
 /* the following macro is used when enabling interrupts */
 #if defined(MACH_ATARI_ONLY)
-       /* block out HSYNC on the atari */
-#define ALLOWINT       (~0x400)
+       /* block out HSYNC = ipl 2 on the atari */
+#define ALLOWINT       (~0x500)
 #define        MAX_NOINT_IPL   3
 #else
        /* portable version */
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8d..9a5932e 100644
@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                        goto bad_access;
                }
 
-               mem_value = *mem;
+               /*
+                * No need to check for EFAULT; we know that the page is
+                * present and writable.
+                */
+               __get_user(mem_value, mem);
                if (mem_value == oldval)
-                       *mem = newval;
+                       __put_user(newval, mem);
 
                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 4054b31..c4b779b 100644
@@ -247,7 +247,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_sub_and_test(i,v)       (atomic_sub_return((i),(v)) == 0)
 
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
 
 #define smp_mb__before_atomic_dec()    smp_mb()
 #define smp_mb__after_atomic_dec()     smp_mb()
@@ -256,7 +256,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
 
 static __inline__ s64
 __atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7c5324f..cc20b0a 100644
@@ -79,6 +79,7 @@ int main(void)
        DEFINE(SIGSEGV, SIGSEGV);
        DEFINE(NMI_MASK, NMI_MASK);
        DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+       DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
 #else
        DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2cc451a..6856062 100644
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
 
 void doorbell_cause_ipi(int cpu, unsigned long data)
 {
+       /* Order previous accesses vs. msgsnd, which is treated as a store */
+       mb();
        ppc_msgsnd(PPC_DBELL, 0, data);
 }
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d834425..654fc53 100644
@@ -380,6 +380,12 @@ _GLOBAL(ret_from_fork)
        li      r3,0
        b       syscall_exit
 
+       .section        ".toc","aw"
+DSCR_DEFAULT:
+       .tc dscr_default[TC],dscr_default
+
+       .section        ".text"
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
@@ -519,9 +525,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)
 
-       ld      r6,_CCR(r1)
-       mtcrf   0xFF,r6
-
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
        ld      r0,THREAD_VRSAVE(r4)
@@ -530,14 +533,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+       lwz     r6,THREAD_DSCR_INHERIT(r4)
+       ld      r7,DSCR_DEFAULT@toc(2)
        ld      r0,THREAD_DSCR(r4)
-       cmpd    r0,r25
-       beq     1f
+       cmpwi   r6,0
+       bne     1f
+       ld      r0,0(r7)
+1:     cmpd    r0,r25
+       beq     2f
        mtspr   SPRN_DSCR,r0
-1:     
+2:
 END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 #endif
 
+       ld      r6,_CCR(r1)
+       mtcrf   0xFF,r6
+
        /* r3-r13 are destroyed -- Cort */
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6457574..d687e3f 100644
@@ -778,16 +778,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif /* CONFIG_PPC_STD_MMU_64 */
 #ifdef CONFIG_PPC64 
        if (cpu_has_feature(CPU_FTR_DSCR)) {
-               if (current->thread.dscr_inherit) {
-                       p->thread.dscr_inherit = 1;
-                       p->thread.dscr = current->thread.dscr;
-               } else if (0 != dscr_default) {
-                       p->thread.dscr_inherit = 1;
-                       p->thread.dscr = dscr_default;
-               } else {
-                       p->thread.dscr_inherit = 0;
-                       p->thread.dscr = 0;
-               }
+               p->thread.dscr_inherit = current->thread.dscr_inherit;
+               p->thread.dscr = current->thread.dscr;
        }
 #endif
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6df7090..fe04b4a 100644
@@ -214,8 +214,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;
 
+       /*
+        * Order previous accesses before accesses in the IPI handler.
+        */
+       smp_mb();
        message[msg] = 1;
-       mb();
+       /*
+        * cause_ipi functions are required to include a full barrier
+        * before doing whatever causes the IPI.
+        */
        smp_ops->cause_ipi(cpu, info->data);
 }
 
@@ -227,7 +234,7 @@ irqreturn_t smp_ipi_demux(void)
        mb();   /* order any irq clear */
 
        do {
-               all = xchg_local(&info->messages, 0);
+               all = xchg(&info->messages, 0);
 
 #ifdef __BIG_ENDIAN
                if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index ce035c1..55be64d 100644
@@ -192,6 +192,14 @@ static ssize_t show_dscr_default(struct sysdev_class *class,
        return sprintf(buf, "%lx\n", dscr_default);
 }
 
+static void update_dscr(void *dummy)
+{
+       if (!current->thread.dscr_inherit) {
+               current->thread.dscr = dscr_default;
+               mtspr(SPRN_DSCR, dscr_default);
+       }
+}
+
 static ssize_t __used store_dscr_default(struct sysdev_class *class,
                struct sysdev_class_attribute *attr, const char *buf,
                size_t count)
@@ -204,6 +212,8 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class,
                return -EINVAL;
        dscr_default = val;
 
+       on_each_cpu(update_dscr, NULL, 1);
+
        return count;
 }
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5459d14..82dcd4d 100644
@@ -942,8 +942,9 @@ static int emulate_instruction(struct pt_regs *regs)
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mtdscr, regs);
                rd = (instword >> 21) & 0x1f;
-               mtspr(SPRN_DSCR, regs->gpr[rd]);
+               current->thread.dscr = regs->gpr[rd];
                current->thread.dscr_inherit = 1;
+               mtspr(SPRN_DSCR, current->thread.dscr);
                return 0;
        }
 #endif
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 9518d36..5c76bf7 100644
@@ -27,33 +27,53 @@ static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
 {
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;
+       unsigned int ret = XICS_IRQ_SPURIOUS;
 
        rc = plpar_hcall(H_XIRR, retbuf, cppr);
-       if (rc != H_SUCCESS)
-               panic(" bad return code xirr - rc = %lx\n", rc);
-       return (unsigned int)retbuf[0];
+       if (rc == H_SUCCESS) {
+               ret = (unsigned int)retbuf[0];
+       } else {
+               pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
+                       __func__, cppr, rc);
+               WARN_ON_ONCE(1);
+       }
+
+       return ret;
 }
 
 static inline void icp_hv_set_xirr(unsigned int value)
 {
        long rc = plpar_hcall_norets(H_EOI, value);
-       if (rc != H_SUCCESS)
-               panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
+       if (rc != H_SUCCESS) {
+               pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
+                       __func__, value, rc);
+               WARN_ON_ONCE(1);
+       }
 }
 
 static inline void icp_hv_set_cppr(u8 value)
 {
        long rc = plpar_hcall_norets(H_CPPR, value);
-       if (rc != H_SUCCESS)
-               panic("bad return code cppr - rc = %lx\n", rc);
+       if (rc != H_SUCCESS) {
+               pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
+                       __func__, value, rc);
+               WARN_ON_ONCE(1);
+       }
 }
 
 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
 {
-       long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
-                                    value);
-       if (rc != H_SUCCESS)
-               panic("bad return code qirr - rc = %lx\n", rc);
+       int hw_cpu = get_hard_smp_processor_id(n_cpu);
+       long rc;
+
+       /* Make sure all previous accesses are ordered before IPI sending */
+       mb();
+       rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+       if (rc != H_SUCCESS) {
+               pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
+                       "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
+               WARN_ON_ONCE(1);
+       }
 }
 
 static void icp_hv_eoi(struct irq_data *d)
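
The smp_muxed_ipi_message_pass() hunk, the dbell.c hunk, and the icp_hv_set_qirr() hunk above all add the same ordering rule: publish the message data, issue a full memory barrier, and only then trigger the IPI, while the receiving side (smp_ipi_demux) grabs and clears all pending bits with a single xchg(). A userspace approximation of that pattern with C11 atomics (a sketch of the idea only, not the kernel's IPI path; the doorbell variable stands in for the hardware interrupt):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint messages;   /* one bit per message type             */
    static atomic_int  doorbell;   /* stands in for the IPI/doorbell side  */

    static void send_msg(unsigned int msg_bit)
    {
            atomic_fetch_or_explicit(&messages, msg_bit, memory_order_relaxed);
            /* Full barrier: the message must be visible before the IPI fires. */
            atomic_thread_fence(memory_order_seq_cst);
            atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
    }

    static unsigned int demux(void)
    {
            /* Grab and clear every pending bit in one atomic step, like xchg(). */
            return atomic_exchange(&messages, 0);
    }

    int main(void)
    {
            send_msg(1u << 0);
            if (atomic_load(&doorbell))
                    printf("pending = %#x\n", demux());
            return 0;
    }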
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5682f16..20f0e01 100644
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
@@ -92,4 +91,17 @@ static inline void activate_mm(struct mm_struct *prev,
         switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+                                struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+       if (oldmm->context.asce_limit < mm->context.asce_limit)
+               crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
index 5f33d37..172550d 100644 (file)
@@ -130,7 +130,9 @@ struct stack_frame {
        regs->psw.mask  = psw_user_bits | PSW_MASK_BA;                  \
        regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
        regs->gprs[15]  = new_stackp;                                   \
+       __tlb_flush_mm(current->mm);                                    \
        crst_table_downgrade(current->mm, 1UL << 31);                   \
+       update_mm(current->mm, current);                                \
 } while (0)
 
 /* Forward declaration, a strange C thing */
index 84a9828..38c6645 100644 (file)
@@ -615,7 +615,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
                return -EFAULT;
        if (a.offset & ~PAGE_MASK)
                return -EINVAL;
-       a.addr = (unsigned long) compat_ptr(a.addr);
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                              a.offset >> PAGE_SHIFT);
 }
@@ -626,7 +625,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
 
        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
-       a.addr = (unsigned long) compat_ptr(a.addr);
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
 }
 
index 18c51df..25408d3 100644 (file)
@@ -1636,7 +1636,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper)
        llgfr   %r6,%r6                 # unsigned long
        llgf    %r0,164(%r15)           # unsigned long
        stg     %r0,160(%r15)
-       jg      sys_process_vm_readv
+       jg      compat_sys_process_vm_readv
 
 ENTRY(compat_sys_process_vm_writev_wrapper)
        lgfr    %r2,%r2                 # compat_pid_t
@@ -1646,4 +1646,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
        llgfr   %r6,%r6                 # unsigned long
        llgf    %r0,164(%r15)           # unsigned long
        stg     %r0,160(%r15)
-       jg      sys_process_vm_writev
+       jg      compat_sys_process_vm_writev
index b28aaa4..0fc0a7e 100644 (file)
@@ -453,6 +453,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
        struct pt_regs regs;
        int access, fault;
 
+       /* Emulate a uaccess fault from kernel mode. */
        regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
        if (!irqs_disabled())
                regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
@@ -461,12 +462,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
        uaddr &= PAGE_MASK;
        access = write ? VM_WRITE : VM_READ;
        fault = do_exception(&regs, access, uaddr | 2);
-       if (unlikely(fault)) {
-               if (fault & VM_FAULT_OOM)
-                       return -EFAULT;
-               else if (fault & VM_FAULT_SIGBUS)
-                       do_sigbus(&regs, pgm_int_code, uaddr);
-       }
+       /*
+        * Since the fault happened in kernel mode while performing a uaccess
+        * all we need to do now is emulate a fixup in case "fault" is not
+        * zero.
+        * For the calling uaccess functions this always results in -EFAULT.
+        */
        return fault ? -EFAULT : 0;
 }
 
index a0155c0..c70b3d8 100644 (file)
@@ -106,9 +106,15 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+       int rc;
+
        if (!is_compat_task() &&
-           len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-               return crst_table_upgrade(current->mm, 1UL << 53);
+           len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+               rc = crst_table_upgrade(current->mm, 1UL << 53);
+               if (rc)
+                       return rc;
+               update_mm(current->mm, current);
+       }
        return 0;
 }
 
@@ -128,6 +134,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
                rc = crst_table_upgrade(mm, 1UL << 53);
                if (rc)
                        return (unsigned long) rc;
+               update_mm(mm, current);
                area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
        }
        return area;
@@ -150,6 +157,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
                rc = crst_table_upgrade(mm, 1UL << 53);
                if (rc)
                        return (unsigned long) rc;
+               update_mm(mm, current);
                area = arch_get_unmapped_area_topdown(filp, addr, len,
                                                      pgoff, flags);
        }
index f8ceac4..f8e92f8 100644 (file)
@@ -97,7 +97,6 @@ repeat:
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
-       update_mm(mm, current);
        return 0;
 }
 
@@ -105,9 +104,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
        pgd_t *pgd;
 
-       if (mm->context.asce_limit <= limit)
-               return;
-       __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -130,7 +126,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
-       update_mm(mm, current);
 }
 #endif
 
index 1f84794..73ef56c 100644 (file)
@@ -219,7 +219,7 @@ void __init arch_init_ideal_nops(void)
                        ideal_nops = intel_nops;
 #endif
                }
-
+               break;
        default:
 #ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
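
The one-line change above adds the break that was missing at the end of the Intel case, so execution no longer falls through into the default branch and overwrites the NOP table that was just selected. A toy illustration of that fallthrough bug, under hypothetical names, not the kernel code:

#include <assert.h>

enum vendor { VENDOR_A, VENDOR_OTHER };

static int pick(enum vendor v)
{
        int choice = 0;

        switch (v) {
        case VENDOR_A:
                choice = 1;
                break;          /* the fix: stop before the default branch */
        default:
                choice = 2;
                break;
        }
        return choice;
}

int main(void)
{
        assert(pick(VENDOR_A) == 1);
        assert(pick(VENDOR_OTHER) == 2);
        return 0;
}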
index 7315488..407789b 100644 (file)
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 #endif
                CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
                CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+               CPU_BASED_RDPMC_EXITING |
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        /*
         * We can allow some features even when not supported by the
index f581a18..df7d12c 100644 (file)
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 }
 
 /*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
        struct vm_area_struct *vma = find_vma(mm, addr);
        struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        struct vm_area_struct *svma;
        unsigned long saddr;
        pte_t *spte = NULL;
+       pte_t *pte;
 
        if (!vma_shareable(vma, addr))
-               return;
+               return (pte_t *)pmd_alloc(mm, pud, addr);
 
        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                put_page(virt_to_page(spte));
        spin_unlock(&mm->page_table_lock);
 out:
+       pte = (pte_t *)pmd_alloc(mm, pud, addr);
        mutex_unlock(&mapping->i_mmap_mutex);
+       return pte;
 }
 
 /*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                } else {
                        BUG_ON(sz != PMD_SIZE);
                        if (pud_none(*pud))
-                               huge_pmd_share(mm, addr, pud);
-                       pte = (pte_t *) pmd_alloc(mm, pud, addr);
+                               pte = huge_pmd_share(mm, addr, pud);
+                       else
+                               pte = (pte_t *)pmd_alloc(mm, pud, addr);
                }
        }
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
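
The comment added above explains why pmd_alloc() has to happen inside the same i_mmap_mutex section as the sharing decision. A userspace sketch of that rationale: find-or-install the shared object inside one critical section so two racing threads cannot each miss the other's choice. shared_buf and get_shared_buf() are illustrative names, not kernel interfaces.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t share_lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_buf;                 /* plays the role of the shared pmd page */

static int *get_shared_buf(void)
{
        int *buf;

        pthread_mutex_lock(&share_lock);
        if (!shared_buf)                /* first caller installs the object... */
                shared_buf = calloc(512, sizeof(int));
        buf = shared_buf;               /* ...later callers reuse it */
        pthread_mutex_unlock(&share_lock);
        return buf;
}

static void *worker(void *arg)
{
        (void)arg;
        return get_shared_buf();
}

int main(void)
{
        pthread_t a, b;
        void *ra, *rb;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, &ra);
        pthread_join(b, &rb);
        printf("both threads share one object: %s\n", ra == rb ? "yes" : "no");
        free(shared_buf);
        return 0;
}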
index 1b267e7..00a0385 100644 (file)
@@ -686,6 +686,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
+       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -721,6 +722,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
        spin_unlock_irqrestore(&m2p_override_lock, flags);
 
+       /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
+        * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
+        * pfn so that the following mfn_to_pfn(mfn) calls will return the
+        * pfn from the m2p_override (the backend pfn) instead.
+        * We need to do this because the pages shared by the frontend
+        * (xen-blkfront) can already be locked (lock_page, called by
+        * do_read_cache_page); when the userspace backend tries to use them
+        * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
+        * do_blockdev_direct_IO is going to try to lock the same pages
+        * again resulting in a deadlock.
+        * As a side effect get_user_pages_fast might not be safe on the
+        * frontend pages while they are being shared with the backend,
+        * because mfn_to_pfn (that ends up being called by GUPF) will
+        * return the backend pfn rather than the frontend pfn. */
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+               set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -732,6 +751,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
+       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -801,6 +821,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
        } else
                set_phys_to_machine(pfn, page->index);
 
+       /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+        * somewhere in this domain, even before being added to the
+        * m2p_override (see comment above in m2p_add_override).
+        * If there are no other entries in the m2p_override corresponding
+        * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+        * the original pfn (the one shared by the frontend): the backend
+        * cannot do any IO on this page anymore because it has been
+        * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+        * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+        * pfn again. */
+       mfn &= ~FOREIGN_FRAME_BIT;
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+                       m2p_find_override(mfn) == NULL)
+               set_phys_to_machine(pfn, mfn);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_remove_override);
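
The two comments added above describe the invariant behind the FOREIGN_FRAME_BIT handling: while a frontend page is shared, its original pfn is marked foreign so reverse lookups return the backend pfn from the override table, and the mark is cleared again on removal. A toy model of that bookkeeping with plain arrays; all names are hypothetical and this is not Xen code.

#include <assert.h>

#define NPAGES       8
#define FOREIGN_BIT  (1u << 31)

static unsigned p2m[NPAGES];            /* pfn -> mfn (+ optional FOREIGN bit) */
static unsigned m2p[NPAGES];            /* mfn -> pfn */
static int      override[NPAGES];       /* mfn -> backend pfn, -1 if none */

static unsigned mfn_to_pfn(unsigned mfn)
{
        unsigned pfn = m2p[mfn];

        /* If the owning pfn is marked foreign, divert to the override. */
        if ((p2m[pfn] & FOREIGN_BIT) && override[mfn] >= 0)
                return (unsigned)override[mfn];
        return pfn;
}

int main(void)
{
        unsigned frontend_pfn = 2, backend_pfn = 5, mfn = 3;
        int i;

        for (i = 0; i < NPAGES; i++)
                override[i] = -1;
        p2m[frontend_pfn] = mfn;
        m2p[mfn] = frontend_pfn;

        /* add_override: record the backend pfn, mark the original one. */
        override[mfn] = (int)backend_pfn;
        p2m[frontend_pfn] = mfn | FOREIGN_BIT;
        assert(mfn_to_pfn(mfn) == backend_pfn);

        /* remove_override: drop the entry, clear the foreign marker. */
        override[mfn] = -1;
        p2m[frontend_pfn] = mfn;
        assert(mfn_to_pfn(mfn) == frontend_pfn);
        return 0;
}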
index b2c7179..bb104b4 100644 (file)
@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
        memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
 
        xen_max_p2m_pfn = PFN_DOWN(start + size);
+       for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
+               unsigned long mfn = pfn_to_mfn(pfn);
+
+               if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
+                       continue;
+               WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
+                       pfn, mfn);
 
-       for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+       }
 }
 
 static unsigned long __init xen_release_chunk(unsigned long start,
index e7d13f5..d05f2fe 100644 (file)
@@ -436,6 +436,7 @@ acpi_get_table_with_size(char *signature,
 
        return (AE_NOT_FOUND);
 }
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
 
 acpi_status
 acpi_get_table(char *signature,
index fb65915..608257a 100644 (file)
@@ -386,6 +386,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          .driver_data = board_ahci_yes_fbs },                  /* 88se9125 */
        { PCI_DEVICE(0x1b4b, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
+       { PCI_DEVICE(0x1b4b, 0x9192),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
        { PCI_DEVICE(0x1b4b, 0x91a3),
          .driver_data = board_ahci_yes_fbs },
 
index 8c78443..3790c80 100644 (file)
@@ -385,7 +385,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                goto repeat;
        }
 
-       dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */
 
@@ -446,6 +445,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        wake_up_all(&dev->power.wait_queue);
 
        if (dev->power.deferred_resume) {
+               dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
@@ -568,6 +568,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
+                       retval = 1;
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
@@ -645,7 +646,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
        }
        wake_up_all(&dev->power.wait_queue);
 
-       if (!retval)
+       if (retval >= 0)
                rpm_idle(dev, RPM_ASYNC);
 
  out:
index acda773..38aa6dd 100644 (file)
@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
                {
                        case CMD_TARGET_STATUS:
                                /* Pass it up to the upper layers... */
-                               if( ei->ScsiStatus)
-                               {
-#if 0
-                                       printk(KERN_WARNING "cciss: cmd %p "
-                                               "has SCSI Status = %x\n",
-                                               c, ei->ScsiStatus);
-#endif
-                                       cmd->result |= (ei->ScsiStatus << 1);
-                               }
-                               else {  /* scsi status is zero??? How??? */
+                               if (!ei->ScsiStatus) {
                                        
        /* Ordinarily, this case should never happen, but there is a bug
           in some released firmware revisions that allows it to happen
index 9955a53..c864add 100644 (file)
@@ -4369,8 +4369,14 @@ out_unreg_blkdev:
 out_put_disk:
        while (dr--) {
                del_timer_sync(&motor_off_timer[dr]);
-               if (disks[dr]->queue)
+               if (disks[dr]->queue) {
                        blk_cleanup_queue(disks[dr]->queue);
+                       /*
+                        * put_disk() is not paired with add_disk() and
+                        * will put the queue reference one extra time. Fix it.
+                        */
+                       disks[dr]->queue = NULL;
+               }
                put_disk(disks[dr]);
        }
        return err;
index e46f2f7..de9c800 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
@@ -20,22 +21,23 @@ struct workqueue_struct *virtblk_wq;
 
 struct virtio_blk
 {
-       spinlock_t lock;
-
        struct virtio_device *vdev;
        struct virtqueue *vq;
 
        /* The disk structure for the kernel. */
        struct gendisk *disk;
 
-       /* Request tracking. */
-       struct list_head reqs;
-
        mempool_t *pool;
 
        /* Process context for config space updates */
        struct work_struct config_work;
 
+       /* Lock for config space updates */
+       struct mutex config_lock;
+
+       /* enable config space updates */
+       bool config_enable;
+
        /* What host tells us, plus 2 for header & tailer. */
        unsigned int sg_elems;
 
@@ -48,7 +50,6 @@ struct virtio_blk
 
 struct virtblk_req
 {
-       struct list_head list;
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
@@ -62,7 +63,7 @@ static void blk_done(struct virtqueue *vq)
        unsigned int len;
        unsigned long flags;
 
-       spin_lock_irqsave(&vblk->lock, flags);
+       spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
        while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
                int error;
 
@@ -92,12 +93,11 @@ static void blk_done(struct virtqueue *vq)
                }
 
                __blk_end_request_all(vbr->req, error);
-               list_del(&vbr->list);
                mempool_free(vbr, vblk->pool);
        }
        /* In case queue is stopped waiting for more buffers. */
        blk_start_queue(vblk->disk->queue);
-       spin_unlock_irqrestore(&vblk->lock, flags);
+       spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
 }
 
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -177,7 +177,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
                return false;
        }
 
-       list_add_tail(&vbr->list, &vblk->reqs);
        return true;
 }
 
@@ -318,6 +317,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
        char cap_str_2[10], cap_str_10[10];
        u64 capacity, size;
 
+       mutex_lock(&vblk->config_lock);
+       if (!vblk->config_enable)
+               goto done;
+
        /* Host must always specify the capacity. */
        vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
                          &capacity, sizeof(capacity));
@@ -340,6 +343,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
                  cap_str_10, cap_str_2);
 
        set_capacity(vblk->disk, capacity);
+done:
+       mutex_unlock(&vblk->config_lock);
 }
 
 static void virtblk_config_changed(struct virtio_device *vdev)
@@ -383,12 +388,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
                goto out_free_index;
        }
 
-       INIT_LIST_HEAD(&vblk->reqs);
-       spin_lock_init(&vblk->lock);
        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
        sg_init_table(vblk->sg, vblk->sg_elems);
+       mutex_init(&vblk->config_lock);
        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+       vblk->config_enable = true;
 
        /* We expect one virtqueue, for output. */
        vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -410,7 +415,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
                goto out_mempool;
        }
 
-       q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+       q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
        if (!q) {
                err = -ENOMEM;
                goto out_put_disk;
@@ -542,16 +547,19 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
 
-       flush_work(&vblk->config_work);
+       /* Prevent config work handler from accessing the device. */
+       mutex_lock(&vblk->config_lock);
+       vblk->config_enable = false;
+       mutex_unlock(&vblk->config_lock);
 
-       /* Nothing should be pending. */
-       BUG_ON(!list_empty(&vblk->reqs));
+       del_gendisk(vblk->disk);
+       blk_cleanup_queue(vblk->disk->queue);
 
        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);
 
-       del_gendisk(vblk->disk);
-       blk_cleanup_queue(vblk->disk->queue);
+       flush_work(&vblk->config_work);
+
        put_disk(vblk->disk);
        mempool_destroy(vblk->pool);
        vdev->config->del_vqs(vdev);
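
The remove path above now disables config-space updates under config_lock before tearing the device down, and only flushes the work item afterwards, so the handler can never touch a half-removed device. A userspace sketch of that shutdown ordering, using a mutex-protected flag and a joined worker thread; the names are illustrative, not the driver's API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static bool config_enable = true;
static int fake_device_capacity = 100;

static void *config_changed_work(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&config_lock);
        if (config_enable)
                printf("capacity is now %d\n", fake_device_capacity);
        /* else: device is going away, do not touch it */
        pthread_mutex_unlock(&config_lock);
        return NULL;
}

int main(void)
{
        pthread_t work;

        pthread_create(&work, NULL, config_changed_work, NULL);

        /* Removal path: disable first, then flush the worker. */
        pthread_mutex_lock(&config_lock);
        config_enable = false;
        pthread_mutex_unlock(&config_lock);
        pthread_join(work, NULL);       /* analogue of flush_work() */
        return 0;
}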
index 5c0d96a..b12ffea 100644 (file)
@@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
        vdata->flags = flags;
        vdata->type = type;
        spin_lock_init(&vdata->lock);
-       vdata->refcnt = ATOMIC_INIT(1);
+       atomic_set(&vdata->refcnt, 1);
        vma->vm_private_data = vdata;
 
        vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
index 6035ab8..8ae9235 100644 (file)
  * The current exported interfaces for gathering environmental noise
  * from the devices are:
  *
+ *     void add_device_randomness(const void *buf, unsigned int size);
  *     void add_input_randomness(unsigned int type, unsigned int code,
  *                                unsigned int value);
- *     void add_interrupt_randomness(int irq);
+ *     void add_interrupt_randomness(int irq, int irq_flags);
  *     void add_disk_randomness(struct gendisk *disk);
  *
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
  *
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool.  Note that not all interrupts are good
- * sources of randomness!  For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker.  Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
  *
  * add_disk_randomness() uses what amounts to the seek time of block
  * layer request events, on a per-disk_devt basis, as input to the
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
+#include <linux/kmemcheck.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/io.h>
 
 /*
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
 /*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random.  Should be enough to do a significant reseed.
@@ -420,8 +430,10 @@ struct entropy_store {
        /* read-write data: */
        spinlock_t lock;
        unsigned add_ptr;
+       unsigned input_rotate;
        int entropy_count;
-       int input_rotate;
+       int entropy_total;
+       unsigned int initialized:1;
        __u8 last_data[EXTRACT_SIZE];
 };
 
@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_pool = {
        .pool = nonblocking_pool_data
 };
 
+static __u32 const twist_table[8] = {
+       0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+       0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
 /*
  * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_pool = {
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
-                                  int nbytes, __u8 out[64])
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+                            int nbytes, __u8 out[64])
 {
-       static __u32 const twist_table[8] = {
-               0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-               0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
        unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
        int input_rotate;
        int wordmask = r->poolinfo->poolwords - 1;
        const char *bytes = in;
        __u32 w;
-       unsigned long flags;
 
-       /* Taps are constant, so we can load them without holding r->lock.  */
        tap1 = r->poolinfo->tap1;
        tap2 = r->poolinfo->tap2;
        tap3 = r->poolinfo->tap3;
        tap4 = r->poolinfo->tap4;
        tap5 = r->poolinfo->tap5;
 
-       spin_lock_irqsave(&r->lock, flags);
-       input_rotate = r->input_rotate;
-       i = r->add_ptr;
+       smp_rmb();
+       input_rotate = ACCESS_ONCE(r->input_rotate);
+       i = ACCESS_ONCE(r->add_ptr);
 
        /* mix one byte at a time to simplify size handling and churn faster */
        while (nbytes--) {
@@ -513,19 +524,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
                input_rotate += i ? 7 : 14;
        }
 
-       r->input_rotate = input_rotate;
-       r->add_ptr = i;
+       ACCESS_ONCE(r->input_rotate) = input_rotate;
+       ACCESS_ONCE(r->add_ptr) = i;
+       smp_wmb();
 
        if (out)
                for (j = 0; j < 16; j++)
                        ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+                            int nbytes, __u8 out[64])
+{
+       unsigned long flags;
 
+       spin_lock_irqsave(&r->lock, flags);
+       __mix_pool_bytes(r, in, nbytes, out);
        spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+struct fast_pool {
+       __u32           pool[4];
+       unsigned long   last;
+       unsigned short  count;
+       unsigned char   rotate;
+       unsigned char   last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector.  It's hardcoded for a 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
 {
-       mix_pool_bytes_extract(r, in, bytes, NULL);
+       const char      *bytes = in;
+       __u32           w;
+       unsigned        i = f->count;
+       unsigned        input_rotate = f->rotate;
+
+       while (nbytes--) {
+               w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+                       f->pool[(i + 1) & 3];
+               f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+               input_rotate += (i++ & 3) ? 7 : 14;
+       }
+       f->count = i;
+       f->rotate = input_rotate;
 }
 
 /*
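
For reference, the fast_mix() loop introduced above, lightly adapted into a standalone program so it can be compiled and experimented with outside the kernel: rol32() here is a local helper standing in for the kernel's, the counters are widened to plain unsigned, and the byte pointer is unsigned char for portability.

#include <stdint.h>
#include <stdio.h>

static const uint32_t twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

static uint32_t rol32(uint32_t w, unsigned shift)
{
        return shift ? (w << shift) | (w >> (32 - shift)) : w;
}

struct fast_pool {
        uint32_t pool[4];
        unsigned count;
        unsigned rotate;
};

static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
        const unsigned char *bytes = in;
        uint32_t w;
        unsigned i = f->count;
        unsigned input_rotate = f->rotate;

        while (nbytes--) {
                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
                        f->pool[(i + 1) & 3];
                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                input_rotate += (i++ & 3) ? 7 : 14;
        }
        f->count = i;
        f->rotate = input_rotate;
}

int main(void)
{
        struct fast_pool f = { { 0 } };
        uint32_t sample[4] = { 0xdeadbeef, 42, 7, 0 };

        fast_mix(&f, sample, sizeof(sample));
        printf("%08x %08x %08x %08x\n",
               f.pool[0], f.pool[1], f.pool[2], f.pool[3]);
        return 0;
}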
@@ -533,30 +578,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-       unsigned long flags;
-       int entropy_count;
+       int entropy_count, orig;
 
        if (!nbits)
                return;
 
-       spin_lock_irqsave(&r->lock, flags);
-
        DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
-       entropy_count = r->entropy_count;
+retry:
+       entropy_count = orig = ACCESS_ONCE(r->entropy_count);
        entropy_count += nbits;
        if (entropy_count < 0) {
                DEBUG_ENT("negative entropy/overflow\n");
                entropy_count = 0;
        } else if (entropy_count > r->poolinfo->POOLBITS)
                entropy_count = r->poolinfo->POOLBITS;
-       r->entropy_count = entropy_count;
+       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+               goto retry;
+
+       if (!r->initialized && nbits > 0) {
+               r->entropy_total += nbits;
+               if (r->entropy_total > 128)
+                       r->initialized = 1;
+       }
 
        /* should we wake readers? */
        if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
                wake_up_interruptible(&random_read_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
        }
-       spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /*********************************************************************
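
credit_entropy_bits() above replaces the spinlock with a read/compute/cmpxchg retry loop that clamps the counter into [0, POOLBITS]. Below is a standalone illustration of the same lock-free pattern, a sketch using C11 atomics instead of the kernel's cmpxchg(); the constants and names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

#define POOLBITS 4096

static _Atomic int entropy_count;

static void credit_entropy_bits(int nbits)
{
        int orig, new_count;

        if (!nbits)
                return;

        orig = atomic_load(&entropy_count);
        do {
                new_count = orig + nbits;
                if (new_count < 0)
                        new_count = 0;            /* underflow clamp */
                else if (new_count > POOLBITS)
                        new_count = POOLBITS;     /* overflow clamp */
                /* On failure, orig is reloaded with the current value. */
        } while (!atomic_compare_exchange_weak(&entropy_count, &orig, new_count));
}

int main(void)
{
        credit_entropy_bits(128);
        credit_entropy_bits(-200);
        printf("entropy_count = %d\n", atomic_load(&entropy_count));
        return 0;
}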
@@ -609,6 +658,25 @@ static void set_timer_rand_state(unsigned int irq,
 }
 #endif
 
+/*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+ *
+ * None of this adds any entropy; it is meant to avoid the
+ * problem of the nonblocking pool having similar initial state
+ * across largely identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
+{
+       unsigned long time = get_cycles() ^ jiffies;
+
+       mix_pool_bytes(&input_pool, buf, size, NULL);
+       mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+       mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+       mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+}
+EXPORT_SYMBOL(add_device_randomness);
+
 static struct timer_rand_state input_timer_state;
 
 /*
@@ -624,8 +692,8 @@ static struct timer_rand_state input_timer_state;
 static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 {
        struct {
-               cycles_t cycles;
                long jiffies;
+               unsigned cycles;
                unsigned num;
        } sample;
        long delta, delta2, delta3;
@@ -639,7 +707,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
        sample.jiffies = jiffies;
        sample.cycles = get_cycles();
        sample.num = num;
-       mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+       mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
 
        /*
         * Calculate number of bits of randomness we probably added.
@@ -696,17 +764,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-       struct timer_rand_state *state;
+       struct entropy_store    *r;
+       struct fast_pool        *fast_pool = &__get_cpu_var(irq_randomness);
+       struct pt_regs          *regs = get_irq_regs();
+       unsigned long           now = jiffies;
+       __u32                   input[4], cycles = get_cycles();
+
+       input[0] = cycles ^ jiffies;
+       input[1] = irq;
+       if (regs) {
+               __u64 ip = instruction_pointer(regs);
+               input[2] = ip;
+               input[3] = ip >> 32;
+       }
 
-       state = get_timer_rand_state(irq);
+       fast_mix(fast_pool, input, sizeof(input));
 
-       if (state == NULL)
+       if ((fast_pool->count & 1023) &&
+           !time_after(now, fast_pool->last + HZ))
                return;
 
-       DEBUG_ENT("irq event %d\n", irq);
-       add_timer_randomness(state, 0x100 + irq);
+       fast_pool->last = now;
+
+       r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+       __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+       /*
+        * If we don't have a valid cycle counter, and we see
+        * back-to-back timer interrupts, then skip giving credit for
+        * any entropy.
+        */
+       if (cycles == 0) {
+               if (irq_flags & __IRQF_TIMER) {
+                       if (fast_pool->last_timer_intr)
+                               return;
+                       fast_pool->last_timer_intr = 1;
+               } else
+                       fast_pool->last_timer_intr = 0;
+       }
+       credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
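
The new add_interrupt_randomness() accumulates samples in a cheap per-CPU fast pool and only spills them into the main pool once 1024 events have arrived or roughly a second has passed. A sketch of that rate-limiting shape; spill() and now_seconds() are illustrative stand-ins, not kernel functions.

#include <stdio.h>
#include <time.h>

static unsigned event_count;
static time_t last_spill;

static time_t now_seconds(void)
{
        return time(NULL);
}

static void spill(void)
{
        printf("spilling after %u accumulated events\n", event_count);
}

static void add_event(void)
{
        time_t now = now_seconds();

        event_count++;

        /* Same shape as the kernel check: bail out unless we hit the count
         * threshold or at least a second has gone by since the last spill. */
        if ((event_count & 1023) && now <= last_spill + 1)
                return;

        last_spill = now;
        spill();
}

int main(void)
{
        int i;

        for (i = 0; i < 3000; i++)
                add_event();
        return 0;
}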
@@ -738,7 +837,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  */
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-       __u32 tmp[OUTPUT_POOL_WORDS];
+       __u32   tmp[OUTPUT_POOL_WORDS];
 
        if (r->pull && r->entropy_count < nbytes * 8 &&
            r->entropy_count < r->poolinfo->POOLBITS) {
@@ -757,7 +856,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 
                bytes = extract_entropy(r->pull, tmp, bytes,
                                        random_read_wakeup_thresh / 8, rsvd);
-               mix_pool_bytes(r, tmp, bytes);
+               mix_pool_bytes(r, tmp, bytes, NULL);
                credit_entropy_bits(r, bytes*8);
        }
 }
@@ -816,13 +915,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
        int i;
-       __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+       union {
+               __u32 w[5];
+               unsigned long l[LONGS(EXTRACT_SIZE)];
+       } hash;
+       __u32 workspace[SHA_WORKSPACE_WORDS];
        __u8 extract[64];
+       unsigned long flags;
 
        /* Generate a hash across the pool, 16 words (512 bits) at a time */
-       sha_init(hash);
+       sha_init(hash.w);
+       spin_lock_irqsave(&r->lock, flags);
        for (i = 0; i < r->poolinfo->poolwords; i += 16)
-               sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+               sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
        /*
         * We mix the hash back into the pool to prevent backtracking
@@ -833,13 +938,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
         * brute-forcing the feedback as hard as brute-forcing the
         * hash.
         */
-       mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+       __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+       spin_unlock_irqrestore(&r->lock, flags);
 
        /*
         * To avoid duplicates, we atomically extract a portion of the
         * pool while mixing, and hash one final time.
         */
-       sha_transform(hash, extract, workspace);
+       sha_transform(hash.w, extract, workspace);
        memset(extract, 0, sizeof(extract));
        memset(workspace, 0, sizeof(workspace));
 
@@ -848,19 +954,30 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
         * pattern, we fold it in half. Thus, we always feed back
         * twice as much data as we output.
         */
-       hash[0] ^= hash[3];
-       hash[1] ^= hash[4];
-       hash[2] ^= rol32(hash[2], 16);
-       memcpy(out, hash, EXTRACT_SIZE);
-       memset(hash, 0, sizeof(hash));
+       hash.w[0] ^= hash.w[3];
+       hash.w[1] ^= hash.w[4];
+       hash.w[2] ^= rol32(hash.w[2], 16);
+
+       /*
+        * If we have an architectural hardware random number
+        * generator, mix that in, too.
+        */
+       for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+               unsigned long v;
+               if (!arch_get_random_long(&v))
+                       break;
+               hash.l[i] ^= v;
+       }
+
+       memcpy(out, &hash, EXTRACT_SIZE);
+       memset(&hash, 0, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
-                              size_t nbytes, int min, int reserved)
+                                size_t nbytes, int min, int reserved)
 {
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];
-       unsigned long flags;
 
        xfer_secondary_pool(r, nbytes);
        nbytes = account(r, nbytes, min, reserved);
@@ -869,6 +986,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                extract_buf(r, tmp);
 
                if (fips_enabled) {
+                       unsigned long flags;
+
                        spin_lock_irqsave(&r->lock, flags);
                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                panic("Hardware RNG duplicated output!\n");
@@ -927,17 +1046,34 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 
 /*
  * This function is the exported kernel interface.  It returns some
- * number of good random numbers, suitable for seeding TCP sequence
- * numbers, etc.
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc.  It does not use the hw random number
+ * generator, if available; use get_random_bytes_arch() for that.
  */
 void get_random_bytes(void *buf, int nbytes)
+{
+       extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available.  The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA).  So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ */
+void get_random_bytes_arch(void *buf, int nbytes)
 {
        char *p = buf;
 
        while (nbytes) {
                unsigned long v;
                int chunk = min(nbytes, (int)sizeof(unsigned long));
-               
+
                if (!arch_get_random_long(&v))
                        break;
                
@@ -946,9 +1082,11 @@ void get_random_bytes(void *buf, int nbytes)
                nbytes -= chunk;
        }
 
-       extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+       if (nbytes)
+               extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
 }
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL(get_random_bytes_arch);
+
 
 /*
  * init_std_data - initialize pool with system data
@@ -961,18 +1099,31 @@ EXPORT_SYMBOL(get_random_bytes);
  */
 static void init_std_data(struct entropy_store *r)
 {
-       ktime_t now;
-       unsigned long flags;
+       int i;
+       ktime_t now = ktime_get_real();
+       unsigned long rv;
 
-       spin_lock_irqsave(&r->lock, flags);
        r->entropy_count = 0;
-       spin_unlock_irqrestore(&r->lock, flags);
-
-       now = ktime_get_real();
-       mix_pool_bytes(r, &now, sizeof(now));
-       mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+       r->entropy_total = 0;
+       mix_pool_bytes(r, &now, sizeof(now), NULL);
+       for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+               if (!arch_get_random_long(&rv))
+                       break;
+               mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+       }
+       mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
 }
 
+/*
+ * Note that setup_arch() may call add_device_randomness()
+ * long before we get here. This allows seeding of the pools
+ * with some platform dependent data very early in the boot
+ * process. But it limits our options here. We must use
+ * statically allocated structures that already have all
+ * initializations complete at compile time. We should also
+ * take care not to overwrite the precious per platform data
+ * we were given.
+ */
 static int rand_initialize(void)
 {
        init_std_data(&input_pool);
@@ -1107,7 +1258,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
                count -= bytes;
                p += bytes;
 
-               mix_pool_bytes(r, buf, bytes);
+               mix_pool_bytes(r, buf, bytes, NULL);
                cond_resched();
        }
 
@@ -1250,10 +1401,15 @@ static int proc_do_uuid(ctl_table *table, int write,
        uuid = table->data;
        if (!uuid) {
                uuid = tmp_uuid;
-               uuid[8] = 0;
-       }
-       if (uuid[8] == 0)
                generate_random_uuid(uuid);
+       } else {
+               static DEFINE_SPINLOCK(bootid_spinlock);
+
+               spin_lock(&bootid_spinlock);
+               if (!uuid[8])
+                       generate_random_uuid(uuid);
+               spin_unlock(&bootid_spinlock);
+       }
 
        sprintf(buf, "%pU", uuid);
 
index 153980b..b298158 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/bootmem.h>
+#include <linux/random.h>
 #include <asm/dmi.h>
 
 /*
@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
 
        dmi_table(buf, dmi_len, dmi_num, decode, NULL);
 
+       add_device_randomness(buf, dmi_len);
+
        dmi_iounmap(buf, dmi_len);
        return 0;
 }
index 51e0e2d..a330492 100644 (file)
@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
        if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
                return -ENODEV;
 
-       pcdp = ioremap(efi.hcdp, 4096);
+       pcdp = early_ioremap(efi.hcdp, 4096);
        printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
 
        if (strstr(cmdline, "console=hcdp")) {
@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
        }
 
 out:
-       iounmap(pcdp);
+       early_iounmap(pcdp, 4096);
        return rc;
 }
index 8323fc3..3f1799b 100644 (file)
@@ -1625,10 +1625,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       if (!req->flags) {
-               DRM_ERROR("no operation set\n");
+       if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
                return -EINVAL;
-       }
 
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1639,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (!crtc->funcs->cursor_set) {
-                       DRM_ERROR("crtc does not support cursor\n");
                        ret = -ENXIO;
                        goto out;
                }
@@ -1654,7 +1651,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
                if (crtc->funcs->cursor_move) {
                        ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
                } else {
-                       DRM_ERROR("crtc does not support cursor\n");
                        ret = -EFAULT;
                        goto out;
                }
@@ -1692,14 +1688,11 @@ int drm_mode_addfb(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       if ((config->min_width > r->width) || (r->width > config->max_width)) {
-               DRM_ERROR("mode new framebuffer width not within limits\n");
+       if ((config->min_width > r->width) || (r->width > config->max_width))
                return -EINVAL;
-       }
-       if ((config->min_height > r->height) || (r->height > config->max_height)) {
-               DRM_ERROR("mode new framebuffer height not within limits\n");
+
+       if ((config->min_height > r->height) || (r->height > config->max_height))
                return -EINVAL;
-       }
 
        mutex_lock(&dev->mode_config.mutex);
 
@@ -1756,7 +1749,6 @@ int drm_mode_rmfb(struct drm_device *dev,
        obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
        /* TODO check that we really get a framebuffer back. */
        if (!obj) {
-               DRM_ERROR("mode invalid framebuffer id\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1767,7 +1759,6 @@ int drm_mode_rmfb(struct drm_device *dev,
                        found = 1;
 
        if (!found) {
-               DRM_ERROR("tried to remove a fb that we didn't own\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1814,7 +1805,6 @@ int drm_mode_getfb(struct drm_device *dev,
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
-               DRM_ERROR("invalid framebuffer id\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1850,7 +1840,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
-               DRM_ERROR("invalid framebuffer id\n");
                ret = -EINVAL;
                goto out_err1;
        }
index a1ee634..0c1a99b 100644 (file)
@@ -66,6 +66,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING      (1 << 7)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -85,6 +87,9 @@ static struct edid_quirk {
        int product_id;
        u32 quirks;
 } edid_quirk_list[] = {
+       /* ASUS VW222S */
+       { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
        /* Acer AL1706 */
        { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
        /* Acer F51 */
@@ -120,6 +125,9 @@ static struct edid_quirk {
        /* Samsung SyncMaster 22[5-6]BW */
        { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
        { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* ViewSonic VA2026w */
+       { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -863,12 +871,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                                "Wrong Hsync/Vsync pulse width\n");
                return NULL;
        }
+
+       if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+               mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+               if (!mode)
+                       return NULL;
+
+               goto set_size;
+       }
+
        mode = drm_mode_create(dev);
        if (!mode)
                return NULL;
 
-       mode->type = DRM_MODE_TYPE_DRIVER;
-
        if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
                timing->pixel_clock = cpu_to_le16(1088);
 
@@ -892,8 +907,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
        drm_mode_do_interlace_quirk(mode, pt);
 
-       drm_mode_set_name(mode);
-
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
        }
@@ -903,6 +916,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
                DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
        mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
        mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
@@ -916,6 +930,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                mode->height_mm = edid->height_cm * 10;
        }
 
+       mode->type = DRM_MODE_TYPE_DRIVER;
+       drm_mode_set_name(mode);
+
        return mode;
 }
 
index d62c731..c364358 100644 (file)
@@ -1170,12 +1170,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
-static inline u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       return ring->outstanding_lazy_request = dev_priv->next_seqno;
-}
+u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                                           struct intel_ring_buffer *pipelined);
index 3e2edc6..548a400 100644 (file)
@@ -1647,6 +1647,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
        }
 }
 
+static u32
+i915_gem_get_seqno(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 seqno = dev_priv->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++dev_priv->next_seqno == 0)
+               dev_priv->next_seqno = 1;
+
+       return seqno;
+}
+
+u32
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+{
+       if (ring->outstanding_lazy_request == 0)
+               ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+
+       return ring->outstanding_lazy_request;
+}
+
 int
 i915_add_request(struct intel_ring_buffer *ring,
                 struct drm_file *file,
@@ -1658,6 +1680,7 @@ i915_add_request(struct intel_ring_buffer *ring,
        int ret;
 
        BUG_ON(request == NULL);
+       seqno = i915_gem_next_request_seqno(ring);
 
        ret = ring->add_request(ring, &seqno);
        if (ret)
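
The hunks above make request sequence numbers lazy: 0 is reserved to mean "no outstanding request", a number is only allocated on first use, and repeated calls before the request is emitted return the same value. A standalone sketch of that allocation scheme; the struct and names are illustrative, not the driver's types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 1;         /* 0 is reserved for "none" */

struct ring {
        uint32_t outstanding_lazy_request;
};

static uint32_t get_seqno(void)
{
        uint32_t seqno = next_seqno;

        if (++next_seqno == 0)          /* skip 0 on wrap-around */
                next_seqno = 1;
        return seqno;
}

static uint32_t next_request_seqno(struct ring *r)
{
        if (r->outstanding_lazy_request == 0)
                r->outstanding_lazy_request = get_seqno();
        return r->outstanding_lazy_request;
}

int main(void)
{
        struct ring r = { 0 };
        uint32_t a = next_request_seqno(&r);
        uint32_t b = next_request_seqno(&r);    /* same request, same seqno */

        assert(a == b && a != 0);
        r.outstanding_lazy_request = 0;         /* request emitted */
        printf("next request gets %u\n", next_request_seqno(&r));
        return 0;
}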
index 578ddfc..c8b5bc1 100644 (file)
@@ -2006,10 +2006,22 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
                        hotplug_en |= HDMIC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMID_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
-                       hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
-                       hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+               if (IS_G4X(dev)) {
+                       if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
+                               hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+                       if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
+                               hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+               } else if (IS_GEN4(dev)) {
+                       if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
+                               hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+                       if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
+                               hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+               } else {
+                       if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
+                               hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+                       if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
+                               hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+               }
                if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                        hotplug_en |= CRT_HOTPLUG_INT_EN;
 
index fd53122..4a5e662 100644 (file)
 #define   DPC_HOTPLUG_INT_STATUS               (1 << 28)
 #define   HDMID_HOTPLUG_INT_STATUS             (1 << 27)
 #define   DPD_HOTPLUG_INT_STATUS               (1 << 27)
+/* CRT/TV common between gen3+ */
 #define   CRT_HOTPLUG_INT_STATUS               (1 << 11)
 #define   TV_HOTPLUG_INT_STATUS                        (1 << 10)
 #define   CRT_HOTPLUG_MONITOR_MASK             (3 << 8)
 #define   CRT_HOTPLUG_MONITOR_COLOR            (3 << 8)
 #define   CRT_HOTPLUG_MONITOR_MONO             (2 << 8)
 #define   CRT_HOTPLUG_MONITOR_NONE             (0 << 8)
-#define   SDVOC_HOTPLUG_INT_STATUS             (1 << 7)
-#define   SDVOB_HOTPLUG_INT_STATUS             (1 << 6)
+/* SDVO is different across gen3/4 */
+#define   SDVOC_HOTPLUG_INT_STATUS_G4X         (1 << 3)
+#define   SDVOB_HOTPLUG_INT_STATUS_G4X         (1 << 2)
+#define   SDVOC_HOTPLUG_INT_STATUS_I965                (3 << 4)
+#define   SDVOB_HOTPLUG_INT_STATUS_I965                (3 << 2)
+#define   SDVOC_HOTPLUG_INT_STATUS_I915                (1 << 7)
+#define   SDVOB_HOTPLUG_INT_STATUS_I915                (1 << 6)
 
 /* SDVO port control */
 #define SDVOB                  0x61140
index cc75c4b..6c3fb44 100644 (file)
@@ -1072,8 +1072,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, int reg)
 {
        u32 val = I915_READ(reg);
-       WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
-            "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+       WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+            "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
 }
 
@@ -1089,13 +1089,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
 
        reg = PCH_ADPA;
        val = I915_READ(reg);
-       WARN(adpa_pipe_enabled(dev_priv, val, pipe),
+       WARN(adpa_pipe_enabled(dev_priv, pipe, val),
             "PCH VGA enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
 
        reg = PCH_LVDS;
        val = I915_READ(reg);
-       WARN(lvds_pipe_enabled(dev_priv, val, pipe),
+       WARN(lvds_pipe_enabled(dev_priv, pipe, val),
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
 
@@ -1437,7 +1437,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
                             enum pipe pipe, int reg)
 {
        u32 val = I915_READ(reg);
-       if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
+       if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
                DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
                              reg, pipe);
                I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1459,12 +1459,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
 
        reg = PCH_ADPA;
        val = I915_READ(reg);
-       if (adpa_pipe_enabled(dev_priv, val, pipe))
+       if (adpa_pipe_enabled(dev_priv, pipe, val))
                I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
 
        reg = PCH_LVDS;
        val = I915_READ(reg);
-       if (lvds_pipe_enabled(dev_priv, val, pipe)) {
+       if (lvds_pipe_enabled(dev_priv, pipe, val)) {
                DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
                I915_WRITE(reg, val & ~LVDS_PORT_EN);
                POSTING_READ(reg);
@@ -2852,16 +2852,14 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
 
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
-       struct drm_i915_gem_object *obj;
-       struct drm_i915_private *dev_priv;
+       struct drm_device *dev = crtc->dev;
 
        if (crtc->fb == NULL)
                return;
 
-       obj = to_intel_framebuffer(crtc->fb)->obj;
-       dev_priv = crtc->dev->dev_private;
-       wait_event(dev_priv->pending_flip_queue,
-                  atomic_read(&obj->pending_flip) == 0);
+       mutex_lock(&dev->struct_mutex);
+       intel_finish_fb(crtc->fb);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -3322,23 +3320,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        struct drm_device *dev = crtc->dev;
 
-       /* Flush any pending WAITs before we disable the pipe. Note that
-        * we need to drop the struct_mutex in order to acquire it again
-        * during the lowlevel dpms routines around a couple of the
-        * operations. It does not look trivial nor desirable to move
-        * that locking higher. So instead we leave a window for the
-        * submission of further commands on the fb before we can actually
-        * disable it. This race with userspace exists anyway, and we can
-        * only rely on the pipe being disabled by userspace after it
-        * receives the hotplug notification and has flushed any pending
-        * batches.
-        */
-       if (crtc->fb) {
-               mutex_lock(&dev->struct_mutex);
-               intel_finish_fb(crtc->fb);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
        if (crtc->fb) {
@@ -4748,17 +4729,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                        continue;
                }
 
-               if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-                       /* Use VBT settings if we have an eDP panel */
-                       unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
-                       if (edp_bpc < display_bpc) {
-                               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
-                               display_bpc = edp_bpc;
-                       }
-                       continue;
-               }
-
                /* Not one of the known troublemakers, check the EDID */
                list_for_each_entry(connector, &dev->mode_config.connector_list,
                                    head) {
index d4c4937..c8ecaab 100644 (file)
@@ -708,8 +708,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
 
-       for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-               for (clock = 0; clock <= max_clock; clock++) {
+       for (clock = 0; clock <= max_clock; clock++) {
+               for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
                        if (intel_dp_link_required(mode->clock, bpp)
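
Swapping the nesting makes the link clock the outer loop, so the first (clock, lane_count) pair with enough bandwidth is the one with the lowest link clock, raising the lane count only as far as that clock requires. A self-contained sketch of the selection; the 162/270 MHz table and the 8-payload-bits-per-symbol capacity formula are simplifying assumptions for illustration, not the driver's exact helpers:

#include <stdio.h>

static const int link_clock_mhz[] = { 162, 270 };   /* assumed rate table, HBR2 omitted */

/* Payload capacity in kbit/s: one 8b/10b symbol (8 payload bits) per link
 * clock per lane. Only internal consistency matters for the comparison. */
static long link_capacity_kbps(int clock_mhz, int lanes)
{
	return (long)clock_mhz * 1000 * 8 * lanes;
}

static long mode_requirement_kbps(int pixel_clock_khz, int bpp)
{
	return (long)pixel_clock_khz * bpp;
}

int main(void)
{
	const int pixel_clock_khz = 148500;   /* e.g. 1080p60 */
	const int bpp = 24;
	const int max_lanes = 4;

	for (unsigned c = 0; c < sizeof(link_clock_mhz) / sizeof(link_clock_mhz[0]); c++) {
		for (int lanes = 1; lanes <= max_lanes; lanes <<= 1) {
			if (mode_requirement_kbps(pixel_clock_khz, bpp) <=
			    link_capacity_kbps(link_clock_mhz[c], lanes)) {
				/* Prints 162 MHz x4; the old nesting (lanes outer)
				 * would have settled on 270 MHz x2 for this mode. */
				printf("picked %d MHz x%d lanes\n",
				       link_clock_mhz[c], lanes);
				return 0;
			}
		}
	}
	printf("mode does not fit\n");
	return 0;
}
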
@@ -1152,10 +1152,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
        pp = ironlake_get_pp_control(dev_priv);
-       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       /* We need to switch off panel power _and_ force vdd, for otherwise some
+        * panels get very unhappy and cease to work. */
+       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
 
+       intel_dp->want_panel_vdd = false;
+
        ironlake_wait_panel_off(intel_dp);
 }
 
@@ -1265,11 +1269,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
         * ensure that we have vdd while we switch off the panel. */
        ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
-       ironlake_edp_panel_off(intel_dp);
-
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       ironlake_edp_panel_off(intel_dp);
        intel_dp_link_down(intel_dp);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1304,11 +1306,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
                /* Switching the panel off requires vdd. */
                ironlake_edp_panel_vdd_on(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
-               ironlake_edp_panel_off(intel_dp);
-
                intel_dp_sink_dpms(intel_dp, mode);
+               ironlake_edp_panel_off(intel_dp);
                intel_dp_link_down(intel_dp);
-               ironlake_edp_panel_vdd_off(intel_dp, false);
 
                if (is_cpu_edp(intel_dp))
                        ironlake_edp_pll_off(encoder);
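
Both shutdown paths are reordered the same way: backlight off, sink DPMS, then panel power off, then link down. The reworked ironlake_edp_panel_off() above clears EDP_FORCE_VDD together with the power bits in a single PCH_PP_CONTROL write and resets want_panel_vdd itself, which is why the trailing ironlake_edp_panel_vdd_off() calls disappear. A minimal sketch of that combined register update; the bit names follow the diff, their positions and the register I/O here are faked:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit names borrowed from the diff; the positions are placeholders. */
#define POWER_TARGET_ON   (1u << 0)
#define PANEL_POWER_RESET (1u << 1)
#define EDP_BLC_ENABLE    (1u << 2)
#define EDP_FORCE_VDD     (1u << 3)

static uint32_t pp_control = POWER_TARGET_ON | PANEL_POWER_RESET |
			     EDP_BLC_ENABLE | EDP_FORCE_VDD;
static bool want_panel_vdd = true;

/* Panel power and the forced-VDD override are dropped in one write, and the
 * bookkeeping flag is cleared here rather than by a separate vdd_off call. */
static void panel_off(void)
{
	pp_control &= ~(POWER_TARGET_ON | EDP_FORCE_VDD |
			PANEL_POWER_RESET | EDP_BLC_ENABLE);
	want_panel_vdd = false;
}

int main(void)
{
	panel_off();
	printf("PP_CONTROL now 0x%08x, want_panel_vdd=%d\n", pp_control, want_panel_vdd);
	return 0;
}
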
index ceec71b..f07bde2 100644 (file)
@@ -752,7 +752,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
                .ident = "Hewlett-Packard t5745",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
                },
        },
        {
@@ -760,7 +760,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
                .ident = "Hewlett-Packard st5747",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
                },
        },
        {
index f6613dc..19085c0 100644 (file)
@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring)
        return space;
 }
 
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 seqno;
-
-       seqno = dev_priv->next_seqno;
-
-       /* reserve 0 for non-seqno */
-       if (++dev_priv->next_seqno == 0)
-               dev_priv->next_seqno = 1;
-
-       return seqno;
-}
-
 static int
 render_ring_flush(struct intel_ring_buffer *ring,
                  u32   invalidate_domains,
@@ -277,8 +263,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);
 
-       /* Initialize the ring. */
-       I915_WRITE_START(ring, obj->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
        /* G45 ring initialization fails to reset head to zero */
@@ -304,14 +288,19 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                }
        }
 
+       /* Initialize the ring. This must happen _after_ we've cleared the ring
+        * registers with the above sequence (the readback of the HEAD registers
+        * also enforces ordering), otherwise the hw might lose the new ring
+        * register values. */
+       I915_WRITE_START(ring, obj->gtt_offset);
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-           I915_READ_START(ring) != obj->gtt_offset ||
-           (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+       if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+                    I915_READ_START(ring) == obj->gtt_offset &&
+                    (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
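
Two changes here: the ring start address is written only after the head/tail reset sequence, so the hardware cannot latch stale values, and the single "is the ring alive" read becomes a poll with a 50 ms budget because the registers may take a moment to reflect the new state. The kernel uses its own wait_for() macro for this; a generic userspace poll-with-timeout sketch of the same idea:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll cond() until it returns true or timeout_ms elapses.
 * Returns true on success, false on timeout. */
static bool poll_until(bool (*cond)(void), long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms >= timeout_ms)
			return false;
		/* A real implementation would sleep or cpu_relax() between reads. */
	}
}

/* Stand-in for: CTL has RING_VALID, START matches, HEAD is back to zero. */
static bool ring_looks_alive(void)
{
	return true;
}

int main(void)
{
	if (!poll_until(ring_looks_alive, 50))
		fprintf(stderr, "ring initialization failed\n");
	return 0;
}
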
@@ -488,7 +477,7 @@ gen6_add_request(struct intel_ring_buffer *ring,
        mbox1_reg = ring->signal_mbox[0];
        mbox2_reg = ring->signal_mbox[1];
 
-       *seqno = i915_gem_get_seqno(ring->dev);
+       *seqno = i915_gem_next_request_seqno(ring);
 
        update_mboxes(ring, *seqno, mbox1_reg);
        update_mboxes(ring, *seqno, mbox2_reg);
@@ -586,8 +575,7 @@ static int
 pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
 {
-       struct drm_device *dev = ring->dev;
-       u32 seqno = i915_gem_get_seqno(dev);
+       u32 seqno = i915_gem_next_request_seqno(ring);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;
@@ -638,8 +626,7 @@ static int
 render_ring_add_request(struct intel_ring_buffer *ring,
                        u32 *result)
 {
-       struct drm_device *dev = ring->dev;
-       u32 seqno = i915_gem_get_seqno(dev);
+       u32 seqno = i915_gem_next_request_seqno(ring);
        int ret;
 
        ret = intel_ring_begin(ring, 4);
@@ -813,7 +800,7 @@ ring_add_request(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       seqno = i915_gem_get_seqno(ring->dev);
+       seqno = i915_gem_next_request_seqno(ring);
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
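
Every add_request path stops calling the file-local i915_gem_get_seqno() (deleted above) and asks i915_gem_next_request_seqno(ring) instead, so sequence numbers come from one allocator and the "never hand out 0" rule lives in a single place. A toy allocator showing the wrap-around handling the removed helper performed; this is only an illustration, not the driver's i915_gem_next_request_seqno():

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 1;   /* 0 is reserved for "no seqno" */

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	/* Skip 0 on wrap-around so it keeps meaning "no request". */
	if (++next_seqno == 0)
		next_seqno = 1;
	return seqno;
}

int main(void)
{
	next_seqno = 0xffffffffu;                            /* force a wrap for the demo */
	printf("%u %u %u\n", get_seqno(), get_seqno(), get_seqno());
	return 0;                                            /* prints: 4294967295 1 2   */
}
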
index a8d8ee5..bbf247c 100644 (file)
@@ -2514,6 +2514,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        struct intel_sdvo *intel_sdvo;
+       u32 hotplug_mask;
        int i;
 
        intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
@@ -2544,10 +2545,17 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
                }
        }
 
-       if (IS_SDVOB(sdvo_reg))
-               dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
-       else
-               dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+       hotplug_mask = 0;
+       if (IS_G4X(dev)) {
+               hotplug_mask = IS_SDVOB(sdvo_reg) ?
+                       SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
+       } else if (IS_GEN4(dev)) {
+               hotplug_mask = IS_SDVOB(sdvo_reg) ?
+                       SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
+       } else {
+               hotplug_mask = IS_SDVOB(sdvo_reg) ?
+                       SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
+       }
 
        drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
@@ -2555,14 +2563,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
                goto err;
 
-       /* Set up hotplug command - note paranoia about contents of reply.
-        * We assume that the hardware is in a sane state, and only touch
-        * the bits we think we understand.
-        */
-       intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
-                            &intel_sdvo->hotplug_active, 2);
-       intel_sdvo->hotplug_active[0] &= ~0x3;
-
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
@@ -2570,6 +2570,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
                goto err;
        }
 
+       /* Only enable the hotplug irq if we need it, to work around noisy
+        * hotplug lines.
+        */
+       if (intel_sdvo->hotplug_active[0])
+               dev_priv->hotplug_supported_mask |= hotplug_mask;
+
        intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
        /* Set the input timing to the screen. Assume always input 0. */
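
The hotplug interrupt bit now depends on both the GPU generation (G4X, gen4, earlier) and on whether the port is SDVOB or SDVOC, and it is only ORed into hotplug_supported_mask once the device reports a non-zero hotplug_active, to keep noisy hotplug lines quiet. The selection is effectively a small table lookup; a hedged sketch with placeholder bit values (only the *_G4X/_I965/_I915 names come from the diff):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder values; the real bits live in i915_reg.h. */
#define SDVOB_HOTPLUG_INT_STATUS_G4X   (1u << 0)
#define SDVOC_HOTPLUG_INT_STATUS_G4X   (1u << 1)
#define SDVOB_HOTPLUG_INT_STATUS_I965  (1u << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I965  (1u << 3)
#define SDVOB_HOTPLUG_INT_STATUS_I915  (1u << 4)
#define SDVOC_HOTPLUG_INT_STATUS_I915  (1u << 5)

enum gpu_gen { GEN_G4X, GEN_4, GEN_OLDER };

static uint32_t sdvo_hotplug_mask(enum gpu_gen gen, bool is_sdvob)
{
	/* Column 0 is SDVOC, column 1 is SDVOB. */
	static const uint32_t table[][2] = {
		[GEN_G4X]   = { SDVOC_HOTPLUG_INT_STATUS_G4X,  SDVOB_HOTPLUG_INT_STATUS_G4X  },
		[GEN_4]     = { SDVOC_HOTPLUG_INT_STATUS_I965, SDVOB_HOTPLUG_INT_STATUS_I965 },
		[GEN_OLDER] = { SDVOC_HOTPLUG_INT_STATUS_I915, SDVOB_HOTPLUG_INT_STATUS_I915 },
	};

	return table[gen][is_sdvob];
}

int main(void)
{
	return sdvo_hotplug_mask(GEN_G4X, true) == SDVOB_HOTPLUG_INT_STATUS_G4X ? 0 : 1;
}
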
index b12fd2c..6adef06 100644 (file)
@@ -381,7 +381,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        args->size = args->pitch * args->height;
        args->size = roundup(args->size, PAGE_SIZE);
 
-       ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+       ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
        if (ret)
                return ret;
 
index cb006a7..3002d82 100644 (file)
@@ -472,7 +472,7 @@ static int
 nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       const u32 data = (y << 16) | x;
+       const u32 data = (y << 16) | (x & 0xffff);
 
        nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
        nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
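
Masking x to 16 bits matters because the cursor coordinates are signed: when the cursor hangs off the left edge, x is negative, and the sign extension of the low half would otherwise overwrite the y field packed in the upper 16 bits. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = -4, y = 100;
	uint32_t bad  = ((uint32_t)y << 16) | (uint32_t)x;            /* sign bits clobber y */
	uint32_t good = ((uint32_t)y << 16) | ((uint32_t)x & 0xffff); /* y stays intact      */

	printf("bad  = 0x%08x (y field = %u)\n", bad,  bad  >> 16);   /* y field = 65535 */
	printf("good = 0x%08x (y field = %u)\n", good, good >> 16);   /* y field = 100   */
	return 0;
}
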
index 1b50ad8..4760466 100644 (file)
 #define ATOM_LCD_SELFTEST_START                                                                        (ATOM_DISABLE+5)
 #define ATOM_LCD_SELFTEST_STOP                                                                 (ATOM_ENABLE+5)
 #define ATOM_ENCODER_INIT                                        (ATOM_DISABLE+7)
+#define ATOM_INIT                                                (ATOM_DISABLE+7)
 #define ATOM_GET_STATUS                         (ATOM_DISABLE+8)
 
 #define ATOM_BLANKING         1
@@ -251,25 +252,25 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT SetEngineClock;                         //Function Table,directly used by various SW components,latest version 1.1
   USHORT SetMemoryClock;                         //Function Table,directly used by various SW components,latest version 1.1
   USHORT SetPixelClock;                          //Function Table,directly used by various SW components,latest version 1.2  
-  USHORT DynamicClockGating;                     //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT EnableDispPowerGating;                  //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
   USHORT ResetMemoryDLL;                         //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
   USHORT ResetMemoryDevice;                      //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
-  USHORT MemoryPLLInit;
-  USHORT AdjustDisplayPll;                                                                                             //only used by Bios
+  USHORT MemoryPLLInit;                          //Atomic Table,  used only by Bios
+  USHORT AdjustDisplayPll;                                                                                      //Atomic Table,  used by various SW components.
   USHORT AdjustMemoryController;                 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock                
   USHORT EnableASIC_StaticPwrMgt;                //Atomic Table,  only used by Bios
   USHORT ASIC_StaticPwrMgtStatusChange;          //Obsolete ,     only used by Bios   
   USHORT DAC_LoadDetection;                      //Atomic Table,  directly used by various SW components,latest version 1.2  
   USHORT LVTMAEncoderControl;                    //Atomic Table,directly used by various SW components,latest version 1.3
-  USHORT LCD1OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT HW_Misc_Operation;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
   USHORT DAC1EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1  
   USHORT DAC2EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1 
   USHORT DVOOutputControl;                       //Atomic Table,  directly used by various SW components,latest version 1.1 
   USHORT CV1OutputControl;                       //Atomic Table,  Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead 
-  USHORT GetConditionalGoldenSetting;            //only used by Bios
+  USHORT GetConditionalGoldenSetting;            //Only used by Bios
   USHORT TVEncoderControl;                       //Function Table,directly used by various SW components,latest version 1.1
-  USHORT TMDSAEncoderControl;                    //Atomic Table,  directly used by various SW components,latest version 1.3
-  USHORT LVDSEncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.3
+  USHORT PatchMCSetting;                         //only used by BIOS
+  USHORT MC_SEQ_Control;                         //only used by BIOS
   USHORT TV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead
   USHORT EnableScaler;                           //Atomic Table,  used only by Bios
   USHORT BlankCRTC;                              //Atomic Table,  directly used by various SW components,latest version 1.1 
@@ -282,7 +283,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT SetCRTC_Replication;                    //Atomic Table,  used only by Bios
   USHORT SelectCRTC_Source;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
   USHORT EnableGraphSurfaces;                    //Atomic Table,  used only by Bios
-  USHORT UpdateCRTC_DoubleBufferRegisters;
+  USHORT UpdateCRTC_DoubleBufferRegisters;                      //Atomic Table,  used only by Bios
   USHORT LUT_AutoFill;                           //Atomic Table,  only used by Bios
   USHORT EnableHW_IconCursor;                    //Atomic Table,  only used by Bios
   USHORT GetMemoryClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
@@ -308,27 +309,36 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT SetVoltage;                             //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
   USHORT DAC1OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
   USHORT DAC2OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
-  USHORT SetupHWAssistedI2CStatus;               //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
+  USHORT ComputeMemoryClockParam;                //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
   USHORT ClockSource;                            //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
   USHORT MemoryDeviceInit;                       //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
-  USHORT EnableYUV;                              //Atomic Table,  indirectly used by various SW components,called from EnableVGARender
+  USHORT GetDispObjectInfo;                      //Atomic Table,  indirectly used by various SW components,called from EnableVGARender
   USHORT DIG1EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
   USHORT DIG2EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
   USHORT DIG1TransmitterControl;                 //Atomic Table,directly used by various SW components,latest version 1.1
   USHORT DIG2TransmitterControl;                      //Atomic Table,directly used by various SW components,latest version 1.1 
   USHORT ProcessAuxChannelTransaction;                                  //Function Table,only used by Bios
   USHORT DPEncoderService;                                                                                      //Function Table,only used by Bios
+  USHORT GetVoltageInfo;                         //Function Table,only used by Bios since SI
 }ATOM_MASTER_LIST_OF_COMMAND_TABLES;   
 
 // For backward compatible 
 #define ReadEDIDFromHWAssistedI2C                ProcessI2cChannelTransaction
-#define UNIPHYTransmitterControl                                                    DIG1TransmitterControl
-#define LVTMATransmitterControl                                                             DIG2TransmitterControl
+#define DPTranslatorControl                      DIG2EncoderControl
+#define UNIPHYTransmitterControl                            DIG1TransmitterControl
+#define LVTMATransmitterControl                                     DIG2TransmitterControl
 #define SetCRTC_DPM_State                        GetConditionalGoldenSetting
 #define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
 #define HPDInterruptService                      ReadHWAssistedI2CStatus
 #define EnableVGA_Access                         GetSCLKOverMCLKRatio
-#define GetDispObjectInfo                        EnableYUV 
+#define EnableYUV                                GetDispObjectInfo                         
+#define DynamicClockGating                       EnableDispPowerGating
+#define SetupHWAssistedI2CStatus                 ComputeMemoryClockParam
+
+#define TMDSAEncoderControl                      PatchMCSetting
+#define LVDSEncoderControl                       MC_SEQ_Control
+#define LCD1OutputControl                        HW_Misc_Operation
+
 
 typedef struct _ATOM_MASTER_COMMAND_TABLE
 {
@@ -495,6 +505,34 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
 // ucInputFlag
 #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
 
+// use for ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+  union
+  {
+    ULONG  ulClock;         
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output:UPPER_WORD=FB_DIV_INTEGER,  LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+  };
+  UCHAR   ucDllSpeed;                         //Output 
+  UCHAR   ucPostDiv;                          //Output
+  union{
+    UCHAR   ucInputFlag;                      //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+    UCHAR   ucPllCntlFlag;                    //Output: 
+  };
+  UCHAR   ucBWCntl;                       
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN          0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK            0x03 
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL            0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE               0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE             0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL            0x04
+
 typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
 {
   ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -561,6 +599,16 @@ typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
 }DYNAMIC_CLOCK_GATING_PARAMETERS;
 #define  DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
 
+/****************************************************************************/ 
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/ 
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 
+{
+  UCHAR ucDispPipeId;                 // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
+
 /****************************************************************************/ 
 // Structure used by EnableASIC_StaticPwrMgtTable.ctb
 /****************************************************************************/ 
@@ -807,6 +855,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
 #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ                0x00
 #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ                0x01
 #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ                0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ                0x03
 #define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL                                       0x70
 #define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER                                      0x00
 #define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER                                      0x10
@@ -814,6 +863,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
 #define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER                                      0x30
 #define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER                                      0x40
 #define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER                                      0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER                                      0x60
 
 typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
 {
@@ -1171,6 +1221,106 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
 #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3                0x80    //EF
 
 
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReservd1:1;
+  UCHAR ucHPDSel:3;
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucReserved:1;
+#else
+  UCHAR ucReserved:1;
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucHPDSel:3;
+  UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+  USHORT usSymClock;                   // Encoder Clock in 10kHz,(DP mode)= linkclock/10, (TMDS/LVDS/HDMI)= pixel clock,  (HDMI deep color), =pixel clock * deep_color_ratio
+  UCHAR  ucPhyId;                   // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
+  UCHAR  ucAction;                                 // define as ATOM_TRANSMITER_ACTION_xxx
+  UCHAR  ucLaneNum;                 // indicate lane number 1-8
+  UCHAR  ucConnObjId;               // Connector Object Id defined in ObjectId.h
+  UCHAR  ucDigMode;                 // indicate DIG mode
+  union{
+  ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR  ucDigEncoderSel;           // indicate DIG front end encoder 
+  UCHAR  ucDPLaneSet;
+  UCHAR  ucReserved;
+  UCHAR  ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA                                 0  
+#define ATOM_PHY_ID_UNIPHYB                                 1
+#define ATOM_PHY_ID_UNIPHYC                                 2
+#define ATOM_PHY_ID_UNIPHYD                                 3
+#define ATOM_PHY_ID_UNIPHYE                                 4
+#define ATOM_PHY_ID_UNIPHYF                                 5
+#define ATOM_PHY_ID_UNIPHYG                                 6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL                       0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL                       0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL                       0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL                       0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL                       0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL                       0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL                       0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP                      0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS                    1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI                     2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI                    3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO                    4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST                  5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V                               0x00
+#define DP_LANE_SET__0DB_0_6V                               0x01
+#define DP_LANE_SET__0DB_0_8V                               0x02
+#define DP_LANE_SET__0DB_1_2V                               0x03
+#define DP_LANE_SET__3_5DB_0_4V                             0x08  
+#define DP_LANE_SET__3_5DB_0_6V                             0x09
+#define DP_LANE_SET__3_5DB_0_8V                             0x0a
+#define DP_LANE_SET__6DB_0_4V                               0x10
+#define DP_LANE_SET__6DB_0_6V                               0x11
+#define DP_LANE_SET__9_5DB_0_4V                             0x18  
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT                                      0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK             0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT                0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL                               0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL                               0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL                               0x08   
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT           0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK                          0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT                     0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL                                  0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL                                      0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL                                      0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL                                      0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL                                      0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL                                      0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL                                      0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5            DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+
+
 /****************************************************************************/ 
 // Structures used by ExternalEncoderControlTable V1.3
 // ASIC Families: Evergreen, Llano, NI
@@ -1793,6 +1943,7 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
 #define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
 #define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
 #define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL            ATOM_PPLL_SS_TYPE_V3_DCPLL
 #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
 #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
 #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
@@ -2030,12 +2181,77 @@ typedef struct  _SET_VOLTAGE_PARAMETERS_V2
   USHORT   usVoltageLevel;              // real voltage level
 }SET_VOLTAGE_PARAMETERS_V2;
 
+
+typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Indicate action: Set voltage level
+  USHORT   usVoltageLevel;              // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC                    1
+#define VOLTAGE_TYPE_MVDDC                   2
+#define VOLTAGE_TYPE_MVDDQ                   3
+#define VOLTAGE_TYPE_VDDCI                   4
+
+//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
+#define ATOM_SET_VOLTAGE                     0        //Set voltage Level
+#define ATOM_INIT_VOLTAGE_REGULATOR          3        //Init Regulator
+#define ATOM_SET_VOLTAGE_PHASE               4        //Set Vregulator Phase
+#define ATOM_GET_MAX_VOLTAGE                 6        //Get Max Voltage, not used in SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL               6        //Get Voltage level from virtual voltage ID
+
+// define virtual voltage id in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1             0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2             0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3             0xff04
+
 typedef struct _SET_VOLTAGE_PS_ALLOCATION
 {
   SET_VOLTAGE_PARAMETERS sASICSetVoltage;
   WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
 }SET_VOLTAGE_PS_ALLOCATION;
 
+// Newly added from SI for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id 
+  ULONG    ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct  _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  ULONG    ulVotlageGpioState;
+  ULONG    ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct  _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  USHORT   usVoltageLevel;
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define        ATOM_GET_VOLTAGE_VID                0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
+// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
+#define        ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+
+// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
+#define        ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+// undefined power state
+#define        ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define        ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
+
 /****************************************************************************/ 
 // Structures used by TVEncoderControlTable
 /****************************************************************************/ 
@@ -2065,9 +2281,9 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
   USHORT        MultimediaConfigInfo;     // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios
   USHORT        StandardVESA_Timing;      // Only used by Bios
   USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
-  USHORT        DAC_Info;                 // Will be obsolete from R600
+  USHORT        PaletteData;              // Only used by BIOS
   USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info 
-  USHORT        TMDS_Info;                // Will be obsolete from R600
+  USHORT        DIGTransmitterInfo;       // Internal used by VBIOS only version 3.1
   USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
   USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
   USHORT        GPIO_I2C_Info;            // Shared by various SW components,latest version 1.2 will be used from R600           
@@ -2096,15 +2312,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
        USHORT                          PowerSourceInfo;                                        // Shared by various SW components, latest versoin 1.1
 }ATOM_MASTER_LIST_OF_DATA_TABLES;
 
-// For backward compatible 
-#define LVDS_Info                LCD_Info
-
 typedef struct _ATOM_MASTER_DATA_TABLE
 { 
   ATOM_COMMON_TABLE_HEADER sHeader;  
   ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
 }ATOM_MASTER_DATA_TABLE;
 
+// For backward compatible 
+#define LVDS_Info                LCD_Info
+#define DAC_Info                 PaletteData
+#define TMDS_Info                DIGTransmitterInfo
 
 /****************************************************************************/ 
 // Structure used in MultimediaCapabilityInfoTable
@@ -2171,7 +2388,9 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
 typedef struct _ATOM_FIRMWARE_CAPABILITY
 {
 #if ATOM_BIG_ENDIAN
-  USHORT Reserved:3;
+  USHORT Reserved:1;
+  USHORT SCL2Redefined:1;
+  USHORT PostWithoutModeSet:1;
   USHORT HyperMemory_Size:4;
   USHORT HyperMemory_Support:1;
   USHORT PPMode_Assigned:1;
@@ -2193,7 +2412,9 @@ typedef struct _ATOM_FIRMWARE_CAPABILITY
   USHORT PPMode_Assigned:1;
   USHORT HyperMemory_Support:1;
   USHORT HyperMemory_Size:4;
-  USHORT Reserved:3;
+  USHORT PostWithoutModeSet:1;
+  USHORT SCL2Redefined:1;
+  USHORT Reserved:1;
 #endif
 }ATOM_FIRMWARE_CAPABILITY;
 
@@ -2418,7 +2639,8 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
   USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
   ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
   ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
-  ULONG                           ulReserved5;                //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  UCHAR                           ucRemoteDisplayConfig;
+  UCHAR                           ucReserved5[3];             //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
   ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
   ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
   USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
@@ -2438,6 +2660,11 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
 
 #define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
 
+
+// definition of ucRemoteDisplayConfig
+#define REMOTE_DISPLAY_DISABLE                   0x00
+#define REMOTE_DISPLAY_ENABLE                    0x01
+
 /****************************************************************************/ 
 // Structures used in IntegratedSystemInfoTable
 /****************************************************************************/ 
@@ -2660,8 +2887,9 @@ usMinDownStreamHTLinkWidth:  same as above.
 #define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
 #define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
 #define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI         5
 
-#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH    // this deff reflects max defined CPU code
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI    // this define reflects the max defined CPU code
 
 #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
 #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
@@ -2753,6 +2981,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
 #define ASIC_INT_DIG4_ENCODER_ID                                                                                                       0x0b
 #define ASIC_INT_DIG5_ENCODER_ID                                                                                                       0x0c
 #define ASIC_INT_DIG6_ENCODER_ID                                                                                                       0x0d
+#define ASIC_INT_DIG7_ENCODER_ID                                                                                                       0x0e
 
 //define Encoder attribute
 #define ATOM_ANALOG_ENCODER                                                                                                                            0
@@ -3226,15 +3455,23 @@ typedef struct _ATOM_LCD_INFO_V13
 
   UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
   UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
-  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
   UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
 
   UCHAR               ucOffDelay_in4Ms;
   UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
   UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
   UCHAR               ucReserved1;
 
-  ULONG               ulReserved[4];
+  UCHAR               ucDPCD_eDP_CONFIGURATION_CAP;     // dpcd 0dh
+  UCHAR               ucDPCD_MAX_LINK_RATE;             // dpcd 01h
+  UCHAR               ucDPCD_MAX_LANE_COUNT;            // dpcd 02h
+  UCHAR               ucDPCD_MAX_DOWNSPREAD;            // dpcd 03h
+
+  USHORT              usMaxPclkFreqInSingleLink;        // Max PixelClock frequency in single link mode. 
+  UCHAR               uceDPToLVDSRxId;
+  UCHAR               ucLcdReservd;
+  ULONG               ulReserved[2];
 }ATOM_LCD_INFO_V13;  
 
 #define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
@@ -3273,6 +3510,11 @@ typedef struct _ATOM_LCD_INFO_V13
 //Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP.
 #define        LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP no change comparing to previous version
 
+//uceDPToLVDSRxId
+#define eDP_TO_LVDS_RX_DISABLE                  0x00       // no eDP->LVDS translator chip 
+#define eDP_TO_LVDS_COMMON_ID                   0x01       // common eDP->LVDS translator chip without AMD SW init
+#define eDP_TO_LVDS_RT_ID                       0x02       // RT translator which requires AMD SW init
+
 typedef struct  _ATOM_PATCH_RECORD_MODE
 {
   UCHAR     ucRecordType;
@@ -3317,6 +3559,7 @@ typedef struct  _ATOM_PANEL_RESOLUTION_PATCH_RECORD
 #define LCD_CAP_RECORD_TYPE                   3
 #define LCD_FAKE_EDID_PATCH_RECORD_TYPE       4
 #define LCD_PANEL_RESOLUTION_RECORD_TYPE      5
+#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE     6
 #define ATOM_RECORD_END_TYPE                  0xFF
 
 /****************************Spread Spectrum Info Table Definitions **********************/
@@ -3528,6 +3771,7 @@ else      //Non VGA case
 
 CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/
 
+/***********************************************************************************/  
 #define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO                      1
 
 typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
@@ -3818,13 +4062,17 @@ typedef struct _EXT_DISPLAY_PATH
     ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
     ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
   };
-  UCHAR   ucReserved;
-  USHORT  usReserved[2]; 
+  UCHAR   ucChPNInvert;                   // bit vector for up to 8 lanes, =0: P and N is not invert, =1 P and N is inverted
+  USHORT  usCaps;
+  USHORT  usReserved; 
 }EXT_DISPLAY_PATH;
    
 #define NUMBER_OF_UCHAR_FOR_GUID          16
 #define MAX_NUMBER_OF_EXT_DISPLAY_PATH    7
 
+//usCaps
+#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE          0x01
+
 typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
 {
   ATOM_COMMON_TABLE_HEADER sHeader;
@@ -3832,7 +4080,9 @@ typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
   EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
   UCHAR                    ucChecksum;                            // a  simple Checksum of the sum of whole structure equal to 0x0. 
   UCHAR                    uc3DStereoPinId;                       // use for eDP panel
-  UCHAR                    Reserved [6];                          // for potential expansion
+  UCHAR                    ucRemoteDisplayConfig;
+  UCHAR                    uceDPToLVDSRxId;
+  UCHAR                    Reserved[4];                           // for potential expansion
 }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
 //Related definitions, all records are different but they have a commond header
@@ -3977,6 +4227,7 @@ typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
 #define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
 
 // Indexes to GPIO array in GLSync record 
+// GLSync record is for Frame Lock/Gen Lock feature.
 #define ATOM_GPIO_INDEX_GLSYNC_REFCLK    0
 #define ATOM_GPIO_INDEX_GLSYNC_HSYNC     1
 #define ATOM_GPIO_INDEX_GLSYNC_VSYNC     2
@@ -3984,7 +4235,9 @@ typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
 #define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT  4
 #define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
 #define ATOM_GPIO_INDEX_GLSYNC_V_RESET   6
-#define ATOM_GPIO_INDEX_GLSYNC_MAX       7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL  8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX       9
 
 typedef struct  _ATOM_ENCODER_DVO_CF_RECORD
 {
@@ -3994,7 +4247,8 @@ typedef struct  _ATOM_ENCODER_DVO_CF_RECORD
 }ATOM_ENCODER_DVO_CF_RECORD;
 
 // Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
-#define ATOM_ENCODER_CAP_RECORD_HBR2     0x01         // DP1.2 HBR2 is supported by this path
+#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN               0x02         // DP1.2 HBR2 setting is qualified and HBR2 can be enabled 
 
 typedef struct  _ATOM_ENCODER_CAP_RECORD
 {
@@ -4003,11 +4257,13 @@ typedef struct  _ATOM_ENCODER_CAP_RECORD
     USHORT                    usEncoderCap;         
     struct {
 #if ATOM_BIG_ENDIAN
-      USHORT                  usReserved:15;        // Bit1-15 may be defined for other capability in future
+      USHORT                  usReserved:14;        // Bit2-15 may be defined for other capability in future
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
       USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
 #else
       USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
-      USHORT                  usReserved:15;        // Bit1-15 may be defined for other capability in future
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usReserved:14;        // Bit2-15 may be defined for other capability in future
 #endif
     };
   }; 
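
ATOM_ENCODER_CAP_RECORD overlays the raw usEncoderCap word with per-bit fields, so the new HBR2-enable bit can be read either way, and the reserved field shrinks from 15 to 14 bits to make room. Reading one capability through both views of such a union looks like the sketch below (a simplified stand-in, little-endian bit-field layout assumed):

#include <stdint.h>
#include <stdio.h>

union encoder_caps {
	uint16_t raw;
	struct {
		uint16_t hbr2_cap : 1;   /* bit 0: HBR2 supported by the HW encoder  */
		uint16_t hbr2_en  : 1;   /* bit 1: HBR2 qualified and may be enabled */
		uint16_t reserved : 14;
	};
};

int main(void)
{
	union encoder_caps caps = { .raw = 0x0003 };  /* both HBR2 bits set */

	printf("raw=0x%04x cap=%u en=%u\n",
	       (unsigned)caps.raw, (unsigned)caps.hbr2_cap, (unsigned)caps.hbr2_en);
	return 0;
}
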
@@ -4157,6 +4413,7 @@ typedef struct _ATOM_VOLTAGE_CONTROL
 #define        VOLTAGE_CONTROL_ID_VT1556M                                              0x07                                                                    
 #define        VOLTAGE_CONTROL_ID_CHL822x                                              0x08                                                                    
 #define        VOLTAGE_CONTROL_ID_VT1586M                                              0x09
+#define VOLTAGE_CONTROL_ID_UP1637                                              0x0A
 
 typedef struct  _ATOM_VOLTAGE_OBJECT
 {
@@ -4193,6 +4450,69 @@ typedef struct  _ATOM_LEAKID_VOLTAGE
        USHORT  usVoltage;
 }ATOM_LEAKID_VOLTAGE;
 
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+        UCHAR          ucVoltageType;                                                                  //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI  
+   UCHAR               ucVoltageMode;                                                      //Indicate voltage control mode: Init/Set/Leakage/Set phase 
+        USHORT         usSize;                                                                                                 //Size of Object        
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+typedef struct  _VOLTAGE_LUT_ENTRY_V2
+{
+        ULONG          ulVoltageId;                                                                      // The Voltage ID which is used to program GPIO register
+        USHORT         usVoltageValue;                                                                 // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+  USHORT       usVoltageLevel;                                                           // The Voltage ID which is used to program GPIO register
+  USHORT  usVoltageId;                    
+       USHORT  usLeakageId;                                                                      // The corresponding Voltage Value, in mV
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+   UCHAR       ucVoltageRegulatorId;                                     //Indicate Voltage Regulator Id
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;            
+   ULONG    ulReserved;
+   VOLTAGE_LUT_ENTRY asVolI2cLut[1];        // end with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;   
+   UCHAR    ucVoltageGpioCntlId;         // default is 0 which indicate control through CG VID mode 
+   UCHAR    ucGpioEntryNum;              // indicate the entry number of the Voltage/GPIO value look-up table
+   UCHAR    ucPhaseDelay;                // phase delay in unit of micro second
+   UCHAR    ucReserved;   
+   ULONG    ulGpioMaskVal;               // GPIO Mask value
+   VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];   
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+   UCHAR    ucLeakageCntlId;             // default is 0
+   UCHAR    ucLeakageEntryNum;           // indicate the entry number of LeakageId/Voltage Lut table
+   UCHAR    ucReserved[2];               
+   ULONG    ulMaxVoltageLevel;
+   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];   
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+  ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+  ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+  ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+   ATOM_COMMON_TABLE_HEADER    sHeader; 
+        ATOM_VOLTAGE_OBJECT_V3                 asVoltageObj[3];        //Info for Voltage control               
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
+
 typedef struct  _ATOM_ASIC_PROFILE_VOLTAGE
 {
        UCHAR           ucProfileId;
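
Each V3 voltage object starts with ATOM_VOLTAGE_OBJECT_HEADER_V3, whose usSize covers the whole object including its variable-length lookup table, so a parser would step to the next object by advancing usSize bytes rather than by sizeof(). A hedged sketch of that walk over a packed little-endian buffer; the three-field header and the bound checks are simplifications, not the radeon parser:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified header: type, mode, then total object size in bytes (little-endian). */
struct volt_obj_header {
	uint8_t  type;
	uint8_t  mode;
	uint16_t size;
};

static void walk_voltage_objects(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct volt_obj_header) <= len) {
		struct volt_obj_header hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));   /* avoid unaligned access */
		if (hdr.size < sizeof(hdr) || off + hdr.size > len)
			break;                          /* malformed table, stop  */
		printf("object type %u, mode %u, %u bytes\n",
		       (unsigned)hdr.type, (unsigned)hdr.mode, (unsigned)hdr.size);
		off += hdr.size;                        /* variable-size step     */
	}
}

int main(void)
{
	/* Two fake objects: a 6-byte one and a 4-byte one. */
	const uint8_t table[] = { 1, 0, 6, 0, 0xAA, 0xBB,
				  2, 3, 4, 0 };

	walk_voltage_objects(table, sizeof(table));
	return 0;
}
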
@@ -4305,7 +4625,18 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
   USHORT usHDMISSpreadRateIn10Hz;
   USHORT usDVISSPercentage;
   USHORT usDVISSpreadRateIn10Hz;
-  ULONG  ulReserved3[21]; 
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  ULONG  ulReserved3[15]; 
   ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
 }ATOM_INTEGRATED_SYSTEM_INFO_V6;   
 
@@ -4313,9 +4644,16 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
 #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
 #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
 
-// ulOtherDisplayMisc
-#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT                       0x01
+//ucLVDSMisc:                   
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE                                             0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP                                                0x02
+#define SYS_INFO_LVDSMISC__888_BPC                                                   0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN                                               0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW                                           0x10
 
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW                                          0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW                                          0x08
 
 /**********************************************************************************************************************
   ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
@@ -4384,7 +4722,208 @@ ucUMAChannelNumber:                     System memory channel numbers.
 ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
 ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
 ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
-sAvail_SCLK[5]:                   Arrays to provide available list of SLCK and corresponding voltage, order from low to high  
+sAvail_SCLK[5]:                   Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high  
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting. 
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. 
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting. 
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting. 
+usDVISSPercentage:                DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting. 
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting. 
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock LVDS panel single link, if=0 means VBIOS uses the default threshold, right now it is 85Mhz
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1:888 bit per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+**********************************************************************************************************************/
+
+// This table is used for Llano/Ontario APUs
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V6    sIntegratedSysInfo;   
+  ULONG  ulPowerplayTable[128];  
+}ATOM_FUSION_SYSTEM_INFO_V1; 
+/**********************************************************************************************************************
+  ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo:               refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]:            This 512-byte area is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
+**********************************************************************************************************************/ 
+
+// This IntegratedSystemInfoTable is used for the Trinity APU
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ULONG  ulReserved[20];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSReserved1;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  USHORT usNBP2Voltage;               
+  USHORT usNBP3Voltage;
+  ULONG  ulNbpStateNClkFreq[4];
+  UCHAR  ucNBDPMEnable;
+  UCHAR  ucReserved[3];
+  UCHAR  ucDPMState0VclkFid;
+  UCHAR  ucDPMState0DclkFid;
+  UCHAR  ucDPMState1VclkFid;
+  UCHAR  ucDPMState1DclkFid;
+  UCHAR  ucDPMState2VclkFid;
+  UCHAR  ucDPMState2DclkFid;
+  UCHAR  ucDPMState3VclkFid;
+  UCHAR  ucDPMState3DclkFid;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT            0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT  0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT       0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT                         0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE                0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE                               0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT                         0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, the VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz units.
+ulBootUpUMAClock:                 System memory bootup clock frequency in 10kHz units.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Trinity projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
+                                        =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+                                  bit[1]=0: INT15 callback function Get boot display ( ax=4e08, bl=01h ) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display ( ax=4e08, bl=01h ) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h ) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h ) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection on every set mode if an LCD panel is connected and the LID is open )
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI coherent mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI coherent mode uses single PLL mode.
+                                  bit[1]=0: DP mode uses cascade PLL mode ( new for Trinity )
+                                        =1: DP mode uses single PLL mode
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+                                      
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When set to 0x0 by SBIOS: the LCD backlight is not controlled by the GPU (SW).
+                                  Any attempt to change the BL through a VBIOS function or to enable VariBri from the PP table has no effect, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When set to a non-zero frequency, the backlight is controlled by the GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing the BL through a VBIOS function works in both driver and non-driver present environments,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control the BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will still set ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing the BL through a VBIOS function may work in both driver and non-driver present environments,
+                                  but this is platform dependent,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                                  Threshold-on value used to enter the HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usNBP2Voltage:                    VID for voltage on NB P2 State
+usNBP3Voltage:                    VID for voltage on NB P3 State  
+usBootUpNBVoltage:                Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1: DDR1; =2: DDR2; =3: DDR3. [7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Array with values for the CSR M3 arbiter for the default case.
+ulCSR_M3_ARB_CNTL_UVD[10]:        Array with values for the CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Array with values for the CSR M3 arbiter for full screen 3D applications.
+sAvail_SCLK[5]:                   Array providing the available list of SCLKs and corresponding voltages, ordered from low to high.
 ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
 ulMinimumNClk:                    Minimum NCLK speed among all NB P-states, used to calculate data reconnection latency. Unit in 10kHz.
 ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
@@ -4398,6 +4937,41 @@ usHDMISSPercentage:               HDMI Spread Spectrum Percentage in unit 0.01%;
 usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting. 
 usDVISSPercentage:                DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting. 
 usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting. 
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of the LVDS panel in single link mode; =0 means the VBIOS uses the default threshold, currently 85MHz.
+ucLvdsMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link are not swapped, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bits per color =1: 888 bits per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+                                  [bit4] Polarity of the signal sent to the digital BLON output pin. =0: not inverted ( active high ) =1: inverted ( active low )
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:    LVDS power up sequence time in units of 4ms; time delay from the DIGON signal going active to the data enable signal ( DE ) going active.
+                                  =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms:  LVDS power up sequence time in units of 4ms; time delay from DE ( data enable ) going active to the vary brightness enable signal ( VARY_BL ) going active.
+                                  =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms: LVDS power down sequence time in units of 4ms; time delay from the data enable ( DE ) signal going off to LCDVCC ( DIGON ) going off.
+                                  =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is: BLON->VARY_BL->DE->DIGON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqDEtoDIGON_in4Ms:   LVDS power down sequence time in units of 4ms; time delay from the vary brightness enable signal ( VARY_BL ) going off to the data enable ( DE ) signal going off.
+                                  =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is: BLON->VARY_BL->DE->DIGON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms:         LVDS off-to-on delay in units of 4ms. Time delay from the DIGON signal going off to the DIGON signal going active again.
+                                  =0 means use the VBIOS default delay, which is 125 ( 500ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms: LVDS power up sequence time in units of 4ms. Time delay from the VARY_BL signal going on to the BLON signal going active.
+                                  =0 means use the VBIOS default delay, which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms: LVDS power down sequence time in units of 4ms. Time delay from the BLON signal going off to the VARY_BL signal going off.
+                                  =0 means use the VBIOS default delay, which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ulNbpStateMemclkFreq[4]:          System memory clock frequency, in units of 10kHz, for each NB P-state.
+
 **********************************************************************************************************************/
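As a reading aid only (none of this is in the patch): two of the fields documented above, decoded with the bit definitions from this header. The helper name is made up, and the endian conversions assume the usual little-endian VBIOS layout:

/* illustrative only: interpret a couple of ATOM_INTEGRATED_SYSTEM_INFO_V1_7 fields */
static void example_parse_igp_info_v1_7(const ATOM_INTEGRATED_SYSTEM_INFO_V1_7 *igp)
{
        /* ulGPUCapInfo bit[1]: DP uses a single PLL instead of cascade mode */
        bool dp_single_pll = !!(le32_to_cpu(igp->ulGPUCapInfo) &
                                SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE);

        /* 0 means the backlight is not GPU controlled, otherwise the PWM freq in Hz */
        u16 bl_pwm_freq_hz = le16_to_cpu(igp->usRequestedPWMFreqInHz);

        (void)dp_single_pll;
        (void)bl_pwm_freq_hz;
}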
 
 /**************************************************************************/
@@ -4459,6 +5033,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
 #define ASIC_INTERNAL_SS_ON_DP      7
 #define ASIC_INTERNAL_SS_ON_DCPLL   8
 #define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
+#define ASIC_INTERNAL_VCE_SS        10
 
 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
 {
@@ -4520,7 +5095,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
 #define ATOM_DOS_MODE_INFO_DEF        7
 #define ATOM_I2C_CHANNEL_STATUS_DEF   8
 #define ATOM_I2C_CHANNEL_STATUS1_DEF  9
-
+#define ATOM_INTERNAL_TIMER_DEF       10
 
 // BIOS_0_SCRATCH Definition 
 #define ATOM_S0_CRT1_MONO               0x00000001L
@@ -4648,6 +5223,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
 #define ATOM_S2_DEVICE_DPMS_MASKw1      0x3FF
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3     0x0C
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3   0x10
+#define ATOM_S2_TMDS_COHERENT_MODEb3    0x10          // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
 #define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
 #define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
 
@@ -5038,6 +5614,23 @@ typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
   USHORT usDeviceId;                  // Active Device Id for this surface. If no device, set to 0. 
 }ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
 
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  USHORT usGraphPitch;
+  UCHAR  ucColorDepth;
+  UCHAR  ucPixelFormat;
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucModeType;
+  UCHAR  ucReserved;
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
+
+// ucEnable
+#define ATOM_GRAPH_CONTROL_SET_PITCH             0x0f
+#define ATOM_GRAPH_CONTROL_SET_DISP_START        0x10
+
 typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
 {
   ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;          
@@ -5057,6 +5650,58 @@ typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
   USHORT  usY_Size;
 }GET_DISPLAY_SURFACE_SIZE_PARAMETERS; 
 
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
+{
+  union{
+    USHORT  usX_Size;                     // When used as an input parameter, usX_Size indicates which CRTC
+    USHORT  usSurface; 
+  };
+  USHORT usY_Size;
+  USHORT usDispXStart;               
+  USHORT usDispYStart;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2; 
+
+
+typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3 
+{
+  UCHAR  ucLutId;
+  UCHAR  ucAction;
+  USHORT usLutStartIndex;
+  USHORT usLutLength;
+  USHORT usLutOffsetInVram;
+}PALETTE_DATA_CONTROL_PARAMETERS_V3;
+
+// ucAction:
+#define PALETTE_DATA_AUTO_FILL            1
+#define PALETTE_DATA_READ                 2
+#define PALETTE_DATA_WRITE                3
+
+
+typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
+{
+  UCHAR  ucInterruptId;
+  UCHAR  ucServiceId;
+  UCHAR  ucStatus;
+  UCHAR  ucReserved;
+}INTERRUPT_SERVICE_PARAMETER_V2;
+
+// ucInterruptId
+#define HDP1_INTERRUPT_ID                 1
+#define HDP2_INTERRUPT_ID                 2
+#define HDP3_INTERRUPT_ID                 3
+#define HDP4_INTERRUPT_ID                 4
+#define HDP5_INTERRUPT_ID                 5
+#define HDP6_INTERRUPT_ID                 6
+#define SW_INTERRUPT_ID                   11   
+
+// ucAction
+#define INTERRUPT_SERVICE_GEN_SW_INT      1
+#define INTERRUPT_SERVICE_GET_STATUS      2
+
+ // ucStatus
+#define INTERRUPT_STATUS__INT_TRIGGER     1
+#define INTERRUPT_STATUS__HPD_HIGH        2
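For illustration only (not in the patch): the parameter block above is filled in and handed to the corresponding command table. The table-execution call is left as a placeholder, since it depends on the caller:

/* sketch: request a software interrupt through the interrupt service table */
static void example_gen_sw_interrupt(void)
{
        INTERRUPT_SERVICE_PARAMETER_V2 args = { 0 };

        args.ucInterruptId = SW_INTERRUPT_ID;              /* interrupt source */
        args.ucServiceId   = INTERRUPT_SERVICE_GEN_SW_INT; /* action: trigger it */

        /* exec_atom_table(&args);  -- placeholder for the caller's exec helper.
         * With INTERRUPT_SERVICE_GET_STATUS instead, args.ucStatus would carry
         * INTERRUPT_STATUS__INT_TRIGGER or INTERRUPT_STATUS__HPD_HIGH on return. */
        (void)args;
}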
+
 typedef struct _INDIRECT_IO_ACCESS
 {
   ATOM_COMMON_TABLE_HEADER sHeader;  
@@ -5189,7 +5834,7 @@ typedef struct _ATOM_INIT_REG_BLOCK{
 
 #define END_OF_REG_INDEX_BLOCK  0x0ffff
 #define END_OF_REG_DATA_BLOCK   0x00000000
-#define ATOM_INIT_REG_MASK_FLAG 0x80
+#define ATOM_INIT_REG_MASK_FLAG 0x80               //Not used in BIOS
 #define        CLOCK_RANGE_HIGHEST                     0x00ffffff
 
 #define VALUE_DWORD             SIZEOF ULONG
@@ -5229,6 +5874,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
 #define _128Mx8             0x51
 #define _128Mx16            0x52
 #define _256Mx8             0x61
+#define _256Mx16            0x62
 
 #define SAMSUNG             0x1
 #define INFINEON            0x2
@@ -5585,7 +6231,7 @@ typedef struct _ATOM_VRAM_MODULE_V7
   ULONG          ulChannelMapCfg;                      // mmMC_SHARED_CHREMAP
   USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
   USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
-  USHORT  usReserved;
+  USHORT  usEnableChannels;                 // bit vector which indicate which channels are enabled
   UCHAR   ucExtMemoryID;                    // Current memory module ID
   UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
   UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
@@ -5597,7 +6243,8 @@ typedef struct _ATOM_VRAM_MODULE_V7
   UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
   UCHAR          ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
   UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
-  UCHAR   ucReserved[3];
+  USHORT  usSEQSettingOffset;
+  UCHAR   ucReserved;
 // Memory Module specific values
   USHORT  usEMRS2Value;                     // EMRS2/MR2 Value. 
   USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
@@ -5633,10 +6280,10 @@ typedef struct _ATOM_VRAM_INFO_V3
 typedef struct _ATOM_VRAM_INFO_V4
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
-       USHORT                                                                           usMemAdjustTblOffset;                                                                                                   // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
-       USHORT                                                                           usMemClkPatchTblOffset;                                                                                                 //     offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
-       USHORT                                                                           usRerseved;
-       UCHAR                            ucMemDQ7_0ByteRemap;                                                                                                      // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+  USHORT                     usMemAdjustTblOffset;                                                                                                      // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;                                                                                            //     offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                                                                                usRerseved;
+  UCHAR                         ucMemDQ7_0ByteRemap;                                                                                                      // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
   ULONG                      ulMemDQ7_0BitRemap;                             // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
   UCHAR                      ucReservde[4]; 
   UCHAR                      ucNumOfVRAMModule;
@@ -5648,9 +6295,10 @@ typedef struct _ATOM_VRAM_INFO_V4
 typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
-       USHORT                                                                           usMemAdjustTblOffset;                                                                                                   // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
-       USHORT                                                                           usMemClkPatchTblOffset;                                                                                                 //     offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
-       USHORT                                                                           usReserved[4];
+  USHORT                     usMemAdjustTblOffset;                                                                                                      // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;                                                                                            //     offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usPerBytePresetOffset;                          // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+  USHORT                     usReserved[3];
   UCHAR                      ucNumOfVRAMModule;                              // indicate number of VRAM module
   UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
   UCHAR                      ucVramModuleVer;                                // indicate ATOM_VRAM_MODUE version
@@ -5935,6 +6583,52 @@ typedef struct _ATOM_DISP_OUT_INFO_V2
        ASIC_ENCODER_INFO      asEncoderInfo[1];
 }ATOM_DISP_OUT_INFO_V2;
 
+
+typedef struct _ATOM_DISP_CLOCK_ID {
+  UCHAR ucPpllId; 
+  UCHAR ucPpllAttribute;
+}ATOM_DISP_CLOCK_ID;
+
+// ucPpllAttribute
+#define CLOCK_SOURCE_SHAREABLE            0x01
+#define CLOCK_SOURCE_DP_MODE              0x02
+#define CLOCK_SOURCE_NONE_DP_MODE         0x04
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO_V2
+{
+       USHORT usTransmitterObjId;
+       USHORT usDispClkIdOffset;    // point to clock source id list supported by Encoder Object
+  UCHAR  ucTransmitterCmdTblId;
+       UCHAR  ucConfig;
+       UCHAR  ucEncoderID;                                      // available 1st encoder ( default )
+       UCHAR  ucOptionEncoderID;    // available 2nd encoder ( optional )
+       UCHAR  uc2ndEncoderID;
+       UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO_V2;
+
+typedef struct _ATOM_DISP_OUT_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+       USHORT ptrTransmitterInfo;
+       USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+  USHORT usReserved;
+  UCHAR  ucDCERevision;   
+  UCHAR  ucMaxDispEngineNum;
+  UCHAR  ucMaxActiveDispEngineNum;
+  UCHAR  ucMaxPPLLNum;
+  UCHAR  ucCoreRefClkSource;                          // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucReserved[3];
+	ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+typedef enum CORE_REF_CLK_SOURCE{
+  CLOCK_SRC_XTALIN=0,
+  CLOCK_SRC_XO_IN=1,
+  CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
 // DispDevicePriorityInfo
 typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
 {
@@ -6070,6 +6764,39 @@ typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
 #define HW_I2C_READ         0
 #define I2C_2BYTE_ADDR      0x02
 
+/****************************************************************************/ 
+// Structures used by HW_Misc_OperationTable
+/****************************************************************************/ 
+typedef struct  _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucCmd;                //  Input: To tell which action to take
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1; 
+
+typedef struct  _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucReturnCode;        // Output: Return value based on the action taken
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
+
+// Actions code
+#define  ATOM_GET_SDI_SUPPORT              0xF0
+
+// Return code 
+#define  ATOM_UNKNOWN_CMD                   0
+#define  ATOM_FEATURE_NOT_SUPPORTED         1
+#define  ATOM_FEATURE_SUPPORTED             2
+
+typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
+{
+       ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1        sInput_Output;
+       PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS         sReserved; 
+}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
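A minimal sketch (not in the patch) of using the action and return codes above to probe SDI support. The exec call is a placeholder, and the input/output parameters are simply overlaid in the PS allocation:

/* illustrative only: query SDI support via the HW_Misc_Operation table */
static int example_sdi_supported(void)
{
        ATOM_HW_MISC_OPERATION_PS_ALLOCATION args = { { 0 } };
        ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 *out =
                (ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 *)&args.sInput_Output;

        args.sInput_Output.ucCmd = ATOM_GET_SDI_SUPPORT;
        /* exec_atom_table(&args);  -- placeholder for the caller's exec helper */

        return out->ucReturnCode == ATOM_FEATURE_SUPPORTED;
}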
+
+/****************************************************************************/ 
+
 typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
 {
    UCHAR ucHWBlkInst;                // HW block instance, 0, 1, 2, ...
@@ -6090,6 +6817,52 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
 #define SELECT_CRTC_PIXEL_RATE        7
 #define SELECT_VGA_BLK                8
 
+// DIGTransmitterInfoTable structure used to program UNIPHY settings 
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{  
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock 
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with non-DP mode analog setting register info
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with non-DP mode analog settings for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info 
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_1;
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+  USHORT usRegisterIndex;
+  UCHAR  ucStartBit;
+  UCHAR  ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+  USHORT usMaxClockFreq;
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  ULONG  ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+  USHORT usEntrySize;
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+  ULONG  ulCondition;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
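A hedged sketch (not in the patch) of resolving one of the offsets in DIG_TRANSMITTER_INFO_HEADER_V3_1. It assumes the offsets are relative to the start of the table itself, which is how the other *Offset fields in this header are normally consumed; the function name is made up:

/* illustrative only: locate the DP voltage swing / pre-emphasis settings */
static const PHY_ANALOG_SETTING_INFO *
example_dp_vs_preemph_info(const DIG_TRANSMITTER_INFO_HEADER_V3_1 *hdr)
{
        const unsigned char *base = (const unsigned char *)hdr;
        unsigned short off = le16_to_cpu(hdr->usDPVsPreEmphSettingOffset);

        if (!off)
                return NULL;    /* table does not carry this block */

        return (const PHY_ANALOG_SETTING_INFO *)(base + off);
}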
+
 /****************************************************************************/ 
 //Portion VI: Definitions for VBIOS MC scratch registers used by the driver
 /****************************************************************************/
@@ -7020,4 +7793,68 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
 
 #pragma pack() // BIOS data must use byte alignment
 
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+  ULONG Signature;
+  ULONG TableLength;      //Length
+  UCHAR Revision;
+  UCHAR Checksum;
+  UCHAR OemId[6];
+  UCHAR OemTableId[8];    //UINT64  OemTableId;
+  ULONG OemRevision;
+  ULONG CreatorId;
+  ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+  UINT32  Signature;       //0x0
+  UINT32  Length;          //0x4
+  UINT8   Revision;        //0x8
+  UINT8   Checksum;        //0x9
+  UINT8   OemId[6];        //0xA
+  UINT64  OemTableId;      //0x10
+  UINT32  OemRevision;     //0x18
+  UINT32  CreatorId;       //0x1C
+  UINT32  CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+  AMD_ACPI_DESCRIPTION_HEADER SHeader;
+  UCHAR TableUUID[16];    //0x24
+  ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+  ULONG Lib1ImageOffset;  //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+  ULONG Reserved[4];      //0x3C
+}UEFI_ACPI_VFCT;
+
+typedef struct {
+  ULONG  PCIBus;          //0x4C
+  ULONG  PCIDevice;       //0x50
+  ULONG  PCIFunction;     //0x54
+  USHORT VendorID;        //0x58
+  USHORT DeviceID;        //0x5A
+  USHORT SSVID;           //0x5C
+  USHORT SSID;            //0x5E
+  ULONG  Revision;        //0x60
+  ULONG  ImageLength;     //0x64
+}VFCT_IMAGE_HEADER;
+
+
+typedef struct {
+  VFCT_IMAGE_HEADER    VbiosHeader;
+  UCHAR        VbiosContent[1];
+}GOP_VBIOS_CONTENT;
+
+typedef struct {
+  VFCT_IMAGE_HEADER    Lib1Header;
+  UCHAR        Lib1Content[1];
+}GOP_LIB1_CONTENT;
+
+#pragma pack()
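A minimal sketch (not in the patch) of walking the VFCT layout above to reach the embedded VBIOS image; it assumes the table has already been mapped and length-checked by the caller, and the function name is made up:

/* illustrative only: locate the GOP_VBIOS_CONTENT block inside a mapped VFCT */
static const GOP_VBIOS_CONTENT *
example_vfct_vbios_image(const UEFI_ACPI_VFCT *vfct)
{
        const unsigned char *base = (const unsigned char *)vfct;

        if (!vfct->VBIOSImageOffset)
                return NULL;    /* no VBIOS image block present */

        /* VBIOSImageOffset is counted from the beginning of the structure */
        return (const GOP_VBIOS_CONTENT *)(base + vfct->VBIOSImageOffset);
}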
+
+
 #endif /* _ATOMBIOS_H */
index 757c549..ceffd20 100644 (file)
@@ -1446,14 +1446,98 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
        }
 }
 
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_crtc *test_crtc;
+       struct radeon_crtc *radeon_test_crtc;
+       u32 pll_in_use = 0;
+
+       list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc == test_crtc)
+                       continue;
+
+               radeon_test_crtc = to_radeon_crtc(test_crtc);
+               if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+                       pll_in_use |= (1 << radeon_test_crtc->pll_id);
+       }
+       return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *test_encoder;
+       struct radeon_crtc *radeon_test_crtc;
+
+       list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+               if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
+                       if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+                               /* for DP use the same PLL for all */
+                               radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
+                               if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+                                       return radeon_test_crtc->pll_id;
+                       }
+               }
+       }
+       return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
 static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *test_encoder;
-       struct drm_crtc *test_crtc;
-       uint32_t pll_in_use = 0;
+       u32 pll_in_use;
+       int pll;
 
        if (ASIC_IS_DCE4(rdev)) {
                list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
@@ -1461,35 +1545,39 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                                /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
                                 * depending on the asic:
                                 * DCE4: PPLL or ext clock
-                                * DCE5: DCPLL or ext clock
+                                * DCE5: PPLL, DCPLL, or ext clock
                                 *
                                 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
                                 * PPLL/DCPLL programming and only program the DP DTO for the
                                 * crtc virtual pixel clock.
                                 */
                                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
-                                       if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
+                                       if (rdev->clock.dp_extclk)
+                                               /* skip PPLL programming if using ext clock */
                                                return ATOM_PPLL_INVALID;
+                                       else if (ASIC_IS_DCE5(rdev))
+                                               /* use DCPLL for all DP */
+                                               return ATOM_DCPLL;
+                                       else {
+                                               /* use the same PPLL for all DP monitors */
+                                               pll = radeon_get_shared_dp_ppll(crtc);
+                                               if (pll != ATOM_PPLL_INVALID)
+                                                       return pll;
+                                       }
                                }
+                               break;
                        }
                }
-
-               /* otherwise, pick one of the plls */
-               list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-                       struct radeon_crtc *radeon_test_crtc;
-
-                       if (crtc == test_crtc)
-                               continue;
-
-                       radeon_test_crtc = to_radeon_crtc(test_crtc);
-                       if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
-                           (radeon_test_crtc->pll_id <= ATOM_PPLL2))
-                               pll_in_use |= (1 << radeon_test_crtc->pll_id);
-               }
-               if (!(pll_in_use & 1))
+               /* all other cases */
+               pll_in_use = radeon_get_pll_use_mask(crtc);
+               if (!(pll_in_use & (1 << ATOM_PPLL2)))
+                       return ATOM_PPLL2;
+               if (!(pll_in_use & (1 << ATOM_PPLL1)))
                        return ATOM_PPLL1;
-               return ATOM_PPLL2;
+               DRM_ERROR("unable to allocate a PPLL\n");
+               return ATOM_PPLL_INVALID;
        } else
+               /* use PPLL1 or PPLL2 */
                return radeon_crtc->crtc_id;
 
 }
@@ -1578,10 +1666,25 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
 static void atombios_crtc_disable(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_atom_ss ss;
+       int i;
 
        atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (rdev->mode_info.crtcs[i] &&
+                   rdev->mode_info.crtcs[i]->enabled &&
+                   i != radeon_crtc->crtc_id &&
+                   radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+			/* another crtc is using this pll, don't turn
+			 * off the pll
+			 */
+                       goto done;
+               }
+       }
+
        switch (radeon_crtc->pll_id) {
        case ATOM_PPLL1:
        case ATOM_PPLL2:
@@ -1592,7 +1695,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
        default:
                break;
        }
-       radeon_crtc->pll_id = -1;
+done:
+       radeon_crtc->pll_id = ATOM_PPLL_INVALID;
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -1641,6 +1745,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
                else
                        radeon_crtc->crtc_offset = 0;
        }
-       radeon_crtc->pll_id = -1;
+       radeon_crtc->pll_id = ATOM_PPLL_INVALID;
        drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
 }
index 5351ee1..382e141 100644 (file)
@@ -1344,6 +1344,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        struct radeon_connector *radeon_connector = NULL;
        struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
@@ -1355,12 +1357,38 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               /* some early dce3.2 boards have a bug in their transmitter control table */
-               if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
-                   ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+                       if (!connector)
+                               dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+                       else
+                               dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+                       /* setup and enable the encoder */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+                       atombios_dig_encoder_setup(encoder,
+                                                  ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+                                                  dig->panel_mode);
+                       if (ext_encoder) {
+                               if (ASIC_IS_DCE41(rdev))
+                                       atombios_external_encoder_setup(encoder, ext_encoder,
+                                                                       EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+                       }
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+               } else if (ASIC_IS_DCE4(rdev)) {
+                       /* setup and enable the encoder */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+                       /* enable the transmitter */
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-               else
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+               } else {
+                       /* setup and enable the encoder and transmitter */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+                       /* some early dce3.2 boards have a bug in their transmitter control table */
+			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+               }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
                        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
                                atombios_set_edp_panel_power(connector,
@@ -1377,10 +1405,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+                       /* disable the transmitter */
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-               else
+               } else if (ASIC_IS_DCE4(rdev)) {
+                       /* disable the transmitter */
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+               } else {
+                       /* disable the encoder and transmitter */
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+               }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
                        if (ASIC_IS_DCE4(rdev))
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1805,10 +1842,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
        radeon_encoder->pixel_clock = adjusted_mode->clock;
 
+       /* need to call this here rather than in prepare() since we need some crtc info */
+       radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
        if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
                if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
                        atombios_yuv_setup(encoder, true);
@@ -1827,38 +1866,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
-                       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-                       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
-                       if (!connector)
-                               dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
-                       else
-                               dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
-
-                       /* setup and enable the encoder */
-                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-                       atombios_dig_encoder_setup(encoder,
-                                                  ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
-                                                  dig->panel_mode);
-               } else if (ASIC_IS_DCE4(rdev)) {
-                       /* disable the transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-                       /* setup and enable the encoder */
-                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
-                       /* enable the transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-               } else {
-                       /* disable the encoder and transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
-                       /* setup and enable the encoder and transmitter */
-                       atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-               }
+               /* handled in dpms */
                break;
        case ENCODER_OBJECT_ID_INTERNAL_DDI:
        case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -1879,14 +1887,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
                break;
        }
 
-       if (ext_encoder) {
-               if (ASIC_IS_DCE41(rdev))
-                       atombios_external_encoder_setup(encoder, ext_encoder,
-                                                       EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
-               else
-                       atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
-       }
-
        atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -2059,7 +2059,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
        }
 
        radeon_atom_output_lock(encoder, true);
-       radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
        if (connector) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -2080,6 +2079,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
 {
+       /* need to call this here as we need the crtc set up */
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
        radeon_atom_output_lock(encoder, false);
 }
@@ -2120,14 +2120,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               if (ASIC_IS_DCE4(rdev))
-                       /* disable the transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-               else {
-                       /* disable the encoder and transmitter */
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-               }
+               /* handled in dpms */
                break;
        case ENCODER_OBJECT_ID_INTERNAL_DDI:
        case ENCODER_OBJECT_ID_INTERNAL_DVO1:
index 931f4df..fc0633c 100644 (file)
@@ -1065,24 +1065,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
-       save->vga_control[0] = RREG32(D1VGA_CONTROL);
-       save->vga_control[1] = RREG32(D2VGA_CONTROL);
        save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-       save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
-       save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-       if (rdev->num_crtc >= 4) {
-               save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
-               save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
-               save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
-               save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
-       }
-       if (rdev->num_crtc >= 6) {
-               save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
-               save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
-               save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
-               save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-       }
 
        /* Stop all video */
        WREG32(VGA_RENDER_CONTROL, 0);
@@ -1193,47 +1177,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        /* Unlock host access */
        WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
-       /* Restore video state */
-       WREG32(D1VGA_CONTROL, save->vga_control[0]);
-       WREG32(D2VGA_CONTROL, save->vga_control[1]);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
-               WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
-               WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
-       }
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
-       }
-       WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
-       WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
-       }
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-       }
        WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
@@ -2080,10 +2023,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_IGP)
                rdev->config.evergreen.tile_config |= 1 << 4;
        else {
-               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-                       rdev->config.evergreen.tile_config |= 1 << 4;
-               else
+               switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+               case 0: /* four banks */
                        rdev->config.evergreen.tile_config |= 0 << 4;
+                       break;
+               case 1: /* eight banks */
+                       rdev->config.evergreen.tile_config |= 1 << 4;
+                       break;
+               case 2: /* sixteen banks */
+               default:
+                       rdev->config.evergreen.tile_config |= 2 << 4;
+                       break;
+               }
        }
        rdev->config.evergreen.tile_config |=
                ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
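As a reading aid (not part of the patch): bits [7:4] of tile_config now encode the bank count selected in the switch above, so the reverse mapping is simply:

/* illustrative only: recover the bank count encoded in tile_config bits [7:4] */
static unsigned int example_tile_config_banks(unsigned int tile_config)
{
        switch ((tile_config >> 4) & 0xf) {
        case 0:
                return 4;       /* four banks */
        case 1:
                return 8;       /* eight banks */
        case 2:
        default:
                return 16;      /* sixteen banks */
        }
}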
index 9e50814..636255b 100644 (file)
@@ -804,10 +804,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.tile_config |= (3 << 0);
                break;
        }
-       if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-               rdev->config.cayman.tile_config |= 1 << 4;
-       else
+       switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+       case 0: /* four banks */
                rdev->config.cayman.tile_config |= 0 << 4;
+               break;
+       case 1: /* eight banks */
+               rdev->config.cayman.tile_config |= 1 << 4;
+               break;
+       case 2: /* sixteen banks */
+       default:
+               rdev->config.cayman.tile_config |= 2 << 4;
+               break;
+       }
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cayman.tile_config |=
index 8227e76..28e69e9 100644 (file)
@@ -123,21 +123,6 @@ struct radeon_device;
 /*
  * BIOS.
  */
-#define ATRM_BIOS_PAGE 4096
-
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_atrm_supported(struct pci_dev *pdev);
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
-#else
-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-       return false;
-}
-
-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
-       return -EINVAL;
-}
-#endif
 bool radeon_get_bios(struct radeon_device *rdev);
 
 
index 5991484..5ce9402 100644 (file)
@@ -253,13 +253,10 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
  * rv515
  */
 struct rv515_mc_save {
-       u32 d1vga_control;
-       u32 d2vga_control;
        u32 vga_render_control;
        u32 vga_hdp_control;
-       u32 d1crtc_control;
-       u32 d2crtc_control;
 };
+
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -387,11 +384,10 @@ void r700_cp_fini(struct radeon_device *rdev);
  * evergreen
  */
 struct evergreen_mc_save {
-       u32 vga_control[6];
        u32 vga_render_control;
        u32 vga_hdp_control;
-       u32 crtc_control[6];
 };
+
 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
index 9d2c369..38585c5 100644 (file)
@@ -446,7 +446,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
        }
 
        /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-       if ((dev->pdev->device == 0x9802) &&
+       if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
            (dev->pdev->subsystem_vendor == 0x1734) &&
            (dev->pdev->subsystem_device == 0x11bd)) {
                if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
index 9d95792..2a2cf0b 100644 (file)
@@ -30,56 +30,8 @@ static struct radeon_atpx_priv {
        /* handle for device - and atpx */
        acpi_handle dhandle;
        acpi_handle atpx_handle;
-       acpi_handle atrm_handle;
 } radeon_atpx_priv;
 
-/* retrieve the ROM in 4k blocks */
-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
-                           int offset, int len)
-{
-       acpi_status status;
-       union acpi_object atrm_arg_elements[2], *obj;
-       struct acpi_object_list atrm_arg;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
-       atrm_arg.count = 2;
-       atrm_arg.pointer = &atrm_arg_elements[0];
-
-       atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
-       atrm_arg_elements[0].integer.value = offset;
-
-       atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
-       atrm_arg_elements[1].integer.value = len;
-
-       status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
-       if (ACPI_FAILURE(status)) {
-               printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
-               return -ENODEV;
-       }
-
-       obj = (union acpi_object *)buffer.pointer;
-       memcpy(bios+offset, obj->buffer.pointer, len);
-       kfree(buffer.pointer);
-       return len;
-}
-
-bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-       /* get the discrete ROM only via ATRM */
-       if (!radeon_atpx_priv.atpx_detected)
-               return false;
-
-       if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-               return false;
-       return true;
-}
-
-
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
-       return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
-}
-
 static int radeon_atpx_get_version(acpi_handle handle)
 {
        acpi_status status;
@@ -197,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
-       acpi_handle dhandle, atpx_handle, atrm_handle;
+       acpi_handle dhandle, atpx_handle;
        acpi_status status;
 
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -208,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
        if (ACPI_FAILURE(status))
                return false;
 
-       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
-       if (ACPI_FAILURE(status))
-               return false;
-
        radeon_atpx_priv.dhandle = dhandle;
        radeon_atpx_priv.atpx_handle = atpx_handle;
-       radeon_atpx_priv.atrm_handle = atrm_handle;
        return true;
 }
 
index 229a20f..d306cc8 100644
@@ -32,6 +32,7 @@
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
 /*
  * BIOS.
  */
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
        return true;
 }
 
+#ifdef CONFIG_ACPI
 /* ATRM is used to get the BIOS on the discrete cards in
  * dual-gpu systems.
  */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+                           int offset, int len)
+{
+       acpi_status status;
+       union acpi_object atrm_arg_elements[2], *obj;
+       struct acpi_object_list atrm_arg;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+       atrm_arg.count = 2;
+       atrm_arg.pointer = &atrm_arg_elements[0];
+
+       atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+       atrm_arg_elements[0].integer.value = offset;
+
+       atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+       atrm_arg_elements[1].integer.value = len;
+
+       status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+       if (ACPI_FAILURE(status)) {
+               printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+               return -ENODEV;
+       }
+
+       obj = (union acpi_object *)buffer.pointer;
+       memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+       len = obj->buffer.length;
+       kfree(buffer.pointer);
+       return len;
+}
+
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
        int ret;
        int size = 256 * 1024;
        int i;
+       struct pci_dev *pdev = NULL;
+       acpi_handle dhandle, atrm_handle;
+       acpi_status status;
+       bool found = false;
+
+       /* ATRM is for the discrete card only */
+       if (rdev->flags & RADEON_IS_IGP)
+               return false;
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+               if (!dhandle)
+                       continue;
+
+               status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+               if (!ACPI_FAILURE(status)) {
+                       found = true;
+                       break;
+               }
+       }
 
-       if (!radeon_atrm_supported(rdev->pdev))
+       if (!found)
                return false;
 
        rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,10 +183,11 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
        }
 
        for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
-               ret = radeon_atrm_get_bios_chunk(rdev->bios,
-                                                (i * ATRM_BIOS_PAGE),
-                                                ATRM_BIOS_PAGE);
-               if (ret <= 0)
+               ret = radeon_atrm_call(atrm_handle,
+                                      rdev->bios,
+                                      (i * ATRM_BIOS_PAGE),
+                                      ATRM_BIOS_PAGE);
+               if (ret < ATRM_BIOS_PAGE)
                        break;
        }
 
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
        }
        return true;
 }
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+       return false;
+}
+#endif
 
 static bool ni_read_disabled_bios(struct radeon_device *rdev)
 {
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
                return legacy_read_disabled_bios(rdev);
 }
 
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+       bool ret = false;
+       struct acpi_table_header *hdr;
+       acpi_size tbl_size;
+       UEFI_ACPI_VFCT *vfct;
+       GOP_VBIOS_CONTENT *vbios;
+       VFCT_IMAGE_HEADER *vhdr;
+
+       if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+               return false;
+       if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+               DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+               goto out_unmap;
+       }
+
+       vfct = (UEFI_ACPI_VFCT *)hdr;
+       if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+               DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+               goto out_unmap;
+       }
+
+       vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+       vhdr = &vbios->VbiosHeader;
+       DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+                       vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+                       vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+       if (vhdr->PCIBus != rdev->pdev->bus->number ||
+           vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+           vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+           vhdr->VendorID != rdev->pdev->vendor ||
+           vhdr->DeviceID != rdev->pdev->device) {
+               DRM_INFO("ACPI VFCT table is not for this card\n");
+               goto out_unmap;
+       };
+
+       if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+               DRM_ERROR("ACPI VFCT image truncated\n");
+               goto out_unmap;
+       }
+
+       rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+       ret = !!rdev->bios;
+
+out_unmap:
+       return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+       return false;
+}
+#endif
 
 bool radeon_get_bios(struct radeon_device *rdev)
 {
@@ -483,6 +611,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
        uint16_t tmp;
 
        r = radeon_atrm_get_bios(rdev);
+       if (r == false)
+               r = radeon_acpi_vfct_bios(rdev);
        if (r == false)
                r = igp_read_bios_from_vram(rdev);
        if (r == false)
index 9231564..c5762e3 100644
@@ -761,7 +761,7 @@ int radeon_device_init(struct radeon_device *rdev,
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if ((rdev->flags & RADEON_IS_PCI) &&
-           (rdev->family < CHIP_RS400))
+           (rdev->family <= CHIP_RS740))
                rdev->need_dma32 = true;
 
        dma_bits = rdev->need_dma32 ? 32 : 40;
index 39497c7..f3ae607 100644
@@ -117,6 +117,7 @@ int radeon_bo_create(struct radeon_device *rdev,
                return -ENOMEM;
        }
 
+retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -129,8 +130,6 @@ int radeon_bo_create(struct radeon_device *rdev,
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
-
-retry:
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
index 6613ee9..d5f45b4 100644
@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 
 void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
-       save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
-       save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
        save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
-       save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
-       save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
 
        /* Stop all video */
        WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        /* Unlock host access */
        WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
-       /* Restore video state */
-       WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
-       WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
-       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
-       WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
-       WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
-       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
        WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
index dff8fc7..033fc96 100644
@@ -178,6 +178,7 @@ static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
 };
+MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
 static int enable_fbdev;
 
@@ -1088,6 +1089,11 @@ static struct drm_driver driver = {
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
+
+       .dumb_create = vmw_dumb_create,
+       .dumb_map_offset = vmw_dumb_map_offset,
+       .dumb_destroy = vmw_dumb_destroy,
+
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
index dc27970..0e3fa7d 100644
@@ -641,6 +641,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 
+int vmw_dumb_create(struct drm_file *file_priv,
+                   struct drm_device *dev,
+                   struct drm_mode_create_dumb *args);
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+                       struct drm_device *dev, uint32_t handle,
+                       uint64_t *offset);
+int vmw_dumb_destroy(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    uint32_t handle);
 /**
  * Overlay control - vmwgfx_overlay.c
  */
index 1c7f09e..0795d17 100644
@@ -1950,3 +1950,76 @@ err_ref:
        vmw_resource_unreference(&res);
        return ret;
 }
+
+
+int vmw_dumb_create(struct drm_file *file_priv,
+                   struct drm_device *dev,
+                   struct drm_mode_create_dumb *args)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct ttm_buffer_object *tmp;
+       int ret;
+
+       args->pitch = args->width * ((args->bpp + 7) / 8);
+       args->size = args->pitch * args->height;
+
+       vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+       if (vmw_user_bo == NULL)
+               return -ENOMEM;
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (ret != 0) {
+               kfree(vmw_user_bo);
+               return ret;
+       }
+
+       ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
+                             &vmw_vram_sys_placement, true,
+                             &vmw_user_dmabuf_destroy);
+       if (ret != 0)
+               goto out_no_dmabuf;
+
+       tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+       ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+                                  &vmw_user_bo->base,
+                                  false,
+                                  ttm_buffer_type,
+                                  &vmw_user_dmabuf_release, NULL);
+       if (unlikely(ret != 0))
+               goto out_no_base_object;
+
+       args->handle = vmw_user_bo->base.hash.key;
+
+out_no_base_object:
+       ttm_bo_unref(&tmp);
+out_no_dmabuf:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+}
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+                       struct drm_device *dev, uint32_t handle,
+                       uint64_t *offset)
+{
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_dma_buffer *out_buf;
+       int ret;
+
+       ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+       if (ret != 0)
+               return -EINVAL;
+
+       *offset = out_buf->base.addr_space_offset;
+       vmw_dmabuf_unreference(&out_buf);
+       return 0;
+}
+
+int vmw_dumb_destroy(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    uint32_t handle)
+{
+       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                        handle, TTM_REF_USAGE);
+}
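vmw_dumb_create() above derives the buffer geometry the usual dumb-buffer way: bits per pixel rounded up to whole bytes gives the pitch, and pitch times height gives the allocation size. A self-contained illustration of that arithmetic, using made-up framebuffer parameters (1024x768 at 32 bpp, chosen only for the example):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as vmw_dumb_create(): bpp rounded up to whole bytes,
 * pitch = bytes per scanline, size = whole buffer.  The 1024x768x32
 * values are illustrative, not taken from the patch. */
int main(void)
{
	uint32_t width = 1024, height = 768, bpp = 32;
	uint32_t pitch = width * ((bpp + 7) / 8);   /* 1024 * 4 = 4096 bytes */
	uint64_t size  = (uint64_t)pitch * height;  /* 4096 * 768 = 3145728 bytes (3 MiB) */

	printf("pitch=%u size=%llu\n", (unsigned)pitch, (unsigned long long)size);
	return 0;
}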
index d21f6d0..b5cc078 100644
@@ -350,6 +350,7 @@ config HID_MULTITOUCH
          - Lumio CrystalTouch panels
          - MosArt dual-touch panels
          - PenMount dual touch panels
+         - PixArt optical touch screen
          - Pixcir dual touch panels
          - eGalax dual-touch panels, including the Joojoo and Wetab tablets
          - Stantum multitouch panels
index b99af34..a2abb8e 100644
@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 static const struct hid_device_id ch_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
index 95430a0..0c8bea9 100644
@@ -1398,12 +1398,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
@@ -1505,6 +1507,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
index 2f0be4c..9e43aac 100644
@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
                .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
                .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
+               .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
                .driver_data = CP_2WHEEL_MOUSE_HACK },
        { }
index 7db934d..ab75a4e 100644
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
 #define USB_DEVICE_ID_CHICONY_WIRELESS2        0x1123
+#define USB_DEVICE_ID_CHICONY_AK1D     0x1125
 
 #define USB_VENDOR_ID_CHUNGHWAT                0x2247
 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH     0x0001
 #define USB_DEVICE_ID_CYPRESS_BARCODE_1        0xde61
 #define USB_DEVICE_ID_CYPRESS_BARCODE_2        0xde64
 #define USB_DEVICE_ID_CYPRESS_BARCODE_3        0xbca1
+#define USB_DEVICE_ID_CYPRESS_BARCODE_4        0xed81
 #define USB_DEVICE_ID_CYPRESS_TRUETOUCH        0xc001
 
 #define USB_VENDOR_ID_DEALEXTREAME     0x10c5
 #define USB_VENDOR_ID_PI_ENGINEERING   0x05f3
 #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
 
+#define USB_VENDOR_ID_PIXART                           0x093a
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN      0x8001
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1     0x8002
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2     0x8003
+
 #define USB_VENDOR_ID_PLAYDOTCOM       0x0b43
 #define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII     0x0003
 
index 995fc4c..13af0f1 100644
@@ -93,6 +93,7 @@ struct mt_class {
 #define MT_CLS_DUAL_INRANGE_CONTACTID          0x0005
 #define MT_CLS_DUAL_INRANGE_CONTACTNUMBER      0x0006
 #define MT_CLS_DUAL_NSMU_CONTACTID             0x0007
+#define MT_CLS_INRANGE_CONTACTNUMBER           0x0009
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
@@ -155,6 +156,9 @@ struct mt_class mt_classes[] = {
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
                        MT_QUIRK_SLOT_IS_CONTACTID,
                .maxcontacts = 2 },
+       { .name = MT_CLS_INRANGE_CONTACTNUMBER,
+               .quirks = MT_QUIRK_VALID_IS_INRANGE |
+                       MT_QUIRK_SLOT_IS_CONTACTNUMBER },
 
        /*
         * vendor specific classes
@@ -744,6 +748,17 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
                        USB_DEVICE_ID_PENMOUNT_PCI) },
 
+       /* PixArt optical touch screen */
+       { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+               HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+                       USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+       { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+               HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+                       USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+       { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+               HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+                       USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+
        /* PixCir-based panels */
        { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
                HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
index 1fe6b80..afb73af 100644
@@ -68,6 +68,10 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
index 00e9851..83d2fbd 100644
@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
                }
+       }, {
+               /* Old interface reads the same sensor for fan0 and fan1 */
+               .ident = "Asus M5A78L",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
+               }
        },
        { }
 };
index 0018c7d..1a174f0 100644
@@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
                         struct device_attribute *devattr, char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-       struct twl4030_madc_request req;
+       struct twl4030_madc_request req = {
+               .channels = 1 << attr->index,
+               .method = TWL4030_MADC_SW2,
+               .type = TWL4030_MADC_WAIT,
+       };
        long val;
 
-       req.channels = (1 << attr->index);
-       req.method = TWL4030_MADC_SW2;
-       req.func_cb = NULL;
        val = twl4030_madc_conversion(&req);
        if (val < 0)
                return val;
index a3afac4..60f593c 100644
@@ -103,6 +103,8 @@ config I2C_I801
            Patsburg (PCH)
            DH89xxCC (PCH)
            Panther Point (PCH)
+           Lynx Point (PCH)
+           Lynx Point-LP (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
@@ -349,9 +351,13 @@ config I2C_DAVINCI
          devices such as DaVinci NIC.
          For details please see http://www.ti.com/davinci
 
+config I2C_DESIGNWARE_CORE
+       tristate
+
 config I2C_DESIGNWARE_PLATFORM
        tristate "Synopsys DesignWare Platfrom"
        depends on HAVE_CLK
+       select I2C_DESIGNWARE_CORE
        help
          If you say yes to this option, support will be included for the
          Synopsys DesignWare I2C adapter. Only master mode is supported.
@@ -362,6 +368,7 @@ config I2C_DESIGNWARE_PLATFORM
 config I2C_DESIGNWARE_PCI
        tristate "Synopsys DesignWare PCI"
        depends on PCI
+       select I2C_DESIGNWARE_CORE
        help
          If you say yes to this option, support will be included for the
          Synopsys DesignWare I2C adapter. Only master mode is supported.
index fba6da6..d6b8779 100644
@@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550)    += i2c-au1550.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CPM)          += i2c-cpm.o
 obj-$(CONFIG_I2C_DAVINCI)      += i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE_CORE)      += i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)  += i2c-designware-platform.o
-i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)       += i2c-designware-pci.o
-i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_GPIO)         += i2c-gpio.o
 obj-$(CONFIG_I2C_HIGHLANDER)   += i2c-highlander.o
 obj-$(CONFIG_I2C_IBM_IIC)      += i2c-ibm_iic.o
index df87992..6193349 100644
@@ -25,6 +25,7 @@
  * ----------------------------------------------------------------------------
  *
  */
+#include <linux/export.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -305,6 +306,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        dw_writel(dev, dev->master_cfg , DW_IC_CON);
        return 0;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_init);
 
 /*
  * Waiting for bus not busy
@@ -557,12 +559,14 @@ done:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_xfer);
 
 u32 i2c_dw_func(struct i2c_adapter *adap)
 {
        struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
        return dev->functionality;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_func);
 
 static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
 {
@@ -667,17 +671,20 @@ tx_aborted:
 
        return IRQ_HANDLED;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_isr);
 
 void i2c_dw_enable(struct dw_i2c_dev *dev)
 {
        /* Enable the adapter */
        dw_writel(dev, 1, DW_IC_ENABLE);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_enable);
 
 u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
 {
        return dw_readl(dev, DW_IC_ENABLE);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
 
 void i2c_dw_disable(struct dw_i2c_dev *dev)
 {
@@ -688,18 +695,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
        dw_writel(dev, 0, DW_IC_INTR_MASK);
        dw_readl(dev, DW_IC_CLR_INTR);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_disable);
 
 void i2c_dw_clear_int(struct dw_i2c_dev *dev)
 {
        dw_readl(dev, DW_IC_CLR_INTR);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
 
 void i2c_dw_disable_int(struct dw_i2c_dev *dev)
 {
        dw_writel(dev, 0, DW_IC_INTR_MASK);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
 
 u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
 {
        return dw_readl(dev, DW_IC_COMP_PARAM_1);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
index ab26840..817d025 100644
@@ -51,6 +51,8 @@
   Patsburg (PCH) IDF    0x1d72     32     hard     yes     yes     yes
   DH89xxCC (PCH)        0x2330     32     hard     yes     yes     yes
   Panther Point (PCH)   0x1e22     32     hard     yes     yes     yes
+  Lynx Point (PCH)      0x8c22     32     hard     yes     yes     yes
+  Lynx Point-LP (PCH)   0x9c22     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS     0x2330
 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS        0x3b30
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS    0x8c22
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
 
 struct i801_priv {
        struct i2c_adapter adapter;
@@ -633,6 +637,8 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
        { 0, }
 };
 
index 0bfa545..c76b051 100644
@@ -568,24 +568,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
                        scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target,
-                          struct srp_request *req, s32 req_lim_delta)
+/**
+ * srp_claim_req - Take ownership of the scmnd associated with a request.
+ * @target: SRP target port.
+ * @req: SRP request.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ *         ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
+                                      struct srp_request *req,
+                                      struct scsi_cmnd *scmnd)
 {
        unsigned long flags;
 
-       srp_unmap_data(req->scmnd, target, req);
+       spin_lock_irqsave(&target->lock, flags);
+       if (!scmnd) {
+               scmnd = req->scmnd;
+               req->scmnd = NULL;
+       } else if (req->scmnd == scmnd) {
+               req->scmnd = NULL;
+       } else {
+               scmnd = NULL;
+       }
+       spin_unlock_irqrestore(&target->lock, flags);
+
+       return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and add request to the free request list.
+ */
+static void srp_free_req(struct srp_target_port *target,
+                        struct srp_request *req, struct scsi_cmnd *scmnd,
+                        s32 req_lim_delta)
+{
+       unsigned long flags;
+
+       srp_unmap_data(scmnd, target, req);
+
        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_lim_delta;
-       req->scmnd = NULL;
        list_add_tail(&req->list, &target->free_reqs);
        spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
-       req->scmnd->result = DID_RESET << 16;
-       req->scmnd->scsi_done(req->scmnd);
-       srp_remove_req(target, req, 0);
+       struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+       if (scmnd) {
+               scmnd->result = DID_RESET << 16;
+               scmnd->scsi_done(scmnd);
+               srp_free_req(target, req, scmnd, 0);
+       }
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
@@ -1055,11 +1093,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
                complete(&target->tsk_mgmt_done);
        } else {
                req = &target->req_ring[rsp->tag];
-               scmnd = req->scmnd;
-               if (!scmnd)
+               scmnd = srp_claim_req(target, req, NULL);
+               if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     "Null scmnd for RSP w/tag %016llx\n",
                                     (unsigned long long) rsp->tag);
+
+                       spin_lock_irqsave(&target->lock, flags);
+                       target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+                       spin_unlock_irqrestore(&target->lock, flags);
+
+                       return;
+               }
                scmnd->result = rsp->status;
 
                if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1074,7 +1119,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
                else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-               srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+               srp_free_req(target, req, scmnd,
+                            be32_to_cpu(rsp->req_lim_delta));
+
                scmnd->host_scribble = NULL;
                scmnd->scsi_done(scmnd);
        }
@@ -1613,25 +1660,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
-       int ret = SUCCESS;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-       if (!req || target->qp_in_error)
-               return FAILED;
-       if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
-                             SRP_TSK_ABORT_TASK))
+       if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
                return FAILED;
+       srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+                         SRP_TSK_ABORT_TASK);
+       srp_free_req(target, req, scmnd, 0);
+       scmnd->result = DID_ABORT << 16;
 
-       if (req->scmnd) {
-               if (!target->tsk_mgmt_status) {
-                       srp_remove_req(target, req, 0);
-                       scmnd->result = DID_ABORT << 16;
-               } else
-                       ret = FAILED;
-       }
-
-       return ret;
+       return SUCCESS;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
index a6dcd18..96532bc 100644
  * Note that newer firmware allows querying device for maximum useable
  * coordinates.
  */
+#define XMIN 0
+#define XMAX 6143
+#define YMIN 0
+#define YMAX 6143
 #define XMIN_NOMINAL 1472
 #define XMAX_NOMINAL 5472
 #define YMIN_NOMINAL 1408
 #define YMAX_NOMINAL 4448
 
+/* Size in bits of absolute position values reported by the hardware */
+#define ABS_POS_BITS 13
+
+/*
+ * Any position values from the hardware above the following limits are
+ * treated as "wrapped around negative" values that have been truncated to
+ * the 13-bit reporting range of the hardware. These are just reasonable
+ * guesses and can be adjusted if hardware is found that operates outside
+ * of these parameters.
+ */
+#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
+#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
+
 /*
  * Synaptics touchpads report the y coordinate from bottom to top, which is
  * opposite from what userspace expects.
@@ -544,6 +561,12 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                hw->right = (buf[0] & 0x02) ? 1 : 0;
        }
 
+       /* Convert wrap-around values to negative */
+       if (hw->x > X_MAX_POSITIVE)
+               hw->x -= 1 << ABS_POS_BITS;
+       if (hw->y > Y_MAX_POSITIVE)
+               hw->y -= 1 << ABS_POS_BITS;
+
        return 0;
 }
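The new synaptics limits follow from the 13-bit reporting range quoted in the comment above: X_MAX_POSITIVE = ((1 << 13) + 6143) / 2 = 7167 (likewise for Y), so any raw coordinate above that is treated as a truncated negative and has 8192 subtracted. A small standalone check of that arithmetic, with a made-up raw sample of 8000:

#include <stdio.h>

/* Mirrors the X_MAX_POSITIVE / wrap-around logic added above; the raw
 * value 8000 is hypothetical, chosen only to show the conversion. */
#define ABS_POS_BITS 13
#define XMAX 6143
#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)

int main(void)
{
	int x = 8000;                    /* hypothetical raw hardware value */

	if (x > X_MAX_POSITIVE)          /* 8000 > 7167 */
		x -= 1 << ABS_POS_BITS;  /* 8000 - 8192 = -192 */

	printf("limit=%d converted=%d\n", X_MAX_POSITIVE, x);
	return 0;
}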
 
index b4cfc6c..d4ec371 100644
@@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
                },
        },
+       {
+               /* Gigabyte T1005 - defines wrong chassis type ("Other") */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+               },
+       },
+       {
+               /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+               },
+       },
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
index 7f8f538..4f938bb 100644
@@ -48,7 +48,7 @@ struct eeti_ts_priv {
        struct input_dev *input;
        struct work_struct work;
        struct mutex mutex;
-       int irq, irq_active_high;
+       int irq_gpio, irq, irq_active_high;
 };
 
 #define EETI_TS_BITDEPTH       (11)
@@ -62,7 +62,7 @@ struct eeti_ts_priv {
 
 static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
 {
-       return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
+       return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
 }
 
 static void eeti_ts_read(struct work_struct *work)
@@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
 static int __devinit eeti_ts_probe(struct i2c_client *client,
                                   const struct i2c_device_id *idp)
 {
-       struct eeti_ts_platform_data *pdata;
+       struct eeti_ts_platform_data *pdata = client->dev.platform_data;
        struct eeti_ts_priv *priv;
        struct input_dev *input;
        unsigned int irq_flags;
@@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
 
        priv->client = client;
        priv->input = input;
-       priv->irq = client->irq;
+       priv->irq_gpio = pdata->irq_gpio;
+       priv->irq = gpio_to_irq(pdata->irq_gpio);
 
-       pdata = client->dev.platform_data;
+       err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
+       if (err < 0)
+               goto err1;
 
        if (pdata)
                priv->irq_active_high = pdata->irq_active_high;
@@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
 
        err = input_register_device(input);
        if (err)
-               goto err1;
+               goto err2;
 
        err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
                          client->name, priv);
        if (err) {
                dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
-               goto err2;
+               goto err3;
        }
 
        /*
@@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
        device_init_wakeup(&client->dev, 0);
        return 0;
 
-err2:
+err3:
        input_unregister_device(input);
        input = NULL; /* so we dont try to free it below */
+err2:
+       gpio_free(pdata->irq_gpio);
 err1:
        input_free_device(input);
        kfree(priv);
index d497db0..509135f 100644
@@ -16,7 +16,6 @@
 #include <linux/sched.h>
 #include "isdnloop.h"
 
-static char *revision = "$Revision: 1.11.6.7 $";
 static char *isdnloop_id = "loop0";
 
 MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
 static int __init
 isdnloop_init(void)
 {
-       char *p;
-       char rev[10];
-
-       if ((p = strchr(revision, ':'))) {
-               strcpy(rev, p + 1);
-               p = strchr(rev, '$');
-               *p = 0;
-       } else
-               strcpy(rev, " ??? ");
-       printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
-
        if (isdnloop_id)
                return (isdnloop_addcard(isdnloop_id));
 
index 532a902..d432032 100644
@@ -19,7 +19,7 @@
 /*
  * Tunable constants
  */
-#define ENDIO_HOOK_POOL_SIZE 10240
+#define ENDIO_HOOK_POOL_SIZE 1024
 #define DEFERRED_SET_SIZE 64
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
@@ -857,7 +857,7 @@ static void process_prepared_mapping(struct new_mapping *m)
 
        if (m->err) {
                cell_error(m->cell);
-               return;
+               goto out;
        }
 
        /*
@@ -869,7 +869,7 @@ static void process_prepared_mapping(struct new_mapping *m)
        if (r) {
                DMERR("dm_thin_insert_block() failed");
                cell_error(m->cell);
-               return;
+               goto out;
        }
 
        /*
@@ -884,6 +884,7 @@ static void process_prepared_mapping(struct new_mapping *m)
        } else
                cell_defer(tc, m->cell, m->data_block);
 
+out:
        list_del(&m->list);
        mempool_free(m, tc->pool->mapping_pool);
 }
index d8646d7..2887f22 100644
@@ -1144,8 +1144,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
-       /* Limit to 4TB as metadata cannot record more than that */
-       if (rdev->sectors >= (2ULL << 32))
+       /* Limit to 4TB as metadata cannot record more than that.
+        * (not needed for Linear and RAID0 as metadata doesn't
+        * record this size)
+        */
+       if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
                rdev->sectors = (2ULL << 32) - 2;
 
        if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1427,7 +1430,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
        /* Limit to 4TB as metadata cannot record more than that.
         * 4TB == 2^32 KB, or 2*2^32 sectors.
         */
-       if (num_sectors >= (2ULL << 32))
+       if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
                num_sectors = (2ULL << 32) - 2;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
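Both hunks above enforce the same limit, which follows from the 0.90 superblock layout noted in the comments: the size field is a 32-bit count of 1 KiB blocks, so the largest device it can describe is 2^32 KiB = 4 TiB, i.e. (2ULL << 32) 512-byte sectors, and the clamp is now skipped for linear and RAID0 (level < 1), where that size is not recorded. A quick standalone check of the constants:

#include <stdint.h>
#include <stdio.h>

/* Verifies the arithmetic behind the clamp: 2^32 KiB and (2ULL << 32)
 * 512-byte sectors are the same 4 TiB figure. */
int main(void)
{
	uint64_t max_kib     = 1ULL << 32;   /* 32-bit size field, 1 KiB units */
	uint64_t max_sectors = 2ULL << 32;   /* 2 * 2^32 sectors */

	printf("%llu == %llu\n",
	       (unsigned long long)(max_kib * 1024),
	       (unsigned long long)(max_sectors * 512));  /* both 4398046511104 */
	return 0;
}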
index 2d97bf0..62306e5 100644
@@ -2321,7 +2321,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                /* There is nowhere to write, so all non-sync
                 * drives must be failed - so we are finished
                 */
-               sector_t rv = max_sector - sector_nr;
+               sector_t rv;
+               if (min_bad > 0)
+                       max_sector = sector_nr + min_bad;
+               rv = max_sector - sector_nr;
                *skipped = 1;
                put_buf(r1_bio);
                return rv;
index fb68805..027550d 100644
@@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)
        return 0;
 }
 
-static const struct usb_device_id smsusb_id_table[] __devinitconst = {
+static const struct usb_device_id smsusb_id_table[] = {
        { USB_DEVICE(0x187f, 0x0010),
                .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
        { USB_DEVICE(0x187f, 0x0100),
index ed77c6d..5327061 100644
@@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
 
        spin_lock_init(&dev->hw_lock);
 
+       dev->hw_io = pnp_port_start(pnp_dev, 0);
+
        pnp_set_drvdata(pnp_dev, dev);
        dev->pnp_dev = pnp_dev;
 
@@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
 
        /* claim the resources */
        error = -EBUSY;
-       dev->hw_io = pnp_port_start(pnp_dev, 0);
        if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
                dev->hw_io = -1;
                dev->irq = -1;
index 89fec4c..731cd16 100644
@@ -685,7 +685,7 @@ static const struct sd_desc sd_desc = {
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x06e1, 0xa190)},
 /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
        {USB_DEVICE(0x0733, 0x0430)}, */
index 60107ee..4eec7b7 100644
@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
        u32 fatevent;
        int err;
 
-       add_interrupt_randomness(irq);
-
        err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
                                       event_regs, 3);
        if (err)
index 43a76c4..db662e2 100644
@@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
                }
                local_irq_enable();
                ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
-       } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
+       } while (gpio_get_value(pdata->gpio));
 }
 
 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
index f742745..b90f3e0 100644
@@ -18,6 +18,7 @@
 #include <linux/bcd.h>
 #include <linux/delay.h>
 #include <linux/mfd/core.h>
+#include <linux/random.h>
 
 #include <linux/mfd/wm831x/core.h>
 #include <linux/mfd/wm831x/otp.h>
@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
 
 int wm831x_otp_init(struct wm831x *wm831x)
 {
+       char uuid[WM831X_UNIQUE_ID_LEN];
        int ret;
 
        ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
                dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
                        ret);
 
+       ret = wm831x_unique_id_read(wm831x, uuid);
+       if (ret == 0)
+               add_device_randomness(uuid, sizeof(uuid));
+       else
+               dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
+
        return ret;
 }
 
index 17bbacb..cc2ae7e 100644
@@ -18,6 +18,8 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <asm/uv/uv_hub.h>
@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
                                         XPC_NOTIFY_MSG_SIZE_UV)
 #define XPC_NOTIFY_IRQ_NAME            "xpc_notify"
 
+static int xpc_mq_node = -1;
+
 static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
 static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
 #if defined CONFIG_X86_64
        mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
                        UV_AFFINITY_CPU);
-       if (mq->irq < 0) {
-               dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
-                       -mq->irq);
+       if (mq->irq < 0)
                return mq->irq;
-       }
 
        mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
 
@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
        mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
        nid = cpu_to_node(cpu);
-       page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-                               pg_order);
+       page = alloc_pages_exact_node(nid,
+                                     GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+                                     pg_order);
        if (page == NULL) {
                dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
                        "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
        .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
 };
 
+static int
+xpc_init_mq_node(int nid)
+{
+       int cpu;
+
+       get_online_cpus();
+
+       for_each_cpu(cpu, cpumask_of_node(nid)) {
+               xpc_activate_mq_uv =
+                       xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+                                            XPC_ACTIVATE_IRQ_NAME,
+                                            xpc_handle_activate_IRQ_uv);
+               if (!IS_ERR(xpc_activate_mq_uv))
+                       break;
+       }
+       if (IS_ERR(xpc_activate_mq_uv)) {
+               put_online_cpus();
+               return PTR_ERR(xpc_activate_mq_uv);
+       }
+
+       for_each_cpu(cpu, cpumask_of_node(nid)) {
+               xpc_notify_mq_uv =
+                       xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+                                            XPC_NOTIFY_IRQ_NAME,
+                                            xpc_handle_notify_IRQ_uv);
+               if (!IS_ERR(xpc_notify_mq_uv))
+                       break;
+       }
+       if (IS_ERR(xpc_notify_mq_uv)) {
+               xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+               put_online_cpus();
+               return PTR_ERR(xpc_notify_mq_uv);
+       }
+
+       put_online_cpus();
+       return 0;
+}
+
 int
 xpc_init_uv(void)
 {
+       int nid;
+       int ret = 0;
+
        xpc_arch_ops = xpc_arch_ops_uv;
 
        if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
                return -E2BIG;
        }
 
-       xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
-                                                 XPC_ACTIVATE_IRQ_NAME,
-                                                 xpc_handle_activate_IRQ_uv);
-       if (IS_ERR(xpc_activate_mq_uv))
-               return PTR_ERR(xpc_activate_mq_uv);
+       if (xpc_mq_node < 0)
+               for_each_online_node(nid) {
+                       ret = xpc_init_mq_node(nid);
 
-       xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
-                                               XPC_NOTIFY_IRQ_NAME,
-                                               xpc_handle_notify_IRQ_uv);
-       if (IS_ERR(xpc_notify_mq_uv)) {
-               xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
-               return PTR_ERR(xpc_notify_mq_uv);
-       }
+                       if (!ret)
+                               break;
+               }
+       else
+               ret = xpc_init_mq_node(xpc_mq_node);
 
-       return 0;
+       if (ret < 0)
+               dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+                       -ret);
+
+       return ret;
 }
 
 void
@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
        xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
        xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
index 34416d4..74793af 100644
@@ -1339,7 +1339,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                /* complete ongoing async transfer before issuing discard */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
-               if (req->cmd_flags & REQ_SECURE)
+               if (req->cmd_flags & REQ_SECURE &&
+                       !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
@@ -1614,6 +1615,8 @@ static int mmc_add_disk(struct mmc_blk_data *md)
        return ret;
 }
 
+#define CID_MANFID_SAMSUNG     0x15
+
 static const struct mmc_fixup blk_fixups[] =
 {
        MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
@@ -1644,6 +1647,28 @@ static const struct mmc_fixup blk_fixups[] =
        MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
                  MMC_QUIRK_LONG_READ_TIME),
 
+       /*
+        * On these Samsung MoviNAND parts, performing secure erase or
+        * secure trim can result in unrecoverable corruption due to a
+        * firmware bug.
+        */
+       MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
        END_FIXUP
 };
 
index 99b449d..f201bed 100644
@@ -279,11 +279,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
        writel(stat & MXS_MMC_IRQ_BITS,
               host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
 
+       spin_unlock(&host->lock);
+
        if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
                mmc_signal_sdio_irq(host->mmc);
 
-       spin_unlock(&host->lock);
-
        if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
                cmd->error = -ETIMEDOUT;
        else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
@@ -628,10 +628,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
                       host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
                writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
                       host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
-
-               if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
-                       mmc_signal_sdio_irq(host->mmc);
-
        } else {
                writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
                       host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
@@ -640,6 +636,10 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
        }
 
        spin_unlock_irqrestore(&host->lock, flags);
+
+       if (enable && readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+               mmc_signal_sdio_irq(host->mmc);
+
 }
 
 static const struct mmc_host_ops mxs_mmc_ops = {
index c3b08f1..62ca03a 100644
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
        int div = 1;
        u32 temp;
 
+       if (clock == 0)
+               goto out;
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
                | ESDHC_CLOCK_MASK);
        sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
 
-       if (clock == 0)
-               goto out;
-
        while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
                pre_div *= 2;
 
index 437bc19..568307c 100644
@@ -340,7 +340,7 @@ retry:
         * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
         */
        err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
-       kfree(new_aeb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
 
@@ -353,7 +353,7 @@ write_error:
                list_add(&new_aeb->u.list, &ai->erase);
                goto retry;
        }
-       kfree(new_aeb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 out_free:
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
index 23406e6..ae286a9 100644
@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
 
        sprintf(name, "cf%s", tty->name);
        dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+       if (!dev)
+               return -ENOMEM;
+
        ser = netdev_priv(dev);
        ser->tty = tty_kref_get(tty);
        ser->dev = dev;
index 330140e..9bcc39a 100644
 #define INSTRUCTION_LOAD_TXB(n)        (0x40 + 2 * (n))
 #define INSTRUCTION_READ_RXB(n)        (((n) == 0) ? 0x90 : 0x94)
 #define INSTRUCTION_RESET      0xC0
+#define RTS_TXB0               0x01
+#define RTS_TXB1               0x02
+#define RTS_TXB2               0x04
+#define INSTRUCTION_RTS(n)     (0x80 | ((n) & 0x07))
+
 
 /* MPC251x registers */
 #define CANSTAT              0x0e
@@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
 static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
                          int tx_buf_idx)
 {
+       struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
        u32 sid, eid, exide, rtr;
        u8 buf[SPI_TRANSFER_BUF_LEN];
 
@@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
        buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
        memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
        mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
-       mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
+
+       /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
+       priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
+       mcp251x_spi_trans(priv->spi, 1);
 }
 
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
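The transmit path above now starts transmission with the controller's one-byte request-to-send SPI command instead of writing TXREQ into TXBCTRL; INSTRUCTION_RTS() folds a per-buffer bitmask into the 0x80 opcode, matching the RTS_TXB0/1/2 definitions added earlier in the hunk. Worked values for the three transmit buffers:

#include <stdio.h>

/* Same encoding as the patch: bits 0..2 of the RTS opcode select
 * TXB0..TXB2, so tx_buf_idx 0, 1, 2 yield 0x81, 0x82, 0x84. */
#define INSTRUCTION_RTS(n)	(0x80 | ((n) & 0x07))

int main(void)
{
	int idx;

	for (idx = 0; idx < 3; idx++)
		printf("tx_buf_idx %d -> 0x%02x\n", idx, INSTRUCTION_RTS(1 << idx));
	return 0;
}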
index 965c723..721adfd 100644 (file)
@@ -5378,7 +5378,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        int k, last;
 
                        if (skb == NULL) {
-                               j++;
+                               j = NEXT_TX_BD(j);
                                continue;
                        }
 
@@ -5390,8 +5390,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        tx_buf->skb = NULL;
 
                        last = tx_buf->nr_frags;
-                       j++;
-                       for (k = 0; k < last; k++, j++) {
+                       j = NEXT_TX_BD(j);
+                       for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
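
The bnx2 hunks above replace the bare j++ with NEXT_TX_BD(j), so the walk over the TX ring skips the chain descriptor at the end of each ring page instead of treating it as a buffer slot. A standalone sketch of the idea; the ring size and chain position are invented for illustration:

#include <stdio.h>

#define RING_SIZE	8
#define CHAIN_IDX	(RING_SIZE - 1)		/* last slot chains to the next page */

static unsigned int ring_next(unsigned int i)
{
	i = (i + 1) % RING_SIZE;
	if (i == CHAIN_IDX)			/* never hand out the chain slot */
		i = (i + 1) % RING_SIZE;
	return i;
}

int main(void)
{
	unsigned int i = 5, n;

	for (n = 0; n < 6; n++, i = ring_next(i))
		printf("slot %u\n", i);		/* 5 6 0 1 2 3 - slot 7 is skipped */
	return 0;
}
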
index 83199fd..d0722a7 100644 (file)
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-               dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+               dev->features |= NETIF_F_HW_VLAN_RX;
        }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
index b1cd41b..021463b 100644 (file)
@@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if (adapter->rx_queue.queue_addr != NULL) {
-               if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
-                       dma_unmap_single(dev,
-                                       adapter->rx_queue.queue_dma,
-                                       adapter->rx_queue.queue_len,
-                                       DMA_BIDIRECTIONAL);
-                       adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-               }
-               kfree(adapter->rx_queue.queue_addr);
+               dma_free_coherent(dev, adapter->rx_queue.queue_len,
+                                 adapter->rx_queue.queue_addr,
+                                 adapter->rx_queue.queue_dma);
                adapter->rx_queue.queue_addr = NULL;
        }
 
@@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev)
                goto err_out;
        }
 
+       dev = &adapter->vdev->dev;
+
        adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
                                                rxq_entries;
-       adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
-                                               GFP_KERNEL);
+       adapter->rx_queue.queue_addr =
+           dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+                              &adapter->rx_queue.queue_dma, GFP_KERNEL);
 
        if (!adapter->rx_queue.queue_addr) {
                netdev_err(netdev, "unable to allocate rx queue pages\n");
@@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev)
                goto err_out;
        }
 
-       dev = &adapter->vdev->dev;
-
        adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
        adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = dma_map_single(dev,
-                       adapter->rx_queue.queue_addr,
-                       adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
        if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-           (dma_mapping_error(dev, adapter->filter_list_dma)) ||
-           (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
+           (dma_mapping_error(dev, adapter->filter_list_dma))) {
                netdev_err(netdev, "unable to map filter or buffer list "
                           "pages\n");
                rc = -ENOMEM;
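
The ibmveth hunks above switch the receive queue from kmalloc() plus dma_map_single() to dma_alloc_coherent(), so allocation and mapping happen in one step and the old dma_mapping_error()/dma_unmap_single() handling for the queue goes away. A minimal sketch of the coherent-allocation pattern; the helper names are illustrative only:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *ex_alloc_queue(struct device *dev, size_t len, dma_addr_t *dma)
{
	/* one call allocates the memory and provides the bus address */
	return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
}

static void ex_free_queue(struct device *dev, size_t len, void *addr,
			  dma_addr_t dma)
{
	/* no dma_unmap_single() needed for coherent allocations */
	dma_free_coherent(dev, len, addr, dma);
}
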
index de00805..0549261 100644 (file)
@@ -4743,12 +4743,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                e1000_setup_rctl(adapter);
                e1000_set_rx_mode(netdev);
 
+               rctl = er32(RCTL);
+
                /* turn on all-multi mode if wake on multicast is enabled */
-               if (wufc & E1000_WUFC_MC) {
-                       rctl = er32(RCTL);
+               if (wufc & E1000_WUFC_MC)
                        rctl |= E1000_RCTL_MPE;
-                       ew32(RCTL, rctl);
-               }
+
+               /* enable receives in the hardware */
+               ew32(RCTL, rctl | E1000_RCTL_EN);
 
                if (hw->mac_type >= e1000_82540) {
                        ctrl = er32(CTRL);
index 3072d35..4f4d52a 100644 (file)
@@ -1600,10 +1600,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
                         * auto-negotiation in the TXCW register and disable
                         * forced link in the Device Control register in an
                         * attempt to auto-negotiate with our link partner.
-                        * If the partner code word is null, stop forcing
-                        * and restart auto negotiation.
                         */
-                       if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
+                       if (rxcw & E1000_RXCW_C) {
                                /* Enable autoneg, and unforce link up */
                                ew32(TXCW, mac->txcw);
                                ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
index f478a22..8e362bb 100644 (file)
@@ -302,6 +302,7 @@ struct e1000_adapter {
         */
        struct e1000_ring *tx_ring /* One per active queue */
                                                ____cacheline_aligned_in_smp;
+       u32 tx_fifo_limit;
 
        struct napi_struct napi;
 
index 64d3f98..0182649 100644 (file)
@@ -3385,6 +3385,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
                break;
        }
 
+       /*
+        * Alignment of Tx data is on an arbitrary byte boundary with the
+        * maximum size per Tx descriptor limited only to the transmit
+        * allocation of the packet buffer minus 96 bytes with an upper
+        * limit of 24KB due to receive synchronization limitations.
+        */
+       adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+                                      24 << 10);
+
        /*
         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
         * fit in receive buffer and early-receive not supported.
@@ -4647,13 +4656,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
        return 1;
 }
 
-#define E1000_MAX_PER_TXD      8192
-#define E1000_MAX_TXD_PWR      12
-
 static int e1000_tx_map(struct e1000_adapter *adapter,
                        struct sk_buff *skb, unsigned int first,
-                       unsigned int max_per_txd, unsigned int nr_frags,
-                       unsigned int mss)
+                       unsigned int max_per_txd, unsigned int nr_frags)
 {
        struct e1000_ring *tx_ring = adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
@@ -4882,20 +4887,19 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
+       BUG_ON(size > adapter->tx_ring->count);
+
        if (e1000_desc_unused(adapter->tx_ring) >= size)
                return 0;
        return __e1000_maybe_stop_tx(netdev, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int first;
-       unsigned int max_per_txd = E1000_MAX_PER_TXD;
-       unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
        unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
@@ -4915,18 +4919,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        }
 
        mss = skb_shinfo(skb)->gso_size;
-       /*
-        * The controller does a simple calculation to
-        * make sure there is enough room in the FIFO before
-        * initiating the DMA for each buffer.  The calc is:
-        * 4 = ceil(buffer len/mss).  To make sure we don't
-        * overrun the FIFO, adjust the max buffer len if mss
-        * drops.
-        */
        if (mss) {
                u8 hdr_len;
-               max_per_txd = min(mss << 2, max_per_txd);
-               max_txd_pwr = fls(max_per_txd) - 1;
 
                /*
                 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -4956,12 +4950,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                count++;
        count++;
 
-       count += TXD_USE_COUNT(len, max_txd_pwr);
+       count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (f = 0; f < nr_frags; f++)
-               count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-                                      max_txd_pwr);
+               count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+                                     adapter->tx_fifo_limit);
 
        if (adapter->hw.mac.tx_pkt_filtering)
                e1000_transfer_dhcp_info(adapter, skb);
@@ -5000,12 +4994,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
        /* if count is 0 then mapping error has occurred */
-       count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
+       count = e1000_tx_map(adapter, skb, first, adapter->tx_fifo_limit,
+                            nr_frags);
        if (count) {
                e1000_tx_queue(adapter, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
-               e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
-
+               e1000_maybe_stop_tx(netdev,
+                                   (MAX_SKB_FRAGS *
+                                    DIV_ROUND_UP(PAGE_SIZE,
+                                                 adapter->tx_fifo_limit) + 2));
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
@@ -6150,8 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        adapter->hw.phy.autoneg_advertised = 0x2f;
 
        /* ring size defaults */
-       adapter->rx_ring->count = 256;
-       adapter->tx_ring->count = 256;
+       adapter->rx_ring->count = E1000_DEFAULT_RXD;
+       adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
        /*
         * Initial Wake on LAN setting - If APM wake is enabled in
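
The e1000e changes above drop the old E1000_MAX_PER_TXD/TXD_USE_COUNT power-of-two estimate and instead size descriptor usage with DIV_ROUND_UP() against the new adapter->tx_fifo_limit derived from the PBA register. A standalone worked example of that count; the 24 KB limit below is just a sample value, not read from hardware:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tx_fifo_limit = 24 * 1024;	/* hypothetical PBA-derived cap */
	unsigned int head_len = 1514, frag_len = 65536;
	unsigned int count = 0;

	count += DIV_ROUND_UP(head_len, tx_fifo_limit);	/* 1 descriptor */
	count += DIV_ROUND_UP(frag_len, tx_fifo_limit);	/* 3 descriptors */
	printf("descriptors needed: %u\n", count);	/* prints 4 */
	return 0;
}
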
index d5731f1..a6611f1 100644 (file)
@@ -1383,6 +1383,11 @@ static int efx_probe_all(struct efx_nic *efx)
                goto fail2;
        }
 
+       BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+       if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+               rc = -EINVAL;
+               goto fail3;
+       }
        efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
        rc = efx_probe_channels(efx);
        if (rc)
@@ -1973,6 +1978,7 @@ static int efx_register_netdev(struct efx_nic *efx)
        net_dev->irq = efx->pci_dev->irq;
        net_dev->netdev_ops = &efx_netdev_ops;
        SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+       net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
        /* Clear MAC statistics */
        efx->mac_op->update_stats(efx);
index 4764793..1355245 100644 (file)
@@ -34,6 +34,7 @@ extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -56,10 +57,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_MAX_EVQ_SIZE 16384UL
 #define EFX_MIN_EVQ_SIZE 512UL
 
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS       100
+
+/* The smallest [rt]xq_entries that the driver supports.  RX minimum
+ * is a bit arbitrary.  For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT                128U
+#define EFX_TXQ_MIN_ENT(efx)   (2 * efx_tx_max_skb_descs(efx))
 
 /* Filters */
 extern int efx_probe_filters(struct efx_nic *efx);
index f3cd96d..90158c9 100644 (file)
@@ -690,21 +690,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
                                     struct ethtool_ringparam *ring)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
+       u32 txq_entries;
 
        if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
            ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
            ring->tx_pending > EFX_MAX_DMAQ_SIZE)
                return -EINVAL;
 
-       if (ring->rx_pending < EFX_MIN_RING_SIZE ||
-           ring->tx_pending < EFX_MIN_RING_SIZE) {
+       if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
                netif_err(efx, drv, efx->net_dev,
-                         "TX and RX queues cannot be smaller than %ld\n",
-                         EFX_MIN_RING_SIZE);
+                         "RX queues cannot be smaller than %u\n",
+                         EFX_RXQ_MIN_ENT);
                return -EINVAL;
        }
 
-       return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+       txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+       if (txq_entries != ring->tx_pending)
+               netif_warn(efx, drv, efx->net_dev,
+                          "increasing TX queue size to minimum of %u\n",
+                          txq_entries);
+
+       return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
 }
 
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
index 5fb24d3..66ece48 100644 (file)
@@ -65,6 +65,9 @@ enum {
 #define FALCON_GMAC_LOOPBACKS                  \
        (1 << LOOPBACK_GMAC)
 
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE  4096
+
 /**
  * struct falcon_board_type - board operations and type information
  * @id: Board type id, as found in NVRAM
index df88c54..807d515 100644 (file)
@@ -115,6 +115,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
        return len;
 }
 
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+       /* Header and payload descriptor for each output segment, plus
+        * one for every input fragment boundary within a segment
+        */
+       unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+       /* Possibly one more per segment for the alignment workaround */
+       if (EFX_WORKAROUND_5391(efx))
+               max_descs += EFX_TSO_MAX_SEGS;
+
+       /* Possibly more for PCIe page boundaries within input fragments */
+       if (PAGE_SIZE > EFX_PAGE_SIZE)
+               max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+                                  DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+       return max_descs;
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
index e888202..01b104e 100644 (file)
@@ -652,7 +652,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
                                                          flags);
                                        dev_put(nt->np.dev);
                                        nt->np.dev = NULL;
-                                       netconsole_target_put(nt);
                                }
                                nt->enabled = 0;
                                stopped = true;
index f8a6853..ad6a9d9 100644 (file)
@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (sk_pppox(po)->sk_state & PPPOX_DEAD)
                goto tx_error;
 
-       rt = ip_route_output_ports(&init_net, &fl4, NULL,
+       rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
                                   opt->dst_addr.sin_addr.s_addr,
                                   opt->src_addr.sin_addr.s_addr,
                                   0, 0, IPPROTO_GRE,
@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
        po->chan.private = sk;
        po->chan.ops = &pptp_chan_ops;
 
-       rt = ip_route_output_ports(&init_net, &fl4, sk,
+       rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
                                   opt->dst_addr.sin_addr.s_addr,
                                   opt->src_addr.sin_addr.s_addr,
                                   0, 0,
index 7bea9c6..a12c9bf 100644 (file)
@@ -1243,10 +1243,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        int vnet_hdr_sz;
        int ret;
 
-       if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
+       if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
-
+       } else {
+               memset(&ifr, 0, sizeof(ifr));
+       }
        if (cmd == TUNGETFEATURES) {
                /* Currently this just means: "what IFF flags are valid?".
                 * This is needed because we never checked for invalid flags on
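
The tun hunk above adds an else branch that zeroes ifr for ioctls that do not copy a struct in from user space, so later code never reads uninitialized stack contents. A standalone sketch of the same precaution; the struct and values are made up:

#include <stdio.h>
#include <string.h>

struct ex_ifreq {				/* hypothetical request struct */
	char name[16];
	int flags;
};

static void ex_handle(int have_arg, const struct ex_ifreq *arg)
{
	struct ex_ifreq ifr;

	if (have_arg)
		memcpy(&ifr, arg, sizeof(ifr));
	else
		memset(&ifr, 0, sizeof(ifr));	/* never read uninitialized stack */

	printf("flags=%d\n", ifr.flags);
}

int main(void)
{
	struct ex_ifreq req = { "tun0", 1 };

	ex_handle(1, &req);			/* prints flags=1 */
	ex_handle(0, NULL);			/* prints flags=0, not garbage */
	return 0;
}
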
index 582ca2d..c4c6a73 100644 (file)
@@ -1308,7 +1308,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
         int retv;
         int length = 0; /* shut up GCC */
 
-        urb = usb_alloc_urb(0, GFP_NOIO);
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
         if (!urb)
                 return -ENOMEM;
 
index 7f97164..2b8e957 100644 (file)
@@ -674,6 +674,7 @@ int ath9k_hw_init(struct ath_hw *ah)
        case AR9300_DEVID_AR9340:
        case AR9300_DEVID_AR9580:
        case AR9300_DEVID_AR9462:
+       case AR9485_DEVID_AR1111:
                break;
        default:
                if (common->bus_ops->ath_bus_type == ATH_USB)
index 1bd8edf..a5c4ba8 100644 (file)
@@ -48,6 +48,7 @@
 #define AR9300_DEVID_AR9580    0x0033
 #define AR9300_DEVID_AR9462    0x0034
 #define AR9300_DEVID_AR9330    0x0035
+#define AR9485_DEVID_AR1111    0x0037
 
 #define AR5416_AR9100_DEVID    0x000b
 
index 2dcdf63..1883d39 100644 (file)
@@ -35,6 +35,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E  AR9485 */
        { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
        { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
+       { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
        { 0 }
 };
 
index e6d791c..b4cbc82 100644 (file)
@@ -1782,7 +1782,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        int retval;
-       bool decrypt_error = false;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
@@ -1804,6 +1803,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        tsf_lower = tsf & 0xffffffff;
 
        do {
+               bool decrypt_error = false;
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;
index 9ba2c1b..3395025 100644 (file)
@@ -708,11 +708,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
  */
 static bool rs_use_green(struct ieee80211_sta *sta)
 {
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_rxon_context *ctx = sta_priv->ctx;
-
-       return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
-               !(ctx->ht.non_gf_sta_present);
+       /*
+        * There's a bug somewhere in this code that causes the
+        * scaling to get stuck because GF+SGI can't be combined
+        * in SISO rates. Until we find that bug, disable GF, it
+        * has only limited benefit and we still interoperate with
+        * GF APs since we can always receive GF transmissions.
+        */
+       return false;
 }
 
 /**
index a1670e3..93e6179 100644 (file)
@@ -232,6 +232,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
        struct iwl_priv *priv = file->private_data;
        size_t bufsz;
 
+       if (!iwl_is_ready_rf(priv->shrd))
+               return -EAGAIN;
+
        /* default is to dump the entire data segment */
        if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
                priv->dbgfs_sram_offset = 0x800000;
index 5c29281..8533ba2 100644 (file)
@@ -303,7 +303,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 ******************************************************/
 int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
                            char **buf, bool display);
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
index 1daf01e..17fb25d 100644 (file)
@@ -678,7 +678,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 
        iwl_dump_nic_error_log(trans);
        iwl_dump_csr(trans);
-       iwl_dump_fh(trans, NULL, false);
+       iwl_dump_fh(trans, NULL);
        iwl_dump_nic_event_log(trans, false, NULL, false);
 #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
index 4661a64..75da4bc 100644 (file)
@@ -1541,13 +1541,9 @@ static const char *get_fh_string(int cmd)
        }
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
        int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       int pos = 0;
-       size_t bufsz = 0;
-#endif
        static const u32 fh_tbl[] = {
                FH_RSCSR_CHNL0_STTS_WPTR_REG,
                FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1559,29 +1555,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
                FH_TSSR_TX_STATUS_REG,
                FH_TSSR_TX_ERROR_REG
        };
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (display) {
-               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (buf) {
+               int pos = 0;
+               size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
                *buf = kmalloc(bufsz, GFP_KERNEL);
                if (!*buf)
                        return -ENOMEM;
+
                pos += scnprintf(*buf + pos, bufsz - pos,
                                "FH register values:\n");
-               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
                        pos += scnprintf(*buf + pos, bufsz - pos,
                                "  %34s: 0X%08x\n",
                                get_fh_string(fh_tbl[i]),
                                iwl_read_direct32(bus(trans), fh_tbl[i]));
-               }
+
                return pos;
        }
 #endif
+
        IWL_ERR(trans, "FH register values:\n");
-       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
                IWL_ERR(trans, "  %34s: 0X%08x\n",
                        get_fh_string(fh_tbl[i]),
                        iwl_read_direct32(bus(trans), fh_tbl[i]));
-       }
+
        return 0;
 }
 
@@ -1929,11 +1931,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                         size_t count, loff_t *ppos)
 {
        struct iwl_trans *trans = file->private_data;
-       char *buf;
+       char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;
 
-       ret = pos = iwl_dump_fh(trans, &buf, true);
+       ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
index 9b60968..8a009bc 100644 (file)
@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
  * whenever you add a new device.
  */
 
-static struct usb_device_id p54u_table[] __devinitdata = {
+static struct usb_device_id p54u_table[] = {
        /* Version 1 devices (pci chip + net2280) */
        {USB_DEVICE(0x0411, 0x0050)},   /* Buffalo WLI2-USB2-G54 */
        {USB_DEVICE(0x045e, 0x00c2)},   /* Microsoft MN-710 */
index 3a6b402..0ea85f4 100644 (file)
@@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -1623,6 +1624,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
+       rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
+       rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
+
        /*
         * Initialize hw specifications.
         */
index d3a4a68..7564ae9 100644 (file)
 #define GPIOCSR_BIT5                   FIELD32(0x00000020)
 #define GPIOCSR_BIT6                   FIELD32(0x00000040)
 #define GPIOCSR_BIT7                   FIELD32(0x00000080)
+#define GPIOCSR_BIT8                   FIELD32(0x00000100)
 
 /*
  * BBPPCSR: BBP Pin control register.
index dcc0e1f..aa10c48 100644 (file)
@@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -1941,6 +1942,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
+       rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
+       rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
+
        /*
         * Initialize hw specifications.
         */
index 53c5f87..22ed6df 100644 (file)
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u16 reg;
 
        rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
-       return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
+       return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u16 reg;
 
        /*
         * Allocate eeprom data.
@@ -1780,6 +1781,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+       rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
+       rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
+
        /*
         * Initialize hw specifications.
         */
index b493306..196bd51 100644 (file)
  * MAC_CSR19: GPIO control register.
  */
 #define MAC_CSR19                      0x0426
-#define MAC_CSR19_BIT0                 FIELD32(0x0001)
-#define MAC_CSR19_BIT1                 FIELD32(0x0002)
-#define MAC_CSR19_BIT2                 FIELD32(0x0004)
-#define MAC_CSR19_BIT3                 FIELD32(0x0008)
-#define MAC_CSR19_BIT4                 FIELD32(0x0010)
-#define MAC_CSR19_BIT5                 FIELD32(0x0020)
-#define MAC_CSR19_BIT6                 FIELD32(0x0040)
-#define MAC_CSR19_BIT7                 FIELD32(0x0080)
+#define MAC_CSR19_BIT0                 FIELD16(0x0001)
+#define MAC_CSR19_BIT1                 FIELD16(0x0002)
+#define MAC_CSR19_BIT2                 FIELD16(0x0004)
+#define MAC_CSR19_BIT3                 FIELD16(0x0008)
+#define MAC_CSR19_BIT4                 FIELD16(0x0010)
+#define MAC_CSR19_BIT5                 FIELD16(0x0020)
+#define MAC_CSR19_BIT6                 FIELD16(0x0040)
+#define MAC_CSR19_BIT7                 FIELD16(0x0080)
+#define MAC_CSR19_BIT8                 FIELD16(0x0100)
 
 /*
  * MAC_CSR20: LED control register.
index 837b460..518157d 100644 (file)
@@ -935,6 +935,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -947,6 +948,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
+       rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
        /*
         * Initialize hw specifications.
         */
index bdf960b..b66a61b 100644 (file)
@@ -621,8 +621,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
        skb_pull(entry->skb, RXINFO_DESC_SIZE);
 
        /*
-        * FIXME: we need to check for rx_pkt_len validity
+        * Check for rx_pkt_len validity. Return if invalid, leaving
+        * rxdesc->size zeroed out by the upper level.
         */
+       if (unlikely(rx_pkt_len == 0 ||
+                       rx_pkt_len > entry->queue->data_size)) {
+               ERROR(entry->queue->rt2x00dev,
+                       "Bad frame size %d, forcing to 0\n", rx_pkt_len);
+               return;
+       }
+
        rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
 
        /*
@@ -690,6 +698,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -702,6 +711,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
+       rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
        /*
         * Initialize hw specifications.
         */
@@ -925,6 +942,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0411, 0x015d) },
        { USB_DEVICE(0x0411, 0x016f) },
        { USB_DEVICE(0x0411, 0x01a2) },
+       { USB_DEVICE(0x0411, 0x01ee) },
        /* Corega */
        { USB_DEVICE(0x07aa, 0x002f) },
        { USB_DEVICE(0x07aa, 0x003c) },
@@ -1110,6 +1128,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x1690, 0x0744) },
        { USB_DEVICE(0x1690, 0x0761) },
        { USB_DEVICE(0x1690, 0x0764) },
+       /* ASUS */
+       { USB_DEVICE(0x0b05, 0x179d) },
        /* Cisco */
        { USB_DEVICE(0x167b, 0x4001) },
        /* EnGenius */
@@ -1162,7 +1182,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0b05, 0x1760) },
        { USB_DEVICE(0x0b05, 0x1761) },
        { USB_DEVICE(0x0b05, 0x1790) },
-       { USB_DEVICE(0x0b05, 0x179d) },
        /* AzureWave */
        { USB_DEVICE(0x13d3, 0x3262) },
        { USB_DEVICE(0x13d3, 0x3284) },
index 21b529b..f099b30 100644 (file)
@@ -624,7 +624,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
         */
        if (unlikely(rxdesc.size == 0 ||
                     rxdesc.size > entry->queue->data_size)) {
-               WARNING(rt2x00dev, "Wrong frame size %d max %d.\n",
+               ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
                        rxdesc.size, entry->queue->data_size);
                dev_kfree_skb(entry->skb);
                goto renew_skb;
index bf55b4a..3e058e5 100644 (file)
@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
 
 static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
 {
-       struct ieee80211_conf conf = { .flags = 0 };
-       struct rt2x00lib_conf libconf = { .conf = &conf };
+       struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
 
        rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
@@ -2833,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Disable power saving.
@@ -2850,6 +2850,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
+       rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
+       rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
+
        /*
         * Initialize hw specifications.
         */
index e3cd6db..8f3da5a 100644 (file)
@@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry {
 #define MAC_CSR13_BIT10                        FIELD32(0x00000400)
 #define MAC_CSR13_BIT11                        FIELD32(0x00000800)
 #define MAC_CSR13_BIT12                        FIELD32(0x00001000)
+#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
 
 /*
  * MAC_CSR14: LED control register.
index cfb19db..2ad468d 100644 (file)
@@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -2189,6 +2190,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
+       rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
+       rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
+
        /*
         * Initialize hw specifications.
         */
index 9f6b470..df1cc11 100644 (file)
@@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry {
 #define MAC_CSR13_BIT10                        FIELD32(0x00000400)
 #define MAC_CSR13_BIT11                        FIELD32(0x00000800)
 #define MAC_CSR13_BIT12                        FIELD32(0x00001000)
+#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
+#define MAC_CSR13_BIT14                        FIELD32(0x00004000)
+#define MAC_CSR13_BIT15                        FIELD32(0x00008000)
 
 /*
  * MAC_CSR14: LED control register.
index 4a78f9e..4e98c39 100644 (file)
@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
 MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
 MODULE_LICENSE("GPL");
 
-static struct usb_device_id rtl8187_table[] __devinitdata = {
+static struct usb_device_id rtl8187_table[] = {
        /* Asus */
        {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
        /* Belkin */
index db34db6..a49e848 100644 (file)
@@ -120,15 +120,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
        u8 request;
        u16 wvalue;
        u16 index;
-       __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+       __le32 *data;
+       unsigned long flags;
 
+       spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+       if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+               rtlpriv->usb_data_index = 0;
+       data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+       spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
        request = REALTEK_USB_VENQT_CMD_REQ;
        index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
 
        wvalue = (u16)addr;
        _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
-       if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
-               rtlpriv->usb_data_index = 0;
        return le32_to_cpu(*data);
 }
 
@@ -909,6 +913,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
                                    GFP_KERNEL);
        if (!rtlpriv->usb_data)
                return -ENOMEM;
+
+       /* this spin lock must be initialized early */
+       spin_lock_init(&rtlpriv->locks.usb_lock);
+
        rtlpriv->usb_data_index = 0;
        SET_IEEE80211_DEV(hw, &intf->dev);
        udev = interface_to_usbdev(intf);
index b1e9deb..deb87e9 100644 (file)
@@ -1550,6 +1550,7 @@ struct rtl_locks {
        spinlock_t rf_lock;
        spinlock_t lps_lock;
        spinlock_t waitq_lock;
+       spinlock_t usb_lock;
 
        /*Dual mac*/
        spinlock_t cck_and_rw_pagea_lock;
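
The rtlwifi hunks above move the usb_data_index increment under the new locks.usb_lock (initialized in rtl_usb_probe() before first use), so concurrent readers each claim a private slot in rtlpriv->usb_data[]. A sketch of claiming a ring slot under a spinlock, with invented names:

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct ex_priv {
	spinlock_t idx_lock;			/* spin_lock_init() at probe time */
	unsigned int idx;
	__le32 slots[8];
};

static __le32 *ex_claim_slot(struct ex_priv *p)
{
	unsigned long flags;
	__le32 *slot;

	spin_lock_irqsave(&p->idx_lock, flags);
	if (++p->idx >= ARRAY_SIZE(p->slots))
		p->idx = 0;
	slot = &p->slots[p->idx];		/* advance and pick atomically */
	spin_unlock_irqrestore(&p->idx_lock, flags);

	return slot;
}
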
index d024f83..68af94c 100644 (file)
@@ -952,6 +952,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
        if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
                pci_prepare_to_sleep(pci_dev);
 
+       /*
+        * The reason for doing this here is the same as for the analogous code
+        * in pci_pm_suspend_noirq().
+        */
+       if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+               pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
        return 0;
 }
 
index b0859d4..ec5b17f 100644 (file)
@@ -86,6 +86,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x8A, { KEY_PROG1 } },
        { KE_KEY, 0x95, { KEY_MEDIA } },
        { KE_KEY, 0x99, { KEY_PHONE } },
+       { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+       { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+       { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+       { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
        { KE_KEY, 0xb5, { KEY_CALC } },
        { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
        { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
index d1049ee..26fba2d 100644 (file)
@@ -1431,14 +1431,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
         */
        if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
                asus->dsts_id = ASUS_WMI_METHODID_DSTS;
-       else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
+       else
                asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
 
-       if (!asus->dsts_id) {
-               pr_err("Can't find DSTS");
-               return -ENODEV;
-       }
-
        /* CWAP allow to define the behavior of the Fn+F2 key,
         * this method doesn't seems to be present on Eee PCs */
        if (asus->driver->wapf >= 0)
index 30d2072..33471e1 100644 (file)
@@ -439,6 +439,9 @@ static void tsi721_db_dpc(struct work_struct *work)
                                " info %4.4x\n", DBELL_SID(idb.bytes),
                                DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
                }
+
+               wr_ptr = ioread32(priv->regs +
+                                 TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
        }
 
        iowrite32(rd_ptr & (IDB_QSIZE - 1),
@@ -449,6 +452,10 @@ static void tsi721_db_dpc(struct work_struct *work)
        regval |= TSI721_SR_CHINT_IDBQRCV;
        iowrite32(regval,
                priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+       wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+       if (wr_ptr != rd_ptr)
+               schedule_work(&priv->idb_work);
 }
 
 /**
@@ -2155,7 +2162,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct tsi721_device *priv;
-       int i, cap;
+       int cap;
        int err;
        u32 regval;
 
@@ -2175,12 +2182,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
        priv->pdev = pdev;
 
 #ifdef DEBUG
+       {
+       int i;
        for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
                dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
                        i, (unsigned long long)pci_resource_start(pdev, i),
                        (unsigned long)pci_resource_len(pdev, i),
                        pci_resource_flags(pdev, i));
        }
+       }
 #endif
        /*
         * Verify BAR configuration
index 971bc8e..11bcb20 100644 (file)
@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
        tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
        tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
        if (!pdata->rtc_24h) {
-               tm->tm_hour %= 12;
-               if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
+               if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
+                       tm->tm_hour -= 20;
+                       tm->tm_hour %= 12;
                        tm->tm_hour += 12;
+               } else
+                       tm->tm_hour %= 12;
        }
        tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
        tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
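
The rs5c348 hunk above fixes the 12-hour decode: the masked hours value still carries the PM flag, so bcd2bin() on a PM hour comes back 20 too high, and the old unconditional % 12 before the PM test produced wrong hours. A standalone illustration of the corrected conversion; the mask and bit values mirror the usual register layout but are assumptions here:

#include <stdio.h>

static int bcd2bin(unsigned char v) { return (v & 0x0f) + (v >> 4) * 10; }

static int to_24h(unsigned char reg)		/* reg = masked hours register */
{
	int hour = bcd2bin(reg & 0x3f);

	if (reg & 0x20) {			/* PM bit set */
		hour -= 20;			/* strip the decoded PM bit */
		hour %= 12;
		hour += 12;
	} else {
		hour %= 12;
	}
	return hour;
}

int main(void)
{
	printf("%d %d %d\n", to_24h(0x31), to_24h(0x32), to_24h(0x12));
	/* prints "23 12 0": 11 PM, noon, midnight */
	return 0;
}
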
index bdc909b..f3c2110 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/mfd/wm831x/core.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-
+#include <linux/random.h>
 
 /*
  * R16416 (0x4020) - RTC Write Counter
@@ -96,6 +96,26 @@ struct wm831x_rtc {
        unsigned int alarm_enabled:1;
 };
 
+static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
+{
+       int ret;
+       u16 reg;
+
+       /*
+        * The write counter contains a pseudo-random number which is
+        * regenerated every time we set the RTC so it should be a
+        * useful per-system source of entropy.
+        */
+       ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
+       if (ret >= 0) {
+               reg = ret;
+               add_device_randomness(&reg, sizeof(reg));
+       } else {
+               dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
+                        ret);
+       }
+}
+
 /*
  * Read current time and date in RTC
  */
@@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
                        alm_irq, ret);
        }
 
+       wm831x_rtc_add_randomness(wm831x);
+
        return 0;
 
 err:
index 29a994f..7c471eb 100644 (file)
@@ -4125,7 +4125,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        spin_lock_init(&instance->cmd_pool_lock);
        spin_lock_init(&instance->hba_lock);
        spin_lock_init(&instance->completion_lock);
-       spin_lock_init(&poll_aen_lock);
 
        mutex_init(&instance->aen_mutex);
        mutex_init(&instance->reset_mutex);
@@ -5520,6 +5519,8 @@ static int __init megasas_init(void)
        printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
               MEGASAS_EXT_VERSION);
 
+       spin_lock_init(&poll_aen_lock);
+
        support_poll_for_event = 2;
        support_device_change = 1;
 
index e903077..98cb5e6 100644 (file)
@@ -2353,10 +2353,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        }
 
        /* command line tunables  for max controller queue depth */
-       if (max_queue_depth != -1)
-               max_request_credit = (max_queue_depth < facts->RequestCredit)
-                   ? max_queue_depth : facts->RequestCredit;
-       else
+       if (max_queue_depth != -1 && max_queue_depth != 0) {
+               max_request_credit = min_t(u16, max_queue_depth +
+                       ioc->hi_priority_depth + ioc->internal_depth,
+                       facts->RequestCredit);
+               if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+                       max_request_credit =  MAX_HBA_QUEUE_DEPTH;
+       } else
                max_request_credit = min_t(u16, facts->RequestCredit,
                    MAX_HBA_QUEUE_DEPTH);
 
@@ -2431,7 +2434,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        /* set the scsi host can_queue depth
         * with some internal commands that could be outstanding
         */
-       ioc->shost->can_queue = ioc->scsiio_depth - (2);
+       ioc->shost->can_queue = ioc->scsiio_depth;
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
            "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
 
index 456b131..c83571e 100644 (file)
@@ -41,6 +41,8 @@
 
 #include <trace/events/scsi.h>
 
+static void scsi_eh_done(struct scsi_cmnd *scmd);
+
 #define SENSE_TIMEOUT          (10*HZ)
 
 /*
@@ -240,6 +242,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
        if (! scsi_command_normalize_sense(scmd, &sshdr))
                return FAILED;  /* no valid sense data */
 
+       if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+               /*
+                * nasty: for mid-layer issued TURs, we need to return the
+                * actual sense data without any recovery attempt.  For eh
+                * issued ones, we need to try to recover and interpret
+                */
+               return SUCCESS;
+
        if (scsi_sense_is_deferred(&sshdr))
                return NEEDS_RETRY;
 
index a48b59c..c6c80c9 100644 (file)
@@ -776,6 +776,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        sdev->model = (char *) (sdev->inquiry + 16);
        sdev->rev = (char *) (sdev->inquiry + 32);
 
+       if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
+               /*
+                * sata emulation layer device.  This is a hack to work around
+                * the SATL power management specifications which state that
+                * when the SATL detects the device has gone into standby
+                * mode, it shall respond with NOT READY.
+                */
+               sdev->allow_restart = 1;
+       }
+
        if (*bflags & BLIST_ISROM) {
                sdev->type = TYPE_ROM;
                sdev->removable = 1;
index 3141dc8..a48fe88 100644 (file)
@@ -385,7 +385,7 @@ static const struct das08_board_struct das08_boards[] = {
         .ai = das08_ai_rinsn,
         .ai_nbits = 16,
         .ai_pg = das08_pg_none,
-        .ai_encoding = das08_encode12,
+        .ai_encoding = das08_encode16,
         .ao = das08jr_ao_winsn,
         .ao_nbits = 16,
         .di = das08jr_di_rbits,
@@ -655,7 +655,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev,
        int chan;
 
        lsb = data[0] & 0xff;
-       msb = (data[0] >> 8) & 0xf;
+       msb = (data[0] >> 8) & 0xff;
 
        chan = CR_CHAN(insn->chanspec);
 
index 6903d39..90e9e32 100644 (file)
@@ -53,6 +53,7 @@
 #include <linux/io.h>
 #include <asm/irq.h>
 #include <linux/fcntl.h>
+#include <linux/platform_device.h>
 #ifdef LIRC_ON_SA1100
 #include <asm/hardware.h>
 #ifdef CONFIG_SA1100_COLLIE
@@ -488,9 +489,11 @@ static struct lirc_driver driver = {
        .owner          = THIS_MODULE,
 };
 
+static struct platform_device *lirc_sir_dev;
 
 static int init_chrdev(void)
 {
+       driver.dev = &lirc_sir_dev->dev;
        driver.minor = lirc_register_driver(&driver);
        if (driver.minor < 0) {
                printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
@@ -1216,20 +1219,71 @@ static int init_lirc_sir(void)
        return 0;
 }
 
+static int __devinit lirc_sir_probe(struct platform_device *dev)
+{
+       return 0;
+}
+
+static int __devexit lirc_sir_remove(struct platform_device *dev)
+{
+       return 0;
+}
+
+static struct platform_driver lirc_sir_driver = {
+       .probe          = lirc_sir_probe,
+       .remove         = __devexit_p(lirc_sir_remove),
+       .driver         = {
+               .name   = "lirc_sir",
+               .owner  = THIS_MODULE,
+       },
+};
 
 static int __init lirc_sir_init(void)
 {
        int retval;
 
+       retval = platform_driver_register(&lirc_sir_driver);
+       if (retval) {
+               printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
+                      "failed!\n");
+               return -ENODEV;
+       }
+
+       lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
+       if (!lirc_sir_dev) {
+               printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
+                      "failed!\n");
+               retval = -ENOMEM;
+               goto pdev_alloc_fail;
+       }
+
+       retval = platform_device_add(lirc_sir_dev);
+       if (retval) {
+               printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
+                      "failed!\n");
+               retval = -ENODEV;
+               goto pdev_add_fail;
+       }
+
        retval = init_chrdev();
        if (retval < 0)
-               return retval;
+               goto fail;
+
        retval = init_lirc_sir();
        if (retval) {
                drop_chrdev();
-               return retval;
+               goto fail;
        }
+
        return 0;
+
+fail:
+       platform_device_del(lirc_sir_dev);
+pdev_add_fail:
+       platform_device_put(lirc_sir_dev);
+pdev_alloc_fail:
+       platform_driver_unregister(&lirc_sir_driver);
+       return retval;
 }
 
 static void __exit lirc_sir_exit(void)
@@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void)
        drop_hardware();
        drop_chrdev();
        drop_port();
+       platform_device_unregister(lirc_sir_dev);
+       platform_driver_unregister(&lirc_sir_driver);
        printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
 }
 
index 0e26d5f..495ee12 100644 (file)
@@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
        if (skb == NULL)
                goto _recv_indicatepkt_drop;
        skb->data = precv_frame->u.hdr.rx_data;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-       skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail -
-                    precv_frame->u.hdr.rx_head);
-#else
-       skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail;
-#endif
        skb->len = precv_frame->u.hdr.len;
+       skb_set_tail_pointer(skb, skb->len);
        if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
index 8be5604..0d70f68 100644 (file)
@@ -1854,7 +1854,7 @@ static void speakup_bits(struct vc_data *vc)
 
 static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
 {
-       static u_char *goto_buf = "\0\0\0\0\0\0";
+       static u_char goto_buf[8];
        static int num;
        int maxlen, go_pos;
        char *cp;
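
The speakup hunk above replaces a pointer initialized to a string literal with a real static array: the goto handler writes into goto_buf, and writing through a pointer to a literal is undefined behaviour (typically a fault on read-only rodata). A tiny standalone contrast:

#include <stdio.h>

/* static char *buf = "\0\0\0\0\0\0";  writing buf[0] here would modify a
 * string literal - undefined behaviour, usually a crash on read-only data */
static char buf[8];				/* writable, zero-initialized */

int main(void)
{
	buf[0] = 'g';				/* safe: buf is real storage */
	printf("%s\n", buf);
	return 0;
}
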
index c0edf97..08021f4 100644 (file)
@@ -200,7 +200,7 @@ s_vProcessRxMACHeader (
     } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
         cbHeaderSize += 6;
         pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
-       if ((*pwType == cpu_to_le16(ETH_P_IPX)) ||
+       if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
            (*pwType == cpu_to_le16(0xF380))) {
                cbHeaderSize -= 8;
             pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
index 27521b6..ae62d57 100644 (file)
@@ -222,7 +222,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
 // Static vars definitions
 //
 
-static struct usb_device_id vt6656_table[] __devinitdata = {
+static struct usb_device_id vt6656_table[] = {
        {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
        {}
 };
index 9b64b10..fe21868 100644 (file)
@@ -1701,7 +1701,7 @@ s_bPacketToWirelessUsb(
     // 802.1H
     if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
        if (pDevice->dwDiagRefCount == 0) {
-               if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) ||
+               if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) ||
                    (psEthHeader->wType == cpu_to_le16(0xF380))) {
                        memcpy((PBYTE) (pbyPayloadHead),
                               abySNAP_Bridgetunnel, 6);
@@ -2840,10 +2840,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
     Packet_Type = skb->data[ETH_HLEN+1];
     Descriptor_type = skb->data[ETH_HLEN+1+1+2];
     Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
-    if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
-       /* 802.1x OR eapol-key challenge frame transfer */
-       if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
-               (Packet_Type == 3)) {
+       if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
+               /* 802.1x OR eapol-key challenge frame transfer */
+               if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
+                       (Packet_Type == 3)) {
                         bTxeapol_key = TRUE;
                        if(!(Key_info & BIT3) &&  //WPA or RSN group-key challenge
                           (Key_info & BIT8) && (Key_info & BIT9)) {    //send 2/2 key
@@ -2989,19 +2989,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
         }
     }
 
-    if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
-        if (pDevice->byBBType != BB_TYPE_11A) {
-            pDevice->wCurrentRate = RATE_1M;
-            pDevice->byACKRate = RATE_1M;
-            pDevice->byTopCCKBasicRate = RATE_1M;
-            pDevice->byTopOFDMBasicRate = RATE_6M;
-        } else {
-            pDevice->wCurrentRate = RATE_6M;
-            pDevice->byACKRate = RATE_6M;
-            pDevice->byTopCCKBasicRate = RATE_1M;
-            pDevice->byTopOFDMBasicRate = RATE_6M;
-        }
-    }
+       if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
+               if (pDevice->byBBType != BB_TYPE_11A) {
+                       pDevice->wCurrentRate = RATE_1M;
+                       pDevice->byACKRate = RATE_1M;
+                       pDevice->byTopCCKBasicRate = RATE_1M;
+                       pDevice->byTopOFDMBasicRate = RATE_6M;
+               } else {
+                       pDevice->wCurrentRate = RATE_6M;
+                       pDevice->byACKRate = RATE_6M;
+                       pDevice->byTopCCKBasicRate = RATE_1M;
+                       pDevice->byTopOFDMBasicRate = RATE_6M;
+               }
+       }
 
     DBG_PRT(MSG_LEVEL_DEBUG,
            KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
@@ -3017,7 +3017,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
 
     if (bNeedEncryption == TRUE) {
         DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
-       if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) {
+       if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
                bNeedEncryption = FALSE;
             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
             if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
index f958eb4..3f0ce2b 100644 (file)
@@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
-static const struct usb_device_id wb35_table[] __devinitconst = {
+static const struct usb_device_id wb35_table[] = {
        { USB_DEVICE(0x0416, 0x0035) },
        { USB_DEVICE(0x18E8, 0x6201) },
        { USB_DEVICE(0x18E8, 0x6206) },
index 16ad9fe..4306475 100644 (file)
@@ -1223,13 +1223,12 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
                                        void *pampd, struct tmem_pool *pool,
                                        struct tmem_oid *oid, uint32_t index)
 {
-       int ret = 0;
-
        BUG_ON(!is_ephemeral(pool));
-       zbud_decompress((struct page *)(data), pampd);
+       if (zbud_decompress((struct page *)(data), pampd) < 0)
+               return -EINVAL;
        zbud_free_and_delist((struct zbud_hdr *)pampd);
        atomic_dec(&zcache_curr_eph_pampd_count);
-       return ret;
+       return 0;
 }
 
 /*
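
The zcache hunk above stops discarding the decompressor's return value. A minimal sketch of the same error-propagation pattern; decompress_block() is a hypothetical stand-in for zbud_decompress():

#include <errno.h>
#include <stdio.h>

/* Hypothetical decompressor: 0 on success, negative errno on failure. */
static int decompress_block(void *dst, const void *src)
{
	(void)dst;
	return src ? 0 : -EIO;
}

static int get_data_and_free(void *page, const void *pampd)
{
	if (decompress_block(page, pampd) < 0)
		return -EINVAL;	/* report the failure instead of returning success */
	/* ... free the compressed buffer and update counters ... */
	return 0;
}

int main(void)
{
	char page[64];

	printf("ok path:   %d\n", get_data_and_free(page, "x"));
	printf("fail path: %d\n", get_data_and_free(page, NULL));
	return 0;
}
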
index 94c03d2..597fb9b 100644 (file)
@@ -3509,9 +3509,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
        return 0;
 
 out:
-       while (i >= 0) {
-               __free_page(sg_page(&cmd->t_data_sg[i]));
+       while (i > 0) {
                i--;
+               __free_page(sg_page(&cmd->t_data_sg[i]));
        }
        kfree(cmd->t_data_sg);
        cmd->t_data_sg = NULL;
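
The loop rewrite above fixes an off-by-one in the error unwind: on failure, i indexes the element whose allocation just failed, so only entries 0..i-1 may be freed. A self-contained sketch of that allocate-then-unwind pattern (array and function names here are illustrative):

#include <stdlib.h>

/* Allocate n buffers; on failure free only the ones already allocated. */
static void **alloc_all(int n, size_t sz)
{
	void **bufs = calloc(n, sizeof(*bufs));
	int i;

	if (!bufs)
		return NULL;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(sz);
		if (!bufs[i])
			goto out;
	}
	return bufs;

out:
	/* i points at the slot that failed: free slots 0..i-1 only */
	while (i > 0) {
		i--;
		free(bufs[i]);
	}
	free(bufs);
	return NULL;
}

int main(void)
{
	void **b = alloc_all(8, 256);

	if (b) {
		for (int i = 0; i < 8; i++)
			free(b[i]);
		free(b);
	}
	return 0;
}
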
index 163fc90..8e68f79 100644 (file)
 #define  UCR4_OREN      (1<<1)  /* Receiver overrun interrupt enable */
 #define  UCR4_DREN      (1<<0)  /* Recv data ready interrupt enable */
 #define  UFCR_RXTL_SHF   0       /* Receiver trigger level shift */
+#define  UFCR_DCEDTE    (1<<6)  /* DCE/DTE mode select */
 #define  UFCR_RFDIV      (7<<7)  /* Reference freq divider mask */
 #define  UFCR_RFDIV_REG(x)     (((x) < 7 ? 6 - (x) : 6) << 7)
 #define  UFCR_TXTL_SHF   10      /* Transmitter trigger level shift */
@@ -635,22 +636,11 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
 static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
 {
        unsigned int val;
-       unsigned int ufcr_rfdiv;
-
-       /* set receiver / transmitter trigger level.
-        * RFDIV is set such way to satisfy requested uartclk value
-        */
-       val = TXTL << 10 | RXTL;
-       ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
-                       / sport->port.uartclk;
-
-       if(!ufcr_rfdiv)
-               ufcr_rfdiv = 1;
-
-       val |= UFCR_RFDIV_REG(ufcr_rfdiv);
 
+       /* set receiver / transmitter trigger level */
+       val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
+       val |= TXTL << UFCR_TXTL_SHF | RXTL;
        writel(val, sport->port.membase + UFCR);
-
        return 0;
 }
 
@@ -725,6 +715,7 @@ static int imx_startup(struct uart_port *port)
                }
        }
 
+       spin_lock_irqsave(&sport->port.lock, flags);
        /*
         * Finally, clear and enable interrupts
         */
@@ -778,7 +769,6 @@ static int imx_startup(struct uart_port *port)
        /*
         * Enable modem status interrupts
         */
-       spin_lock_irqsave(&sport->port.lock,flags);
        imx_enable_ms(&sport->port);
        spin_unlock_irqrestore(&sport->port.lock,flags);
 
@@ -808,10 +798,13 @@ static void imx_shutdown(struct uart_port *port)
 {
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;
+       unsigned long flags;
 
+       spin_lock_irqsave(&sport->port.lock, flags);
        temp = readl(sport->port.membase + UCR2);
        temp &= ~(UCR2_TXEN);
        writel(temp, sport->port.membase + UCR2);
+       spin_unlock_irqrestore(&sport->port.lock, flags);
 
        if (USE_IRDA(sport)) {
                struct imxuart_platform_data *pdata;
@@ -840,12 +833,14 @@ static void imx_shutdown(struct uart_port *port)
         * Disable all interrupts, port and break condition.
         */
 
+       spin_lock_irqsave(&sport->port.lock, flags);
        temp = readl(sport->port.membase + UCR1);
        temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
        if (USE_IRDA(sport))
                temp &= ~(UCR1_IREN);
 
        writel(temp, sport->port.membase + UCR1);
+       spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
 static void
@@ -1119,6 +1114,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
 {
        struct imx_port *sport = imx_ports[co->index];
        unsigned int old_ucr1, old_ucr2, ucr1;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sport->port.lock, flags);
 
        /*
         *      First, save UCR1/2 and then disable interrupts
@@ -1145,6 +1143,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
 
        writel(old_ucr1, sport->port.membase + UCR1);
        writel(old_ucr2, sport->port.membase + UCR2);
+
+       spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
 /*
index a4b192d..08b92a6 100644 (file)
@@ -660,7 +660,8 @@ static void pch_dma_rx_complete(void *arg)
                tty_flip_buffer_push(tty);
        tty_kref_put(tty);
        async_tx_ack(priv->desc_rx);
-       pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+       pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
+                                           PCH_UART_HAL_RX_ERR_INT);
 }
 
 static void pch_dma_tx_complete(void *arg)
@@ -715,7 +716,8 @@ static int handle_rx_to(struct eg20t_port *priv)
        int rx_size;
        int ret;
        if (!priv->start_rx) {
-               pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+               pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
+                                                    PCH_UART_HAL_RX_ERR_INT);
                return 0;
        }
        buf = &priv->rxbuf;
@@ -977,11 +979,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
                case PCH_UART_IID_RDR:  /* Received Data Ready */
                        if (priv->use_dma) {
                                pch_uart_hal_disable_interrupt(priv,
-                                                       PCH_UART_HAL_RX_INT);
+                                               PCH_UART_HAL_RX_INT |
+                                               PCH_UART_HAL_RX_ERR_INT);
                                ret = dma_handle_rx(priv);
                                if (!ret)
                                        pch_uart_hal_enable_interrupt(priv,
-                                                       PCH_UART_HAL_RX_INT);
+                                               PCH_UART_HAL_RX_INT |
+                                               PCH_UART_HAL_RX_ERR_INT);
                        } else {
                                ret = handle_rx(priv);
                        }
@@ -1107,7 +1111,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
        struct eg20t_port *priv;
        priv = container_of(port, struct eg20t_port, port);
        priv->start_rx = 0;
-       pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+       pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
+                                            PCH_UART_HAL_RX_ERR_INT);
        priv->int_dis_flag = 1;
 }
 
@@ -1163,6 +1168,7 @@ static int pch_uart_startup(struct uart_port *port)
                break;
        case 16:
                fifo_size = PCH_UART_HAL_FIFO16;
+               break;
        case 1:
        default:
                fifo_size = PCH_UART_HAL_FIFO_DIS;
@@ -1200,7 +1206,8 @@ static int pch_uart_startup(struct uart_port *port)
                pch_request_dma(port);
 
        priv->start_rx = 1;
-       pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+       pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
+                                           PCH_UART_HAL_RX_ERR_INT);
        uart_update_timeout(port, CS8, default_baud);
 
        return 0;
@@ -1258,7 +1265,7 @@ static void pch_uart_set_termios(struct uart_port *port,
                stb = PCH_UART_HAL_STB1;
 
        if (termios->c_cflag & PARENB) {
-               if (!(termios->c_cflag & PARODD))
+               if (termios->c_cflag & PARODD)
                        parity = PCH_UART_HAL_PARITY_ODD;
                else
                        parity = PCH_UART_HAL_PARITY_EVEN;
index 5acd24a..086f7fe 100644 (file)
@@ -1407,10 +1407,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
 static int pmz_poll_get_char(struct uart_port *port)
 {
        struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+       int tries = 2;
 
-       while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
-               udelay(5);
-       return read_zsdata(uap);
+       while (tries) {
+               if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0)
+                       return read_zsdata(uap);
+               if (tries--)
+                       udelay(5);
+       }
+
+       return NO_POLL_CHAR;
 }
 
 static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
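
The pmz_poll_get_char() change above turns an unbounded busy-wait into a bounded poll that gives up and reports "no character". A minimal sketch of that pattern; read_status(), read_data() and delay_us() are fake accessors standing in for the Zilog register helpers:

#include <stdio.h>

#define NO_POLL_CHAR	(-1)
#define RX_READY	0x01

/* Fake hardware accessors for the sketch. */
static unsigned int read_status(void) { return 0; }	/* pretend RX never becomes ready */
static int read_data(void) { return 'A'; }
static void delay_us(unsigned int us) { (void)us; }

static int poll_get_char(void)
{
	int tries = 2;

	while (tries) {
		if (read_status() & RX_READY)
			return read_data();
		if (tries--)
			delay_us(5);
	}
	return NO_POLL_CHAR;	/* caller decides what to do when nothing arrived */
}

int main(void)
{
	printf("polled: %d\n", poll_get_char());
	return 0;
}
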
index 1094469..dbf7d20 100644 (file)
@@ -1043,7 +1043,8 @@ skip_normal_probe:
        }
 
 
-       if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
+       if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
+           control_interface->cur_altsetting->desc.bNumEndpoints == 0)
                return -EINVAL;
 
        epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
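
The cdc-acm check above refuses to dereference endpoint[0] of a control interface that reports zero endpoints. A tiny sketch of the same validate-before-index pattern (struct altsetting and probe() below are illustrative, not the USB core types):

#include <errno.h>
#include <stdio.h>

struct altsetting {
	int num_endpoints;
	int endpoints[8];
};

static int probe(const struct altsetting *data_if, const struct altsetting *ctrl_if)
{
	/* never touch endpoints[0] when the descriptor says there are none */
	if (data_if->num_endpoints < 2 || ctrl_if->num_endpoints == 0)
		return -EINVAL;

	return ctrl_if->endpoints[0];
}

int main(void)
{
	struct altsetting data = { .num_endpoints = 2, .endpoints = { 1, 2 } };
	struct altsetting ctrl = { .num_endpoints = 0 };

	printf("probe: %d\n", probe(&data, &ctrl));
	return 0;
}
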
index 175b6bb..52340cc 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -1897,6 +1898,14 @@ int usb_new_device(struct usb_device *udev)
        /* Tell the world! */
        announce_device(udev);
 
+       if (udev->serial)
+               add_device_randomness(udev->serial, strlen(udev->serial));
+       if (udev->product)
+               add_device_randomness(udev->product, strlen(udev->product));
+       if (udev->manufacturer)
+               add_device_randomness(udev->manufacturer,
+                                     strlen(udev->manufacturer));
+
        device_enable_async_suspend(&udev->dev);
        /* Register the device.  The device driver is responsible
         * for configuring the device and invoking the add-device
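
The hub.c hunk above feeds whichever descriptor strings a device actually provides (serial, product, manufacturer) into the kernel's randomness pool. A toy userspace sketch of that check-then-mix pattern; mix_into_pool() is a simple FNV-1a hash used only for illustration and is not how add_device_randomness() works internally:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t pool = 0xcbf29ce484222325ULL;	/* FNV-1a offset basis */

static void mix_into_pool(const void *buf, size_t len)
{
	const unsigned char *p = buf;

	while (len--) {
		pool ^= *p++;
		pool *= 0x100000001b3ULL;	/* FNV-1a prime */
	}
}

int main(void)
{
	const char *serial = "0123456789AB";
	const char *product = "Example Flash Drive";
	const char *manufacturer = NULL;	/* strings may legitimately be absent */

	if (serial)
		mix_into_pool(serial, strlen(serial));
	if (product)
		mix_into_pool(product, strlen(product));
	if (manufacturer)
		mix_into_pool(manufacturer, strlen(manufacturer));

	printf("pool: %016llx\n", (unsigned long long)pool);
	return 0;
}
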
index 32d3adc..8b2a9d8 100644 (file)
@@ -96,6 +96,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04b4, 0x0526), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Microchip Joss Optical infrared touchboard device */
+       { USB_DEVICE(0x04d8, 0x000c), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* Samsung Android phone modem - ID conflict with SPH-I500 */
        { USB_DEVICE(0x04e8, 0x6601), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
index 1fc8f12..347bb05 100644 (file)
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
        writel(FLAG_CF, &ehci_regs->configured_flag);
 
        /* Wait until the controller is no longer halted */
-       loop = 10;
+       loop = 1000;
        do {
                status = readl(&ehci_regs->status);
                if (!(status & STS_HALT))
index 4e1f0aa..9a2a1ae 100644 (file)
@@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net)
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                struct gether   *link = dev->port_usb;
+               const struct usb_endpoint_descriptor *in;
+               const struct usb_endpoint_descriptor *out;
 
                if (link->close)
                        link->close(link);
@@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net)
                 * their own pace; the network stack can handle old packets.
                 * For the moment we leave this here, since it works.
                 */
+               in = link->in_ep->desc;
+               out = link->out_ep->desc;
                usb_ep_disable(link->in_ep);
                usb_ep_disable(link->out_ep);
                if (netif_carrier_ok(net)) {
                        DBG(dev, "host still using in/out endpoints\n");
+                       link->in_ep->desc = in;
+                       link->out_ep->desc = out;
                        usb_ep_enable(link->in_ep);
                        usb_ep_enable(link->out_ep);
                }
index fef1db3..2023733 100644 (file)
@@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
        else {
                qtd = list_entry (qh->qtd_list.next,
                                struct ehci_qtd, qtd_list);
-               /* first qtd may already be partially processed */
-               if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
+               /*
+                * first qtd may already be partially processed.
+                * If we come here during unlink, the QH overlay region
+                * might have a reference to the just-unlinked qtd. The
+                * qtd is updated in qh_completions(). Update the QH
+                * overlay here.
+                */
+               if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
+                       qh->hw->hw_qtd_next = qtd->hw_next;
                        qtd = NULL;
+               }
        }
 
        if (qtd)
index 833b3c6..d0ec2f0 100644 (file)
@@ -75,7 +75,9 @@
 #define        NB_PIF0_PWRDOWN_1       0x01100013
 
 #define USB_INTEL_XUSB2PR      0xD0
+#define USB_INTEL_USB2PRM      0xD4
 #define USB_INTEL_USB3_PSSEN   0xD8
+#define USB_INTEL_USB3PRM      0xDC
 
 static struct amd_chipset_info {
        struct pci_dev  *nb_dev;
@@ -772,10 +774,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
                return;
        }
 
-       ports_available = 0xffffffff;
+       /* Read USB3PRM, the USB 3.0 Port Routing Mask Register
+        * which indicates the ports that the OS is allowed to switch over.
+        */
+       pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
+                       &ports_available);
+
+       dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
+                       ports_available);
+
        /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
-        * Register, to turn on SuperSpeed terminations for all
-        * available ports.
+        * Register, to turn on SuperSpeed terminations for the
+        * switchable ports.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        cpu_to_le32(ports_available));
@@ -785,7 +795,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
        dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
                        "under xHCI: 0x%x\n", ports_available);
 
-       ports_available = 0xffffffff;
+       /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
+        * which indicates the USB 2.0 ports to be controlled by the xHCI host.
+        */
+
+       pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
+                       &ports_available);
+
+       dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
+                       ports_available);
+
        /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
         * switch the USB 2.0 power and data lines over to the xHCI
         * host.
@@ -800,6 +819,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
 }
 EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
 
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+       pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+       pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+
 /**
  * PCI Quirks for xHCI.
  *
@@ -815,12 +841,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
        void __iomem *op_reg_base;
        u32 val;
        int timeout;
+       int len = pci_resource_len(pdev, 0);
 
        if (!mmio_resource_enabled(pdev, 0))
                return;
 
-       base = ioremap_nocache(pci_resource_start(pdev, 0),
-                               pci_resource_len(pdev, 0));
+       base = ioremap_nocache(pci_resource_start(pdev, 0), len);
        if (base == NULL)
                return;
 
@@ -830,9 +856,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
         */
        ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
        do {
+               if ((ext_cap_offset + sizeof(val)) > len) {
+                       /* We're reading garbage from the controller */
+                       dev_warn(&pdev->dev,
+                                "xHCI controller failing to respond");
+                       return;
+               }
+
                if (!ext_cap_offset)
                        /* We've reached the end of the extended capabilities */
                        goto hc_init;
+
                val = readl(base + ext_cap_offset);
                if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
                        break;
@@ -863,9 +897,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
        /* Disable any BIOS SMIs and clear all SMI events*/
        writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
+hc_init:
        if (usb_is_intel_switchable_xhci(pdev))
                usb_enable_xhci_ports(pdev);
-hc_init:
+
        op_reg_base = base + XHCI_HC_LENGTH(readl(base));
 
        /* Wait for the host controller to be ready before writing any
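
The pci-quirks change above replaces a blanket 0xffffffff write with a read of the BIOS-provided routing-mask register, so only the ports the firmware marked as switchable are handed to xHCI. A compact sketch of that read-mask-then-write flow against a fake config space; cfg_read()/cfg_write() are placeholders, not the real PCI accessors, and only the register offsets come from the hunk:

#include <stdint.h>
#include <stdio.h>

#define USB3PRM		0xDC	/* routing mask: ports the OS may switch        */
#define USB3_PSSEN	0xD8	/* enable register: ports that actually switch  */

/* Fake 256-byte config space for the sketch. */
static uint32_t cfg[64] = {
	[USB3PRM / 4] = 0x0000000F,	/* BIOS says ports 0-3 are switchable */
};

static uint32_t cfg_read(unsigned int off)		{ return cfg[off / 4]; }
static void cfg_write(unsigned int off, uint32_t v)	{ cfg[off / 4] = v; }

int main(void)
{
	/* old behaviour: cfg_write(USB3_PSSEN, 0xffffffff) touched everything */
	uint32_t switchable = cfg_read(USB3PRM);	/* ask the BIOS which ports are ours */

	cfg_write(USB3_PSSEN, switchable);		/* switch over only those ports */
	printf("enabled ports mask: 0x%x\n", (unsigned int)cfg_read(USB3_PSSEN));
	return 0;
}
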
index b1002a8..7f69a39 100644 (file)
@@ -10,10 +10,12 @@ void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
 bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
 void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 #else
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
 static inline void usb_amd_dev_put(void) {}
+static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 #endif  /* CONFIG_PCI */
 
 #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
index fd8a2c2..978860b 100644 (file)
@@ -469,11 +469,48 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
                 * when this bit is set.
                 */
                pls |= USB_PORT_STAT_CONNECTION;
+       } else {
+               /*
+                * If CAS bit isn't set but the Port is already at
+                * Compliance Mode, fake a connection so the USB core
+                * notices the Compliance state and resets the port.
+                * in which the port sometimes enters compliance mode
+                * because of a delay in the host-device negotiation.
+                * caused by a delay on the host-device negotiation.
+                */
+               if (pls == USB_SS_PORT_LS_COMP_MOD)
+                       pls |= USB_PORT_STAT_CONNECTION;
        }
+
        /* update status field */
        *status |= pls;
 }
 
+/*
+ * Function for Compliance Mode Quirk.
+ *
+ * This function verifies whether all xHC USB3 ports have entered U0; if so,
+ * the compliance mode timer is deleted. A port won't enter
+ * compliance mode if it has previously entered U0.
+ */
+void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
+{
+       u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
+       bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
+
+       if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
+               return;
+
+       if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
+               xhci->port_status_u0 |= 1 << wIndex;
+               if (xhci->port_status_u0 == all_ports_seen_u0) {
+                       del_timer_sync(&xhci->comp_mode_recovery_timer);
+                       xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
+                       xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
+               }
+       }
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -618,6 +655,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                /* Update Port Link State for super speed ports*/
                if (hcd->speed == HCD_USB3) {
                        xhci_hub_report_link_state(&status, temp);
+                       /*
+                        * Verify if all USB3 Ports Have entered U0 already.
+                        * Delete Compliance Mode Timer if so.
+                        */
+                       xhci_del_comp_mod_timer(xhci, temp, wIndex);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
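
xhci_del_comp_mod_timer() above keeps a per-port "seen U0" bitmask and deletes the recovery timer once every USB3 port has set its bit. A compact userspace sketch of that bookkeeping; struct host and stop_timer() are illustrative, not the xHCI structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct host {
	unsigned int num_usb3_ports;
	uint32_t port_status_u0;	/* bit i set once port i was seen in U0 */
	bool timer_running;
};

static void stop_timer(struct host *h)
{
	h->timer_running = false;
	printf("all ports seen U0 -> recovery timer deleted\n");
}

/* Call when a port reports link state U0. */
static void note_port_in_u0(struct host *h, unsigned int port)
{
	uint32_t all = (1u << h->num_usb3_ports) - 1;

	h->port_status_u0 |= 1u << port;
	if (h->timer_running && h->port_status_u0 == all)
		stop_timer(h);
}

int main(void)
{
	struct host h = { .num_usb3_ports = 2, .timer_running = true };

	note_port_in_u0(&h, 0);	/* timer keeps running: port 1 not seen yet */
	note_port_in_u0(&h, 1);	/* now all ports seen -> timer stops */
	return 0;
}
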
index daf5754..bddcbfc 100644 (file)
@@ -90,11 +90,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
                xhci->limit_active_eps = 64;
                xhci->quirks |= XHCI_SW_BW_CHECKING;
+               /*
+                * PPT desktop boards DH77EB and DH77DF will power back on after
+                * a few seconds of being shutdown.  The fix for this is to
+                * switch the ports from xHCI to EHCI on shutdown.  We can't use
+                * DMI information to find those particular boards (since each
+                * vendor will change the board name), so we have to key off all
+                * PPT chipsets.
+                */
+               xhci->quirks |= XHCI_SPURIOUS_REBOOT;
        }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
                xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
        if (pdev->vendor == PCI_VENDOR_ID_VIA)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
index fb0981e..c7c530c 100644 (file)
@@ -145,25 +145,34 @@ static void next_trb(struct xhci_hcd *xhci,
  */
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 {
-       union xhci_trb *next = ++(ring->dequeue);
        unsigned long long addr;
 
        ring->deq_updates++;
-       /* Update the dequeue pointer further if that was a link TRB or we're at
-        * the end of an event ring segment (which doesn't have link TRBS)
-        */
-       while (last_trb(xhci, ring, ring->deq_seg, next)) {
-               if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
-                       ring->cycle_state = (ring->cycle_state ? 0 : 1);
-                       if (!in_interrupt())
-                               xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
-                                               ring,
-                                               (unsigned int) ring->cycle_state);
+
+       do {
+               /*
+                * Update the dequeue pointer further if that was a link TRB or
+                * we're at the end of an event ring segment (which doesn't have
+                * link TRBS)
+                */
+               if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+                       if (consumer && last_trb_on_last_seg(xhci, ring,
+                                               ring->deq_seg, ring->dequeue)) {
+                               if (!in_interrupt())
+                                       xhci_dbg(xhci, "Toggle cycle state "
+                                                       "for ring %p = %i\n",
+                                                       ring,
+                                                       (unsigned int)
+                                                       ring->cycle_state);
+                               ring->cycle_state = (ring->cycle_state ? 0 : 1);
+                       }
+                       ring->deq_seg = ring->deq_seg->next;
+                       ring->dequeue = ring->deq_seg->trbs;
+               } else {
+                       ring->dequeue++;
                }
-               ring->deq_seg = ring->deq_seg->next;
-               ring->dequeue = ring->deq_seg->trbs;
-               next = ring->dequeue;
-       }
+       } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 }
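
The inc_deq() rewrite above folds the segment hop into a do/while: if the dequeue pointer sits on a link TRB it jumps to the next segment, otherwise it advances by one slot, and the loop repeats until it no longer points at a link TRB. A small self-contained sketch of advancing a consumer pointer across linked fixed-size segments (the types and names below are illustrative, not the xHCI ring structures):

#include <stdio.h>

#define TRBS_PER_SEG 4

struct segment {
	int trbs[TRBS_PER_SEG];		/* the last slot plays the role of the link TRB */
	struct segment *next;
};

struct ring {
	struct segment *deq_seg;
	int index;			/* dequeue position inside deq_seg */
};

static int on_link(const struct ring *r)
{
	return r->index == TRBS_PER_SEG - 1;
}

/* Advance the dequeue pointer, hopping to the next segment over link TRBs. */
static void inc_deq(struct ring *r)
{
	do {
		if (on_link(r)) {
			r->deq_seg = r->deq_seg->next;
			r->index = 0;
		} else {
			r->index++;
		}
	} while (on_link(r));
}

int main(void)
{
	struct segment a, b;
	struct ring r = { .deq_seg = &a, .index = 0 };

	a.next = &b;
	b.next = &a;

	for (int i = 0; i < 8; i++) {
		inc_deq(&r);
		printf("segment %c, slot %d\n", r.deq_seg == &a ? 'A' : 'B', r.index);
	}
	return 0;
}
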
 
index 05f82e9..09872ee 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/dmi.h>
 
 #include "xhci.h"
 
@@ -163,7 +164,7 @@ int xhci_reset(struct xhci_hcd *xhci)
        xhci_writel(xhci, command, &xhci->op_regs->command);
 
        ret = handshake(xhci, &xhci->op_regs->command,
-                       CMD_RESET, 0, 250 * 1000);
+                       CMD_RESET, 0, 10 * 1000 * 1000);
        if (ret)
                return ret;
 
@@ -172,7 +173,8 @@ int xhci_reset(struct xhci_hcd *xhci)
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
-       return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+       return handshake(xhci, &xhci->op_regs->status,
+                        STS_CNR, 0, 10 * 1000 * 1000);
 }
 
 #ifdef CONFIG_PCI
@@ -386,6 +388,95 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 
 #endif
 
+static void compliance_mode_recovery(unsigned long arg)
+{
+       struct xhci_hcd *xhci;
+       struct usb_hcd *hcd;
+       u32 temp;
+       int i;
+
+       xhci = (struct xhci_hcd *)arg;
+
+       for (i = 0; i < xhci->num_usb3_ports; i++) {
+               temp = xhci_readl(xhci, xhci->usb3_ports[i]);
+               if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
+                       /*
+                        * Compliance Mode Detected. Letting USB Core
+                        * handle the Warm Reset
+                        */
+                       xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
+                                       i + 1);
+                       xhci_dbg(xhci, "Attempting Recovery routine!\n");
+                       hcd = xhci->shared_hcd;
+
+                       if (hcd->state == HC_STATE_SUSPENDED)
+                               usb_hcd_resume_root_hub(hcd);
+
+                       usb_hcd_poll_rh_status(hcd);
+               }
+       }
+
+       if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
+               mod_timer(&xhci->comp_mode_recovery_timer,
+                       jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+}
+
+/*
+ * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
+ * that causes ports behind that hardware to enter compliance mode sometimes.
+ * The quirk creates a timer that polls the link state of each host
+ * controller's port every 2 seconds and recovers it by issuing a Warm reset
+ * if Compliance mode is detected; otherwise the port becomes "dead" (no
+ * device connections or disconnections will be detected anymore). Because no
+ * status event is generated when entering compliance mode (per xhci spec),
+ * this quirk is needed on systems that have the failing hardware installed.
+ */
+static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
+{
+       xhci->port_status_u0 = 0;
+       init_timer(&xhci->comp_mode_recovery_timer);
+
+       xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
+       xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
+       xhci->comp_mode_recovery_timer.expires = jiffies +
+                       msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
+
+       set_timer_slack(&xhci->comp_mode_recovery_timer,
+                       msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+       add_timer(&xhci->comp_mode_recovery_timer);
+       xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
+}
+
+/*
+ * This function identifies the systems that have installed the SN65LVPE502CP
+ * USB3.0 re-driver and that need the Compliance Mode Quirk.
+ * Systems:
+ * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
+ */
+static bool compliance_mode_recovery_timer_quirk_check(void)
+{
+       const char *dmi_product_name, *dmi_sys_vendor;
+
+       dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+       dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+
+       if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+               return false;
+
+       if (strstr(dmi_product_name, "Z420") ||
+                       strstr(dmi_product_name, "Z620") ||
+                       strstr(dmi_product_name, "Z820"))
+               return true;
+
+       return false;
+}
+
+static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
+{
+       return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
+}
+
+
 /*
  * Initialize memory for HCD and xHC (one-time init).
  *
@@ -409,6 +500,12 @@ int xhci_init(struct usb_hcd *hcd)
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");
 
+       /* Initializing Compliance Mode Recovery Data If Needed */
+       if (compliance_mode_recovery_timer_quirk_check()) {
+               xhci->quirks |= XHCI_COMP_MODE_QUIRK;
+               compliance_mode_recovery_timer_init(xhci);
+       }
+
        return retval;
 }
 
@@ -617,6 +714,11 @@ void xhci_stop(struct usb_hcd *hcd)
        del_timer_sync(&xhci->event_ring_timer);
 #endif
 
+       /* Deleting Compliance Mode Recovery Timer */
+       if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                       (!(xhci_all_ports_seen_u0(xhci))))
+               del_timer_sync(&xhci->comp_mode_recovery_timer);
+
        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();
 
@@ -647,6 +749,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
+       if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+               usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        spin_unlock_irq(&xhci->lock);
@@ -790,6 +895,16 @@ int xhci_suspend(struct xhci_hcd *xhci)
        }
        spin_unlock_irq(&xhci->lock);
 
+       /*
+        * Deleting Compliance Mode Recovery Timer because the xHCI Host
+        * is about to be suspended.
+        */
+       if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                       (!(xhci_all_ports_seen_u0(xhci)))) {
+               del_timer_sync(&xhci->comp_mode_recovery_timer);
+               xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
+       }
+
        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);
@@ -922,6 +1037,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                usb_hcd_resume_root_hub(hcd);
                usb_hcd_resume_root_hub(xhci->shared_hcd);
        }
+
+       /*
+        * If the system is subject to the Quirk, the Compliance Mode Timer
+        * always needs to be re-initialized after a system resume, since the
+        * ports are subject to the Compliance Mode issue again. It doesn't
+        * matter whether the ports entered U0 before the system was suspended.
+        */
+       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+               compliance_mode_recovery_timer_init(xhci);
+
        return retval;
 }
 #endif /* CONFIG_PM */
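
compliance_mode_recovery_timer_quirk_check() above keys the quirk off DMI vendor and product strings. A minimal sketch of the same substring matching; dmi_lookup() is a stub standing in for dmi_get_system_info():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stub for the sketch: pretend these strings came out of the DMI tables. */
static const char *dmi_lookup(const char *field)
{
	if (strcmp(field, "sys_vendor") == 0)
		return "Hewlett-Packard";
	if (strcmp(field, "product_name") == 0)
		return "HP Z620 Workstation";
	return "";
}

static bool needs_comp_mode_quirk(void)
{
	const char *vendor = dmi_lookup("sys_vendor");
	const char *product = dmi_lookup("product_name");

	if (!strstr(vendor, "Hewlett-Packard"))
		return false;

	return strstr(product, "Z420") ||
	       strstr(product, "Z620") ||
	       strstr(product, "Z820");
}

int main(void)
{
	printf("apply quirk: %s\n", needs_comp_mode_quirk() ? "yes" : "no");
	return 0;
}
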
index 7a56805..44d518a 100644 (file)
@@ -1471,6 +1471,8 @@ struct xhci_hcd {
 #define        XHCI_SW_BW_CHECKING     (1 << 8)
 #define XHCI_AMD_0x96_HOST     (1 << 9)
 #define XHCI_TRUST_TX_LENGTH   (1 << 10)
+#define XHCI_SPURIOUS_REBOOT   (1 << 13)
+#define XHCI_COMP_MODE_QUIRK   (1 << 14)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
@@ -1487,6 +1489,11 @@ struct xhci_hcd {
        unsigned                sw_lpm_support:1;
        /* support xHCI 1.0 spec USB2 hardware LPM */
        unsigned                hw_lpm_support:1;
+       /* Compliance Mode Recovery Data */
+       struct timer_list       comp_mode_recovery_timer;
+       u32                     port_status_u0;
+/* Compliance Mode Timer Triggered every 2 seconds */
+#define COMP_MODE_RCVRY_MSECS 2000
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
index fc15ad4..723e833 100644 (file)
@@ -259,7 +259,7 @@ wraperr:
        return err;
 }
 
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
        { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
        { }                                             /* Terminating entry */
 };
index 4045e39..7324bea 100644 (file)
@@ -704,6 +704,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
        { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
        { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
        { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
@@ -804,13 +805,33 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
+                                       USB_CLASS_VENDOR_SPEC,
+                                       USB_SUBCLASS_VENDOR_SPEC, 0x00) },
        { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
        { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
        { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+       { USB_DEVICE(FTDI_VID, PI_C865_PID) },
+       { USB_DEVICE(FTDI_VID, PI_C857_PID) },
+       { USB_DEVICE(PI_VID, PI_C866_PID) },
+       { USB_DEVICE(PI_VID, PI_C663_PID) },
+       { USB_DEVICE(PI_VID, PI_C725_PID) },
+       { USB_DEVICE(PI_VID, PI_E517_PID) },
+       { USB_DEVICE(PI_VID, PI_C863_PID) },
        { USB_DEVICE(PI_VID, PI_E861_PID) },
+       { USB_DEVICE(PI_VID, PI_C867_PID) },
+       { USB_DEVICE(PI_VID, PI_E609_PID) },
+       { USB_DEVICE(PI_VID, PI_E709_PID) },
+       { USB_DEVICE(PI_VID, PI_100F_PID) },
+       { USB_DEVICE(PI_VID, PI_1011_PID) },
+       { USB_DEVICE(PI_VID, PI_1012_PID) },
+       { USB_DEVICE(PI_VID, PI_1013_PID) },
+       { USB_DEVICE(PI_VID, PI_1014_PID) },
+       { USB_DEVICE(PI_VID, PI_1015_PID) },
+       { USB_DEVICE(PI_VID, PI_1016_PID) },
+       { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
        { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
        { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
index d27d7d7..06f6fd2 100644 (file)
@@ -75,6 +75,9 @@
 #define FTDI_OPENDCC_GATEWAY_PID       0xBFDB
 #define FTDI_OPENDCC_GBM_PID   0xBFDC
 
+/* NZR SEM 16+ USB (http://www.nzr.de) */
+#define FTDI_NZR_SEM_USB_PID   0xC1E0  /* NZR SEM-LOG16+ */
+
 /*
  * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
  */
 /*
  * Microchip Technology, Inc.
  *
- * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
+ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
+ * used by single function CDC ACM class based firmware demo
+ * applications.  The VID/PID has also been used in firmware
+ * emulating FTDI serial chips by:
  * Hornby Elite - Digital Command Control Console
  * http://www.hornby.com/hornby-dcc/controllers/
  */
  * Physik Instrumente
  * http://www.physikinstrumente.com/en/products/
  */
+/* These two devices use the VID of FTDI */
+#define PI_C865_PID    0xe0a0  /* PI C-865 Piezomotor Controller */
+#define PI_C857_PID    0xe0a1  /* PI Encoder Trigger Box */
+
 #define PI_VID              0x1a72  /* Vendor ID */
-#define PI_E861_PID         0x1008  /* E-861 piezo controller USB connection */
+#define PI_C866_PID    0x1000  /* PI C-866 Piezomotor Controller */
+#define PI_C663_PID    0x1001  /* PI C-663 Mercury-Step */
+#define PI_C725_PID    0x1002  /* PI C-725 Piezomotor Controller */
+#define PI_E517_PID    0x1005  /* PI E-517 Digital Piezo Controller Operation Module */
+#define PI_C863_PID    0x1007  /* PI C-863 */
+#define PI_E861_PID    0x1008  /* PI E-861 Piezomotor Controller */
+#define PI_C867_PID    0x1009  /* PI C-867 Piezomotor Controller */
+#define PI_E609_PID    0x100D  /* PI E-609 Digital Piezo Controller */
+#define PI_E709_PID    0x100E  /* PI E-709 Digital Piezo Controller */
+#define PI_100F_PID    0x100F  /* PI Digital Piezo Controller */
+#define PI_1011_PID    0x1011  /* PI Digital Piezo Controller */
+#define PI_1012_PID    0x1012  /* PI Motion Controller */
+#define PI_1013_PID    0x1013  /* PI Motion Controller */
+#define PI_1014_PID    0x1014  /* PI Device */
+#define PI_1015_PID    0x1015  /* PI Device */
+#define PI_1016_PID    0x1016  /* PI Digital Servo Module */
+
+/*
+ * Kondo Kagaku Co.Ltd.
+ * http://www.kondo-robot.com/EN
+ */
+#define KONDO_VID              0x165c
+#define KONDO_USB_SERIAL_PID   0x0002
 
 /*
  * Bayer Ascensia Contour blood glucose meter USB-converter cable.
index 5c7d654..b150ed9 100644 (file)
@@ -1191,9 +1191,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
        }
 
        spin_lock_irqsave(&mos7840_port->pool_lock, flags);
-       for (i = 0; i < NUM_URBS; ++i)
-               if (mos7840_port->busy[i])
-                       chars += URB_TRANSFER_BUFFER_SIZE;
+       for (i = 0; i < NUM_URBS; ++i) {
+               if (mos7840_port->busy[i]) {
+                       struct urb *urb = mos7840_port->write_urb_pool[i];
+                       chars += urb->transfer_buffer_length;
+               }
+       }
        spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
        dbg("%s - returns %d", __func__, chars);
        return chars;
index d89aac1..c068b4d 100644 (file)
@@ -80,84 +80,9 @@ static void option_instat_callback(struct urb *urb);
 #define OPTION_PRODUCT_GTM380_MODEM            0x7201
 
 #define HUAWEI_VENDOR_ID                       0x12D1
-#define HUAWEI_PRODUCT_E600                    0x1001
-#define HUAWEI_PRODUCT_E220                    0x1003
-#define HUAWEI_PRODUCT_E220BIS                 0x1004
-#define HUAWEI_PRODUCT_E1401                   0x1401
-#define HUAWEI_PRODUCT_E1402                   0x1402
-#define HUAWEI_PRODUCT_E1403                   0x1403
-#define HUAWEI_PRODUCT_E1404                   0x1404
-#define HUAWEI_PRODUCT_E1405                   0x1405
-#define HUAWEI_PRODUCT_E1406                   0x1406
-#define HUAWEI_PRODUCT_E1407                   0x1407
-#define HUAWEI_PRODUCT_E1408                   0x1408
-#define HUAWEI_PRODUCT_E1409                   0x1409
-#define HUAWEI_PRODUCT_E140A                   0x140A
-#define HUAWEI_PRODUCT_E140B                   0x140B
-#define HUAWEI_PRODUCT_E140C                   0x140C
-#define HUAWEI_PRODUCT_E140D                   0x140D
-#define HUAWEI_PRODUCT_E140E                   0x140E
-#define HUAWEI_PRODUCT_E140F                   0x140F
-#define HUAWEI_PRODUCT_E1410                   0x1410
-#define HUAWEI_PRODUCT_E1411                   0x1411
-#define HUAWEI_PRODUCT_E1412                   0x1412
-#define HUAWEI_PRODUCT_E1413                   0x1413
-#define HUAWEI_PRODUCT_E1414                   0x1414
-#define HUAWEI_PRODUCT_E1415                   0x1415
-#define HUAWEI_PRODUCT_E1416                   0x1416
-#define HUAWEI_PRODUCT_E1417                   0x1417
-#define HUAWEI_PRODUCT_E1418                   0x1418
-#define HUAWEI_PRODUCT_E1419                   0x1419
-#define HUAWEI_PRODUCT_E141A                   0x141A
-#define HUAWEI_PRODUCT_E141B                   0x141B
-#define HUAWEI_PRODUCT_E141C                   0x141C
-#define HUAWEI_PRODUCT_E141D                   0x141D
-#define HUAWEI_PRODUCT_E141E                   0x141E
-#define HUAWEI_PRODUCT_E141F                   0x141F
-#define HUAWEI_PRODUCT_E1420                   0x1420
-#define HUAWEI_PRODUCT_E1421                   0x1421
-#define HUAWEI_PRODUCT_E1422                   0x1422
-#define HUAWEI_PRODUCT_E1423                   0x1423
-#define HUAWEI_PRODUCT_E1424                   0x1424
-#define HUAWEI_PRODUCT_E1425                   0x1425
-#define HUAWEI_PRODUCT_E1426                   0x1426
-#define HUAWEI_PRODUCT_E1427                   0x1427
-#define HUAWEI_PRODUCT_E1428                   0x1428
-#define HUAWEI_PRODUCT_E1429                   0x1429
-#define HUAWEI_PRODUCT_E142A                   0x142A
-#define HUAWEI_PRODUCT_E142B                   0x142B
-#define HUAWEI_PRODUCT_E142C                   0x142C
-#define HUAWEI_PRODUCT_E142D                   0x142D
-#define HUAWEI_PRODUCT_E142E                   0x142E
-#define HUAWEI_PRODUCT_E142F                   0x142F
-#define HUAWEI_PRODUCT_E1430                   0x1430
-#define HUAWEI_PRODUCT_E1431                   0x1431
-#define HUAWEI_PRODUCT_E1432                   0x1432
-#define HUAWEI_PRODUCT_E1433                   0x1433
-#define HUAWEI_PRODUCT_E1434                   0x1434
-#define HUAWEI_PRODUCT_E1435                   0x1435
-#define HUAWEI_PRODUCT_E1436                   0x1436
-#define HUAWEI_PRODUCT_E1437                   0x1437
-#define HUAWEI_PRODUCT_E1438                   0x1438
-#define HUAWEI_PRODUCT_E1439                   0x1439
-#define HUAWEI_PRODUCT_E143A                   0x143A
-#define HUAWEI_PRODUCT_E143B                   0x143B
-#define HUAWEI_PRODUCT_E143C                   0x143C
-#define HUAWEI_PRODUCT_E143D                   0x143D
-#define HUAWEI_PRODUCT_E143E                   0x143E
-#define HUAWEI_PRODUCT_E143F                   0x143F
 #define HUAWEI_PRODUCT_K4505                   0x1464
 #define HUAWEI_PRODUCT_K3765                   0x1465
-#define HUAWEI_PRODUCT_E14AC                   0x14AC
-#define HUAWEI_PRODUCT_K3806                   0x14AE
 #define HUAWEI_PRODUCT_K4605                   0x14C6
-#define HUAWEI_PRODUCT_K3770                   0x14C9
-#define HUAWEI_PRODUCT_K3771                   0x14CA
-#define HUAWEI_PRODUCT_K4510                   0x14CB
-#define HUAWEI_PRODUCT_K4511                   0x14CC
-#define HUAWEI_PRODUCT_ETS1220                 0x1803
-#define HUAWEI_PRODUCT_E353                    0x1506
-#define HUAWEI_PRODUCT_E173S                   0x1C05
 
 #define QUANTA_VENDOR_ID                       0x0408
 #define QUANTA_PRODUCT_Q101                    0xEA02
@@ -614,101 +539,123 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
        { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
        { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+
+
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -1143,6 +1090,10 @@ static const struct usb_device_id option_ids[] = {
         .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
         .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+       { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
index 8745637..bf9a9b7 100644 (file)
@@ -373,8 +373,15 @@ static void fb_flashcursor(struct work_struct *work)
        struct vc_data *vc = NULL;
        int c;
        int mode;
+       int ret;
+
+       /* FIXME: we should sort out the unbind locking instead.
+        * For now, just skip flashing the cursor if we can't take
+        * the lock, rather than blocking fbcon deinit. */
+       ret = console_trylock();
+       if (ret == 0)
+               return;
 
-       console_lock();
        if (ops && ops->currcon != -1)
                vc = vc_cons[ops->currcon].d;
 
index aaccffa..dd9533a 100644 (file)
@@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
        result = fb_sys_write(info, buf, count, ppos);
 
        if (result > 0) {
-               int start = max((int)(offset / info->fix.line_length) - 1, 0);
+               int start = max((int)(offset / info->fix.line_length), 0);
                int lines = min((u32)((result / info->fix.line_length) + 1),
                                (u32)info->var.yres);
 
index 284798a..89588e7 100644 (file)
@@ -231,7 +231,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                return ret;
 
        if (hwdev && hwdev->coherent_dma_mask)
-               dma_mask = hwdev->coherent_dma_mask;
+               dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
index 4115eca..19a4f0b 100644 (file)
@@ -964,7 +964,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
 /*
  * Initialise the state of a blockdev page's buffers.
  */ 
-static void
+static sector_t
 init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
 {
@@ -986,33 +986,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
+
+       /*
+        * Caller needs to validate requested block against end of device.
+        */
+       return end_block;
 }
 
 /*
  * Create the page-cache page that contains the requested block.
  *
- * This is user purely for blockdev mappings.
+ * This is used purely for blockdev mappings.
  */
-static struct page *
+static int
 grow_dev_page(struct block_device *bdev, sector_t block,
-               pgoff_t index, int size)
+               pgoff_t index, int size, int sizebits)
 {
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
+       sector_t end_block;
+       int ret = 0;            /* Will call free_more_memory() */
 
        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
-               return NULL;
+               return ret;
 
        BUG_ON(!PageLocked(page));
 
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
-                       init_page_buffers(page, bdev, block, size);
-                       return page;
+                       end_block = init_page_buffers(page, bdev,
+                                               index << sizebits, size);
+                       goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
@@ -1032,15 +1040,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       init_page_buffers(page, bdev, block, size);
+       end_block = init_page_buffers(page, bdev, index << sizebits, size);
        spin_unlock(&inode->i_mapping->private_lock);
-       return page;
-
+done:
+       ret = (block < end_block) ? 1 : -ENXIO;
 failed:
-       BUG();
        unlock_page(page);
        page_cache_release(page);
-       return NULL;
+       return ret;
 }
 
 /*
@@ -1050,7 +1057,6 @@ failed:
 static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
 {
-       struct page *page;
        pgoff_t index;
        int sizebits;
 
@@ -1074,22 +1080,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
                        bdevname(bdev, b));
                return -EIO;
        }
-       block = index << sizebits;
+
        /* Create a page with the proper size buffers.. */
-       page = grow_dev_page(bdev, block, index, size);
-       if (!page)
-               return 0;
-       unlock_page(page);
-       page_cache_release(page);
-       return 1;
+       return grow_dev_page(bdev, block, index, size, sizebits);
 }
 
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
-       int ret;
-       struct buffer_head *bh;
-
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1102,21 +1100,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-retry:
-       bh = __find_get_block(bdev, block, size);
-       if (bh)
-               return bh;
+       for (;;) {
+               struct buffer_head *bh;
+               int ret;
 
-       ret = grow_buffers(bdev, block, size);
-       if (ret == 0) {
-               free_more_memory();
-               goto retry;
-       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
+
+               ret = grow_buffers(bdev, block, size);
+               if (ret < 0)
+                       return NULL;
+               if (ret == 0)
+                       free_more_memory();
        }
-       return NULL;
 }
 
 /*
@@ -1372,10 +1369,6 @@ EXPORT_SYMBOL(__find_get_block);
  * which corresponds to the passed block_device, block and size. The
  * returned buffer has its reference count incremented.
  *
- * __getblk() cannot fail - it just keeps trying.  If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block.  Very weird.
- *
  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
  * attempt is failing.  FIXME, perhaps?
  */
index 0bb785f..51574d4 100644 (file)
@@ -882,7 +882,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
                FreeXid(xid);
-               return rc;
+               return -ENOMEM;
        }
 
        for (i = 0; i < 2; i++) {
index c987875..e07a3d3 100644 (file)
@@ -1174,11 +1174,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
        struct file *file;
        int fput_needed;
        ssize_t ret;
+       loff_t pos;
 
        file = fget_light(fd, &fput_needed);
        if (!file)
                return -EBADF;
-       ret = compat_readv(file, vec, vlen, &file->f_pos);
+       pos = file->f_pos;
+       ret = compat_readv(file, vec, vlen, &pos);
+       file->f_pos = pos;
        fput_light(file, fput_needed);
        return ret;
 }
@@ -1233,11 +1236,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
        struct file *file;
        int fput_needed;
        ssize_t ret;
+       loff_t pos;
 
        file = fget_light(fd, &fput_needed);
        if (!file)
                return -EBADF;
-       ret = compat_writev(file, vec, vlen, &file->f_pos);
+       pos = file->f_pos;
+       ret = compat_writev(file, vec, vlen, &pos);
+       file->f_pos = pos;
        fput_light(file, fput_needed);
        return ret;
 }
index af11098..7c7556b 100644 (file)
@@ -640,6 +640,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *lower_old_dir_dentry;
        struct dentry *lower_new_dir_dentry;
        struct dentry *trap = NULL;
+       struct inode *target_inode;
 
        lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
        lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -647,6 +648,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        dget(lower_new_dentry);
        lower_old_dir_dentry = dget_parent(lower_old_dentry);
        lower_new_dir_dentry = dget_parent(lower_new_dentry);
+       target_inode = new_dentry->d_inode;
        trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
        /* source should not be ancestor of target */
        if (trap == lower_old_dentry) {
@@ -662,6 +664,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        lower_new_dir_dentry->d_inode, lower_new_dentry);
        if (rc)
                goto out_lock;
+       if (target_inode)
+               fsstack_copy_attr_all(target_inode,
+                                     ecryptfs_inode_to_lower(target_inode));
        fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
        if (new_dir != old_dir)
                fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
index 24a49d4..1585db1 100644 (file)
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
                                bio->bi_rw |= REQ_WRITE;
                        }
 
-                       osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
-                                     bio, per_dev->length);
+                       osd_req_write(or, _ios_obj(ios, cur_comp),
+                                     per_dev->offset, bio, per_dev->length);
                        ORE_DBGMSG("write(0x%llx) offset=0x%llx "
                                      "length=0x%llx dev=%d\n",
-                                    _LLU(_ios_obj(ios, dev)->id),
+                                    _LLU(_ios_obj(ios, cur_comp)->id),
                                     _LLU(per_dev->offset),
                                     _LLU(per_dev->length), dev);
                } else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
                               (ios->si.unit_off + ios->length >
                                ios->layout->stripe_unit));
 
-                       ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
+                       ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
                                                 per_dev->offset,
                                                 ios->kern_buff, ios->length);
                        if (unlikely(ret))
                                goto out;
                        ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
                                      "length=0x%llx dev=%d\n",
-                                    _LLU(_ios_obj(ios, dev)->id),
+                                    _LLU(_ios_obj(ios, cur_comp)->id),
                                     _LLU(per_dev->offset),
                                     _LLU(ios->length), per_dev->dev);
                } else {
-                       osd_req_set_attributes(or, _ios_obj(ios, dev));
+                       osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
                        ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
-                                    _LLU(_ios_obj(ios, dev)->id),
+                                    _LLU(_ios_obj(ios, cur_comp)->id),
                                     ios->out_attr_len, dev);
                }
 
index 5b3f907..71b263f 100644 (file)
@@ -3072,6 +3072,8 @@ static int ext3_do_update_inode(handle_t *handle,
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct buffer_head *bh = iloc->bh;
        int err = 0, rc, block;
+       int need_datasync = 0;
+       __le32 disksize;
 
 again:
        /* we can't allow multiple procs in here at once, its a bit racey */
@@ -3109,7 +3111,11 @@ again:
                raw_inode->i_gid_high = 0;
        }
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
-       raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+       disksize = cpu_to_le32(ei->i_disksize);
+       if (disksize != raw_inode->i_size) {
+               need_datasync = 1;
+               raw_inode->i_size = disksize;
+       }
        raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
@@ -3125,8 +3131,11 @@ again:
        if (!S_ISREG(inode->i_mode)) {
                raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
        } else {
-               raw_inode->i_size_high =
-                       cpu_to_le32(ei->i_disksize >> 32);
+               disksize = cpu_to_le32(ei->i_disksize >> 32);
+               if (disksize != raw_inode->i_size_high) {
+                       raw_inode->i_size_high = disksize;
+                       need_datasync = 1;
+               }
                if (ei->i_disksize > 0x7fffffffULL) {
                        struct super_block *sb = inode->i_sb;
                        if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
@@ -3179,6 +3188,8 @@ again:
        ext3_clear_inode_state(inode, EXT3_STATE_NEW);
 
        atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
+       if (need_datasync)
+               atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 out_brelse:
        brelse (bh);
        ext3_std_error(inode->i_sb, err);
index a071348..f8d5fce 100644 (file)
@@ -904,6 +904,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
+       ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
@@ -3107,6 +3108,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
        ext4_group_t            i, ngroups = ext4_get_groups_count(sb);
        int                     s, j, count = 0;
 
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+               return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+                       sbi->s_itb_per_group + 2);
+
        first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
                (grp * EXT4_BLOCKS_PER_GROUP(sb));
        last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
index 2aaf3ea..5c029fb 100644 (file)
@@ -1524,6 +1524,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                req->pages[req->num_pages] = page;
                req->num_pages++;
 
+               offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
index 0c84100..5242006 100644 (file)
@@ -1687,7 +1687,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
        size_t n;
        u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
 
-       for (n = 0; n < count; n++) {
+       for (n = 0; n < count; n++, iov++) {
                if (iov->iov_len > (size_t) max)
                        return -ENOMEM;
                max -= iov->iov_len;
index 7daf4b8..90effcc 100644 (file)
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
        int ret = 0;
-       unsigned int io_size;
+       u64 io_size;
        loff_t start;
        int offset;
 
index 3db6b82..d774309 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/buffer_head.h> /* various write calls */
 #include <linux/prefetch.h>
 
+#include "../pnfs.h"
+#include "../internal.h"
 #include "blocklayout.h"
 
 #define NFSDBG_FACILITY        NFSDBG_PNFS_LD
@@ -814,7 +816,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-       max_pages = max_resp_sz >> PAGE_SHIFT;
+       max_pages = nfs_page_array_len(0, max_resp_sz);
        dprintk("%s max_resp_sz %u max_pages %d\n",
                __func__, max_resp_sz, max_pages);
 
index c69682a..4e2ee99 100644 (file)
@@ -153,7 +153,7 @@ static int _preload_range(struct pnfs_inval_markings *marks,
        count = (int)(end - start) / (int)tree->mtt_step_size;
 
        /* Pre-malloc what memory we might need */
-       storage = kmalloc(sizeof(*storage) * count, GFP_NOFS);
+       storage = kcalloc(count, sizeof(*storage), GFP_NOFS);
        if (!storage)
                return -ENOMEM;
        for (i = 0; i < count; i++) {
index ac28990..756f4df 100644 (file)
@@ -1103,7 +1103,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
        struct nfs_fattr *fattr = NULL;
        int error;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && (nd->flags & LOOKUP_RCU))
                return -ECHILD;
 
        parent = dget_parent(dentry);
@@ -1508,7 +1508,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
        struct nfs_open_context *ctx;
        int openflags, ret = 0;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && (nd->flags & LOOKUP_RCU))
                return -ECHILD;
 
        inode = dentry->d_inode;
index c43a452..961e562 100644 (file)
@@ -452,8 +452,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
 
        dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
-       /* Only do I/O if gfp is a superset of GFP_KERNEL */
-       if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+       /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
+        * doing this memory reclaim for a fs-related allocation.
+        */
+       if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
+           !(current->flags & PF_FSTRANS)) {
                int how = FLUSH_SYNC;
 
                /* Don't let kswapd deadlock waiting for OOM RPC calls */
index 50a15fa..b78b5b6 100644 (file)
@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
        nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
        nfsi->attrtimeo_timestamp = jiffies;
 
-       memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+       memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
                nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
        else
index d4bc9ed..dba87e6 100644 (file)
@@ -68,7 +68,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
        nfs_fattr_init(info->fattr);
        status = rpc_call_sync(client, &msg, 0);
        dprintk("%s: reply fsinfo: %d\n", __func__, status);
-       if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
+       if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
                msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
                msg.rpc_resp = info->fattr;
                status = rpc_call_sync(client, &msg, 0);
@@ -633,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                  u64 cookie, struct page **pages, unsigned int count, int plus)
 {
        struct inode            *dir = dentry->d_inode;
-       __be32                  *verf = NFS_COOKIEVERF(dir);
+       __be32                  *verf = NFS_I(dir)->cookieverf;
        struct nfs3_readdirargs arg = {
                .fh             = NFS_FH(dir),
                .cookie         = cookie,
index ed388aa..bd5d9cf 100644 (file)
@@ -721,7 +721,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-       max_pages = max_resp_sz >> PAGE_SHIFT;
+       max_pages = nfs_page_array_len(0, max_resp_sz);
        dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
                __func__, inode, max_resp_sz, max_pages);
 
index 8000459..61796a4 100644 (file)
@@ -3025,11 +3025,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                        dentry->d_parent->d_name.name,
                        dentry->d_name.name,
                        (unsigned long long)cookie);
-       nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+       nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
        res.pgbase = args.pgbase;
        status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
        if (status >= 0) {
-               memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+               memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
                status += args.pgbase;
        }
 
@@ -5769,11 +5769,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
        dprintk("<-- %s\n", __func__);
 }
 
+static size_t max_response_pages(struct nfs_server *server)
+{
+       u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+       return nfs_page_array_len(0, max_resp_sz);
+}
+
+static void nfs4_free_pages(struct page **pages, size_t size)
+{
+       int i;
+
+       if (!pages)
+               return;
+
+       for (i = 0; i < size; i++) {
+               if (!pages[i])
+                       break;
+               __free_page(pages[i]);
+       }
+       kfree(pages);
+}
+
+static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
+{
+       struct page **pages;
+       int i;
+
+       pages = kcalloc(size, sizeof(struct page *), gfp_flags);
+       if (!pages) {
+               dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
+               return NULL;
+       }
+
+       for (i = 0; i < size; i++) {
+               pages[i] = alloc_page(gfp_flags);
+               if (!pages[i]) {
+                       dprintk("%s: failed to allocate page\n", __func__);
+                       nfs4_free_pages(pages, size);
+                       return NULL;
+               }
+       }
+
+       return pages;
+}
+
 static void nfs4_layoutget_release(void *calldata)
 {
        struct nfs4_layoutget *lgp = calldata;
+       struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+       size_t max_pages = max_response_pages(server);
 
        dprintk("--> %s\n", __func__);
+       nfs4_free_pages(lgp->args.layout.pages, max_pages);
        put_nfs_open_context(lgp->args.ctx);
        kfree(calldata);
        dprintk("<-- %s\n", __func__);
@@ -5785,9 +5832,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
        .rpc_release = nfs4_layoutget_release,
 };
 
-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
 {
        struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+       size_t max_pages = max_response_pages(server);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
@@ -5805,6 +5853,13 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
 
        dprintk("--> %s\n", __func__);
 
+       lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
+       if (!lgp->args.layout.pages) {
+               nfs4_layoutget_release(lgp);
+               return -ENOMEM;
+       }
+       lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+
        lgp->res.layoutp = &lgp->args.layout;
        lgp->res.seq_res.sr_slot = NULL;
        task = rpc_run_task(&task_setup_data);
index bdd5bdc..00818c8 100644 (file)
@@ -6113,7 +6113,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_open(xdr, res);
        if (status)
                goto out;
-       if (decode_getfh(xdr, &res->fh) != 0)
+       status = decode_getfh(xdr, &res->fh);
+       if (status)
                goto out;
        if (decode_getfattr(xdr, res->f_attr, res->server,
                                !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
index f881a63..3ad6595 100644 (file)
@@ -575,9 +575,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg = NULL;
-       struct page **pages = NULL;
-       int i;
-       u32 max_resp_sz, max_pages;
 
        dprintk("--> %s\n", __func__);
 
@@ -586,20 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        if (lgp == NULL)
                return NULL;
 
-       /* allocate pages for xdr post processing */
-       max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-       max_pages = max_resp_sz >> PAGE_SHIFT;
-
-       pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
-       if (!pages)
-               goto out_err_free;
-
-       for (i = 0; i < max_pages; i++) {
-               pages[i] = alloc_page(gfp_flags);
-               if (!pages[i])
-                       goto out_err_free;
-       }
-
        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
@@ -608,39 +591,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
-       lgp->args.layout.pages = pages;
-       lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->lsegpp = &lseg;
        lgp->gfp_flags = gfp_flags;
 
        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
-       nfs4_proc_layoutget(lgp);
+       nfs4_proc_layoutget(lgp, gfp_flags);
        if (!lseg) {
                /* remember that LAYOUTGET failed and suspend trying */
                set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
        }
 
-       /* free xdr pages */
-       for (i = 0; i < max_pages; i++)
-               __free_page(pages[i]);
-       kfree(pages);
-
        return lseg;
-
-out_err_free:
-       /* free any allocated xdr pages, lgp as it's not used */
-       if (pages) {
-               for (i = 0; i < max_pages; i++) {
-                       if (!pages[i])
-                               break;
-                       __free_page(pages[i]);
-               }
-               kfree(pages);
-       }
-       kfree(lgp);
-       return NULL;
 }
 
 /* Initiates a LAYOUTRETURN(FILE) */
index 53d593a..c946b1b 100644 (file)
@@ -162,7 +162,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
                                   struct pnfs_devicelist *devlist);
 extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
                                   struct pnfs_device *dev);
-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
 extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
 
 /* pnfs.c */
index 376cd65..e42d6f6 100644 (file)
@@ -1820,6 +1820,7 @@ static int nfs_validate_mount_data(void *options,
 
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
+               args->nfs_server.port = ntohs(data->addr.sin_port);
                if (!nfs_verify_server_address(sap))
                        goto out_no_address;
 
@@ -2538,6 +2539,7 @@ static int nfs4_validate_mount_data(void *options,
                        return -EFAULT;
                if (!nfs_verify_server_address(sap))
                        goto out_no_address;
+               args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
 
                if (data->auth_flavourlen) {
                        if (data->auth_flavourlen > 1)
@@ -3087,4 +3089,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
        return res;
 }
 
+MODULE_ALIAS("nfs4");
+
 #endif /* CONFIG_NFS_V4 */
index c6e523a..301391a 100644 (file)
@@ -1742,12 +1742,12 @@ int __init nfs_init_writepagecache(void)
        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        if (nfs_wdata_mempool == NULL)
-               return -ENOMEM;
+               goto out_destroy_write_cache;
 
        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
-               return -ENOMEM;
+               goto out_destroy_write_mempool;
 
        /*
         * NFS congestion size, scale with available memory.
@@ -1770,6 +1770,12 @@ int __init nfs_init_writepagecache(void)
                nfs_congestion_kb = 256*1024;
 
        return 0;
+
+out_destroy_write_mempool:
+       mempool_destroy(nfs_wdata_mempool);
+out_destroy_write_cache:
+       kmem_cache_destroy(nfs_wdata_cachep);
+       return -ENOMEM;
 }
 
 void nfs_destroy_writepagecache(void)
index 9cfa60a..87a1746 100644 (file)
@@ -2236,7 +2236,7 @@ out_acl:
        if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
                if ((buflen -= 4) < 0)
                        goto out_resource;
-               WRITE32(1);
+               WRITE32(0);
        }
        if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
                if ((buflen -= 4) < 0)
index ac258be..c598cfb 100644 (file)
@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
        if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
                goto out;
 
-       down_read(&inode->i_sb->s_umount);
+       mutex_lock(&nilfs->ns_snapshot_mount_mutex);
 
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        ret = nilfs_cpfile_change_cpmode(
@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
        else
                nilfs_transaction_commit(inode->i_sb); /* never fails */
 
-       up_read(&inode->i_sb->s_umount);
+       mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
 out:
        mnt_drop_write(filp->f_path.mnt);
        return ret;
index 8351c44..97bfbdd 100644 (file)
@@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
        struct nilfs_root *root;
        int ret;
 
+       mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
        down_read(&nilfs->ns_segctor_sem);
        ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
        up_read(&nilfs->ns_segctor_sem);
@@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
        ret = nilfs_get_root_dentry(s, root, root_dentry);
        nilfs_put_root(root);
  out:
+       mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
        return ret;
 }
 
index 35a8970..1c98f53 100644 (file)
@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
        nilfs->ns_bdev = bdev;
        atomic_set(&nilfs->ns_ndirtyblks, 0);
        init_rwsem(&nilfs->ns_sem);
+       mutex_init(&nilfs->ns_snapshot_mount_mutex);
        INIT_LIST_HEAD(&nilfs->ns_dirty_files);
        INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
        spin_lock_init(&nilfs->ns_inode_lock);
index 9992b11..de7435f 100644 (file)
@@ -47,6 +47,7 @@ enum {
  * @ns_flags: flags
  * @ns_bdev: block device
  * @ns_sem: semaphore for shared states
+ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
  * @ns_sbh: buffer heads of on-disk super blocks
  * @ns_sbp: pointers to super block data
  * @ns_sbwtime: previous write time of super block
@@ -99,6 +100,7 @@ struct the_nilfs {
 
        struct block_device    *ns_bdev;
        struct rw_semaphore     ns_sem;
+       struct mutex            ns_snapshot_mount_mutex;
 
        /*
         * used for
index 76e2012..1ae8e82 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -883,9 +883,10 @@ static inline int build_open_flags(int flags, int mode, struct open_flags *op)
        int lookup_flags = 0;
        int acc_mode;
 
-       if (!(flags & O_CREAT))
-               mode = 0;
-       op->mode = mode;
+       if (flags & O_CREAT)
+               op->mode = (mode & S_IALLUGO) | S_IFREG;
+       else
+               op->mode = 0;
 
        /* Must never be set by userspace */
        flags &= ~FMODE_NONOTIFY;
index 2da1715..4619247 100644 (file)
@@ -290,7 +290,7 @@ handle_fragments:
 
 check_directory_table:
        /* Sanity check directory_table */
-       if (msblk->directory_table >= next_table) {
+       if (msblk->directory_table > next_table) {
                err = -EINVAL;
                goto failed_mount;
        }
index 8806b89..7b21801 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -57,12 +57,13 @@ EXPORT_SYMBOL(vfs_getattr);
 
 int vfs_fstat(unsigned int fd, struct kstat *stat)
 {
-       struct file *f = fget(fd);
+       int fput_needed;
+       struct file *f = fget_raw_light(fd, &fput_needed);
        int error = -EBADF;
 
        if (f) {
                error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
-               fput(f);
+               fput_light(f, fput_needed);
        }
        return error;
 }
index d567b84..874c9e3 100644 (file)
 #include "udf_i.h"
 #include "udf_sb.h"
 
-static int udf_adinicb_readpage(struct file *file, struct page *page)
+static void __udf_adinicb_readpage(struct page *page)
 {
        struct inode *inode = page->mapping->host;
        char *kaddr;
        struct udf_inode_info *iinfo = UDF_I(inode);
 
-       BUG_ON(!PageLocked(page));
-
        kaddr = kmap(page);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
        memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
+       memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
        flush_dcache_page(page);
        SetPageUptodate(page);
        kunmap(page);
+}
+
+static int udf_adinicb_readpage(struct file *file, struct page *page)
+{
+       BUG_ON(!PageLocked(page));
+       __udf_adinicb_readpage(page);
        unlock_page(page);
 
        return 0;
@@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page,
        return 0;
 }
 
+static int udf_adinicb_write_begin(struct file *file,
+                       struct address_space *mapping, loff_t pos,
+                       unsigned len, unsigned flags, struct page **pagep,
+                       void **fsdata)
+{
+       struct page *page;
+
+       if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+               return -EIO;
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+
+       if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+               __udf_adinicb_readpage(page);
+       return 0;
+}
+
 static int udf_adinicb_write_end(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
@@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file,
 const struct address_space_operations udf_adinicb_aops = {
        .readpage       = udf_adinicb_readpage,
        .writepage      = udf_adinicb_writepage,
-       .write_begin = simple_write_begin,
-       .write_end = udf_adinicb_write_end,
+       .write_begin    = udf_adinicb_write_begin,
+       .write_end      = udf_adinicb_write_end,
 };
 
 static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
index 580a6d3..c04e0db 100644 (file)
@@ -26,7 +26,13 @@ static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_xchg(count, 0) != 1))
-               fail_fn(count);
+               /*
+                * We failed to acquire the lock, so mark it contended
+                * to ensure that any waiting tasks are woken up by the
+                * unlock slow path.
+                */
+               if (likely(atomic_xchg(count, -1) != 1))
+                       fail_fn(count);
 }
 
 /**
@@ -43,7 +49,8 @@ static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_xchg(count, 0) != 1))
-               return fail_fn(count);
+               if (likely(atomic_xchg(count, -1) != 1))
+                       return fail_fn(count);
        return 0;
 }
 
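As a rough user-space illustration of the state machine the comment above describes (1 = unlocked, 0 = locked, -1 = locked and possibly contended), here is a minimal C11 sketch — my own construction, not kernel code — of why the failed fast path re-xchgs the counter to -1 before calling the slow path:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;    /* 1 = unlocked, 0 = locked, -1 = contended */

static void slowpath(atomic_int *c)
{
        printf("slow path entered, count = %d\n", atomic_load(c));
}

static void fastpath_lock(void (*fail_fn)(atomic_int *))
{
        if (atomic_exchange(&count, 0) != 1) {
                /* Lock was not free: mark it contended so a later unlock
                 * takes the slow path and wakes any waiters. */
                if (atomic_exchange(&count, -1) != 1)
                        fail_fn(&count);
        }
}

int main(void)
{
        fastpath_lock(slowpath);        /* count: 1 -> 0, acquired */
        fastpath_lock(slowpath);        /* count: 0 -> -1, slow path runs */
        return 0;
}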
index ddd46db..7639f18 100644 (file)
@@ -277,8 +277,9 @@ struct drm_mode_mode_cmd {
        struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_CURSOR_BO     (1<<0)
-#define DRM_MODE_CURSOR_MOVE   (1<<1)
+#define DRM_MODE_CURSOR_BO     0x01
+#define DRM_MODE_CURSOR_MOVE   0x02
+#define DRM_MODE_CURSOR_FLAGS  0x03
 
 /*
  * depending on the value in flags different members are used.
index c5ed2f1..a2227f7 100644 (file)
@@ -41,6 +41,9 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                        unsigned long *, int *, int, unsigned int flags);
 void unmap_hugepage_range(struct vm_area_struct *,
                        unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct vm_area_struct *vma,
+                         unsigned long start, unsigned long end,
+                         struct page *ref_page);
 void __unmap_hugepage_range(struct vm_area_struct *,
                        unsigned long, unsigned long, struct page *);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
@@ -99,6 +102,13 @@ static inline unsigned long hugetlb_total_pages(void)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)         ({ BUG(); 0; })
 #define unmap_hugepage_range(vma, start, end, page)    BUG()
+static inline void __unmap_hugepage_range_final(struct vm_area_struct *vma,
+                       unsigned long start, unsigned long end,
+                       struct page *ref_page)
+{
+       BUG();
+}
+
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
index df53fdf..cdde2b3 100644 (file)
@@ -124,8 +124,17 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk)                                                \
+       .sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
 #ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk)                                 \
+# define INIT_PERF_EVENTS(tsk)                                         \
        .perf_event_mutex =                                             \
                 __MUTEX_INITIALIZER(tsk.perf_event_mutex),             \
        .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -162,6 +171,7 @@ extern struct cred init_cred;
        },                                                              \
        .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
        INIT_PUSHABLE_TASKS(tsk)                                        \
+       INIT_CGROUP_SCHED(tsk)                                          \
        .ptraced        = LIST_HEAD_INIT(tsk.ptraced),                  \
        .ptrace_entry   = LIST_HEAD_INIT(tsk.ptrace_entry),             \
        .real_parent    = &tsk,                                         \
index f875b31..16625d7 100644 (file)
@@ -2,6 +2,7 @@
 #define LINUX_INPUT_EETI_TS_H
 
 struct eeti_ts_platform_data {
+       int irq_gpio;
        unsigned int irq_active_high;
 };
 
index ad81e1c..445f978 100644 (file)
@@ -226,7 +226,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
 
 static inline __printf(2, 3)
 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
-{ return 0; }
+{ return -ENOMEM; }
 
 static inline int kobject_action_type(const char *buf, size_t count,
                                      enum kobject_action *type)
index 603bec2..06177ba 100644 (file)
@@ -58,13 +58,6 @@ union ktime {
 
 typedef union ktime ktime_t;           /* Kill this */
 
-#define KTIME_MAX                      ((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX                 (KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX                 LONG_MAX
-#endif
-
 /*
  * ktime_t definitions when using the 64-bit scalar representation:
  */
index 40c3721..32a1b5c 100644 (file)
@@ -16,6 +16,7 @@ struct pcap_subdev {
 struct pcap_platform_data {
        unsigned int irq_base;
        unsigned int config;
+       int gpio;
        void (*init) (void *);  /* board specific init */
        int num_subdevs;
        struct pcap_subdev *subdevs;
index c8ef9bc..87967ee 100644 (file)
@@ -219,6 +219,7 @@ struct mmc_card {
 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7)          /* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)  /* Avoid sending 512 bytes in */
 #define MMC_QUIRK_LONG_READ_TIME (1<<9)                /* Data read time > CSD says */
+#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10)        /* Skip secure for erase/trim */
                                                /* byte mode */
        unsigned int    poweroff_notify_state;  /* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION      0
index 30b0c4e..43e038a 100644 (file)
@@ -15,6 +15,8 @@
 #define MV643XX_ETH_SIZE_REG_4         0x2224
 #define MV643XX_ETH_BASE_ADDR_ENABLE_REG       0x2290
 
+#define MV643XX_TX_CSUM_DEFAULT_LIMIT  0
+
 struct mv643xx_eth_shared_platform_data {
        struct mbus_dram_target_info    *dram;
        struct platform_device  *shared_smi;
index cb52340..00ca32b 100644 (file)
@@ -1299,6 +1299,8 @@ struct net_device {
        /* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE           65536
        unsigned int            gso_max_size;
+#define GSO_MAX_SEGS           65535
+       u16                     gso_max_segs;
 
 #ifdef CONFIG_DCB
        /* Data Center Bridging netlink ops */
@@ -1511,6 +1513,8 @@ struct packet_type {
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb);
+       bool                    (*id_match)(struct packet_type *ptype,
+                                           struct sock *sk);
        void                    *af_packet_priv;
        struct list_head        list;
 };
index 92ecf55..33c52a2 100644 (file)
@@ -261,11 +261,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
        return NFS_SERVER(inode)->nfs_client->rpc_ops;
 }
 
-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
-{
-       return NFS_I(inode)->cookieverf;
-}
-
 static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
 {
        struct nfs_server *nfss = NFS_SERVER(inode);
index 2aaee0c..67cc215 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index b1f8912..b669be6 100644 (file)
@@ -794,7 +794,7 @@ struct perf_event {
        struct hw_perf_event            hw;
 
        struct perf_event_context       *ctx;
-       struct file                     *filp;
+       atomic_long_t                   refcount;
 
        /*
         * These accumulate total time (in nanoseconds) that children
index 8f74538..29e217a 100644 (file)
@@ -50,11 +50,13 @@ struct rnd_state {
 
 extern void rand_initialize_irq(int irq);
 
+extern void add_device_randomness(const void *, unsigned int);
 extern void add_input_randomness(unsigned int type, unsigned int code,
                                 unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
 
 extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes_arch(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
 
 #ifndef MODULE
index d336c35..1e86bb4 100644 (file)
@@ -1236,6 +1236,9 @@ struct task_struct {
        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+       struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        /* list of struct preempt_notifier: */
@@ -2646,7 +2649,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
                                        struct task_struct *tsk);
index 15518a1..0a4cd10 100644 (file)
@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
        void            (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
        int             (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+       void            (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*rpcbind)(struct rpc_task *task);
        void            (*set_port)(struct rpc_xprt *xprt, unsigned short port);
        void            (*connect)(struct rpc_task *task);
@@ -274,6 +275,8 @@ void                        xprt_connect(struct rpc_task *task);
 void                   xprt_reserve(struct rpc_task *task);
 int                    xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_transmit(struct rpc_task *task);
 void                   xprt_end_transmit(struct rpc_task *task);
index b306178..8c0216e 100644 (file)
@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
        return ts_delta;
 }
 
+#define KTIME_MAX                      ((s64)~((u64)1 << 63))
+#if (BITS_PER_LONG == 64)
+# define KTIME_SEC_MAX                 (KTIME_MAX / NSEC_PER_SEC)
+#else
+# define KTIME_SEC_MAX                 LONG_MAX
+#endif
+
 /*
  * Returns true if the timespec is norm, false if denorm:
  */
-#define timespec_valid(ts) \
-       (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
+static inline bool timespec_valid(const struct timespec *ts)
+{
+       /* Dates before 1970 are bogus */
+       if (ts->tv_sec < 0)
+               return false;
+       /* Can't have more nanoseconds than a second */
+       if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+               return false;
+       return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+       if (!timespec_valid(ts))
+               return false;
+       /* Disallow values that could overflow ktime_t */
+       if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+               return false;
+       return true;
+}
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
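For illustration, a stand-alone user-space restatement of the new check (everything prefixed sketch_ is hypothetical, not part of the patch); it shows the one case timespec_valid_strict() rejects on top of timespec_valid(): seconds values large enough to overflow ktime_t.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SKETCH_NSEC_PER_SEC  1000000000LL
#define SKETCH_KTIME_MAX     ((int64_t)~((uint64_t)1 << 63))
#define SKETCH_KTIME_SEC_MAX (SKETCH_KTIME_MAX / SKETCH_NSEC_PER_SEC)

static bool sketch_timespec_valid_strict(const struct timespec *ts)
{
        if (ts->tv_sec < 0)
                return false;                   /* before 1970 */
        if ((unsigned long long)ts->tv_nsec >= SKETCH_NSEC_PER_SEC)
                return false;                   /* nanoseconds out of range */
        if ((unsigned long long)ts->tv_sec >= SKETCH_KTIME_SEC_MAX)
                return false;                   /* would overflow ktime_t */
        return true;
}

int main(void)
{
        struct timespec sane = { .tv_sec = 1348300000, .tv_nsec = 0 };
        struct timespec huge = { .tv_sec = (time_t)SKETCH_KTIME_SEC_MAX,
                                 .tv_nsec = 0 };

        /* "huge" is normalized, so the old check accepts it, but it would
         * overflow ktime_t; the strict check rejects it. */
        printf("sane: %d, huge: %d\n",
               sketch_timespec_valid_strict(&sane),
               sketch_timespec_valid_strict(&huge));
        return 0;
}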
index 4269c3f..93629fc 100644 (file)
@@ -775,6 +775,27 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
        .bInterfaceSubClass = (sc), \
        .bInterfaceProtocol = (pr)
 
+/**
+ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
+ * @vend: the 16 bit USB Vendor ID
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific vendor with a specific class of interfaces.
+ *
+ * This is especially useful when explicitly matching devices that have
+ * vendor specific bDeviceClass values, but standards-compliant interfaces.
+ */
+#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
+       .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+               | USB_DEVICE_ID_MATCH_VENDOR, \
+       .idVendor = (vend), \
+       .bInterfaceClass = (cl), \
+       .bInterfaceSubClass = (sc), \
+       .bInterfaceProtocol = (pr)
+
 /* ----------------------------------------------------------------------- */
 
 /* Stuff for dynamic usb ids */
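A minimal usage sketch of the new macro (the vendor id 0x1234 and the table name are made up for illustration, mirroring how drivers/usb/serial/option.c uses it earlier in this diff): the entry matches every interface of the given vendor whose class/subclass/protocol triple is ff/01/02, regardless of the device-level class.

#include <linux/module.h>
#include <linux/usb.h>

/* Hypothetical id table: 0x1234 is an illustrative vendor id only. */
static const struct usb_device_id example_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x1234, 0xff, 0x01, 0x02) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);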
index d456f4c..0c0017c 100644 (file)
@@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
 }
 
 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
-                              struct scm_cookie *scm)
+                              struct scm_cookie *scm, bool forcecreds)
 {
        memset(scm, 0, sizeof(*scm));
+       if (forcecreds)
+               scm_set_cred(scm, task_tgid(current), current_cred());
        unix_get_peersec_dgram(sock, scm);
        if (msg->msg_controllen <= 0)
                return 0;
index 32e3937..ddf523c 100644 (file)
@@ -194,6 +194,7 @@ struct sock_common {
   *    @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
   *    @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
   *    @sk_gso_max_size: Maximum GSO segment size to build
+  *    @sk_gso_max_segs: Maximum number of GSO segments
   *    @sk_lingertime: %SO_LINGER l_linger setting
   *    @sk_backlog: always used with the per-socket spinlock held
   *    @sk_callback_lock: used with the callbacks in the end of this struct
@@ -310,6 +311,7 @@ struct sock {
        int                     sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
+       u16                     sk_gso_max_segs;
        int                     sk_rcvlowat;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
index 5bf0790..31fdc48 100644 (file)
@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
-               fsnotify_put_mark(entry);
                goto out;
        }
 
@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
 
        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
-               free_chunk(new);
+               fsnotify_put_mark(&new->mark);
                goto Fallback;
        }
 
@@ -293,7 +292,6 @@ static void untag_chunk(struct node *p)
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry);
-       fsnotify_put_mark(entry);
        goto out;
 
 Fallback:
@@ -322,7 +320,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 
        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
-               free_chunk(chunk);
+               fsnotify_put_mark(entry);
                return -ENOSPC;
        }
 
@@ -332,6 +330,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
+               fsnotify_get_mark(entry);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
@@ -396,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
-               free_chunk(chunk);
+               fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }
@@ -412,6 +411,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);
 
+               fsnotify_get_mark(chunk_entry);
                fsnotify_destroy_mark(chunk_entry);
 
                fsnotify_put_mark(chunk_entry);
@@ -445,7 +445,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry);
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
-       fsnotify_put_mark(old_entry); /* and kill it */
        return 0;
 }
 
index 58690af..7d1f05e 100644 (file)
@@ -3011,12 +3011,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
 {
-       struct perf_event *event = file->private_data;
        struct task_struct *owner;
 
-       file->private_data = NULL;
+       if (!atomic_long_dec_and_test(&event->refcount))
+               return;
 
        rcu_read_lock();
        owner = ACCESS_ONCE(event->owner);
@@ -3051,7 +3051,13 @@ static int perf_release(struct inode *inode, struct file *file)
                put_task_struct(owner);
        }
 
-       return perf_event_release_kernel(event);
+       perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+       put_event(file->private_data);
+       return 0;
 }
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -3304,7 +3310,7 @@ unlock:
 
 static const struct file_operations perf_fops;
 
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
 {
        struct file *file;
 
@@ -3318,7 +3324,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
                return ERR_PTR(-EBADF);
        }
 
-       return file->private_data;
+       return file;
 }
 
 static int perf_event_set_output(struct perf_event *event,
@@ -3350,19 +3356,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PERF_EVENT_IOC_SET_OUTPUT:
        {
+               struct file *output_file = NULL;
                struct perf_event *output_event = NULL;
                int fput_needed = 0;
                int ret;
 
                if (arg != -1) {
-                       output_event = perf_fget_light(arg, &fput_needed);
-                       if (IS_ERR(output_event))
-                               return PTR_ERR(output_event);
+                       output_file = perf_fget_light(arg, &fput_needed);
+                       if (IS_ERR(output_file))
+                               return PTR_ERR(output_file);
+                       output_event = output_file->private_data;
                }
 
                ret = perf_event_set_output(event, output_event);
                if (output_event)
-                       fput_light(output_event->filp, fput_needed);
+                       fput_light(output_file, fput_needed);
 
                return ret;
        }
@@ -5912,6 +5920,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        mutex_init(&event->mmap_mutex);
 
+       atomic_long_set(&event->refcount, 1);
        event->cpu              = cpu;
        event->attr             = *attr;
        event->group_leader     = group_leader;
@@ -6182,12 +6191,12 @@ SYSCALL_DEFINE5(perf_event_open,
                return event_fd;
 
        if (group_fd != -1) {
-               group_leader = perf_fget_light(group_fd, &fput_needed);
-               if (IS_ERR(group_leader)) {
-                       err = PTR_ERR(group_leader);
+               group_file = perf_fget_light(group_fd, &fput_needed);
+               if (IS_ERR(group_file)) {
+                       err = PTR_ERR(group_file);
                        goto err_fd;
                }
-               group_file = group_leader->filp;
+               group_leader = group_file->private_data;
                if (flags & PERF_FLAG_FD_OUTPUT)
                        output_event = group_leader;
                if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -6322,7 +6331,6 @@ SYSCALL_DEFINE5(perf_event_open,
                put_ctx(gctx);
        }
 
-       event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
 
@@ -6412,7 +6420,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                goto err_free;
        }
 
-       event->filp = NULL;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, event, cpu);
@@ -6461,7 +6468,7 @@ static void sync_child_event(struct perf_event *child_event,
         * Release the parent event, if this was the last
         * reference to it.
         */
-       fput(parent_event->filp);
+       put_event(parent_event);
 }
 
 static void
@@ -6537,9 +6544,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         *
         *   __perf_event_exit_task()
         *     sync_child_event()
-        *       fput(parent_event->filp)
-        *         perf_release()
-        *           mutex_lock(&ctx->mutex)
+        *       put_event()
+        *         mutex_lock(&ctx->mutex)
         *
         * But since its the parent context it won't be the same instance.
         */
@@ -6607,7 +6613,7 @@ static void perf_free_event(struct perf_event *event,
        list_del_init(&event->child_list);
        mutex_unlock(&parent->child_mutex);
 
-       fput(parent->filp);
+       put_event(parent);
 
        perf_group_detach(event);
        list_del_event(event, ctx);
@@ -6687,6 +6693,12 @@ inherit_event(struct perf_event *parent_event,
                                           NULL, NULL);
        if (IS_ERR(child_event))
                return child_event;
+
+       if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+               free_event(child_event);
+               return NULL;
+       }
+
        get_ctx(child_ctx);
 
        /*
@@ -6727,14 +6739,6 @@ inherit_event(struct perf_event *parent_event,
        add_event_to_ctx(child_event, child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
-       /*
-        * Get a reference to the parent filp - we will fput it
-        * when the child event exits. This is safe to do because
-        * we are in the parent and we know that the filp still
-        * exists and has a nonzero count:
-        */
-       atomic_long_inc(&parent_event->filp->f_count);
-
        /*
         * Link this into the parent event's child list
         */
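The perf hunks above replace the old file-based lifetime (event->filp and fput()) with a reference count on the event itself: perf_event_alloc() starts it at 1, put_event() drops it and releases the event on zero, and inherit_event() may only take a reference while the count is still non-zero. A stripped-down sketch of that pattern on an illustrative object, not the perf code itself:

#include <linux/atomic.h>
#include <linux/slab.h>

struct example_obj {
        atomic_long_t refcount;
        /* ... payload ... */
};

static struct example_obj *example_obj_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                atomic_long_set(&obj->refcount, 1);     /* creator's reference */
        return obj;
}

/* Fails once the last put has started tearing the object down. */
static struct example_obj *example_obj_get(struct example_obj *obj)
{
        return atomic_long_inc_not_zero(&obj->refcount) ? obj : NULL;
}

static void example_obj_put(struct example_obj *obj)
{
        if (atomic_long_dec_and_test(&obj->refcount))
                kfree(obj);
}
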
index 866c9d5..80fb1c6 100644 (file)
@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * @uaddr2:    the pi futex we will take prior to returning to user-space
  *
  * The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
- * complete the acquisition of the rt_mutex prior to returning to userspace.
- * This ensures the rt_mutex maintains an owner when it has waiters; without
- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
- * need to.
+ * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
+ * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
+ * without one, the pi logic would not know which task to boost/deboost, if
+ * there was a need to.
  *
  * We call schedule in futex_wait_queue_me() when we enqueue and return there
  * via the following:
@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        struct futex_q q = futex_q_init;
        int res, ret;
 
+       if (uaddr == uaddr2)
+               return -EINVAL;
+
        if (!bitset)
                return -EINVAL;
 
@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
                 * the pi_state.
                 */
-               WARN_ON(!&q.pi_state);
+               WARN_ON(!q.pi_state);
                pi_mutex = &q.pi_state->pi_mutex;
                ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
                debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
         * fault, unlock the rt_mutex and return the fault to userspace.
         */
        if (ret == -EFAULT) {
-               if (rt_mutex_owner(pi_mutex) == current)
+               if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
                        rt_mutex_unlock(pi_mutex);
        } else if (ret == -EINTR) {
                /*
index 470d08c..10e0772 100644 (file)
@@ -117,7 +117,7 @@ irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
        irqreturn_t retval = IRQ_NONE;
-       unsigned int random = 0, irq = desc->irq_data.irq;
+       unsigned int flags = 0, irq = desc->irq_data.irq;
 
        do {
                irqreturn_t res;
@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
                        /* Fall through to add to randomness */
                case IRQ_HANDLED:
-                       random |= action->flags;
+                       flags |= action->flags;
                        break;
 
                default:
@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
                action = action->next;
        } while (action);
 
-       if (random & IRQF_SAMPLE_RANDOM)
-               add_interrupt_randomness(irq);
+       add_interrupt_randomness(irq, flags);
 
        if (!noirqdebug)
                note_interrupt(irq, desc, retval);
index b16c3ac..5e5c0a8 100644 (file)
@@ -746,22 +746,19 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
+ * We cannot use task_subsys_state() and friends because the cgroup
+ * subsystem changes that value before the cgroup_subsys::attach() method
+ * is called, therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
-       struct task_group *tg;
-       struct cgroup_subsys_state *css;
-
-       css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-                       lockdep_is_held(&p->pi_lock) ||
-                       lockdep_is_held(&task_rq(p)->lock));
-       tg = container_of(css, struct task_group, css);
-
-       return autogroup_task_group(p, tg);
+       return p->sched_task_group;
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -2372,7 +2369,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
         *
         * sched_move_task() holds both and thus holding either pins the cgroup,
-        * see set_task_rq().
+        * see task_group().
         *
         * Furthermore, all task_rq users should acquire both locks, see
         * task_rq_lock().
@@ -4358,6 +4355,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 # define nsecs_to_cputime(__nsecs)     nsecs_to_jiffies(__nsecs)
 #endif
 
+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+       u64 temp = (__force u64) rtime;
+
+       temp *= (__force u64) utime;
+
+       if (sizeof(cputime_t) == 4)
+               temp = div_u64(temp, (__force u32) total);
+       else
+               temp = div64_u64(temp, (__force u64) total);
+
+       return (__force cputime_t) temp;
+}
+
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
        cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
@@ -4367,13 +4378,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         */
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
-       if (total) {
-               u64 temp = rtime;
-
-               temp *= utime;
-               do_div(temp, total);
-               utime = (cputime_t)temp;
-       } else
+       if (total)
+               utime = scale_utime(utime, rtime, total);
+       else
                utime = rtime;
 
        /*
@@ -4400,13 +4407,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        total = cputime_add(cputime.utime, cputime.stime);
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-       if (total) {
-               u64 temp = rtime;
-
-               temp *= cputime.utime;
-               do_div(temp, total);
-               utime = (cputime_t)temp;
-       } else
+       if (total)
+               utime = scale_utime(cputime.utime, rtime, total);
+       else
                utime = rtime;
 
        sig->prev_utime = max(sig->prev_utime, utime);
@@ -8953,6 +8956,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+       struct task_group *tg;
        int on_rq, running;
        unsigned long flags;
        struct rq *rq;
@@ -8967,6 +8971,12 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                tsk->sched_class->put_prev_task(rq, tsk);
 
+       tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+                               lockdep_is_held(&tsk->sighand->siglock)),
+                         struct task_group, css);
+       tg = autogroup_task_group(tsk, tg);
+       tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
                tsk->sched_class->task_move_group(tsk, on_rq);
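The scale_utime() helper introduced above rescales utime so that the utime:stime ratio is preserved while the sum matches the scheduler's rtime, doing the multiply in 64 bits and choosing div_u64() or div64_u64() by the width of cputime_t (the open-coded do_div() it replaces only accepts a 32-bit divisor). A standalone arithmetic sketch with made-up numbers; the total == 0 case is handled by the callers, which simply use rtime:

#include <stdint.h>
#include <stdio.h>

static uint64_t scale_utime(uint64_t utime, uint64_t rtime, uint64_t total)
{
        return (rtime * utime) / total; /* callers guarantee total != 0 */
}

int main(void)
{
        /* 300 ticks of utime and 100 of stime were sampled (total 400),
         * but the scheduler accounted 500 ticks of runtime, so the user
         * share is scaled up to 500 * 300 / 400 = 375. */
        printf("scaled utime = %llu\n",
               (unsigned long long)scale_utime(300, 500, 400));
        return 0;
}
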
index 03e67d4..5ee1ac0 100644 (file)
@@ -382,7 +382,7 @@ int do_settimeofday(const struct timespec *tv)
        struct timespec ts_delta;
        unsigned long flags;
 
-       if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+       if (!timespec_valid_strict(tv))
                return -EINVAL;
 
        write_seqlock_irqsave(&xtime_lock, flags);
@@ -417,6 +417,8 @@ EXPORT_SYMBOL(do_settimeofday);
 int timekeeping_inject_offset(struct timespec *ts)
 {
        unsigned long flags;
+       struct timespec tmp;
+       int ret = 0;
 
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;
@@ -425,9 +427,16 @@ int timekeeping_inject_offset(struct timespec *ts)
 
        timekeeping_forward_now();
 
+       tmp = timespec_add(xtime,  *ts);
+       if (!timespec_valid_strict(&tmp)) {
+               ret = -EINVAL;
+               goto error;
+       }
+
        xtime = timespec_add(xtime, *ts);
        wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
 
+error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(true);
 
        write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -435,7 +444,7 @@ int timekeeping_inject_offset(struct timespec *ts)
        /* signal hrtimers about time change */
        clock_was_set();
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(timekeeping_inject_offset);
 
@@ -582,7 +591,20 @@ void __init timekeeping_init(void)
        struct timespec now, boot;
 
        read_persistent_clock(&now);
+       if (!timespec_valid_strict(&now)) {
+               pr_warn("WARNING: Persistent clock returned invalid value!\n"
+                       "         Check your CMOS/BIOS settings.\n");
+               now.tv_sec = 0;
+               now.tv_nsec = 0;
+       }
+
        read_boot_clock(&boot);
+       if (!timespec_valid_strict(&boot)) {
+               pr_warn("WARNING: Boot clock returned invalid value!\n"
+                       "         Check your CMOS/BIOS settings.\n");
+               boot.tv_sec = 0;
+               boot.tv_nsec = 0;
+       }
 
        write_seqlock_irqsave(&xtime_lock, flags);
 
@@ -627,7 +649,7 @@ static void update_sleep_time(struct timespec t)
  */
 static void __timekeeping_inject_sleeptime(struct timespec *delta)
 {
-       if (!timespec_valid(delta)) {
+       if (!timespec_valid_strict(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
@@ -1011,6 +1033,10 @@ static void update_wall_time(void)
 #else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
+       /* Check if there's really nothing to do */
+       if (offset < timekeeper.cycle_interval)
+               return;
+
        timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
        /*
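The timekeeping hunks above move several entry points to a stricter validity check so that a bad persistent-clock reading or a userspace-supplied offset cannot push xtime to a value that later ktime_t arithmetic cannot represent. A rough standalone approximation of what such a check enforces; the kernel's actual upper bound is KTIME_SEC_MAX, so the constant below is only illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC    1000000000L
/* illustrative bound: largest tv_sec whose nanosecond count still fits in s64 */
#define SEC_MAX         (INT64_MAX / NSEC_PER_SEC)

bool timespec_valid_strict_sketch(const struct timespec *ts)
{
        if (ts->tv_sec < 0)
                return false;
        if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
                return false;
        if ((uint64_t)ts->tv_sec >= (uint64_t)SEC_MAX)
                return false;
        return true;
}
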
index a650bee..979d4de 100644 (file)
@@ -3437,14 +3437,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
 
        for_each_busy_worker(worker, i, pos, gcwq) {
                struct work_struct *rebind_work = &worker->rebind_work;
+               unsigned long worker_flags = worker->flags;
 
                /*
                 * Rebind_work may race with future cpu hotplug
                 * operations.  Use a separate flag to mark that
-                * rebinding is scheduled.
+                * rebinding is scheduled.  The morphing should
+                * be atomic.
                 */
-               worker->flags |= WORKER_REBIND;
-               worker->flags &= ~WORKER_ROGUE;
+               worker_flags |= WORKER_REBIND;
+               worker_flags &= ~WORKER_ROGUE;
+               ACCESS_ONCE(worker->flags) = worker_flags;
 
                /* queue rebind_work, wq doesn't matter, use the default one */
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
index 993599e..d74c317 100644 (file)
@@ -886,7 +886,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                 * %pK cannot be used in IRQ context because its test
                 * for CAP_SYSLOG would be meaningless.
                 */
-               if (in_irq() || in_serving_softirq() || in_nmi()) {
+               if (kptr_restrict && (in_irq() || in_serving_softirq() ||
+                                     in_nmi())) {
                        if (spec.field_width == -1)
                                spec.field_width = 2 * sizeof(void *);
                        return string(buf, end, "pK-error", spec);
index b1e1bad..0f897b8 100644 (file)
@@ -2382,6 +2382,25 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
 }
 
+void __unmap_hugepage_range_final(struct vm_area_struct *vma,
+                         unsigned long start, unsigned long end,
+                         struct page *ref_page)
+{
+       __unmap_hugepage_range(vma, start, end, ref_page);
+
+       /*
+        * Clear this flag so that x86's huge_pmd_share page_table_shareable
+        * test will fail on a vma being torn down, and not grab a page table
+        * on its way out.  We're lucky that the flag has such an appropriate
+        * name, and can in fact be safely cleared here. We could clear it
+        * before the __unmap_hugepage_range above, but all that's necessary
+        * is to clear it before releasing the i_mmap_mutex. This works
+        * because in the context this is called, the VMA is about to be
+        * destroyed and the i_mmap_mutex is held.
+        */
+       vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
@@ -2939,9 +2958,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+       /*
+        * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+        * may have cleared our pud entry and done put_page on the page table:
+        * once we release i_mmap_mutex, another task can do the final put_page
+        * and that page table be reused and filled with junk.
+        */
        flush_tlb_range(vma, start, end);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
index 2189af4..0c26b5e 100644 (file)
@@ -309,3 +309,5 @@ extern u64 hwpoison_filter_flags_mask;
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
+
+extern void set_pageblock_order(void);
index 5ee121c..791f811 100644 (file)
@@ -1358,8 +1358,11 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
                                 * Since no pte has actually been setup, it is
                                 * safe to do nothing in this case.
                                 */
-                               if (vma->vm_file)
-                                       unmap_hugepage_range(vma, start, end, NULL);
+                               if (vma->vm_file) {
+                                       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                                       __unmap_hugepage_range_final(vma, start, end, NULL);
+                                       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                               }
 
                                start = end;
                        } else
index c0007f9..11b8d47 100644 (file)
@@ -2533,7 +2533,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
                break;
 
        default:
-               BUG();
+               return -EINVAL;
        }
 
        l = strlen(policy_modes[mode]);
index 9a611d3..862b608 100644 (file)
 void __mmu_notifier_release(struct mm_struct *mm)
 {
        struct mmu_notifier *mn;
+       struct hlist_node *n;
+
+       /*
+        * RCU here will block mmu_notifier_unregister until
+        * ->release returns.
+        */
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+               /*
+                * if ->release runs before mmu_notifier_unregister it
+                * must be handled as it's the only way for the driver
+                * to flush all existing sptes and stop the driver
+                * from establishing any more sptes before all the
+                * pages in the mm are freed.
+                */
+               if (mn->ops->release)
+                       mn->ops->release(mn, mm);
+       rcu_read_unlock();
 
        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
                 * mmu_notifier_unregister to return.
                 */
                hlist_del_init_rcu(&mn->hlist);
-               /*
-                * RCU here will block mmu_notifier_unregister until
-                * ->release returns.
-                */
-               rcu_read_lock();
-               spin_unlock(&mm->mmu_notifier_mm->lock);
-               /*
-                * if ->release runs before mmu_notifier_unregister it
-                * must be handled as it's the only way for the driver
-                * to flush all existing sptes and stop the driver
-                * from establishing any more sptes before all the
-                * pages in the mm are freed.
-                */
-               if (mn->ops->release)
-                       mn->ops->release(mn, mm);
-               rcu_read_unlock();
-               spin_lock(&mm->mmu_notifier_mm->lock);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 {
        BUG_ON(atomic_read(&mm->mm_count) <= 0);
 
-       spin_lock(&mm->mmu_notifier_mm->lock);
        if (!hlist_unhashed(&mn->hlist)) {
-               hlist_del_rcu(&mn->hlist);
-
                /*
                 * RCU here will force exit_mmap to wait ->release to finish
                 * before freeing the pages.
                 */
                rcu_read_lock();
-               spin_unlock(&mm->mmu_notifier_mm->lock);
+
                /*
                 * exit_mmap will block in mmu_notifier_release to
                 * guarantee ->release is called before freeing the
@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
                rcu_read_unlock();
-       } else
+
+               spin_lock(&mm->mmu_notifier_mm->lock);
+               hlist_del_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);
+       }
 
        /*
         * Wait any running method to finish, of course including
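The mmu_notifier hunks above stop invoking the ->release() callbacks while mmu_notifier_mm->lock is held: both __mmu_notifier_release() and mmu_notifier_unregister() now run the callbacks from a plain rcu_read_lock()-protected walk of the list and only unhash the entry under the spinlock afterwards. A minimal generic sketch of that traversal pattern with illustrative types (note that the 3.2-era hlist_for_each_entry_rcu() still takes a separate struct hlist_node cursor):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_notifier {
        struct hlist_node hlist;        /* linked into an RCU-protected list */
        void (*release)(struct example_notifier *n);
};

static void example_call_release(struct hlist_head *list)
{
        struct example_notifier *n;
        struct hlist_node *pos;

        /* Readers walk the list under rcu_read_lock(); writers unhash
         * entries with hlist_del_rcu() under their own lock and wait a
         * grace period before freeing them. */
        rcu_read_lock();
        hlist_for_each_entry_rcu(n, pos, list, hlist)
                if (n->release)
                        n->release(n);
        rcu_read_unlock();
}
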
index 065dbe8..6e51bf0 100644 (file)
@@ -4281,25 +4281,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
-       if (HPAGE_SHIFT > PAGE_SHIFT)
-               return HUGETLB_PAGE_ORDER;
-
-       return MAX_ORDER-1;
-}
-
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+void __init set_pageblock_order(void)
 {
+       unsigned int order;
+
        /* Check that pageblock_nr_pages has not already been setup */
        if (pageblock_order)
                return;
 
+       if (HPAGE_SHIFT > PAGE_SHIFT)
+               order = HUGETLB_PAGE_ORDER;
+       else
+               order = MAX_ORDER - 1;
+
        /*
         * Assume the largest contiguous order of interest is a huge page.
-        * This value may be variable depending on boot parameters on IA64
+        * This value may be variable depending on boot parameters on IA64 and
+        * powerpc.
         */
        pageblock_order = order;
 }
@@ -4307,15 +4306,13 @@ static inline void __init set_pageblock_order(unsigned int order)
 
 /*
  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
  */
-static inline int pageblock_default_order(unsigned int order)
+void __init set_pageblock_order(void)
 {
-       return MAX_ORDER-1;
 }
-#define set_pageblock_order(x) do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
@@ -4403,7 +4400,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               set_pageblock_order(pageblock_default_order());
+               set_pageblock_order();
                setup_usemap(pgdat, zone, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
index a8bc7d3..bf7d3cc 100644 (file)
@@ -486,6 +486,9 @@ void __init sparse_init(void)
        struct page **map_map;
 #endif
 
+       /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+       set_pageblock_order();
+
        /*
         * map is using big page (aka 2M in x86 64 bit)
         * usemap is less one page (aka 24 bytes)
index 48febd7..86eb848 100644 (file)
@@ -1977,10 +1977,10 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * proportional to the fraction of recently scanned pages on
         * each list that were recently referenced and in active use.
         */
-       ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+       ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
        ap /= reclaim_stat->recent_rotated[0] + 1;
 
-       fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+       fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1993,7 +1993,7 @@ out:
                unsigned long scan;
 
                scan = zone_nr_lru_pages(zone, sc, l);
-               if (priority || noswap) {
+               if (priority || noswap || !vmscan_swappiness(sc)) {
                        scan >>= priority;
                        if (!scan && force_scan)
                                scan = SWAP_CLUSTER_MAX;
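The get_scan_count() hunk above drops the "+1" from the priority factors, so a swappiness of 0 now yields an anon share of exactly zero, and it applies the proportional split even at the lowest priority in that case. A standalone sketch of the arithmetic with made-up reclaim statistics:

#include <stdio.h>

int main(void)
{
        /* swappiness == 0: anon_prio is 0, file_prio is 200 - 0 = 200 */
        unsigned long anon_prio = 0, file_prio = 200;
        unsigned long recent_scanned[2] = { 1000, 4000 };       /* anon, file */
        unsigned long recent_rotated[2] = { 100, 400 };

        unsigned long ap = anon_prio * (recent_scanned[0] + 1) /
                           (recent_rotated[0] + 1);
        unsigned long fp = file_prio * (recent_scanned[1] + 1) /
                           (recent_rotated[1] + 1);

        /* old formula: ap = (0 + 1) * 1001 / 101 = 9, so some anon pages
         * were still scanned; new formula: ap = 0, so all of the pressure
         * goes to the file list */
        printf("ap=%lu fp=%lu\n", ap, fp);
        return 0;
}
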
index 14ff9fe..0ca06e8 100644 (file)
@@ -784,6 +784,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
 
                if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
                        return -ENOTCONN;
+               memset(&pvc, 0, sizeof(pvc));
                pvc.sap_family = AF_ATMPVC;
                pvc.sap_addr.itf = vcc->dev->number;
                pvc.sap_addr.vpi = vcc->vpi;
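This hunk, and several of the atm and bluetooth hunks that follow, fix the same class of information leak: a structure is assembled on the kernel stack and copied to user space, so any padding bytes or fields the code never writes must be zeroed first. A minimal sketch of the pattern with hypothetical names:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_info {
        __u32 value;
        __u8  flag;
        /* 3 bytes of implicit padding would otherwise hold stack garbage */
};

static int example_fill_user(void __user *optval, unsigned int len)
{
        struct example_info info;

        memset(&info, 0, sizeof(info)); /* zero padding and unset fields */
        info.value = 42;
        info.flag  = 1;

        if (len > sizeof(info))
                len = sizeof(info);
        return copy_to_user(optval, &info, len) ? -EFAULT : 0;
}
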
index 3a73491..ae03240 100644 (file)
@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
                return -ENOTCONN;
        *sockaddr_len = sizeof(struct sockaddr_atmpvc);
        addr = (struct sockaddr_atmpvc *)sockaddr;
+       memset(addr, 0, sizeof(*addr));
        addr->sap_family = AF_ATMPVC;
        addr->sap_addr.itf = vcc->dev->number;
        addr->sap_addr.vpi = vcc->vpi;
index 643a41b..6033f02 100644 (file)
@@ -1411,7 +1411,13 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                if (conn->type == ACL_LINK) {
                        conn->state = BT_CONFIG;
                        hci_conn_hold(conn);
-                       conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+                       if (!conn->out &&
+                           !(conn->ssp_mode && conn->hdev->ssp_mode) &&
+                           !hci_find_link_key(hdev, &ev->bdaddr))
+                               conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+                       else
+                               conn->disc_timeout = HCI_DISCONN_TIMEOUT;
                        mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
                } else
                        conn->state = BT_CONNECTED;
index f6afe3d..8361ee4 100644 (file)
@@ -388,6 +388,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev    = hdev->id;
+       haddr->hci_channel= 0;
 
        release_sock(sk);
        return 0;
@@ -671,6 +672,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;
 
+                       memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
index 17b5b1c..dd76177 100644 (file)
@@ -862,6 +862,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
        write_lock_bh(&conn->chan_lock);
 
        hci_conn_hold(conn->hcon);
+       conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
 
        bacpy(&bt_sk(sk)->src, conn->src);
        bacpy(&bt_sk(sk)->dst, conn->dst);
@@ -2263,12 +2264,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
 
-               switch (type) {
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
-                       goto done;
-               }
+               if (type != L2CAP_CONF_RFC)
+                       continue;
+
+               if (olen != sizeof(rfc))
+                       break;
+
+               memcpy(&rfc, (void *)val, olen);
+               goto done;
        }
 
        /* Use sane default values in case a misbehaving remote device
index 5c406d3..6dedd6f 100644 (file)
@@ -293,6 +293,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       memset(la, 0, sizeof(struct sockaddr_l2));
        addr->sa_family = AF_BLUETOOTH;
        *len = sizeof(struct sockaddr_l2);
 
index 5417f61..7ee4ead 100644 (file)
@@ -547,6 +547,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       memset(sa, 0, sizeof(*sa));
        sa->rc_family  = AF_BLUETOOTH;
        sa->rc_channel = rfcomm_pi(sk)->channel;
        if (peer)
@@ -835,6 +836,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
                }
 
                sec.level = rfcomm_pi(sk)->sec_level;
+               sec.key_size = 0;
 
                len = min_t(unsigned int, len, sizeof(sec));
                if (copy_to_user(optval, (char *) &sec, len))
index c258796..bc1eb56 100644 (file)
@@ -471,7 +471,7 @@ static int rfcomm_get_dev_list(void __user *arg)
 
        size = sizeof(*dl) + dev_num * sizeof(*di);
 
-       dl = kmalloc(size, GFP_KERNEL);
+       dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;
 
index 68223e4..4e9115d 100644 (file)
@@ -428,9 +428,9 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
+       unregister_pernet_subsys(&caif_net_ops);
 }
 
 module_init(caif_device_init);
index 5738654..832ba6d 100644 (file)
@@ -1059,6 +1059,8 @@ rollback:
  */
 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 {
+       char *new_ifalias;
+
        ASSERT_RTNL();
 
        if (len >= IFALIASZ)
@@ -1072,9 +1074,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
                return 0;
        }
 
-       dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
-       if (!dev->ifalias)
+       new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+       if (!new_ifalias)
                return -ENOMEM;
+       dev->ifalias = new_ifalias;
 
        strlcpy(dev->ifalias, alias, len+1);
        return len;
@@ -1177,6 +1180,7 @@ static int __dev_open(struct net_device *dev)
                net_dmaengine_get();
                dev_set_rx_mode(dev);
                dev_activate(dev);
+               add_device_randomness(dev->dev_addr, dev->addr_len);
        }
 
        return ret;
@@ -1627,6 +1631,19 @@ static inline int deliver_skb(struct sk_buff *skb,
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
+static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+{
+       if (ptype->af_packet_priv == NULL)
+               return false;
+
+       if (ptype->id_match)
+               return ptype->id_match(ptype, skb->sk);
+       else if ((struct sock *)ptype->af_packet_priv == skb->sk)
+               return true;
+
+       return false;
+}
+
 /*
  *     Support routine. Sends outgoing frames to any network
  *     taps currently in use.
@@ -1644,8 +1661,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
-                   (ptype->af_packet_priv == NULL ||
-                    (struct sock *)ptype->af_packet_priv != skb->sk)) {
+                   (!skb_loop_sk(ptype, skb))) {
                        if (pt_prev) {
                                deliver_skb(skb2, pt_prev, skb->dev);
                                pt_prev = ptype;
@@ -2107,6 +2123,9 @@ u32 netif_skb_features(struct sk_buff *skb)
        __be16 protocol = skb->protocol;
        u32 features = skb->dev->features;
 
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+               features &= ~NETIF_F_GSO_MASK;
+
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
@@ -4841,6 +4860,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
        err = ops->ndo_set_mac_address(dev, sa);
        if (!err)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       add_device_randomness(dev->dev_addr, dev->addr_len);
        return err;
 }
 EXPORT_SYMBOL(dev_set_mac_address);
@@ -5621,6 +5641,7 @@ int register_netdevice(struct net_device *dev)
        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
+       add_device_randomness(dev->dev_addr, dev->addr_len);
 
        /* Notify protocols, that a new device appeared. */
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
@@ -5987,6 +6008,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        dev_net_set(dev, &init_net);
 
        dev->gso_max_size = GSO_MAX_SIZE;
+       dev->gso_max_segs = GSO_MAX_SEGS;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
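The net/core/dev.c hunks above add a per-device segment limit alongside the existing byte limit: alloc_netdev_mqs() defaults it to GSO_MAX_SEGS, netif_skb_features() strips the GSO feature bits when an skb would exceed it (so such skbs are segmented in software), and the TCP hunks further down size their skbs against it. A sketch of how a hypothetical driver with a small TSO descriptor budget might use the new field:

#include <linux/netdevice.h>

/* Hypothetical NIC whose hardware TSO engine accepts at most 16 data
 * descriptors per send: cap gso_max_segs so that oversized skbs fall
 * back to software GSO via the netif_skb_features() check above. */
static void example_nic_set_gso_limits(struct net_device *dev)
{
        dev->gso_max_size = 64 * 1024;
        dev->gso_max_segs = 16;
}
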
index 7f36b38..b856f87 100644 (file)
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-       struct work_struct dm_alert_work;
-       struct sk_buff *skb;
-       atomic_t dm_hit_count;
-       struct timer_list send_timer;
+       spinlock_t              lock;
+       struct sk_buff          *skb;
+       struct work_struct      dm_alert_work;
+       struct timer_list       send_timer;
 };
 
 struct dm_hw_stat_delta {
@@ -74,56 +71,59 @@ static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
        size_t al;
        struct net_dm_alert_msg *msg;
        struct nlattr *nla;
+       struct sk_buff *skb;
+       unsigned long flags;
 
        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
        al += sizeof(struct nlattr);
 
-       data->skb = genlmsg_new(al, GFP_KERNEL);
-       genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-                       0, NET_DM_CMD_ALERT);
-       nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-       msg = nla_data(nla);
-       memset(msg, 0, al);
-       atomic_set(&data->dm_hit_count, dm_hit_limit);
+       skb = genlmsg_new(al, GFP_KERNEL);
+
+       if (skb) {
+               genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+                               0, NET_DM_CMD_ALERT);
+               nla = nla_reserve(skb, NLA_UNSPEC,
+                                 sizeof(struct net_dm_alert_msg));
+               msg = nla_data(nla);
+               memset(msg, 0, al);
+       } else {
+               mod_timer(&data->send_timer, jiffies + HZ / 10);
+       }
+
+       spin_lock_irqsave(&data->lock, flags);
+       swap(data->skb, skb);
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
        struct sk_buff *skb;
-       struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data;
 
-       /*
-        * Grab the skb we're about to send
-        */
-       skb = data->skb;
+       data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-       /*
-        * Replace it with a new one
-        */
-       reset_per_cpu_data(data);
-
-       /*
-        * Ship it!
-        */
-       genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+       skb = reset_per_cpu_data(data);
 
+       if (skb)
+               genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period.  Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-       struct per_cpu_dm_data *data =  &__get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
        schedule_work(&data->dm_alert_work);
 }
@@ -134,17 +134,19 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        struct nlmsghdr *nlh;
        struct nlattr *nla;
        int i;
-       struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+       struct sk_buff *dskb;
+       struct per_cpu_dm_data *data;
+       unsigned long flags;
 
+       local_irq_save(flags);
+       data = &__get_cpu_var(dm_cpu_data);
+       spin_lock(&data->lock);
+       dskb = data->skb;
 
-       if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-               /*
-                * we're already at zero, discard this hit
-                */
+       if (!dskb)
                goto out;
-       }
 
-       nlh = (struct nlmsghdr *)data->skb->data;
+       nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
        for (i = 0; i < msg->entries; i++) {
@@ -153,11 +155,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
                        goto out;
                }
        }
-
+       if (msg->entries == dm_hit_limit)
+               goto out;
        /*
         * We need to create a new entry
         */
-       __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+       __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
        nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
        memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
        msg->points[msg->entries].count = 1;
@@ -165,11 +168,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
        if (!timer_pending(&data->send_timer)) {
                data->send_timer.expires = jiffies + dm_delay * HZ;
-               add_timer_on(&data->send_timer, smp_processor_id());
+               add_timer(&data->send_timer);
        }
 
 out:
-       return;
+       spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -213,7 +216,7 @@ static int set_all_monitor_traces(int state)
        struct dm_hw_stat_delta *new_stat = NULL;
        struct dm_hw_stat_delta *temp;
 
-       spin_lock(&trace_state_lock);
+       mutex_lock(&trace_state_mutex);
 
        if (state == trace_state) {
                rc = -EAGAIN;
@@ -252,7 +255,7 @@ static int set_all_monitor_traces(int state)
                rc = -EINPROGRESS;
 
 out_unlock:
-       spin_unlock(&trace_state_lock);
+       mutex_unlock(&trace_state_mutex);
 
        return rc;
 }
@@ -295,12 +298,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
                new_stat->dev = dev;
                new_stat->last_rx = jiffies;
-               spin_lock(&trace_state_lock);
+               mutex_lock(&trace_state_mutex);
                list_add_rcu(&new_stat->list, &hw_stats_list);
-               spin_unlock(&trace_state_lock);
+               mutex_unlock(&trace_state_mutex);
                break;
        case NETDEV_UNREGISTER:
-               spin_lock(&trace_state_lock);
+               mutex_lock(&trace_state_mutex);
                list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
                        if (new_stat->dev == dev) {
                                new_stat->dev = NULL;
@@ -311,7 +314,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
                                }
                        }
                }
-               spin_unlock(&trace_state_lock);
+               mutex_unlock(&trace_state_mutex);
                break;
        }
 out:
@@ -367,13 +370,15 @@ static int __init init_net_drop_monitor(void)
 
        for_each_present_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
-               reset_per_cpu_data(data);
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
                init_timer(&data->send_timer);
-               data->send_timer.data = cpu;
+               data->send_timer.data = (unsigned long)data;
                data->send_timer.function = sched_send_work;
+               spin_lock_init(&data->lock);
+               reset_per_cpu_data(data);
        }
 
+
        goto out;
 
 out_unreg:
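The drop_monitor rework above gives every CPU a buffer protected by its own spinlock: the drop path appends to it under the lock without ever sleeping, while the work handler allocates a fresh skb, swaps it in under the lock, and sends the old one outside the lock. A generic sketch of that swap step with illustrative names, not the drop_monitor structures themselves:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_percpu_buf {
        spinlock_t       lock;
        struct sk_buff  *skb;   /* buffer currently being filled */
};

/* Exchange the CPU's current buffer for @fresh (which may be NULL if the
 * allocation failed) and hand back the old one for sending; the swap is
 * the only point where producer and sender meet. */
static struct sk_buff *example_swap_buf(struct example_percpu_buf *buf,
                                        struct sk_buff *fresh)
{
        unsigned long flags;

        spin_lock_irqsave(&buf->lock, flags);
        swap(buf->skb, fresh);
        spin_unlock_irqrestore(&buf->lock, flags);

        return fresh;           /* now holds the previous buffer */
}
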
index 2ef859a..0cf604b 100644 (file)
@@ -670,6 +670,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
        }
 }
 
+static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
+{
+       return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
+              (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
+}
+
 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
                                           const struct ifinfomsg *ifm)
 {
@@ -678,7 +684,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
        /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
        if (ifm->ifi_change)
                flags = (flags & ifm->ifi_change) |
-                       (dev->flags & ~ifm->ifi_change);
+                       (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
 
        return flags;
 }
@@ -1354,6 +1360,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        goto errout;
                send_addr_notify = 1;
                modified = 1;
+               add_device_randomness(dev->dev_addr, dev->addr_len);
        }
 
        if (tb[IFLA_MTU]) {
index 8d095b9..018fd41 100644 (file)
@@ -1308,6 +1308,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
+                       sk->sk_gso_max_segs = dst->dev->gso_max_segs;
                }
        }
 }
index 75c3582..fb85d37 100644 (file)
@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
                                        u32 __user *optval, int __user *optlen)
 {
        int rc = -ENOPROTOOPT;
-       if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
+       if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
                rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
                                                 optval, optlen);
        return rc;
@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
                                        u32 __user *optval, int __user *optlen)
 {
        int rc = -ENOPROTOOPT;
-       if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
+       if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
                rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
                                                 optval, optlen);
        return rc;
index 3d604e1..4caf63f 100644 (file)
@@ -532,6 +532,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
        case DCCP_SOCKOPT_CCID_TX_INFO:
                if (len < sizeof(tfrc))
                        return -EINVAL;
+               memset(&tfrc, 0, sizeof(tfrc));
                tfrc.tfrctx_x      = hc->tx_x;
                tfrc.tfrctx_x_recv = hc->tx_x_recv;
                tfrc.tfrctx_x_calc = hc->tx_x_calc;
index 86f3b88..afaa735 100644 (file)
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                case CIPSO_V4_TAG_LOCAL:
                        /* This is a non-standard tag that we only allow for
                         * local connections, so if the incoming interface is
-                        * not the loopback device drop the packet. */
-                       if (!(skb->dev->flags & IFF_LOOPBACK)) {
+                        * not the loopback device drop the packet. Further,
+                        * there is no legitimate reason for setting this from
+                        * userspace so reject it if skb is NULL. */
+                       if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
                                err_offset = opt_iter;
                                goto validate_return_locked;
                        }
index d2aae27..0064394 100644 (file)
@@ -125,6 +125,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local);
@@ -132,6 +134,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -272,7 +275,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
-               kfree(mrt);
+               ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -300,7 +303,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-       kfree(net->ipv4.mrt);
+       ipmr_free_table(net->ipv4.mrt);
 }
 #endif
 
@@ -337,6 +340,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        return mrt;
 }
 
+static void ipmr_free_table(struct mr_table *mrt)
+{
+       del_timer_sync(&mrt->ipmr_expire_timer);
+       mroute_clean_tables(mrt);
+       kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
index 11ba922..043d49b 100644 (file)
@@ -740,7 +740,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                           old_size_goal + mss_now > xmit_size_goal)) {
                        xmit_size_goal = old_size_goal;
                } else {
-                       tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+                       tp->xmit_size_goal_segs =
+                               min_t(u16, xmit_size_goal / mss_now,
+                                     sk->sk_gso_max_segs);
                        xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
                }
        }
@@ -2391,7 +2393,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                /* Cap the max timeout in ms TCP will retry/retrans
                 * before giving up and aborting (ETIMEDOUT) a connection.
                 */
-               icsk->icsk_user_timeout = msecs_to_jiffies(val);
+               if (val < 0)
+                       err = -EINVAL;
+               else
+                       icsk->icsk_user_timeout = msecs_to_jiffies(val);
                break;
        default:
                err = -ENOPROTOOPT;
index 850c737..6cebfd2 100644 (file)
@@ -290,7 +290,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
        left = tp->snd_cwnd - in_flight;
        if (sk_can_gso(sk) &&
            left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-           left * tp->mss_cache < sk->sk_gso_max_size)
+           left * tp->mss_cache < sk->sk_gso_max_size &&
+           left < sk->sk_gso_max_segs)
                return 1;
        return left <= tcp_max_burst(tp);
 }
index 32e6ca2..a08a621 100644 (file)
@@ -5415,7 +5415,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        if (tp->copied_seq == tp->rcv_nxt &&
                            len - tcp_header_len <= tp->ucopy.len) {
 #ifdef CONFIG_NET_DMA
-                               if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+                               if (tp->ucopy.task == current &&
+                                   sock_owned_by_user(sk) &&
+                                   tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
                                        copied_early = 1;
                                        eaten = 1;
                                }
index c51dd5b..921cbac 100644 (file)
@@ -1318,21 +1318,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-                                       unsigned int mss_now, unsigned int cwnd)
+                                       unsigned int mss_now, unsigned int max_segs)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       u32 needed, window, cwnd_len;
+       u32 needed, window, max_len;
 
        window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-       cwnd_len = mss_now * cwnd;
+       max_len = mss_now * max_segs;
 
-       if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-               return cwnd_len;
+       if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+               return max_len;
 
        needed = min(skb->len, window);
 
-       if (cwnd_len <= needed)
-               return cwnd_len;
+       if (max_len <= needed)
+               return max_len;
 
        return needed - needed % mss_now;
 }
@@ -1560,7 +1560,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        limit = min(send_win, cong_win);
 
        /* If a full-sized TSO skb can be sent, do it. */
-       if (limit >= sk->sk_gso_max_size)
+       if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+                          sk->sk_gso_max_segs * tp->mss_cache))
                goto send_now;
 
        /* Middle in queue won't get any more data, full sendable already? */
@@ -1786,7 +1787,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                limit = mss_now;
                if (tso_segs > 1 && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
-                                                   cwnd_quota);
+                                                   min_t(unsigned int,
+                                                         cwnd_quota,
+                                                         sk->sk_gso_max_segs));
 
                if (skb->len > limit &&
                    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
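
Taken together, the tcp_output.c hunks above bound a single TSO skb by the device's sk_gso_max_segs as well as by the congestion window. A standalone sketch of the resulting clamp, with made-up numbers (illustration only, not kernel code):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int mss_now = 1448;       /* cached MSS */
        unsigned int cwnd_quota = 90;      /* segments the cwnd allows */
        unsigned int gso_max_segs = 64;    /* device TSO segment limit */
        unsigned int segs = min_u(cwnd_quota, gso_max_segs);

        /* At most 64 * 1448 = 92672 bytes per skb, regardless of cwnd. */
        printf("max TSO skb: %u segments, %u bytes\n", segs, segs * mss_now);
        return 0;
}
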
index a5521c5..aef80d7 100644 (file)
@@ -493,8 +493,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
        struct net_device *dev;
        struct inet6_dev *idev;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
                idev = __in6_dev_get(dev);
                if (idev) {
                        int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -503,7 +502,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
                                dev_forward_change(idev);
                }
        }
-       rcu_read_unlock();
 }
 
 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
index 89ff8c6..7501b22 100644 (file)
@@ -1253,11 +1253,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
        /* Remove from tunnel list */
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
        list_del_rcu(&tunnel->list);
+       kfree_rcu(tunnel, rcu);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-       synchronize_rcu();
 
        atomic_dec(&l2tp_tunnel_count);
-       kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
index a16a48e..4393794 100644 (file)
@@ -157,6 +157,7 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
        int                     magic;          /* Should be L2TP_TUNNEL_MAGIC */
+       struct rcu_head rcu;
        rwlock_t                hlist_lock;     /* protect session_hlist */
        struct hlist_head       session_hlist[L2TP_HASH_SIZE];
                                                /* hashed list of sessions,
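
The two l2tp hunks above replace a blocking synchronize_rcu() + kfree() with kfree_rcu(), which is why the struct rcu_head member is added to struct l2tp_tunnel. A generic sketch of the pattern, using an illustrative struct foo rather than the l2tp code:

/* Sketch: unpublish the object under the lock, then let kfree_rcu()
 * free it after a grace period without blocking the caller. */
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
        struct list_head list;
        struct rcu_head rcu;            /* required by kfree_rcu() */
        int data;
};

static void foo_release(struct foo *f, spinlock_t *lock)
{
        spin_lock_bh(lock);
        list_del_rcu(&f->list);         /* readers may still hold a reference */
        spin_unlock_bh(lock);
        kfree_rcu(f, rcu);              /* freed once all readers are done */
}
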
index a18e6c3..99a60d5 100644 (file)
@@ -966,14 +966,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
        struct sockaddr_llc sllc;
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
-       int rc = 0;
+       int rc = -EBADF;
 
        memset(&sllc, 0, sizeof(sllc));
        lock_sock(sk);
        if (sock_flag(sk, SOCK_ZAPPED))
                goto out;
        *uaddrlen = sizeof(sllc);
-       memset(uaddr, 0, *uaddrlen);
        if (peer) {
                rc = -ENOTCONN;
                if (sk->sk_state != TCP_ESTABLISHED)
index a7078fd..f85de8e 100644 (file)
@@ -543,6 +543,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+       del_timer_sync(&sdata->u.mesh.mesh_path_timer);
        /*
         * If the timer fired while we waited for it, it will have
         * requeued the work. Now the work will be running again
index e1a66cf..72f4253 100644 (file)
@@ -2713,6 +2713,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        {
                struct ip_vs_timeout_user t;
 
+               memset(&t, 0, sizeof(t));
                __ip_vs_get_timeouts(net, &t);
                if (copy_to_user(user, &t, sizeof(t)) != 0)
                        ret = -EFAULT;
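
This memset() is the usual guard against leaking kernel stack contents when a structure is copied out to userspace: clearing the whole thing first means padding and any fields the code never sets cannot carry stale data. A generic sketch with a made-up structure:

/* Hypothetical reply structure; the hole after 'b' is the kind of
 * padding the memset() scrubs. */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_reply {
        u32 a;
        u16 b;
        u64 c;
};

static int send_reply(void __user *user, u32 a, u16 b, u64 c)
{
        struct demo_reply r;

        memset(&r, 0, sizeof(r));       /* clears padding too */
        r.a = a;
        r.b = b;
        r.c = c;
        return copy_to_user(user, &r, sizeof(r)) ? -EFAULT : 0;
}
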
index a99fb41..38b78b9 100644 (file)
@@ -1333,7 +1333,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (NULL == siocb->scm)
                siocb->scm = &scm;
 
-       err = scm_send(sock, msg, siocb->scm);
+       err = scm_send(sock, msg, siocb->scm, true);
        if (err < 0)
                return err;
 
@@ -1344,7 +1344,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
-               if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+               if ((dst_group || dst_pid) &&
+                   !netlink_capable(sock, NL_NONROOT_SEND))
                        goto out;
        } else {
                dst_pid = nlk->dst_pid;
@@ -2103,6 +2104,7 @@ static void __init netlink_add_usersock_entry(void)
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
+       nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
 
        netlink_table_ungrab();
 }
index d9d4970..85afc13 100644 (file)
@@ -1281,6 +1281,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
        spin_unlock(&f->lock);
 }
 
+bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
+{
+       if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
+               return true;
+
+       return false;
+}
+
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
        struct packet_sock *po = pkt_sk(sk);
@@ -1333,6 +1341,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                match->prot_hook.dev = po->prot_hook.dev;
                match->prot_hook.func = packet_rcv_fanout;
                match->prot_hook.af_packet_priv = match;
+               match->prot_hook.id_match = match_fanout_group;
                dev_add_pack(&match->prot_hook);
                list_add(&match->list, &fanout_list);
        }
@@ -1931,7 +1940,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 
        if (likely(po->tx_ring.pg_vec)) {
                ph = skb_shinfo(skb)->destructor_arg;
-               BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
                BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
                atomic_dec(&po->tx_ring.pending);
                __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
index b77f5a0..bdacd8d 100644 (file)
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
        struct tcf_common *pc;
        int ret = 0;
        int err;
+#ifdef CONFIG_GACT_PROB
+       struct tc_gact_p *p_parm = NULL;
+#endif
 
        if (nla == NULL)
                return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 #ifndef CONFIG_GACT_PROB
        if (tb[TCA_GACT_PROB] != NULL)
                return -EOPNOTSUPP;
+#else
+       if (tb[TCA_GACT_PROB]) {
+               p_parm = nla_data(tb[TCA_GACT_PROB]);
+               if (p_parm->ptype >= MAX_RAND)
+                       return -EINVAL;
+       }
 #endif
 
        pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
        spin_lock_bh(&gact->tcf_lock);
        gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
-       if (tb[TCA_GACT_PROB] != NULL) {
-               struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+       if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
                gact->tcfg_pval    = p_parm->pval;
                gact->tcfg_ptype   = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
 
        spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-       if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+       if (gact->tcfg_ptype)
                action = gact_rand[gact->tcfg_ptype](gact);
        else
                action = gact->tcf_action;
index 17859ea..351a69b 100644 (file)
@@ -559,6 +559,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
        NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
        return nla_nest_end(skb, opts);
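
nla_nest_start() returns NULL when the skb has run out of tailroom, which is what the added check guards against. A generic sketch of the nesting pattern with illustrative attribute ids (not the sfb code):

#include <net/netlink.h>

static int put_nested(struct sk_buff *skb, int nest_type,
                      int attr_type, const void *data, int len)
{
        struct nlattr *nest = nla_nest_start(skb, nest_type);

        if (nest == NULL)
                return -EMSGSIZE;
        if (nla_put(skb, attr_type, len, data)) {
                nla_nest_cancel(skb, nest);     /* trim the partial nest */
                return -EMSGSIZE;
        }
        return nla_nest_end(skb, nest);
}
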
 
index b7692aa..0fc18c7 100644 (file)
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
        epb = &ep->base;
 
-       if (hlist_unhashed(&epb->node))
-               return;
-
        epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
        head = &sctp_assoc_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
index 0075554..8e49d76 100644 (file)
@@ -1231,8 +1231,14 @@ out_free:
        SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
                          " kaddrs: %p err: %d\n",
                          asoc, kaddrs, err);
-       if (asoc)
+       if (asoc) {
+               /* sctp_primitive_ASSOCIATE may have added this association
+                * to the hash table; try to unhash it just in case. It's a
+                * no-op if it wasn't hashed, so we're safe.
+                */
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
        return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        goto out_unlock;
 
 out_free:
-       if (new_asoc)
+       if (new_asoc) {
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
 out_unlock:
        sctp_release_sock(sk);
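
The switch from __hlist_del() to hlist_del_init() in the earlier hunks is what makes these "unhash just in case" calls safe: hlist_del_init() re-initialises the node and is itself a no-op on a node that is not currently hashed. A generic illustration using the plain hlist API:

#include <linux/list.h>

static void hlist_del_init_demo(struct hlist_head *head,
                                struct hlist_node *node)
{
        hlist_add_head(node, head);
        hlist_del_init(node);   /* unlinked and re-initialised */
        hlist_del_init(node);   /* hlist_unhashed() is now true: no-op */
}
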
 
index 273cbce..68879db 100644 (file)
@@ -2645,6 +2645,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
        if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
                return -EFAULT;
 
+       memset(&ifc, 0, sizeof(ifc));
        if (ifc32.ifcbuf == 0) {
                ifc32.ifc_len = 0;
                ifc.ifc_len = 0;
index 8761bf8..337c68b 100644 (file)
@@ -246,7 +246,7 @@ static int rpcb_create_local_unix(void)
        if (IS_ERR(clnt)) {
                dprintk("RPC:       failed to create AF_LOCAL rpcbind "
                                "client (errno %ld).\n", PTR_ERR(clnt));
-               result = -PTR_ERR(clnt);
+               result = PTR_ERR(clnt);
                goto out;
        }
 
@@ -293,7 +293,7 @@ static int rpcb_create_local_net(void)
        if (IS_ERR(clnt)) {
                dprintk("RPC:       failed to create local rpcbind "
                                "client (errno %ld).\n", PTR_ERR(clnt));
-               result = -PTR_ERR(clnt);
+               result = PTR_ERR(clnt);
                goto out;
        }
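
Both rpcbind hunks fix the same sign error: PTR_ERR() already returns a negative errno, so negating it produced a positive "error" code. The convention, in generic form:

#include <linux/err.h>

static long demo_check(void *p)
{
        if (IS_ERR(p))
                return PTR_ERR(p);      /* already negative, e.g. -ENOMEM */
        return 0;
}
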
 
index 4e2b3b4..c90b832 100644 (file)
@@ -755,7 +755,9 @@ void rpc_execute(struct rpc_task *task)
 
 static void rpc_async_schedule(struct work_struct *work)
 {
+       current->flags |= PF_FSTRANS;
        __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+       current->flags &= ~PF_FSTRANS;
 }
 
 /**
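
rpc_async_schedule(), and the connect workers patched further down, now run with PF_FSTRANS set. A minimal sketch of the bracketing pattern, assuming the flag's purpose here is to let reclaim-side code recognise that the work is being done on behalf of the RPC transport and avoid recursing back into it (illustrative worker, not the sunrpc code):

#include <linux/sched.h>
#include <linux/workqueue.h>

static void demo_worker(struct work_struct *work)
{
        current->flags |= PF_FSTRANS;
        /* ... work that may allocate memory and trigger reclaim ... */
        current->flags &= ~PF_FSTRANS;
}
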
index 9ed2cd0..3282453 100644 (file)
@@ -315,7 +315,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
  */
 void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
-       struct svc_serv *serv = xprt->xpt_server;
        struct svc_pool *pool;
        struct svc_rqst *rqstp;
        int cpu;
@@ -361,8 +360,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
                                rqstp, rqstp->rq_xprt);
                rqstp->rq_xprt = xprt;
                svc_xprt_get(xprt);
-               rqstp->rq_reserved = serv->sv_max_mesg;
-               atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
                pool->sp_stats.threads_woken++;
                wake_up(&rqstp->rq_wait);
        } else {
@@ -642,8 +639,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        if (xprt) {
                rqstp->rq_xprt = xprt;
                svc_xprt_get(xprt);
-               rqstp->rq_reserved = serv->sv_max_mesg;
-               atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 
                /* As there is a shortage of threads and this request
                 * had to be queued, don't allow the thread to wait so
@@ -740,6 +735,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                else
                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                dprintk("svc: got len=%d\n", len);
+               rqstp->rq_reserved = serv->sv_max_mesg;
+               atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
        }
        svc_xprt_received(xprt);
 
@@ -796,7 +793,8 @@ int svc_send(struct svc_rqst *rqstp)
 
        /* Grab mutex to serialize outgoing data. */
        mutex_lock(&xprt->xpt_mutex);
-       if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+       if (test_bit(XPT_DEAD, &xprt->xpt_flags)
+                       || test_bit(XPT_CLOSE, &xprt->xpt_flags))
                len = -ENOTCONN;
        else
                len = xprt->xpt_ops->xpo_sendto(rqstp);
index 71bed1c..296192c 100644 (file)
@@ -1136,9 +1136,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
        if (len >= 0)
                svsk->sk_tcplen += len;
        if (len != want) {
+               svc_tcp_save_pages(svsk, rqstp);
                if (len < 0 && len != -EAGAIN)
                        goto err_other;
-               svc_tcp_save_pages(svsk, rqstp);
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        svsk->sk_tcplen, svsk->sk_reclen);
                goto err_noclose;
index 3ac9789..ffba207 100644 (file)
@@ -962,11 +962,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
        return false;
 }
 
-static void xprt_alloc_slot(struct rpc_task *task)
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-       struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req;
 
+       spin_lock(&xprt->reserve_lock);
        if (!list_empty(&xprt->free)) {
                req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
@@ -987,12 +987,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
        default:
                task->tk_status = -EAGAIN;
        }
+       spin_unlock(&xprt->reserve_lock);
        return;
 out_init_req:
        task->tk_status = 0;
        task->tk_rqstp = req;
        xprt_request_init(task, xprt);
+       spin_unlock(&xprt->reserve_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+       /* Note: grabbing the xprt_lock_write() ensures that we throttle
+        * new slot allocation if the transport is congested (i.e. when
+        * reconnecting a stream transport or when out of socket write
+        * buffer space).
+        */
+       if (xprt_lock_write(xprt, task)) {
+               xprt_alloc_slot(xprt, task);
+               xprt_release_write(xprt, task);
+       }
 }
+EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
@@ -1076,20 +1093,9 @@ void xprt_reserve(struct rpc_task *task)
        if (task->tk_rqstp != NULL)
                return;
 
-       /* Note: grabbing the xprt_lock_write() here is not strictly needed,
-        * but ensures that we throttle new slot allocation if the transport
-        * is congested (e.g. if reconnecting or if we're out of socket
-        * write buffer space).
-        */
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
-       if (!xprt_lock_write(xprt, task))
-               return;
-
-       spin_lock(&xprt->reserve_lock);
-       xprt_alloc_slot(task);
-       spin_unlock(&xprt->reserve_lock);
-       xprt_release_write(xprt, task);
+       xprt->ops->alloc_slot(xprt, task);
 }
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
index b446e10..5d9202d 100644 (file)
@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
        int rc = 0;
 
        if (!xprt->shutdown) {
+               current->flags |= PF_FSTRANS;
                xprt_clear_connected(xprt);
 
                dprintk("RPC:       %s: %sconnect\n", __func__,
@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
 
 out:
        xprt_wake_pending_tasks(xprt, rc);
-
 out_clear:
        dprintk("RPC:       %s: exit\n", __func__);
        xprt_clear_connecting(xprt);
+       current->flags &= ~PF_FSTRANS;
 }
 
 /*
@@ -712,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 static struct rpc_xprt_ops xprt_rdma_procs = {
        .reserve_xprt           = xprt_rdma_reserve_xprt,
        .release_xprt           = xprt_release_xprt_cong, /* sunrpc/xprt.c */
+       .alloc_slot             = xprt_alloc_slot,
        .release_request        = xprt_release_rqst_cong,       /* ditto */
        .set_retrans_timeout    = xprt_set_retrans_timeout_def, /* ditto */
        .rpcbind                = rpcb_getport_async,   /* sunrpc/rpcb_clnt.c */
index 55472c4..c5391af 100644 (file)
@@ -1895,6 +1895,8 @@ static void xs_local_setup_socket(struct work_struct *work)
        if (xprt->shutdown)
                goto out;
 
+       current->flags |= PF_FSTRANS;
+
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
        status = __sock_create(xprt->xprt_net, AF_LOCAL,
                                        SOCK_STREAM, 0, &sock, 1);
@@ -1928,6 +1930,7 @@ static void xs_local_setup_socket(struct work_struct *work)
 out:
        xprt_clear_connecting(xprt);
        xprt_wake_pending_tasks(xprt, status);
+       current->flags &= ~PF_FSTRANS;
 }
 
 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -1970,6 +1973,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
        if (xprt->shutdown)
                goto out;
 
+       current->flags |= PF_FSTRANS;
+
        /* Start by resetting any existing state */
        xs_reset_transport(transport);
        sock = xs_create_sock(xprt, transport,
@@ -1988,6 +1993,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
 out:
        xprt_clear_connecting(xprt);
        xprt_wake_pending_tasks(xprt, status);
+       current->flags &= ~PF_FSTRANS;
 }
 
 /*
@@ -2113,6 +2119,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        if (xprt->shutdown)
                goto out;
 
+       current->flags |= PF_FSTRANS;
+
        if (!sock) {
                clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
                sock = xs_create_sock(xprt, transport,
@@ -2162,6 +2170,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        case -EINPROGRESS:
        case -EALREADY:
                xprt_clear_connecting(xprt);
+               current->flags &= ~PF_FSTRANS;
                return;
        case -EINVAL:
                /* Happens, for instance, if the user specified a link
@@ -2174,6 +2183,7 @@ out_eagain:
 out:
        xprt_clear_connecting(xprt);
        xprt_wake_pending_tasks(xprt, status);
+       current->flags &= ~PF_FSTRANS;
 }
 
 /**
@@ -2412,6 +2422,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
 static struct rpc_xprt_ops xs_local_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
+       .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = xs_local_rpcbind,
        .set_port               = xs_local_set_port,
        .connect                = xs_connect,
@@ -2428,6 +2439,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
+       .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = rpcb_getport_async,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
@@ -2445,6 +2457,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
 static struct rpc_xprt_ops xs_tcp_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
+       .alloc_slot             = xprt_lock_and_alloc_slot,
        .rpcbind                = rpcb_getport_async,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
index d99678a..317bfe3 100644 (file)
@@ -1435,7 +1435,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
-       err = scm_send(sock, msg, siocb->scm);
+       err = scm_send(sock, msg, siocb->scm, false);
        if (err < 0)
                return err;
 
@@ -1596,7 +1596,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
-       err = scm_send(sock, msg, siocb->scm);
+       err = scm_send(sock, msg, siocb->scm, false);
        if (err < 0)
                return err;
 
index 788a12c..2ab7850 100644 (file)
@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
                 * successfully, add it to the interface list.
                 */
 
-               if (dev->name == NULL) {
-                       err = -EINVAL;
-               } else {
+#ifdef WANDEBUG
+               printk(KERN_INFO "%s: registering interface %s...\n",
+                      wanrouter_modname, dev->name);
+#endif
 
-                       #ifdef WANDEBUG
-                       printk(KERN_INFO "%s: registering interface %s...\n",
-                               wanrouter_modname, dev->name);
-                       #endif
-
-                       err = register_netdev(dev);
-                       if (!err) {
-                               struct net_device *slave = NULL;
-                               unsigned long smp_flags=0;
-
-                               lock_adapter_irq(&wandev->lock, &smp_flags);
-
-                               if (wandev->dev == NULL) {
-                                       wandev->dev = dev;
-                               } else {
-                                       for (slave=wandev->dev;
-                                            DEV_TO_SLAVE(slave);
-                                            slave = DEV_TO_SLAVE(slave))
-                                               DEV_TO_SLAVE(slave) = dev;
-                               }
-                               ++wandev->ndev;
-
-                               unlock_adapter_irq(&wandev->lock, &smp_flags);
-                               err = 0;        /* done !!! */
-                               goto out;
+               err = register_netdev(dev);
+               if (!err) {
+                       struct net_device *slave = NULL;
+                       unsigned long smp_flags=0;
+
+                       lock_adapter_irq(&wandev->lock, &smp_flags);
+
+                       if (wandev->dev == NULL) {
+                               wandev->dev = dev;
+                       } else {
+                               for (slave=wandev->dev;
+                                    DEV_TO_SLAVE(slave);
+                                    slave = DEV_TO_SLAVE(slave))
+                                       DEV_TO_SLAVE(slave) = dev;
                        }
+                       ++wandev->ndev;
+
+                       unlock_adapter_irq(&wandev->lock, &smp_flags);
+                       err = 0;        /* done !!! */
+                       goto out;
                }
                if (wandev->del_if)
                        wandev->del_if(wandev, dev);
index 220f3bd..8f5042d 100644 (file)
@@ -971,6 +971,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                 */
                synchronize_rcu();
                INIT_LIST_HEAD(&wdev->list);
+               /*
+                * Ensure that all events have been processed and
+                * freed.
+                */
+               cfg80211_process_wdev_events(wdev);
                break;
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
index b9ec306..02c3be3 100644 (file)
@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                          struct net_device *dev, enum nl80211_iftype ntype,
                          u32 *flags, struct vif_params *params);
 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wdev_events(struct wireless_dev *wdev);
 
 int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                  struct wireless_dev *wdev,
index 74d5292..22fb802 100644 (file)
@@ -725,7 +725,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
        wdev->connect_keys = NULL;
 }
 
-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+void cfg80211_process_wdev_events(struct wireless_dev *wdev)
 {
        struct cfg80211_event *ev;
        unsigned long flags;
@@ -981,6 +981,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
        }
        mutex_unlock(&rdev->devlist_mtx);
 
+       if (total == 1)
+               return 0;
+
        for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
                const struct ieee80211_iface_combination *c;
                struct ieee80211_iface_limit *limits;
index 1cff331..4608c2c 100644 (file)
@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
        spin_lock_init(&mpu->output_lock);
        spin_lock_init(&mpu->timer_lock);
        mpu->hardware = hardware;
+       mpu->irq = -1;
        if (! (info_flags & MPU401_INFO_INTEGRATED)) {
                int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
                mpu->res = request_region(port, res_size, "MPU401 UART");
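
Initialising mpu->irq to -1 follows the common "-1 means no IRQ requested yet" idiom, assuming the destructor releases the IRQ only when the field is non-negative. A hedged sketch with invented names (not the mpu401 code):

#include <linux/interrupt.h>

struct demo_dev {
        int irq;
};

static void demo_init(struct demo_dev *d)
{
        d->irq = -1;                    /* nothing requested yet */
}

static void demo_free(struct demo_dev *d)
{
        if (d->irq >= 0)
                free_irq(d->irq, d);    /* skipped when irq == -1 */
}
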
index f3be54e..b0187e7 100644 (file)
@@ -2312,6 +2312,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        }
        if (codec->patch_ops.free)
                codec->patch_ops.free(codec);
+       memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
        codec->proc_widget_hook = NULL;
        codec->spec = NULL;
        free_hda_cache(&codec->amp_cache);
@@ -2324,7 +2325,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        codec->num_pcms = 0;
        codec->pcm_info = NULL;
        codec->preset = NULL;
-       memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
        codec->slave_dig_outs = NULL;
        codec->spdif_status_reset = 0;
        module_put(codec->owner);
index 254ab52..2210b83 100644 (file)
@@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
        if (digi1 & AC_DIG1_EMPHASIS)
                snd_iprintf(buffer, " Preemphasis");
        if (digi1 & AC_DIG1_COPYRIGHT)
-               snd_iprintf(buffer, " Copyright");
+               snd_iprintf(buffer, " Non-Copyright");
        if (digi1 & AC_DIG1_NONAUDIO)
                snd_iprintf(buffer, " Non-Audio");
        if (digi1 & AC_DIG1_PROFESSIONAL)
index 35abe3c..b22989e 100644 (file)
@@ -276,6 +276,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
        int type = dir ? HDA_INPUT : HDA_OUTPUT;
        struct snd_kcontrol_new knew =
                HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
+       if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
+               snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
+               return 0;
+       }
        sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
        return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
 }
@@ -287,6 +291,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
        int type = dir ? HDA_INPUT : HDA_OUTPUT;
        struct snd_kcontrol_new knew =
                HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
+       if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
+               snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
+               return 0;
+       }
        sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
        return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
 }
index 51a1afc..402f330 100644 (file)
@@ -3059,7 +3059,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
-       SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
index 191fd78..32c8169 100644 (file)
@@ -4809,6 +4809,15 @@ static int alc269_resume(struct hda_codec *codec)
 }
 #endif /* CONFIG_PM */
 
+static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
+                                                const struct alc_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == ALC_FIXUP_ACT_PRE_PROBE)
+               spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+}
+
 static void alc269_fixup_hweq(struct hda_codec *codec,
                               const struct alc_fixup *fix, int action)
 {
@@ -4909,6 +4918,8 @@ enum {
        ALC269_FIXUP_DMIC,
        ALC269VB_FIXUP_AMIC,
        ALC269VB_FIXUP_DMIC,
+       ALC269_FIXUP_LENOVO_DOCK,
+       ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
 };
 
 static const struct alc_fixup alc269_fixups[] = {
@@ -4970,6 +4981,8 @@ static const struct alc_fixup alc269_fixups[] = {
        [ALC269_FIXUP_PCM_44K] = {
                .type = ALC_FIXUP_FUNC,
                .v.func = alc269_fixup_pcm_44k,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_QUANTA_MUTE
        },
        [ALC269_FIXUP_STEREO_DMIC] = {
                .type = ALC_FIXUP_FUNC,
@@ -5029,6 +5042,20 @@ static const struct alc_fixup alc269_fixups[] = {
                        { }
                },
        },
+       [ALC269_FIXUP_LENOVO_DOCK] = {
+               .type = ALC_FIXUP_PINS,
+               .v.pins = (const struct alc_pincfg[]) {
+                       { 0x19, 0x23a11040 }, /* dock mic */
+                       { 0x1b, 0x2121103f }, /* dock headphone */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
+       },
+       [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
+               .type = ALC_FIXUP_FUNC,
+               .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5051,8 +5078,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
-       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
-       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+       SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 
 #if 1
@@ -5109,6 +5139,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 static const struct alc_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
        {.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
+       {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {}
 };
 
index 1fe1308..7160ff2 100644 (file)
@@ -3227,7 +3227,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
 {
        struct via_spec *spec = codec->spec;
        int imux_is_smixer;
-       unsigned int parm;
+       unsigned int parm, parm2;
        /* MUX6 (1eh) = stereo mixer */
        imux_is_smixer =
        snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
@@ -3250,7 +3250,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x27, &parm);
        snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
+       parm2 = parm; /* for pin 0x0b */
 
        /* PW2 (26h), AOW2 (ah) */
        parm = AC_PWRST_D3;
@@ -3265,6 +3265,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
        if (!spec->hp_independent_mode) /* check for redirected HP */
                set_pin_power_state(codec, 0x28, &parm);
        snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm);
+       if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
+               parm = parm2;
+       snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
        /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
        snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_POWER_STATE,
                            imux_is_smixer ? AC_PWRST_D0 : parm);
index 764cc93..075d5aa 100644 (file)
@@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
 }
 
 static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
+static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
 
 static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
     {
@@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
        .info = ak4396_dac_vol_info,
        .get = ak4396_dac_vol_get,
        .put = ak4396_dac_vol_put,
-       .tlv = { .p = db_scale_wm_dac },
+       .tlv = { .p = ak4396_db_scale },
     },
 };
 
index 07dd7eb..e97df24 100644 (file)
@@ -3105,6 +3105,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
                /* VMID 2*250k */
                snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
                                    WM8962_VMID_SEL_MASK, 0x100);
+
+               if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+                       msleep(100);
                break;
 
        case SND_SOC_BIAS_OFF:
index de61b8a..98c5774 100644 (file)
@@ -2508,7 +2508,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
                return -EINVAL;
        }
 
-       bclk_rate = params_rate(params) * 2;
+       bclk_rate = params_rate(params) * 4;
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
                bclk_rate *= 16;
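
Going from rate * 2 to rate * 4 doubles the minimum BCLK the driver asks for; combined with the * 16 factor for 16-bit samples, a 48 kHz stream now needs 48000 * 4 * 16 = 3,072,000 BCLKs per second. The assumption behind the larger multiplier (hedged, not stated in the hunk itself) is that the interface must clock enough slots for four channels. Arithmetic only, for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int rate = 48000;              /* sample rate */
        unsigned int bclk = rate * 4 * 16;      /* S16_LE path above */

        printf("minimum BCLK: %u Hz\n", bclk);  /* 3072000 */
        return 0;
}
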
index 90117f8..90e5005 100644 (file)
@@ -270,7 +270,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);
 
 /* Mic select */
 static const struct snd_kcontrol_new wm9712_mic_src_controls =
-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
+SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
 
 /* diff select */
 static const struct snd_kcontrol_new wm9712_diff_sel_controls =
@@ -289,7 +289,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,
        &wm9712_capture_selectl_controls),
 SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
        &wm9712_capture_selectr_controls),
-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
+SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
+       &wm9712_mic_src_controls),
+SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
        &wm9712_mic_src_controls),
 SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
        &wm9712_diff_sel_controls),
@@ -317,6 +319,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
+SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
 SND_SOC_DAPM_OUTPUT("MONOOUT"),
 SND_SOC_DAPM_OUTPUT("HPOUTL"),
@@ -377,6 +380,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
        {"Mic PGA", NULL, "MIC1"},
        {"Mic PGA", NULL, "MIC2"},
 
+       /* microphones */
+       {"Differential Mic", NULL, "MIC1"},
+       {"Differential Mic", NULL, "MIC2"},
+       {"Left Mic Select Source", "Mic 1", "MIC1"},
+       {"Left Mic Select Source", "Mic 2", "MIC2"},
+       {"Left Mic Select Source", "Stereo", "MIC1"},
+       {"Left Mic Select Source", "Differential", "Differential Mic"},
+       {"Right Mic Select Source", "Mic 1", "MIC1"},
+       {"Right Mic Select Source", "Mic 2", "MIC2"},
+       {"Right Mic Select Source", "Stereo", "MIC2"},
+       {"Right Mic Select Source", "Differential", "Differential Mic"},
+
        /* left capture selector */
        {"Left Capture Select", "Mic", "MIC1"},
        {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
index 379baad..5e634a2 100644 (file)
@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
                return 0;
 
        /* If a clock source can't tell us whether it's valid, we assume it is */
-       if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
+       if (!uac2_control_is_readable(cs_desc->bmControls,
+                                     UAC2_CS_CONTROL_CLOCK_VALID - 1))
                return 1;
 
        err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,