Merge branch 'rcu/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck...
author Ingo Molnar <mingo@kernel.org>
Mon, 11 Jun 2012 08:30:23 +0000 (10:30 +0200)
committer Ingo Molnar <mingo@kernel.org>
Mon, 11 Jun 2012 08:30:23 +0000 (10:30 +0200)
Merge RCU fixes from Paul E. McKenney:

 " This series has four patches, the major point of which is to eliminate
   some slowdowns (including boot-time slowdowns) resulting from some
   RCU_FAST_NO_HZ changes.  The issue with the changes is that posting timers
   from the idle loop has no effect if the CPU has entered dyntick-idle
   mode because the CPU has already computed its wakeup time, and posting
   a timer does not cause it to be recomputed.  The short-term fix is for
   RCU to precompute the timeout value so that the CPU's calculation is
   correct. "

Signed-off-by: Ingo Molnar <mingo@kernel.org>
95 files changed:
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Documentation/vm/frontswap.txt [new file with mode: 0644]
MAINTAINERS
arch/arm/Kconfig
arch/arm/mach-shmobile/Kconfig
arch/arm/mm/dma-mapping.c
arch/avr32/kernel/signal.c
arch/blackfin/kernel/process.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/smpboot.c
arch/xtensa/include/asm/syscall.h
arch/xtensa/kernel/signal.c
drivers/acpi/bus.c
drivers/acpi/power.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/clocksource/Makefile
drivers/clocksource/em_sti.c [new file with mode: 0644]
drivers/gpio/gpio-samsung.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/i2c/muxes/Kconfig
drivers/i2c/muxes/Makefile
drivers/i2c/muxes/i2c-mux-pinctrl.c [new file with mode: 0644]
drivers/rtc/rtc-cmos.c
drivers/staging/ramster/zcache-main.c
drivers/staging/zcache/zcache-main.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/xen/tmem.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/transport.c
fs/fuse/control.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/proc/base.c
include/drm/drm_pciids.h
include/linux/clockchips.h
include/linux/compaction.h
include/linux/frontswap.h [new file with mode: 0644]
include/linux/fs.h
include/linux/fuse.h
include/linux/i2c-mux-pinctrl.h [new file with mode: 0644]
include/linux/init_task.h
include/linux/radix-tree.h
include/linux/sched.h
include/linux/swap.h
include/linux/swapfile.h [new file with mode: 0644]
kernel/cgroup.c
kernel/irq/chip.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/panic.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/smpboot.c
kernel/time/clockevents.c
kernel/time/tick-sched.c
lib/Kconfig.debug
lib/radix-tree.c
lib/spinlock_debug.c
mm/Kconfig
mm/Makefile
mm/compaction.c
mm/frontswap.c [new file with mode: 0644]
mm/internal.h
mm/memblock.c
mm/migrate.c
mm/nommu.c
mm/page_alloc.c
mm/page_io.c
mm/swapfile.c
virt/kvm/irq_comm.c

diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
new file mode 100644 (file)
index 0000000..ae8af16
--- /dev/null
@@ -0,0 +1,93 @@
+Pinctrl-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses pin multiplexing to
+route the I2C signals, and represents the pin multiplexing configuration
+using the pinctrl device tree bindings.
+
+                                 +-----+  +-----+
+                                 | dev |  | dev |
+    +------------------------+   +-----+  +-----+
+    | SoC                    |      |        |
+    |                   /----|------+--------+
+    |   +---+   +------+     | child bus A, on first set of pins
+    |   |I2C|---|Pinmux|     |
+    |   +---+   +------+     | child bus B, on second set of pins
+    |                   \----|------+--------+--------+
+    |                        |      |        |        |
+    +------------------------+  +-----+  +-----+  +-----+
+                                | dev |  | dev |  | dev |
+                                +-----+  +-----+  +-----+
+
+Required properties:
+- compatible: i2c-mux-pinctrl
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+
+Also required are:
+
+* Standard pinctrl properties that specify the pin mux state for each child
+  bus. See ../pinctrl/pinctrl-bindings.txt.
+
+* Standard I2C mux properties. See mux.txt in this directory.
+
+* I2C child bus nodes. See mux.txt in this directory.
+
+For each named state defined in the pinctrl-names property, an I2C child bus
+will be created. I2C child bus numbers are assigned based on the index into
+the pinctrl-names property.
+
+The only exception is that no bus will be created for a state named "idle". If
+such a state is defined, it must be the last entry in pinctrl-names. For
+example:
+
+       pinctrl-names = "ddc", "pta", "idle"  ->  ddc = bus 0, pta = bus 1
+       pinctrl-names = "ddc", "idle", "pta"  ->  Invalid ("idle" not last)
+       pinctrl-names = "idle", "ddc", "pta"  ->  Invalid ("idle" not last)
+
+Whenever an access is made to a device on a child bus, the relevant pinctrl
+state will be programmed into hardware.
+
+If an idle state is defined, whenever an access is not being made to a device
+on a child bus, the idle pinctrl state will be programmed into hardware.
+
+If an idle state is not defined, the most recently used pinctrl state will be
+left programmed into hardware whenever no access is being made of a device on
+a child bus.
+
+Example:
+
+       i2cmux {
+               compatible = "i2c-mux-pinctrl";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               i2c-parent = <&i2c1>;
+
+               pinctrl-names = "ddc", "pta", "idle";
+               pinctrl-0 = <&state_i2cmux_ddc>;
+               pinctrl-1 = <&state_i2cmux_pta>;
+               pinctrl-2 = <&state_i2cmux_idle>;
+
+               i2c@0 {
+                       reg = <0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       eeprom {
+                               compatible = "eeprom";
+                               reg = <0x50>;
+                       };
+               };
+
+               i2c@1 {
+                       reg = <1>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       eeprom {
+                               compatible = "eeprom";
+                               reg = <0x50>;
+                       };
+               };
+       };
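+
+The pinctrl state nodes referenced above (state_i2cmux_ddc and friends)
+live in the pin controller's own node and are SoC-specific, so their
+contents are not part of this binding.  Purely as an illustrative sketch,
+a Tegra-style pin controller might define the "ddc" state as:
+
+       pinmux {
+               state_i2cmux_ddc: pinmux_i2cmux_ddc {
+                       ddc {
+                               nvidia,pins = "ddc";
+                               nvidia,function = "i2c2";
+                       };
+                       pta {
+                               nvidia,pins = "pta";
+                               nvidia,function = "rsvd4";
+                       };
+               };
+       };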
+
index c45513d..a92c5eb 100644 (file)
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        sched_debug     [KNL] Enables verbose scheduler debug messages.
 
+       skew_tick=      [KNL] Offset the periodic timer tick per cpu to mitigate
+                       xtime_lock contention on larger systems, and/or RCU lock
+                       contention on all systems with CONFIG_MAXSMP set.
+                       Format: { "0" | "1" }
+                       0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1")
+                       1 -- enable.
+                       Note: increases power consumption, thus should only be
+                       enabled if running jitter sensitive (HPC/RT) workloads.
+
        security=       [SECURITY] Choose a security module to enable at boot.
                        If this boot parameter is not specified, only the first
                        security module asking for security registration will be
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
new file mode 100644 (file)
index 0000000..37067cf
--- /dev/null
@@ -0,0 +1,278 @@
+Frontswap provides a "transcendent memory" interface for swap pages.
+In some environments, dramatic performance savings may be obtained because
+swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
+
+(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
+and the only necessary changes to the core kernel for transcendent memory;
+all other supporting code -- the "backends" -- is implemented as drivers.
+See the LWN.net article "Transcendent memory in a nutshell" for a detailed
+overview of frontswap and related kernel parts:
+https://lwn.net/Articles/454795/ )
+
+Frontswap is so named because it can be thought of as the opposite of
+a "backing" store for a swap device.  The storage is assumed to be
+a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
+to the requirements of transcendent memory (such as Xen's "tmem", or
+in-kernel compressed memory, aka "zcache", or future RAM-like devices);
+this pseudo-RAM device is not directly accessible or addressable by the
+kernel and is of unknown and possibly time-varying size.  The driver
+links itself to frontswap by calling frontswap_register_ops to set the
+frontswap_ops funcs appropriately and the functions it provides must
+conform to certain policies as follows:
+
+An "init" prepares the device to receive frontswap pages associated
+with the specified swap device number (aka "type").  A "store" will
+copy the page to transcendent memory and associate it with the type and
+offset associated with the page. A "load" will copy the page, if found,
+from transcendent memory into kernel memory, but will NOT remove the page
+from transcendent memory.  An "invalidate_page" will remove the page
+from transcendent memory and an "invalidate_area" will remove ALL pages
+associated with the swap type (e.g., like swapoff) and notify the "device"
+to refuse further stores with that swap type.
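+
+As a rough sketch of how a backend wires itself up (hedged: the ops fields
+below follow the 3.5-era frontswap API, and "mybackend" is a hypothetical
+driver):
+
+       #include <linux/module.h>
+       #include <linux/frontswap.h>
+
+       static void mybackend_init(unsigned type) { /* prep for this type */ }
+       static int mybackend_store(unsigned type, pgoff_t offset,
+                                  struct page *page) { return -1; }
+       static int mybackend_load(unsigned type, pgoff_t offset,
+                                 struct page *page) { return -1; }
+       static void mybackend_invalidate_page(unsigned type, pgoff_t off) { }
+       static void mybackend_invalidate_area(unsigned type) { }
+
+       static struct frontswap_ops mybackend_ops = {
+               .init = mybackend_init,
+               .store = mybackend_store,
+               .load = mybackend_load,
+               .invalidate_page = mybackend_invalidate_page,
+               .invalidate_area = mybackend_invalidate_area,
+       };
+
+       static int __init mybackend_module_init(void)
+       {
+               /* previously registered ops are returned for chaining */
+               struct frontswap_ops old = frontswap_register_ops(&mybackend_ops);
+
+               (void)old;
+               return 0;
+       }
+       module_init(mybackend_module_init);
+
+(The trivial store/load above reject everything; a real backend returns 0
+from "store" once the page's data has been copied and made persistent.)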
+
+Once a page is successfully stored, a matching load on the page will normally
+succeed.  So when the kernel finds itself in a situation where it needs
+to swap out a page, it first attempts to use frontswap.  If the store returns
+success, the data has been successfully saved to transcendent memory and
+a disk write and, if the data is later read back, a disk read are avoided.
+If a store returns failure, transcendent memory has rejected the data, and the
+page can be written to swap as usual.
+
+If a backend chooses, frontswap can be configured as a "writethrough
+cache" by calling frontswap_writethrough().  In this mode, the reduction
+in swap device writes is lost (and also a non-trivial performance advantage)
+in order to allow the backend to arbitrarily "reclaim" space used to
+store frontswap pages to more completely manage its memory usage.
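+
+Enabling writethrough is a single call at backend initialization; assuming
+the 3.5-era API:
+
+       frontswap_writethrough(true);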
+
+Note that if a page is stored and the page already exists in transcendent memory
+(a "duplicate" store), either the store succeeds and the data is overwritten,
+or the store fails AND the page is invalidated.  This ensures stale data may
+never be obtained from frontswap.
+
+If properly configured, monitoring of frontswap is done via debugfs in
+the /sys/kernel/debug/frontswap directory.  The effectiveness of
+frontswap can be measured (across all swap devices) with:
+
+failed_stores  - how many store attempts have failed
+loads          - how many loads were attempted (all should succeed)
+succ_stores    - how many store attempts have succeeded
+invalidates    - how many invalidates were attempted
+
+A backend implementation may provide additional metrics.
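+
+For example, assuming debugfs is mounted at /sys/kernel/debug, the counters
+can be read with something like:
+
+       # cat /sys/kernel/debug/frontswap/succ_stores
+       # cat /sys/kernel/debug/frontswap/failed_stores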
+
+FAQ
+
+1) Where's the value?
+
+When a workload starts swapping, performance falls through the floor.
+Frontswap significantly increases performance in many such workloads by
+providing a clean, dynamic interface to read and write swap pages to
+"transcendent memory" that is otherwise not directly addressable to the kernel.
+This interface is ideal when data is transformed to a different form
+and size (such as with compression) or secretly moved (as might be
+useful for write-balancing for some RAM-like devices).  Swap pages (and
+evicted page-cache pages) are a great use for this kind of slower-than-RAM-
+but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
+cleancache) interface to transcendent memory provides a nice way to read
+and write -- and indirectly "name" -- the pages.
+
+With a fairly small impact on the kernel, frontswap -- and cleancache --
+provide a huge amount of flexibility for more dynamic, flexible RAM
+utilization in various system configurations:
+
+In the single kernel case, aka "zcache", pages are compressed and
+stored in local memory, thus increasing the total anonymous pages
+that can be safely kept in RAM.  Zcache essentially trades off CPU
+cycles used in compression/decompression for better memory utilization.
+Benchmarks have shown little or no impact when memory pressure is
+low while providing a significant performance improvement (25%+)
+on some workloads under high memory pressure.
+
+"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
+support for clustered systems.  Frontswap pages are locally compressed
+as in zcache, but then "remotified" to another system's RAM.  This
+allows RAM to be dynamically load-balanced back-and-forth as needed,
+i.e. when system A is overcommitted, it can swap to system B, and
+vice versa.  RAMster can also be configured as a memory server so
+many servers in a cluster can swap, dynamically as needed, to a single
+server configured with a large amount of RAM... without pre-configuring
+how much of the RAM is available for each of the clients!
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines.  This is really hard to do with RAM and efforts to do
+it well with no kernel changes have essentially failed (except in some
+well-publicized special-case workloads).
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization.  And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "selfballooning"), sudden unexpected
+memory pressure may result in swapping; frontswap allows those pages
+to be swapped to and from hypervisor RAM (if overall host system memory
+conditions allow), thus mitigating the potentially awful performance impact
+of unplanned swapping.
+
+A KVM implementation is underway and has been RFC'ed to lkml.  And,
+using frontswap, investigation is also underway on the use of NVM as
+a memory extension technology.
+
+2) Sure there may be performance advantages in some situations, but
+   what's the space/time overhead of frontswap?
+
+If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
+nothingness and the only overhead is a few extra bytes per swapon'ed
+swap device.  If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
+registers, the only cost is a single compare of a global variable against
+zero for every swap page read or written.  If CONFIG_FRONTSWAP is enabled
+AND a frontswap backend registers AND the backend fails every "store"
+request (i.e. provides no memory despite claiming it might),
+CPU overhead is still negligible -- and since every frontswap fail
+precedes a swap page write-to-disk, the system is highly likely
+to be I/O bound and using a small fraction of a percent of a CPU
+will be irrelevant anyway.
+
+As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
+registers, one bit is allocated for every swap page for every swap
+device that is swapon'd.  This is added to the EIGHT bits (which
+was sixteen until about 2.6.34) that the kernel already allocates
+for every swap page for every swap device that is swapon'd.  (Hugh
+Dickins has observed that frontswap could probably steal one of
+the existing eight bits, but let's worry about that minor optimization
+later.)  For very large swap disks (which are rare) on a standard
+4K pagesize, this is 1MB per 32GB swap.
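+(A quick check of that arithmetic: 32GB of swap at 4K per page is 8M
+swap pages; one bit per page is 8M bits, i.e. 1MB.)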
+
+When swap pages are stored in transcendent memory instead of written
+out to disk, there is a side effect that this may create more memory
+pressure that can potentially outweigh the other advantages.  A
+backend, such as zcache, must implement policies to carefully (but
+dynamically) manage memory limits to ensure this doesn't happen.
+
+3) OK, how about a quick overview of what this frontswap patch does
+   in terms that a kernel hacker can grok?
+
+Let's assume that a frontswap "backend" has registered during
+kernel initialization; this registration indicates that this
+frontswap backend has access to some "memory" that is not directly
+accessible by the kernel.  Exactly how much memory it provides is
+entirely dynamic and random.
+
+Whenever a swap-device is swapon'd, frontswap_init() is called,
+passing the swap device number (aka "type") as a parameter.
+This notifies frontswap to expect attempts to "store" swap pages
+associated with that number.
+
+Whenever the swap subsystem is readying a page to write to a swap
+device (cf. swap_writepage()), frontswap_store is called.  Frontswap
+consults with the frontswap backend and if the backend says it does NOT
+have room, frontswap_store returns -1 and the kernel swaps the page
+to the swap device as normal.  Note that the response from the frontswap
+backend is unpredictable to the kernel; it may choose to never accept a
+page, it could accept every ninth page, or it might accept every
+page.  But if the backend does accept a page, the data from the page
+has already been copied and associated with the type and offset,
+and the backend guarantees the persistence of the data.  In this case,
+frontswap sets a bit in the "frontswap_map" for the swap device
+corresponding to the page offset on the swap device to which it would
+otherwise have written the data.
+
+When the swap subsystem needs to swap-in a page (swap_readpage()),
+it first calls frontswap_load() which checks the frontswap_map to
+see if the page was earlier accepted by the frontswap backend.  If
+it was, the page of data is filled from the frontswap backend and
+the swap-in is complete.  If not, the normal swap-in code is
+executed to obtain the page of data from the real swap device.
+
+So every time the frontswap backend accepts a page, a swap device write
+and (potentially) a swap device read are replaced by a "frontswap backend
+store" and (possibly) a "frontswap backend load", which are presumably much
+faster.
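+
+In code terms, the store-side hook boils down to roughly the following (a
+simplified sketch of the swap_writepage() path, not the verbatim
+mm/page_io.c source; submit_swap_bio() is a hypothetical stand-in for the
+normal block I/O submission):
+
+       int swap_writepage(struct page *page, struct writeback_control *wbc)
+       {
+               if (frontswap_store(page) == 0) {
+                       /* backend accepted the page: no block I/O needed */
+                       set_page_writeback(page);
+                       unlock_page(page);
+                       end_page_writeback(page);
+                       return 0;
+               }
+               /* backend rejected (or absent): write to the real swap
+                * device as usual */
+               return submit_swap_bio(page, wbc);
+       }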
+
+4) Can't frontswap be configured as a "special" swap device that is
+   just higher priority than any real swap device (e.g. like zswap,
+   or maybe swap-over-nbd/NFS)?
+
+No.  First, the existing swap subsystem doesn't allow for any kind of
+swap hierarchy.  Perhaps it could be rewritten to accommodate a hierarchy,
+but this would require fairly drastic changes.  Even if it were
+rewritten, the existing swap subsystem uses the block I/O layer which
+assumes a swap device is fixed size and any page in it is linearly
+addressable.  Frontswap barely touches the existing swap subsystem,
+and works around the constraints of the block I/O subsystem to provide
+a great deal of flexibility and dynamicity.
+
+For example, the acceptance of any swap page by the frontswap backend is
+entirely unpredictable. This is critical to the definition of frontswap
+backends because it grants completely dynamic discretion to the
+backend.  In zcache, one cannot know a priori how compressible a page is.
+"Poorly" compressible pages can be rejected, and "poorly" can itself be
+defined dynamically depending on current memory constraints.
+
+Further, frontswap is entirely synchronous whereas a real swap
+device is, by definition, asynchronous and uses block I/O.  The
+block I/O layer is not only unnecessary, but may perform "optimizations"
+that are inappropriate for a RAM-oriented device including delaying
+the write of some pages for a significant amount of time.  Synchrony is
+required to ensure the dynamicity of the backend and to avoid thorny race
+conditions that would unnecessarily and greatly complicate frontswap
+and/or the block I/O subsystem.  That said, only the initial "store"
+and "load" operations need be synchronous.  A separate asynchronous thread
+is free to manipulate the pages stored by frontswap.  For example,
+the "remotification" thread in RAMster uses standard asynchronous
+kernel sockets to move compressed frontswap pages to a remote machine.
+Similarly, a KVM guest-side implementation could do in-guest compression
+and use "batched" hypercalls.
+
+In a virtualized environment, the dynamicity allows the hypervisor
+(or host OS) to do "intelligent overcommit".  For example, it can
+choose to accept pages only until host-swapping might be imminent,
+then force guests to do their own swapping.
+
+There is a downside to the transcendent memory specifications for
+frontswap:  Since any "store" might fail, there must always be a real
+slot on a real swap device to swap the page.  Thus frontswap must be
+implemented as a "shadow" to every swapon'd device with the potential
+capability of holding every page that the swap device might have held
+and the possibility that it might hold no pages at all.  This means
+that frontswap cannot contain more pages than the total of swapon'd
+swap devices.  For example, if NO swap device is configured on some
+installation, frontswap is useless.  Swapless portable devices
+can still use frontswap but a backend for such devices must configure
+some kind of "ghost" swap device and ensure that it is never used.
+
+5) Why this weird definition about "duplicate stores"?  If a page
+   has been previously successfully stored, can't it always be
+   successfully overwritten?
+
+Nearly always it can, but no, sometimes it cannot.  Consider an example
+where data is compressed and the original 4K page has been compressed
+to 1K.  Now an attempt is made to overwrite the page with data that
+is non-compressible and so would take the entire 4K.  But the backend
+has no more space.  In this case, the store must be rejected.  Whenever
+frontswap rejects a store that would overwrite, it also must invalidate
+the old data and ensure that it is no longer accessible.  Since the
+swap subsystem then writes the new data to the real swap device,
+this is the correct course of action to ensure coherency.
+
+6) What is frontswap_shrink for?
+
+When the (non-frontswap) swap subsystem swaps out a page to a real
+swap device, that page is only taking up low-value pre-allocated disk
+space.  But if frontswap has placed a page in transcendent memory, that
+page may be taking up valuable real estate.  The frontswap_shrink
+routine allows code outside of the swap subsystem to force pages out
+of the memory managed by frontswap and back into kernel-addressable memory.
+For example, in RAMster, a "suction driver" thread will attempt
+to "repatriate" pages sent to a remote machine back to the local machine;
+this is driven using the frontswap_shrink mechanism when memory pressure
+subsides.
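+
+Using it is a one-liner; assuming the 3.5-era signature, a caller that
+wants at most target_pages pages left in frontswap does:
+
+       frontswap_shrink(target_pages);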
+
+7) Why does the frontswap patch create the new include file swapfile.h?
+
+The frontswap code depends on some swap-subsystem-internal data
+structures that have, over the years, moved back and forth between
+static and global.  This seemed a reasonable compromise:  Define
+them as global but declare them in a new include file that isn't
+included by the large number of source files that include swap.h.
+
+Dan Magenheimer, last updated April 9, 2012
index 55f0fda..f935a0c 100644 (file)
@@ -2930,6 +2930,13 @@ F:       Documentation/power/freezing-of-tasks.txt
 F:     include/linux/freezer.h
 F:     kernel/freezer.c
 
+FRONTSWAP API
+M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     mm/frontswap.c
+F:     include/linux/frontswap.h
+
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:     David Howells <dhowells@redhat.com>
 L:     linux-cachefs@redhat.com
@@ -4096,6 +4103,8 @@ F:        drivers/scsi/53c700*
 LED SUBSYSTEM
 M:     Bryan Wu <bryan.wu@canonical.com>
 M:     Richard Purdie <rpurdie@rpsys.net>
+L:     linux-leds@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:     Maintained
 F:     drivers/leds/
 F:     include/linux/leds.h
@@ -7291,11 +7300,11 @@ F:      Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
 F:     include/linux/uio*.h
 
-UTIL-LINUX-NG PACKAGE
+UTIL-LINUX PACKAGE
 M:     Karel Zak <kzak@redhat.com>
-L:     util-linux-ng@vger.kernel.org
-W:     http://kernel.org/~kzak/util-linux-ng/
-T:     git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git
+L:     util-linux@vger.kernel.org
+W:     http://en.wikipedia.org/wiki/Util-linux
+T:     git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:     Maintained
 
 UVESAFB DRIVER
index b649c59..84449dd 100644 (file)
@@ -7,7 +7,6 @@ config ARM
        select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-       select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
index f31383c..df33909 100644 (file)
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
        help
          This enables build of the TMU timer driver.
 
+config EM_TIMER_STI
+       bool "STI timer driver"
+       default y
+       help
+         This enables build of the STI timer driver.
+
 endmenu
 
 config SH_CLK_CPG
index ea6b431..106c4c0 100644 (file)
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
        unsigned long base = consistent_base;
        unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+       if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
                return 0;
-#endif
 
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
        struct page *page;
        void *ptr;
 
-       if (cpu_architecture() < CPU_ARCH_ARMv6)
+       if (!IS_ENABLED(CONFIG_CMA))
                return 0;
 
        ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        if (arch_is_coherent() || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (cpu_architecture() < CPU_ARCH_ARMv6)
+       else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
        else if (gfp & GFP_ATOMIC)
                addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
        if (arch_is_coherent() || nommu()) {
                __dma_free_buffer(page, size);
-       } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+       } else if (!IS_ENABLED(CONFIG_CMA)) {
                __dma_free_remap(cpu_addr, size);
                __dma_free_buffer(page, size);
        } else {
index c140f9b..d552a85 100644 (file)
@@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
        if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
                syscall = 1;
 
-       if (ti->flags & _TIF_SIGPENDING))
+       if (ti->flags & _TIF_SIGPENDING)
                do_signal(regs, syscall);
 
        if (ti->flags & _TIF_NOTIFY_RESUME) {
index 2e3994b..62bcea7 100644 (file)
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
        unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-       if (current->rt.nr_cpus_allowed == num_possible_cpus())
+       if (current->nr_cpus_allowed == num_possible_cpus())
                set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
index 0a687fd..a97f3c4 100644 (file)
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data)
         */
        iv = __this_cpu_read(mce_next_interval);
        if (mce_notify_irq())
-               iv = max(iv, (unsigned long) HZ/100);
+               iv = max(iv / 2, (unsigned long) HZ/100);
        else
                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
        __this_cpu_write(mce_next_interval, iv);
index f56f96d..fd019d7 100644 (file)
@@ -410,15 +410,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-       /*
-        * For perf, we return last level cache shared map.
-        * And for power savings, we return cpu_core_map
-        */
-       if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
-               return cpu_core_mask(cpu);
-       else
-               return cpu_llc_shared_mask(cpu);
+       return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
index 0b9f2e1..c1dacca 100644 (file)
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
        struct timespec __user *tsp, const sigset_t __user *sigmask,
        size_t sigsetsize);
-
-
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
+               size_t sigsetsize);
index b9f8e58..efe4e85 100644 (file)
@@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs)
                if (ret)
                        return;
 
-               signal_delivered(signr, info, ka, regs, 0);
+               signal_delivered(signr, &info, &ka, regs, 0);
                if (current->ptrace & PT_SINGLESTEP)
                        task_pt_regs(current)->icountlevel = 1;
 
index 3188da3..adceafd 100644 (file)
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
                                  Power Management
    -------------------------------------------------------------------------- */
 
+static const char *state_string(int state)
+{
+       switch (state) {
+       case ACPI_STATE_D0:
+               return "D0";
+       case ACPI_STATE_D1:
+               return "D1";
+       case ACPI_STATE_D2:
+               return "D2";
+       case ACPI_STATE_D3_HOT:
+               return "D3hot";
+       case ACPI_STATE_D3_COLD:
+               return "D3";
+       default:
+               return "(unknown)";
+       }
+}
+
 static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
-       int result = 0;
-       acpi_status status = 0;
-       unsigned long long psc = 0;
+       int result = ACPI_STATE_UNKNOWN;
 
        if (!device || !state)
                return -EINVAL;
 
-       *state = ACPI_STATE_UNKNOWN;
-
-       if (device->flags.power_manageable) {
-               /*
-                * Get the device's power state either directly (via _PSC) or
-                * indirectly (via power resources).
-                */
-               if (device->power.flags.power_resources) {
-                       result = acpi_power_get_inferred_state(device, state);
-                       if (result)
-                               return result;
-               } else if (device->power.flags.explicit_get) {
-                       status = acpi_evaluate_integer(device->handle, "_PSC",
-                                                      NULL, &psc);
-                       if (ACPI_FAILURE(status))
-                               return -ENODEV;
-                       *state = (int)psc;
-               }
-       } else {
+       if (!device->flags.power_manageable) {
                /* TBD: Non-recursive algorithm for walking up hierarchy. */
                *state = device->parent ?
                        device->parent->power.state : ACPI_STATE_D0;
+               goto out;
+       }
+
+       /*
+        * Get the device's power state either directly (via _PSC) or
+        * indirectly (via power resources).
+        */
+       if (device->power.flags.explicit_get) {
+               unsigned long long psc;
+               acpi_status status = acpi_evaluate_integer(device->handle,
+                                                          "_PSC", NULL, &psc);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+
+               result = psc;
+       }
+       /* The test below covers ACPI_STATE_UNKNOWN too. */
+       if (result <= ACPI_STATE_D2) {
+               ; /* Do nothing. */
+       } else if (device->power.flags.power_resources) {
+               int error = acpi_power_get_inferred_state(device, &result);
+               if (error)
+                       return error;
+       } else if (result == ACPI_STATE_D3_HOT) {
+               result = ACPI_STATE_D3;
        }
+       *state = result;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-                         device->pnp.bus_id, *state));
+ out:
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+                         device->pnp.bus_id, state_string(*state)));
 
        return 0;
 }
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
        /* Make sure this is a valid target state */
 
        if (state == device->power.state) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
-                                 state));
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+                                 state_string(state)));
                return 0;
        }
 
        if (!device->power.states[state].flags.valid) {
-               printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+               printk(KERN_WARNING PREFIX "Device does not support %s\n",
+                      state_string(state));
                return -ENODEV;
        }
        if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
       end:
        if (result)
                printk(KERN_WARNING PREFIX
-                             "Device [%s] failed to transition to D%d\n",
-                             device->pnp.bus_id, state);
+                             "Device [%s] failed to transition to %s\n",
+                             device->pnp.bus_id, state_string(state));
        else {
                device->power.state = state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Device [%s] transitioned to D%d\n",
-                                 device->pnp.bus_id, state));
+                                 "Device [%s] transitioned to %s\n",
+                                 device->pnp.bus_id, state_string(state)));
        }
 
        return result;
index 0500f71..dd6d6a3 100644 (file)
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
         * We know a device's inferred power state when all the resources
         * required for a given D-state are 'on'.
         */
-       for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+       for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
                list = &device->power.states[i].resources;
                if (list->count < 1)
                        continue;
index 85cbfdc..c8a1f3b 100644 (file)
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
                                                ACPI_BUS_TYPE_POWER_BUTTON,
                                                ACPI_STA_DEFAULT,
                                                &ops);
+               device_init_wakeup(&device->dev, true);
        }
 
        if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
index 74ee4ab..8856102 100644 (file)
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
 static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -184,6 +185,14 @@ static int acpi_pm_prepare(void)
        return error;
 }
 
+static int find_powerf_dev(struct device *dev, void *data)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       const char *hid = acpi_device_hid(device);
+
+       return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
 /**
  *     acpi_pm_finish - Instruct the platform to leave a sleep state.
  *
@@ -192,6 +201,7 @@ static int acpi_pm_prepare(void)
  */
 static void acpi_pm_finish(void)
 {
+       struct device *pwr_btn_dev;
        u32 acpi_state = acpi_target_sleep_state;
 
        acpi_ec_unblock_transactions();
@@ -209,6 +219,23 @@ static void acpi_pm_finish(void)
        acpi_set_firmware_waking_vector((acpi_physical_address) 0);
 
        acpi_target_sleep_state = ACPI_STATE_S0;
+
+       /* If we were woken with the fixed power button, provide a small
+        * hint to userspace in the form of a wakeup event on the fixed power
+        * button device (if it can be found).
+        *
+        * We delay the event generation til now, as the PM layer requires
+        * timekeeping to be running before we generate events. */
+       if (!pwr_btn_event_pending)
+               return;
+
+       pwr_btn_event_pending = false;
+       pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+                                     find_powerf_dev);
+       if (pwr_btn_dev) {
+               pm_wakeup_event(pwr_btn_dev, 0);
+               put_device(pwr_btn_dev);
+       }
 }
 
 /**
@@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
        /* ACPI 3.0 specs (P62) says that it's the responsibility
         * of the OSPM to clear the status bit [ implying that the
         * POWER_BUTTON event should not reach userspace ]
+        *
+        * However, we do generate a small hint for userspace in the form of
+        * a wakeup event. We flag this condition for now and generate the
+        * event later, as we're currently too early in resume to be able to
+        * generate wakeup events.
         */
-       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
-               acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+               acpi_event_status pwr_btn_status;
+
+               acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+               if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+                       acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+                       /* Flag for later */
+                       pwr_btn_event_pending = true;
+               }
+       }
 
        /*
         * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
         * can wake the system.  _S0W may be valid, too.
         */
        if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-           (device_may_wakeup(dev) &&
-            adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+           (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+            adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
                acpi_status status;
 
                acpi_method[3] = 'W';
index 8d81a1d..dd3e661 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)    += cs5535-clockevt.o
 obj-$(CONFIG_SH_TIMER_CMT)     += sh_cmt.o
 obj-$(CONFIG_SH_TIMER_MTU2)    += sh_mtu2.o
 obj-$(CONFIG_SH_TIMER_TMU)     += sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI)     += em_sti.o
 obj-$(CONFIG_CLKBLD_I8253)     += i8253.o
 obj-$(CONFIG_CLKSRC_MMIO)      += mmio.o
 obj-$(CONFIG_DW_APB_TIMER)     += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644 (file)
index 0000000..372051d
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ *  Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+       void __iomem *base;
+       struct clk *clk;
+       struct platform_device *pdev;
+       unsigned int active[USER_NR];
+       unsigned long rate;
+       raw_spinlock_t lock;
+       struct clock_event_device ced;
+       struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+       return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+                               unsigned long value)
+{
+       iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+       int ret;
+
+       /* enable clock */
+       ret = clk_enable(p->clk);
+       if (ret) {
+               dev_err(&p->pdev->dev, "cannot enable clock\n");
+               return ret;
+       }
+
+       /* configure channel, periodic mode and maximum timeout */
+       p->rate = clk_get_rate(p->clk);
+
+       /* reset the counter */
+       em_sti_write(p, STI_SET_H, 0x40000000);
+       em_sti_write(p, STI_SET_L, 0x00000000);
+
+       /* mask and clear pending interrupts */
+       em_sti_write(p, STI_INTENCLR, 3);
+       em_sti_write(p, STI_INTFFCLR, 3);
+
+       /* enable updates of counter registers */
+       em_sti_write(p, STI_CONTROL, 1);
+
+       return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+       /* mask interrupts */
+       em_sti_write(p, STI_INTENCLR, 3);
+
+       /* stop clock */
+       clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+       cycle_t ticks;
+       unsigned long flags;
+
+       /* the STI hardware buffers the 48-bit count, but to
+        * break it out into two 32-bit accesses the registers
+        * must be accessed in a certain order.
+        * Always read STI_COUNT_H before STI_COUNT_L.
+        */
+       raw_spin_lock_irqsave(&p->lock, flags);
+       ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+       ticks |= em_sti_read(p, STI_COUNT_L);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+
+       /* mask compare A interrupt */
+       em_sti_write(p, STI_INTENCLR, 1);
+
+       /* update compare A value */
+       em_sti_write(p, STI_COMPA_H, next >> 32);
+       em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+       /* clear compare A interrupt source */
+       em_sti_write(p, STI_INTFFCLR, 1);
+
+       /* unmask compare A interrupt */
+       em_sti_write(p, STI_INTENSET, 1);
+
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+       struct em_sti_priv *p = dev_id;
+
+       p->ced.event_handler(&p->ced);
+       return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+       unsigned long flags;
+       int used_before;
+       int ret = 0;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+       used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+       if (!used_before)
+               ret = em_sti_enable(p);
+
+       if (!ret)
+               p->active[user] = 1;
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+       unsigned long flags;
+       int used_before, used_after;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+       used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+       p->active[user] = 0;
+       used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+       if (used_before && !used_after)
+               em_sti_disable(p);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+       return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+       return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+       int ret;
+       struct em_sti_priv *p = cs_to_em_sti(cs);
+
+       ret = em_sti_start(p, USER_CLOCKSOURCE);
+       if (!ret)
+               __clocksource_updatefreq_hz(cs, p->rate);
+       return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+       em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+       em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+       struct clocksource *cs = &p->cs;
+
+       memset(cs, 0, sizeof(*cs));
+       cs->name = dev_name(&p->pdev->dev);
+       cs->rating = 200;
+       cs->read = em_sti_clocksource_read;
+       cs->enable = em_sti_clocksource_enable;
+       cs->disable = em_sti_clocksource_disable;
+       cs->suspend = em_sti_clocksource_disable;
+       cs->resume = em_sti_clocksource_resume;
+       cs->mask = CLOCKSOURCE_MASK(48);
+       cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+       dev_info(&p->pdev->dev, "used as clock source\n");
+
+       /* Register with dummy 1 Hz value, gets updated in ->enable() */
+       clocksource_register_hz(cs, 1);
+       return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+       return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+                                   struct clock_event_device *ced)
+{
+       struct em_sti_priv *p = ced_to_em_sti(ced);
+
+       /* deal with old setting first */
+       switch (ced->mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               em_sti_stop(p, USER_CLOCKEVENT);
+               break;
+       default:
+               break;
+       }
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+               em_sti_start(p, USER_CLOCKEVENT);
+               clockevents_config(&p->ced, p->rate);
+               break;
+       case CLOCK_EVT_MODE_SHUTDOWN:
+       case CLOCK_EVT_MODE_UNUSED:
+               em_sti_stop(p, USER_CLOCKEVENT);
+               break;
+       default:
+               break;
+       }
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+                                  struct clock_event_device *ced)
+{
+       struct em_sti_priv *p = ced_to_em_sti(ced);
+       cycle_t next;
+       int safe;
+
+       next = em_sti_set_next(p, em_sti_count(p) + delta);
+       safe = em_sti_count(p) < (next - 1);
+
+       return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+       struct clock_event_device *ced = &p->ced;
+
+       memset(ced, 0, sizeof(*ced));
+       ced->name = dev_name(&p->pdev->dev);
+       ced->features = CLOCK_EVT_FEAT_ONESHOT;
+       ced->rating = 200;
+       ced->cpumask = cpumask_of(0);
+       ced->set_next_event = em_sti_clock_event_next;
+       ced->set_mode = em_sti_clock_event_mode;
+
+       dev_info(&p->pdev->dev, "used for clock events\n");
+
+       /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+       clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+       struct em_sti_priv *p;
+       struct resource *res;
+       int irq, ret;
+
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (p == NULL) {
+               dev_err(&pdev->dev, "failed to allocate driver data\n");
+               ret = -ENOMEM;
+               goto err0;
+       }
+
+       p->pdev = pdev;
+       platform_set_drvdata(pdev, p);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               ret = -EINVAL;
+               goto err0;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               ret = -EINVAL;
+               goto err0;
+       }
+
+       /* map memory, let base point to the STI instance */
+       p->base = ioremap_nocache(res->start, resource_size(res));
+       if (p->base == NULL) {
+               dev_err(&pdev->dev, "failed to remap I/O memory\n");
+               ret = -ENXIO;
+               goto err0;
+       }
+
+       /* get hold of clock */
+       p->clk = clk_get(&pdev->dev, "sclk");
+       if (IS_ERR(p->clk)) {
+               dev_err(&pdev->dev, "cannot get clock\n");
+               ret = PTR_ERR(p->clk);
+               goto err1;
+       }
+
+       if (request_irq(irq, em_sti_interrupt,
+                       IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+                       dev_name(&pdev->dev), p)) {
+               dev_err(&pdev->dev, "failed to request low IRQ\n");
+               ret = -ENOENT;
+               goto err2;
+       }
+
+       raw_spin_lock_init(&p->lock);
+       em_sti_register_clockevent(p);
+       em_sti_register_clocksource(p);
+       return 0;
+
+err2:
+       clk_put(p->clk);
+err1:
+       iounmap(p->base);
+err0:
+       kfree(p);
+       return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+       return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+       { .compatible = "renesas,em-sti", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+       .probe          = em_sti_probe,
+       .remove         = __devexit_p(em_sti_remove),
+       .driver         = {
+               .name   = "em_sti",
+               .of_match_table = em_sti_dt_ids,
+       }
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
index 7bb0044..b6453d0 100644 (file)
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
        }
 
        /* need to set base address for gpc4 */
-       exonys5_gpios_1[11].base = gpio_base1 + 0x2E0;
+       exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
 
        /* need to set base address for gpx */
        chip = &exynos5_gpios_1[21];
index 3df4efa..3186522 100644 (file)
@@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 2;
                if ((rdev->pdev->device == 0x9900) ||
-                   (rdev->pdev->device == 0x9901)) {
+                   (rdev->pdev->device == 0x9901) ||
+                   (rdev->pdev->device == 0x9905) ||
+                   (rdev->pdev->device == 0x9906) ||
+                   (rdev->pdev->device == 0x9907) ||
+                   (rdev->pdev->device == 0x9908) ||
+                   (rdev->pdev->device == 0x9909) ||
+                   (rdev->pdev->device == 0x9910) ||
+                   (rdev->pdev->device == 0x9917)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9903) ||
-                          (rdev->pdev->device == 0x9904)) {
+                          (rdev->pdev->device == 0x9904) ||
+                          (rdev->pdev->device == 0x990A) ||
+                          (rdev->pdev->device == 0x9913) ||
+                          (rdev->pdev->device == 0x9918)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
-               } else if ((rdev->pdev->device == 0x9990) ||
-                          (rdev->pdev->device == 0x9991)) {
+               } else if ((rdev->pdev->device == 0x9919) ||
+                          (rdev->pdev->device == 0x9990) ||
+                          (rdev->pdev->device == 0x9991) ||
+                          (rdev->pdev->device == 0x9994) ||
+                          (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
                } else {
index 45cfcea..f30dc95 100644 (file)
@@ -2426,6 +2426,12 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -2462,12 +2468,6 @@ int r600_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               DRM_ERROR("radeon: audio resume failed\n");
-               return r;
-       }
-
        return r;
 }
 
@@ -2577,9 +2577,6 @@ int r600_init(struct radeon_device *rdev)
                rdev->accel_working = false;
        }
 
-       r = r600_audio_init(rdev);
-       if (r)
-               return r; /* TODO error handling */
        return 0;
 }
 
index 7c4fa77..7479a5c 100644 (file)
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
        int base_rate = 48000;
 
        switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
                WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
                WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
 
-               /* Some magic trigger or src sel? */
-               WREG32_P(0x5ac, 0x01, ~0x77);
+               /* Select DTO source */
+               WREG32(0x5ac, radeon_crtc->crtc_id);
        } else {
                switch (dig->dig_encoder) {
                case 0:
index 226379e..969c275 100644 (file)
@@ -348,7 +348,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
                WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
                       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
                       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
-                      HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
                       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
                       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
        }
index 85dac33..fefcca5 100644 (file)
@@ -1374,9 +1374,9 @@ struct cayman_asic {
 
 struct si_asic {
        unsigned max_shader_engines;
-       unsigned max_pipes_per_simd;
        unsigned max_tile_pipes;
-       unsigned max_simds_per_se;
+       unsigned max_cu_per_sh;
+       unsigned max_sh_per_se;
        unsigned max_backends_per_se;
        unsigned max_texture_channel_caches;
        unsigned max_gprs;
@@ -1387,7 +1387,6 @@ struct si_asic {
        unsigned sc_hiz_tile_fifo_size;
        unsigned sc_earlyz_tile_fifo_size;
 
-       unsigned num_shader_engines;
        unsigned num_tile_pipes;
        unsigned num_backends_per_se;
        unsigned backend_disable_mask_per_asic;
index 79db56e..59d4493 100644 (file)
@@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 
        mutex_lock(&vm->mutex);
        if (last_pfn > vm->last_pfn) {
-               /* grow va space 32M by 32M */
-               unsigned align = ((32 << 20) >> 12) - 1;
+               /* release mutex and lock in the right order */
+               mutex_unlock(&vm->mutex);
                radeon_mutex_lock(&rdev->cs_mutex);
-               radeon_vm_unbind_locked(rdev, vm);
+               mutex_lock(&vm->mutex);
+               /* and check again */
+               if (last_pfn > vm->last_pfn) {
+                       /* grow va space 32M by 32M */
+                       unsigned align = ((32 << 20) >> 12) - 1;
+                       radeon_vm_unbind_locked(rdev, vm);
+                       vm->last_pfn = (last_pfn + align) & ~align;
+               }
                radeon_mutex_unlock(&rdev->cs_mutex);
-               vm->last_pfn = (last_pfn + align) & ~align;
        }
        head = &vm->va;
        last_offset = 0;
@@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
        if (bo_va == NULL)
                return 0;
 
-       mutex_lock(&vm->mutex);
        radeon_mutex_lock(&rdev->cs_mutex);
+       mutex_lock(&vm->mutex);
        radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
        radeon_mutex_unlock(&rdev->cs_mutex);
        list_del(&bo_va->vm_list);
@@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
        struct radeon_bo_va *bo_va, *tmp;
        int r;
 
-       mutex_lock(&vm->mutex);
-
        radeon_mutex_lock(&rdev->cs_mutex);
+       mutex_lock(&vm->mutex);
        radeon_vm_unbind_locked(rdev, vm);
        radeon_mutex_unlock(&rdev->cs_mutex);
 
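Every radeon_gart.c hunk above enforces the same lock order, cs_mutex before vm->mutex. Because radeon_vm_bo_add() discovers the need to grow the VA space while already holding vm->mutex, it must drop that lock, take both locks in order, and then re-check last_pfn: another task may have grown the space during the unlocked window. A generic sketch of that drop, re-lock, re-check pattern (plain pthreads, hypothetical names, not the radeon code itself):

    #include <pthread.h>

    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;  /* ~ cs_mutex */
    static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;  /* ~ vm->mutex */
    static unsigned long last_pfn;

    /* called with `inner` held; returns with `inner` still held */
    static void grow_if_needed(unsigned long new_pfn)
    {
            if (new_pfn > last_pfn) {
                    /* drop, then acquire in the documented order */
                    pthread_mutex_unlock(&inner);
                    pthread_mutex_lock(&outer);
                    pthread_mutex_lock(&inner);
                    /* re-check: someone may have grown it while unlocked */
                    if (new_pfn > last_pfn)
                            last_pfn = new_pfn;   /* real growth work here */
                    pthread_mutex_unlock(&outer);
            }
    }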
index f1016a5..5c58d7d 100644 (file)
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                break;
        case RADEON_INFO_MAX_PIPES:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_pipes_per_simd;
+                       value = rdev->config.si.max_cu_per_sh;
                else if (rdev->family >= CHIP_CAYMAN)
                        value = rdev->config.cayman.max_pipes_per_simd;
                else if (rdev->family >= CHIP_CEDAR)
index 25f9eef..e95c5e6 100644 (file)
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
-               return r;
-       }
-
        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing audio\n");
+               return r;
+       }
+
        return 0;
 }
 
index 3277dde..159b6a4 100644 (file)
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
-               return r;
-       }
-
        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing audio\n");
+               return r;
+       }
+
        return 0;
 }
 
index 04ddc36..4ad0281 100644 (file)
@@ -956,6 +956,12 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -978,12 +984,6 @@ int rv770_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "radeon: audio init failed\n");
-               return r;
-       }
-
        return r;
 
 }
@@ -1092,12 +1092,6 @@ int rv770_init(struct radeon_device *rdev)
                rdev->accel_working = false;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "radeon: audio init failed\n");
-               return r;
-       }
-
        return 0;
 }
 
index 549732e..c7b61f1 100644 (file)
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                          u32 num_tile_pipes,
-                                          u32 num_backends_per_asic,
-                                          u32 *backend_disable_mask_per_asic,
-                                          u32 num_shader_engines)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 num_backends_per_se;
-       u32 cur_pipe;
-       u32 swizzle_pipe[SI_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       /* force legal values */
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_tile_pipes > rdev->config.si.max_tile_pipes)
-               num_tile_pipes = rdev->config.si.max_tile_pipes;
-       if (num_shader_engines < 1)
-               num_shader_engines = 1;
-       if (num_shader_engines > rdev->config.si.max_shader_engines)
-               num_shader_engines = rdev->config.si.max_shader_engines;
-       if (num_backends_per_asic < num_shader_engines)
-               num_backends_per_asic = num_shader_engines;
-       if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
-               num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
-       /* make sure we have the same number of backends per se */
-       num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-       /* set up the number of backends per se */
-       num_backends_per_se = num_backends_per_asic / num_shader_engines;
-       if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
-               num_backends_per_se = rdev->config.si.max_backends_per_se;
-               num_backends_per_asic = num_backends_per_se * num_shader_engines;
-       }
-
-       /* create enable mask and count for enabled backends */
-       for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-               if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends_per_asic)
-                       break;
-       }
-
-       /* force the backends mask to match the current number of backends */
-       if (enabled_backends_count != num_backends_per_asic) {
-               u32 this_backend_enabled;
-               u32 shader_engine;
-               u32 backend_per_se;
-
-               enabled_backends_mask = 0;
-               enabled_backends_count = 0;
-               *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
-               for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-                       /* calc the current se */
-                       shader_engine = i / rdev->config.si.max_backends_per_se;
-                       /* calc the backend per se */
-                       backend_per_se = i % rdev->config.si.max_backends_per_se;
-                       /* default to not enabled */
-                       this_backend_enabled = 0;
-                       if ((shader_engine < num_shader_engines) &&
-                           (backend_per_se < num_backends_per_se))
-                               this_backend_enabled = 1;
-                       if (this_backend_enabled) {
-                               enabled_backends_mask |= (1 << i);
-                               *backend_disable_mask_per_asic &= ~(1 << i);
-                               ++enabled_backends_count;
-                       }
-               }
-       }
-
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_TAHITI:
-       case CHIP_PITCAIRN:
-       case CHIP_VERDE:
-               force_no_swizzle = true;
-               break;
-       default:
-               force_no_swizzle = false;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
-                                       u32 disable_mask_per_se,
-                                       u32 max_disable_mask_per_se,
-                                       u32 num_shader_engines)
-{
-       u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-       u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-       if (num_shader_engines == 1)
-               return disable_mask_per_asic;
-       else if (num_shader_engines == 2)
-               return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-       else
-               return 0xffffffff;
-}
-
 static void si_tiling_mode_table_init(struct radeon_device *rdev)
 {
        const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
 }
 
+static void si_select_se_sh(struct radeon_device *rdev,
+                           u32 se_num, u32 sh_num)
+{
+       u32 data = INSTANCE_BROADCAST_WRITES;
+
+       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+               data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+       else if (se_num == 0xffffffff)
+               data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+       else if (sh_num == 0xffffffff)
+               data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+       else
+               data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+       WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+       u32 i, mask = 0;
+
+       for (i = 0; i < bit_width; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+       return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       if (data & 1)
+               data &= INACTIVE_CUS_MASK;
+       else
+               data = 0;
+       data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+       data >>= INACTIVE_CUS_SHIFT;
+
+       mask = si_create_bitmask(cu_per_sh);
+
+       return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+                        u32 se_num, u32 sh_per_se,
+                        u32 cu_per_sh)
+{
+       int i, j, k;
+       u32 data, mask, active_cu;
+
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       si_select_se_sh(rdev, i, j);
+                       data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+                       active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+                       mask = 1;
+                       for (k = 0; k < 16; k++) {
+                       mask = 1 << k;
+                               if (active_cu & mask) {
+                                       data &= ~mask;
+                                       WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+                                       break;
+                               }
+                       }
+               }
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+                             u32 max_rb_num, u32 se_num,
+                             u32 sh_per_se)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_RB_BACKEND_DISABLE);
+       if (data & 1)
+               data &= BACKEND_DISABLE_MASK;
+       else
+               data = 0;
+       data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+       data >>= BACKEND_DISABLE_SHIFT;
+
+       mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+       return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+                       u32 se_num, u32 sh_per_se,
+                       u32 max_rb_num)
+{
+       int i, j;
+       u32 data, mask;
+       u32 disabled_rbs = 0;
+       u32 enabled_rbs = 0;
+
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       si_select_se_sh(rdev, i, j);
+                       data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+               }
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+       mask = 1;
+       for (i = 0; i < max_rb_num; i++) {
+               if (!(disabled_rbs & mask))
+                       enabled_rbs |= mask;
+               mask <<= 1;
+       }
+
+       for (i = 0; i < se_num; i++) {
+               si_select_se_sh(rdev, i, 0xffffffff);
+               data = 0;
+               for (j = 0; j < sh_per_se; j++) {
+                       switch (enabled_rbs & 3) {
+                       case 1:
+                               data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 2:
+                               data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 3:
+                       default:
+                               data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+                               break;
+                       }
+                       enabled_rbs >>= 2;
+               }
+               WREG32(PA_SC_RASTER_CONFIG, data);
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
 static void si_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_array_config;
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
-       u32 cgts_tcc_disable;
        u32 sx_debug_1;
-       u32 gc_user_shader_array_config;
-       u32 gc_user_rb_backend_disable;
-       u32 cgts_user_tcc_disable;
        u32 hdp_host_path_cntl;
        u32 tmp;
        int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_TAHITI:
                rdev->config.si.max_shader_engines = 2;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 12;
-               rdev->config.si.max_simds_per_se = 8;
+               rdev->config.si.max_cu_per_sh = 8;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 12;
                rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x100;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_PITCAIRN:
                rdev->config.si.max_shader_engines = 2;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 8;
-               rdev->config.si.max_simds_per_se = 5;
+               rdev->config.si.max_cu_per_sh = 5;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 8;
                rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x100;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_VERDE:
        default:
                rdev->config.si.max_shader_engines = 1;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 4;
-               rdev->config.si.max_simds_per_se = 2;
+               rdev->config.si.max_cu_per_sh = 2;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 4;
                rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x40;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-       cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
-       cgts_tcc_disable = 0xffff0000;
-       for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
-               cgts_tcc_disable &= ~(1 << (16 + i));
-       gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-       gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
-       cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-       rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
        rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
-       tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
-       tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.si.backend_disable_mask_per_asic =
-               si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
-                                            rdev->config.si.num_shader_engines);
-       rdev->config.si.backend_map =
-               si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-                                               rdev->config.si.num_backends_per_se *
-                                               rdev->config.si.num_shader_engines,
-                                               &rdev->config.si.backend_disable_mask_per_asic,
-                                               rdev->config.si.num_shader_engines);
-       tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-       rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
        rdev->config.si.mem_max_burst_length_bytes = 256;
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        rdev->config.si.num_gpus = 1;
        rdev->config.si.multi_gpu_tile_size = 64;
 
-       gb_addr_config = 0;
-       switch (rdev->config.si.num_tile_pipes) {
-       case 1:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-       default:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
-       tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-       switch (rdev->config.si.num_gpus) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_GPUS(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_GPUS(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_GPUS(2);
-               break;
-       }
-       switch (rdev->config.si.multi_gpu_tile_size) {
-       case 16:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-               break;
-       case 32:
-       default:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-               break;
-       case 64:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-               break;
-       case 128:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-               break;
-       }
+       /* fix up row size */
+       gb_addr_config &= ~ROW_SIZE_MASK;
        switch (rdev->config.si.mem_row_size_in_kb) {
        case 1:
        default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
                break;
        }
 
-       tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
-       rdev->config.si.num_tile_pipes = (1 << tmp);
-       tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
-       rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
-       tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
-       rdev->config.si.num_shader_engines = tmp + 1;
-       tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
-       rdev->config.si.num_gpus = tmp + 1;
-       tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
-       rdev->config.si.multi_gpu_tile_size = 1 << tmp;
-       tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
-       rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
-       gb_backend_map =
-               si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-                                               rdev->config.si.num_backends_per_se *
-                                               rdev->config.si.num_shader_engines,
-                                               &rdev->config.si.backend_disable_mask_per_asic,
-                                               rdev->config.si.num_shader_engines);
-
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.tile_config |= (3 << 0);
                break;
        }
-       rdev->config.si.tile_config |=
-               ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+       if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+               rdev->config.si.tile_config |= 1 << 4;
+       else
+               rdev->config.si.tile_config |= 0 << 4;
        rdev->config.si.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.si.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
-       rdev->config.si.backend_map = gb_backend_map;
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       /* primary versions */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
-       WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+       si_tiling_mode_table_init(rdev);
 
-       /* user versions */
-       WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
+       si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+                   rdev->config.si.max_sh_per_se,
+                   rdev->config.si.max_backends_per_se);
 
-       WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+       si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+                    rdev->config.si.max_sh_per_se,
+                    rdev->config.si.max_cu_per_sh);
 
-       si_tiling_mode_table_init(rdev);
 
        /* set HW defaults for 3D engine */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
index 53ea2c4..db40679 100644 (file)
 #ifndef SI_H
 #define SI_H
 
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+
 #define        CG_MULT_THERMAL_STATUS                                  0x714
 #define                ASIC_MAX_TEMP(x)                                ((x) << 0)
 #define                ASIC_MAX_TEMP_MASK                              0x000001ff
 #define                SOFT_RESET_IA                                   (1 << 15)
 
 #define GRBM_GFX_INDEX                                 0x802C
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SH_INDEX(x)                             ((x) << 8)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                SH_BROADCAST_WRITES                     (1 << 29)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
 
 #define GRBM_INT_CNTL                                   0x8060
 #       define RDERR_INT_ENABLE                         (1 << 0)
 #define        VGT_TF_MEMORY_BASE                              0x89B8
 
 #define CC_GC_SHADER_ARRAY_CONFIG                      0x89bc
+#define                INACTIVE_CUS_MASK                       0xFFFF0000
+#define                INACTIVE_CUS_SHIFT                      16
 #define GC_USER_SHADER_ARRAY_CONFIG                    0x89c0
 
 #define        PA_CL_ENHANCE                                   0x8A14
 #define RLC_MC_CNTL                                       0xC344
 #define RLC_UCODE_CNTL                                    0xC348
 
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
 #define VGT_EVENT_INITIATOR                             0x28a90
 #       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
 #       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
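si_create_bitmask() in the si.c hunk above builds a run of bit_width ones by shifting; for any width below 32 that is the familiar (1u << width) - 1, and si_get_rb_disabled()/si_get_cu_enabled() use the result to keep only the bits belonging to one shader array. A tiny user-space check of the equivalence (standalone, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    static unsigned int create_bitmask(unsigned int bit_width)
    {
            unsigned int i, mask = 0;

            for (i = 0; i < bit_width; i++) {
                    mask <<= 1;
                    mask |= 1;
            }
            return mask;
    }

    int main(void)
    {
            unsigned int w;

            for (w = 0; w < 32; w++)   /* w == 32 would overflow 1u << w */
                    assert(create_bitmask(w) == (1u << w) - 1u);
            printf("mask(4) = 0x%x\n", create_bitmask(4));   /* prints 0xf */
            return 0;
    }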
index beb2491..a0edd98 100644 (file)
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
          This driver can also be built as a module.  If so, the module
          will be called i2c-mux-pca954x.
 
+config I2C_MUX_PINCTRL
+       tristate "pinctrl-based I2C multiplexer"
+       depends on PINCTRL
+       help
+         If you say yes to this option, support will be included for an I2C
+         multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+         This is useful for SoCs whose I2C module's signals can be routed to
+         different sets of pins at run-time.
+
+         This driver can also be built as a module. If so, the module will be
+         called i2c-mux-pinctrl.
+
 endmenu
index 5826249..76da869 100644 (file)
@@ -4,5 +4,6 @@
 obj-$(CONFIG_I2C_MUX_GPIO)     += i2c-mux-gpio.o
 obj-$(CONFIG_I2C_MUX_PCA9541)  += i2c-mux-pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)  += i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL)  += i2c-mux-pinctrl.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644 (file)
index 0000000..46a6697
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+       struct device *dev;
+       struct i2c_mux_pinctrl_platform_data *pdata;
+       struct pinctrl *pinctrl;
+       struct pinctrl_state **states;
+       struct pinctrl_state *state_idle;
+       struct i2c_adapter *parent;
+       struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+                                 u32 chan)
+{
+       struct i2c_mux_pinctrl *mux = data;
+
+       return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+                                   u32 chan)
+{
+       struct i2c_mux_pinctrl *mux = data;
+
+       return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+                               struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       int num_names, i, ret;
+       struct device_node *adapter_np;
+       struct i2c_adapter *adapter;
+
+       if (!np)
+               return 0;
+
+       mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+       if (!mux->pdata) {
+               dev_err(mux->dev,
+                       "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+               return -ENOMEM;
+       }
+
+       num_names = of_property_count_strings(np, "pinctrl-names");
+       if (num_names < 0) {
+               dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+                       num_names);
+               return num_names;
+       }
+
+       mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+               sizeof(*mux->pdata->pinctrl_states) * num_names,
+               GFP_KERNEL);
+       if (!mux->pdata->pinctrl_states) {
+               dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_names; i++) {
+               ret = of_property_read_string_index(np, "pinctrl-names", i,
+                       &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+               if (ret < 0) {
+                       dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+                               ret);
+                       return ret;
+               }
+               if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+                           "idle")) {
+                       if (i != num_names - 1) {
+                               dev_err(mux->dev, "idle state must be last\n");
+                               return -EINVAL;
+                       }
+                       mux->pdata->pinctrl_state_idle = "idle";
+               } else {
+                       mux->pdata->bus_count++;
+               }
+       }
+
+       adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+       if (!adapter_np) {
+               dev_err(mux->dev, "Cannot parse i2c-parent\n");
+               return -ENODEV;
+       }
+       adapter = of_find_i2c_adapter_by_node(adapter_np);
+       if (!adapter) {
+               dev_err(mux->dev, "Cannot find parent bus\n");
+               return -ENODEV;
+       }
+       mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+       put_device(&adapter->dev);
+
+       return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+                                          struct platform_device *pdev)
+{
+       return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+       struct i2c_mux_pinctrl *mux;
+       int (*deselect)(struct i2c_adapter *, void *, u32);
+       int i, ret;
+
+       mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+       if (!mux) {
+               dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       platform_set_drvdata(pdev, mux);
+
+       mux->dev = &pdev->dev;
+
+       mux->pdata = pdev->dev.platform_data;
+       if (!mux->pdata) {
+               ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+               if (ret < 0)
+                       goto err;
+       }
+       if (!mux->pdata) {
+               dev_err(&pdev->dev, "Missing platform data\n");
+               ret = -ENODEV;
+               goto err;
+       }
+
+       mux->states = devm_kzalloc(&pdev->dev,
+                                  sizeof(*mux->states) * mux->pdata->bus_count,
+                                  GFP_KERNEL);
+       if (!mux->states) {
+               dev_err(&pdev->dev, "Cannot allocate states\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mux->busses = devm_kzalloc(&pdev->dev,
+                                  sizeof(*mux->busses) * mux->pdata->bus_count,
+                                  GFP_KERNEL);
+       if (!mux->busses) {
+               dev_err(&pdev->dev, "Cannot allocate busses\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+       if (IS_ERR(mux->pinctrl)) {
+               ret = PTR_ERR(mux->pinctrl);
+               dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+               goto err;
+       }
+       for (i = 0; i < mux->pdata->bus_count; i++) {
+               mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+                                               mux->pdata->pinctrl_states[i]);
+               if (IS_ERR(mux->states[i])) {
+                       ret = PTR_ERR(mux->states[i]);
+                       dev_err(&pdev->dev,
+                               "Cannot look up pinctrl state %s: %d\n",
+                               mux->pdata->pinctrl_states[i], ret);
+                       goto err;
+               }
+       }
+       if (mux->pdata->pinctrl_state_idle) {
+               mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+                                               mux->pdata->pinctrl_state_idle);
+               if (IS_ERR(mux->state_idle)) {
+                       ret = PTR_ERR(mux->state_idle);
+                       dev_err(&pdev->dev,
+                               "Cannot look up pinctrl state %s: %d\n",
+                               mux->pdata->pinctrl_state_idle, ret);
+                       goto err;
+               }
+
+               deselect = i2c_mux_pinctrl_deselect;
+       } else {
+               deselect = NULL;
+       }
+
+       mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+       if (!mux->parent) {
+               dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+                       mux->pdata->parent_bus_num);
+               ret = -ENODEV;
+               goto err;
+       }
+
+       for (i = 0; i < mux->pdata->bus_count; i++) {
+               u32 bus = mux->pdata->base_bus_num ?
+                               (mux->pdata->base_bus_num + i) : 0;
+
+               mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+                                                    mux, bus, i,
+                                                    i2c_mux_pinctrl_select,
+                                                    deselect);
+               if (!mux->busses[i]) {
+                       ret = -ENODEV;
+                       dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+                       goto err_del_adapter;
+               }
+       }
+
+       return 0;
+
+err_del_adapter:
+       for (; i > 0; i--)
+               i2c_del_mux_adapter(mux->busses[i - 1]);
+       i2c_put_adapter(mux->parent);
+err:
+       return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+       struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < mux->pdata->bus_count; i++)
+               i2c_del_mux_adapter(mux->busses[i]);
+
+       i2c_put_adapter(mux->parent);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+       { .compatible = "i2c-mux-pinctrl", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+       .driver = {
+               .name   = "i2c-mux-pinctrl",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+       },
+       .probe  = i2c_mux_pinctrl_probe,
+       .remove = __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
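On boards without device tree, the driver above is configured through i2c_mux_pinctrl_platform_data rather than the parse_dt path. A hypothetical board-file sketch using only the fields the probe path above reads; the state names and bus numbers are made up:

    #include <linux/i2c-mux-pinctrl.h>
    #include <linux/platform_device.h>

    /* one pinctrl state name per child bus; "idle" is handled separately */
    static const char *board_i2c_mux_states[] = {
            "i2c-bus-a",            /* hypothetical names, defined by the */
            "i2c-bus-b",            /* board's pinctrl mappings */
    };

    static struct i2c_mux_pinctrl_platform_data board_i2c_mux_pdata = {
            .parent_bus_num     = 0,     /* adapter the mux hangs off */
            .base_bus_num       = 10,    /* children become i2c-10, i2c-11 */
            .bus_count          = 2,
            .pinctrl_states     = board_i2c_mux_states,
            .pinctrl_state_idle = "idle",   /* optional: enables deselect() */
    };

    static struct platform_device board_i2c_mux_device = {
            .name              = "i2c-mux-pinctrl",
            .id                = -1,
            .dev.platform_data = &board_i2c_mux_pdata,
    };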
index 7d5f56e..4267789 100644 (file)
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
 
 static u32 rtc_handler(void *context)
 {
+       struct device *dev = context;
+
+       pm_wakeup_event(dev, 0);
        acpi_clear_event(ACPI_EVENT_RTC);
        acpi_disable_event(ACPI_EVENT_RTC, 0);
        return ACPI_INTERRUPT_HANDLED;
 }
 
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
 {
-       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
        /*
         * After the RTC handler is installed, the Fixed_RTC event should
         * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
        if (acpi_disabled)
                return;
 
-       rtc_wake_setup();
+       rtc_wake_setup(dev);
        acpi_rtc_info.wake_on = rtc_wake_on;
        acpi_rtc_info.wake_off = rtc_wake_off;
 
index 4e7ef0e..d46764b 100644 (file)
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
        return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * it was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-       .put_page = zcache_frontswap_put_page,
-       .get_page = zcache_frontswap_get_page,
+       .store = zcache_frontswap_store,
+       .load = zcache_frontswap_load,
        .invalidate_page = zcache_frontswap_flush_page,
        .invalidate_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
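The frontswap_ops rename above (.put_page/.get_page to .store/.load) is mechanical and is applied identically to the other frontswap backends further down (zcache and Xen tmem); the hooks keep their signatures, and only the field names change to mirror the frontswap_store()/frontswap_load() entry points. A skeletal do-nothing backend under the renamed API (hypothetical names; the invalidate/init prototypes follow the handlers wired up in these hunks):

    #include <linux/frontswap.h>

    /* hypothetical backend that stores nothing: every put is rejected */
    static int dummy_frontswap_store(unsigned type, pgoff_t offset,
                                     struct page *page)
    {
            return -1;      /* page stays on the real swap device */
    }

    static int dummy_frontswap_load(unsigned type, pgoff_t offset,
                                    struct page *page)
    {
            return -1;      /* never present */
    }

    static void dummy_frontswap_invalidate_page(unsigned type, pgoff_t offset) { }
    static void dummy_frontswap_invalidate_area(unsigned type) { }
    static void dummy_frontswap_init(unsigned type) { }

    static struct frontswap_ops dummy_frontswap_ops = {
            .store           = dummy_frontswap_store,
            .load            = dummy_frontswap_load,
            .invalidate_page = dummy_frontswap_invalidate_page,
            .invalidate_area = dummy_frontswap_invalidate_area,
            .init            = dummy_frontswap_init,
    };
    /* a real backend would hand this to frontswap_register_ops() */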
index 2734dac..784c796 100644 (file)
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
  * Swizzling increases objects per swaptype, increasing tmem concurrency
  * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
  * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
  */
 #define SWIZ_BITS              8
 #define SWIZ_MASK              ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
        return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * it was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-       .put_page = zcache_frontswap_put_page,
-       .get_page = zcache_frontswap_get_page,
+       .store = zcache_frontswap_store,
+       .load = zcache_frontswap_load,
        .invalidate_page = zcache_frontswap_flush_page,
        .invalidate_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
index 37c6098..7e6136e 100644 (file)
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
 {
        struct sbp_tport *tport = agent->tport;
        struct sbp_tpg *tpg = tport->tpg;
-       int login_id;
+       int id;
        struct sbp_login_descriptor *login;
 
-       login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+       id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 
-       login = sbp_login_find_by_id(tpg, login_id);
+       login = sbp_login_find_by_id(tpg, id);
        if (!login) {
-               pr_warn("cannot find login: %d\n", login_id);
+               pr_warn("cannot find login: %d\n", id);
 
                req->status.status = cpu_to_be32(
                        STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
index 686dba1..9f99d04 100644 (file)
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
                ret = PTR_ERR(dev_p);
                goto fail;
        }
-
-       /* O_DIRECT too? */
-       flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
        /*
-        * If fd_buffered_io=1 has not been set explicitly (the default),
-        * use O_SYNC to force FILEIO writes to disk.
+        * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+        * of pure timestamp updates.
         */
-       if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
-               flags |= O_SYNC;
+       flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
 
        file = filp_open(dev_p, flags, 0600);
        if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
        }
 }
 
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct fd_dev *fd_dev = dev->dev_ptr;
-       loff_t start = cmd->t_task_lba *
-               dev->se_sub_dev->se_dev_attrib.block_size;
-       loff_t end = start + cmd->data_length;
-       int ret;
-
-       pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-               cmd->t_task_lba, cmd->data_length);
-
-       ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
-       if (ret != 0)
-               pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
 static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_nents, enum dma_data_direction data_direction)
 {
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                ret = fd_do_readv(cmd, sgl, sgl_nents);
        } else {
                ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+               /*
+                * Perform an implicit vfs_fsync_range() for fd_do_writev() ops
+                * for SCSI WRITEs with Forced Unit Access (FUA) set.
+                * Allow this to happen independently of the WCE=0 setting.
+                */
                if (ret > 0 &&
-                   dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA)) {
-                       /*
-                        * We might need to be a bit smarter here
-                        * and return some sense data to let the initiator
-                        * know the FUA WRITE cache sync failed..?
-                        */
-                       fd_emulate_write_fua(cmd);
-               }
+                       struct fd_dev *fd_dev = dev->dev_ptr;
+                       loff_t start = cmd->t_task_lba *
+                               dev->se_sub_dev->se_dev_attrib.block_size;
+                       loff_t end = start + cmd->data_length;
 
+                       vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+               }
        }
 
        if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
 static match_table_t tokens = {
        {Opt_fd_dev_name, "fd_dev_name=%s"},
        {Opt_fd_dev_size, "fd_dev_size=%s"},
-       {Opt_fd_buffered_io, "fd_buffered_io=%d"},
        {Opt_err, NULL}
 };
 
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
        struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
-       int ret = 0, arg, token;
+       int ret = 0, token;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
                                        " bytes\n", fd_dev->fd_dev_size);
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
-               case Opt_fd_buffered_io:
-                       match_int(args, &arg);
-                       if (arg != 1) {
-                               pr_err("bogus fd_buffered_io=%d value\n", arg);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-
-                       pr_debug("FILEIO: Using buffered I/O"
-                               " operations for struct fd_dev\n");
-
-                       fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
-                       break;
                default:
                        break;
                }
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
        ssize_t bl = 0;
 
        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
-       bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
-               fd_dev->fd_dev_name, fd_dev->fd_dev_size,
-               (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
-               "Buffered" : "Synchronous");
+       bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
+               fd_dev->fd_dev_name, fd_dev->fd_dev_size);
        return bl;
 }
 
index fbd59ef..70ce7fd 100644 (file)
@@ -14,7 +14,6 @@
 
 #define FBDF_HAS_PATH          0x01
 #define FBDF_HAS_SIZE          0x02
-#define FDBD_USE_BUFFERED_IO   0x04
 
 struct fd_dev {
        u32             fbd_flags;
index dcb7952..89f264c 100644 (file)
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
 }
 
 /* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
  * returns 0 if the page was successfully gotten from frontswap, -1 if
  * it was not present (should never happen!)
  */
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
 __setup("nofrontswap", no_frontswap);
 
 static struct frontswap_ops __initdata tmem_frontswap_ops = {
-       .put_page = tmem_frontswap_put_page,
-       .get_page = tmem_frontswap_get_page,
+       .store = tmem_frontswap_store,
+       .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
index 20350a9..6df0cbe 100644 (file)
@@ -174,6 +174,7 @@ struct smb_version_operations {
        void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
        void (*set_credits)(struct TCP_Server_Info *, const int);
        int * (*get_credits_field)(struct TCP_Server_Info *);
+       __u64 (*get_next_mid)(struct TCP_Server_Info *);
        /* data offset from read response message */
        unsigned int (*read_data_offset)(char *);
        /* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
        server->ops->set_credits(server, val);
 }
 
+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+       return server->ops->get_next_mid(server);
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
index 5ec21ec..0a6cbfe 100644 (file)
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
                                void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
                             const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
index b5ad716..5b40073 100644 (file)
@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
                return rc;
 
        buffer = (struct smb_hdr *)*request_buf;
-       buffer->Mid = GetNextMid(ses->server);
+       buffer->Mid = get_next_mid(ses->server);
        if (ses->capabilities & CAP_UNICODE)
                buffer->Flags2 |= SMBFLG2_UNICODE;
        if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
 
        cFYI(1, "secFlags 0x%x", secFlags);
 
-       pSMB->hdr.Mid = GetNextMid(server);
+       pSMB->hdr.Mid = get_next_mid(server);
        pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 
        if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
                return rc;
        }
 
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        if (ses->server->sec_mode &
                   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -4762,7 +4762,7 @@ getDFSRetry:
 
        /* server pointer checked in called function,
        but should never be null here anyway */
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
        pSMB->hdr.Tid = ses->ipc_tid;
        pSMB->hdr.Uid = ses->Suid;
        if (ses->capabilities & CAP_STATUS32)
index ccafded..78db68a 100644 (file)
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p)
                if (mid_entry != NULL) {
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
-               } else if (!server->ops->is_oplock_break(buf, server)) {
+               } else if (!server->ops->is_oplock_break ||
+                          !server->ops->is_oplock_break(buf, server)) {
                        cERROR(1, "No task to wake, unknown frame received! "
                                   "NumMids %d", atomic_read(&midCount));
                        cifs_dump_mem("Received Data is: ", buf,
                                      HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-                       server->ops->dump_detail(buf);
+                       if (server->ops->dump_detail)
+                               server->ops->dump_detail(buf);
                        cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
                        NULL /*no tid */ , 4 /*wct */ );
 
-       smb_buffer->Mid = GetNextMid(ses->server);
+       smb_buffer->Mid = get_next_mid(ses->server);
        smb_buffer->Uid = ses->Suid;
        pSMB = (TCONX_REQ *) smb_buffer;
        pSMBr = (TCONX_RSP *) smb_buffer_response;
index 253170d..513adbc 100644 (file)
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
-       unsigned int num, max_num;
+       unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
                return rc;
        }
 
-       max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-                 sizeof(LOCKING_ANDX_RANGE);
+       /*
+        * Accessing maxBuf is racy with cifs_reconnect - need to store value
+        * and check it for zero before using.
+        */
+       max_buf = tcon->ses->server->maxBuf;
+       if (!max_buf) {
+               mutex_unlock(&cinode->lock_mutex);
+               FreeXid(xid);
+               return -EINVAL;
+       }
+
+       max_num = (max_buf - sizeof(struct smb_hdr)) /
+                                               sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        unsigned int i;
-       unsigned int max_num, num;
+       unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 
        INIT_LIST_HEAD(&tmp_llist);
 
-       max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-                 sizeof(LOCKING_ANDX_RANGE);
+       /*
+        * Accessing maxBuf is racy with cifs_reconnect - need to store value
+        * and check it for zero before using.
+        */
+       max_buf = tcon->ses->server->maxBuf;
+       if (!max_buf)
+               return -EINVAL;
+
+       max_num = (max_buf - sizeof(struct smb_hdr)) /
+                                               sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                continue;
                        if (types[i] != li->type)
                                continue;
-                       if (!cinode->can_cache_brlcks) {
-                               cur->Pid = cpu_to_le16(li->pid);
-                               cur->LengthLow = cpu_to_le32((u32)li->length);
-                               cur->LengthHigh =
-                                       cpu_to_le32((u32)(li->length>>32));
-                               cur->OffsetLow = cpu_to_le32((u32)li->offset);
-                               cur->OffsetHigh =
-                                       cpu_to_le32((u32)(li->offset>>32));
-                               /*
-                                * We need to save a lock here to let us add
-                                * it again to the file's list if the unlock
-                                * range request fails on the server.
-                                */
-                               list_move(&li->llist, &tmp_llist);
-                               if (++num == max_num) {
-                                       stored_rc = cifs_lockv(xid, tcon,
-                                                              cfile->netfid,
-                                                              li->type, num,
-                                                              0, buf);
-                                       if (stored_rc) {
-                                               /*
-                                                * We failed on the unlock range
-                                                * request - add all locks from
-                                                * the tmp list to the head of
-                                                * the file's list.
-                                                */
-                                               cifs_move_llist(&tmp_llist,
-                                                               &cfile->llist);
-                                               rc = stored_rc;
-                                       } else
-                                               /*
-                                                * The unlock range request
-                                                * succeed - free the tmp list.
-                                                */
-                                               cifs_free_llist(&tmp_llist);
-                                       cur = buf;
-                                       num = 0;
-                               } else
-                                       cur++;
-                       } else {
+                       if (cinode->can_cache_brlcks) {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the file's list.
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
+                               continue;
                        }
+                       cur->Pid = cpu_to_le16(li->pid);
+                       cur->LengthLow = cpu_to_le32((u32)li->length);
+                       cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+                       cur->OffsetLow = cpu_to_le32((u32)li->offset);
+                       cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+                       /*
+                        * We need to save a lock here to let us add it again to
+                        * the file's list if the unlock range request fails on
+                        * the server.
+                        */
+                       list_move(&li->llist, &tmp_llist);
+                       if (++num == max_num) {
+                               stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
+                                                      li->type, num, 0, buf);
+                               if (stored_rc) {
+                                       /*
+                                        * We failed on the unlock range
+                                        * request - add all locks from the tmp
+                                        * list to the head of the file's list.
+                                        */
+                                       cifs_move_llist(&tmp_llist,
+                                                       &cfile->llist);
+                                       rc = stored_rc;
+                               } else
+                                       /*
+                                        * The unlock range request succeeded -
+                                        * free the tmp list.
+                                        */
+                                       cifs_free_llist(&tmp_llist);
+                               cur = buf;
+                               num = 0;
+                       } else
+                               cur++;
                }
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
index e2552d2..557506a 100644 (file)
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
        return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-       __u64 mid = 0;
-       __u16 last_mid, cur_mid;
-       bool collision;
-
-       spin_lock(&GlobalMid_Lock);
-
-       /* mid is 16 bit only for CIFS/SMB */
-       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-       /* we do not want to loop forever */
-       last_mid = cur_mid;
-       cur_mid++;
-
-       /*
-        * This nested loop looks more expensive than it is.
-        * In practice the list of pending requests is short,
-        * fewer than 50, and the mids are likely to be unique
-        * on the first pass through the loop unless some request
-        * takes longer than the 64 thousand requests before it
-        * (and it would also have to have been a request that
-        * did not time out).
-        */
-       while (cur_mid != last_mid) {
-               struct mid_q_entry *mid_entry;
-               unsigned int num_mids;
-
-               collision = false;
-               if (cur_mid == 0)
-                       cur_mid++;
-
-               num_mids = 0;
-               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-                       ++num_mids;
-                       if (mid_entry->mid == cur_mid &&
-                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-                               /* This mid is in use, try a different one */
-                               collision = true;
-                               break;
-                       }
-               }
-
-               /*
-                * if we have more than 32k mids in the list, then something
-                * is very wrong. Possibly a local user is trying to DoS the
-                * box by issuing long-running calls and SIGKILL'ing them. If
-                * we get to 2^16 mids then we're in big trouble as this
-                * function could loop forever.
-                *
-                * Go ahead and assign out the mid in this situation, but force
-                * an eventual reconnect to clean out the pending_mid_q.
-                */
-               if (num_mids > 32768)
-                       server->tcpStatus = CifsNeedReconnect;
-
-               if (!collision) {
-                       mid = (__u64)cur_mid;
-                       server->CurrentMid = mid;
-                       break;
-               }
-               cur_mid++;
-       }
-       spin_unlock(&GlobalMid_Lock);
-       return mid;
-}
-
 /* NB: MID cannot be set if treeCon not passed in, in that
    case it is the responsibility of the caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
-                       buffer->Mid = GetNextMid(treeCon->ses->server);
+                       buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
index d9d615f..6dec38f 100644 (file)
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
        return &server->credits;
 }
 
+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series of requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+       __u64 mid = 0;
+       __u16 last_mid, cur_mid;
+       bool collision;
+
+       spin_lock(&GlobalMid_Lock);
+
+       /* mid is 16 bit only for CIFS/SMB */
+       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+       /* we do not want to loop forever */
+       last_mid = cur_mid;
+       cur_mid++;
+
+       /*
+        * This nested loop looks more expensive than it is.
+        * In practice the list of pending requests is short,
+        * fewer than 50, and the mids are likely to be unique
+        * on the first pass through the loop unless some request
+        * takes longer than the 64 thousand requests before it
+        * (and it would also have to have been a request that
+        * did not time out).
+        */
+       while (cur_mid != last_mid) {
+               struct mid_q_entry *mid_entry;
+               unsigned int num_mids;
+
+               collision = false;
+               if (cur_mid == 0)
+                       cur_mid++;
+
+               num_mids = 0;
+               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+                       ++num_mids;
+                       if (mid_entry->mid == cur_mid &&
+                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+                               /* This mid is in use, try a different one */
+                               collision = true;
+                               break;
+                       }
+               }
+
+               /*
+                * if we have more than 32k mids in the list, then something
+                * is very wrong. Possibly a local user is trying to DoS the
+                * box by issuing long-running calls and SIGKILL'ing them. If
+                * we get to 2^16 mids then we're in big trouble as this
+                * function could loop forever.
+                *
+                * Go ahead and assign out the mid in this situation, but force
+                * an eventual reconnect to clean out the pending_mid_q.
+                */
+               if (num_mids > 32768)
+                       server->tcpStatus = CifsNeedReconnect;
+
+               if (!collision) {
+                       mid = (__u64)cur_mid;
+                       server->CurrentMid = mid;
+                       break;
+               }
+               cur_mid++;
+       }
+       spin_unlock(&GlobalMid_Lock);
+       return mid;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
        .add_credits = cifs_add_credits,
        .set_credits = cifs_set_credits,
        .get_credits_field = cifs_get_credits_field,
+       .get_next_mid = cifs_get_next_mid,
        .read_data_offset = cifs_read_data_offset,
        .read_data_length = cifs_read_data_length,
        .map_error = map_smb_to_linux_error,
index 1b36ffe..3097ee5 100644 (file)
@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
index 42593c5..03ff5b1 100644 (file)
@@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf,
                                     unsigned global_limit)
 {
        unsigned long t;
-       char tmp[32];
        unsigned limit = (1 << 16) - 1;
        int err;
 
-       if (*ppos || count >= sizeof(tmp) - 1)
-               return -EINVAL;
-
-       if (copy_from_user(tmp, buf, count))
+       if (*ppos)
                return -EINVAL;
 
-       tmp[count] = '\0';
-
-       err = strict_strtoul(tmp, 0, &t);
+       err = kstrtoul_from_user(buf, count, 0, &t);
        if (err)
                return err;
 
index df5ac04..334e0b1 100644 (file)
@@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
                          struct kstat *stat)
 {
+       unsigned int blkbits;
+
        stat->dev = inode->i_sb->s_dev;
        stat->ino = attr->ino;
        stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
        stat->ctime.tv_nsec = attr->ctimensec;
        stat->size = attr->size;
        stat->blocks = attr->blocks;
-       stat->blksize = (1 << inode->i_blkbits);
+
+       if (attr->blksize != 0)
+               blkbits = ilog2(attr->blksize);
+       else
+               blkbits = inode->i_sb->s_blocksize_bits;
+
+       stat->blksize = 1 << blkbits;
 }
 
 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
@@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
                if (stat) {
                        generic_fillattr(inode, stat);
                        stat->mode = fi->orig_i_mode;
+                       stat->ino = fi->orig_ino;
                }
        }
 
index 9562109..b321a68 100644 (file)
@@ -2173,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
+long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+                           loff_t length)
+{
+       struct fuse_file *ff = file->private_data;
+       struct fuse_conn *fc = ff->fc;
+       struct fuse_req *req;
+       struct fuse_fallocate_in inarg = {
+               .fh = ff->fh,
+               .offset = offset,
+               .length = length,
+               .mode = mode
+       };
+       int err;
+
+       if (fc->no_fallocate)
+               return -EOPNOTSUPP;
+
+       req = fuse_get_req(fc);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req->in.h.opcode = FUSE_FALLOCATE;
+       req->in.h.nodeid = ff->nodeid;
+       req->in.numargs = 1;
+       req->in.args[0].size = sizeof(inarg);
+       req->in.args[0].value = &inarg;
+       fuse_request_send(fc, req);
+       err = req->out.h.error;
+       if (err == -ENOSYS) {
+               fc->no_fallocate = 1;
+               err = -EOPNOTSUPP;
+       }
+       fuse_put_request(fc, req);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(fuse_file_fallocate);
+
 static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
@@ -2190,6 +2228,7 @@ static const struct file_operations fuse_file_operations = {
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
+       .fallocate      = fuse_file_fallocate,
 };
 
 static const struct file_operations fuse_direct_io_file_operations = {
@@ -2206,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
+       .fallocate      = fuse_file_fallocate,
        /* no splice_read */
 };
 
index 572cefc..771fb63 100644 (file)
@@ -82,6 +82,9 @@ struct fuse_inode {
            preserve the original mode */
        umode_t orig_i_mode;
 
+       /** 64 bit inode number */
+       u64 orig_ino;
+
        /** Version of last attribute change */
        u64 attr_version;
 
@@ -478,6 +481,9 @@ struct fuse_conn {
        /** Are BSD file locking primitives not implemented by fs? */
        unsigned no_flock:1;
 
+       /** Is fallocate not implemented by fs? */
+       unsigned no_fallocate:1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
index 42678a3..1cd6165 100644 (file)
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->nlookup = 0;
        fi->attr_version = 0;
        fi->writectr = 0;
+       fi->orig_ino = 0;
        INIT_LIST_HEAD(&fi->write_files);
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
@@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
        return 0;
 }
 
+/*
+ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
+ * so that it will fit.
+ */
+static ino_t fuse_squash_ino(u64 ino64)
+{
+       ino_t ino = (ino_t) ino64;
+       if (sizeof(ino_t) < sizeof(u64))
+               ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
+       return ino;
+}
+
 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
                                   u64 attr_valid)
 {
@@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->attr_version = ++fc->attr_version;
        fi->i_time = attr_valid;
 
-       inode->i_ino     = attr->ino;
+       inode->i_ino     = fuse_squash_ino(attr->ino);
        inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        set_nlink(inode, attr->nlink);
        inode->i_uid     = attr->uid;
@@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->orig_i_mode = inode->i_mode;
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                inode->i_mode &= ~S_ISVTX;
+
+       fi->orig_ino = attr->ino;
 }
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
index 616f41a..437195f 100644 (file)
@@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                        rcu_read_lock();
                        file = fcheck_files(files, fd);
                        if (file) {
-                               unsigned i_mode, f_mode = file->f_mode;
+                               unsigned f_mode = file->f_mode;
 
                                rcu_read_unlock();
                                put_files_struct(files);
@@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                                        inode->i_gid = GLOBAL_ROOT_GID;
                                }
 
-                               i_mode = S_IFLNK;
-                               if (f_mode & FMODE_READ)
-                                       i_mode |= S_IRUSR | S_IXUSR;
-                               if (f_mode & FMODE_WRITE)
-                                       i_mode |= S_IWUSR | S_IXUSR;
-                               inode->i_mode = i_mode;
+                               if (S_ISLNK(inode->i_mode)) {
+                                       unsigned i_mode = S_IFLNK;
+                                       if (f_mode & FMODE_READ)
+                                               i_mode |= S_IRUSR | S_IXUSR;
+                                       if (f_mode & FMODE_WRITE)
+                                               i_mode |= S_IWUSR | S_IXUSR;
+                                       inode->i_mode = i_mode;
+                               }
 
                                security_task_to_inode(task, inode);
                                put_task_struct(task);
@@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
        ei = PROC_I(inode);
        ei->fd = fd;
 
+       inode->i_mode = S_IFLNK;
        inode->i_op = &proc_pid_link_inode_operations;
        inode->i_size = 64;
        ei->op.proc_get_link = proc_fd_link;
index 58d0bda..81368ab 100644 (file)
        {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index 81e803e..acba894 100644 (file)
@@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch,
                               struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 
+extern void clockevents_config(struct clock_event_device *dev, u32 freq);
 extern void clockevents_config_and_register(struct clock_event_device *dev,
                                            u32 freq, unsigned long min_delta,
                                            unsigned long max_delta);
index e988037..51a90b7 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _LINUX_COMPACTION_H
 #define _LINUX_COMPACTION_H
 
-#include <linux/node.h>
-
 /* Return values for compact_zone() and try_to_compact_pages() */
 /* compaction didn't start as it was not possible or direct reclaim was more suitable */
 #define COMPACT_SKIPPED                0
 /* The full zone was compacted */
 #define COMPACT_COMPLETE       3
 
-/*
- * compaction supports three modes
- *
- * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
- *    MIGRATE_MOVABLE pageblocks as migration sources and targets.
- * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
- *    MIGRATE_MOVABLE pageblocks as migration sources.
- *    MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
- *    targets and convers them to MIGRATE_MOVABLE if possible
- * COMPACT_SYNC uses synchronous migration and scans all pageblocks
- */
-enum compact_mode {
-       COMPACT_ASYNC_MOVABLE,
-       COMPACT_ASYNC_UNMOVABLE,
-       COMPACT_SYNC,
-};
-
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
new file mode 100644 (file)
index 0000000..0e4e2ee
--- /dev/null
@@ -0,0 +1,127 @@
+#ifndef _LINUX_FRONTSWAP_H
+#define _LINUX_FRONTSWAP_H
+
+#include <linux/swap.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+struct frontswap_ops {
+       void (*init)(unsigned);
+       int (*store)(unsigned, pgoff_t, struct page *);
+       int (*load)(unsigned, pgoff_t, struct page *);
+       void (*invalidate_page)(unsigned, pgoff_t);
+       void (*invalidate_area)(unsigned);
+};
+
+extern bool frontswap_enabled;
+extern struct frontswap_ops
+       frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_shrink(unsigned long);
+extern unsigned long frontswap_curr_pages(void);
+extern void frontswap_writethrough(bool);
+
+extern void __frontswap_init(unsigned type);
+extern int __frontswap_store(struct page *page);
+extern int __frontswap_load(struct page *page);
+extern void __frontswap_invalidate_page(unsigned, pgoff_t);
+extern void __frontswap_invalidate_area(unsigned);
+
+#ifdef CONFIG_FRONTSWAP
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+       bool ret = false;
+
+       if (frontswap_enabled && sis->frontswap_map)
+               ret = test_bit(offset, sis->frontswap_map);
+       return ret;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+       if (frontswap_enabled && sis->frontswap_map)
+               set_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+       if (frontswap_enabled && sis->frontswap_map)
+               clear_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+                                    unsigned long *map)
+{
+       p->frontswap_map = map;
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+       return p->frontswap_map;
+}
+#else
+/* all inline routines become no-ops and all externs are ignored */
+
+#define frontswap_enabled (0)
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+       return false;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+                                    unsigned long *map)
+{
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+       return NULL;
+}
+#endif
+
+static inline int frontswap_store(struct page *page)
+{
+       int ret = -1;
+
+       if (frontswap_enabled)
+               ret = __frontswap_store(page);
+       return ret;
+}
+
+static inline int frontswap_load(struct page *page)
+{
+       int ret = -1;
+
+       if (frontswap_enabled)
+               ret = __frontswap_load(page);
+       return ret;
+}
+
+static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       if (frontswap_enabled)
+               __frontswap_invalidate_page(type, offset);
+}
+
+static inline void frontswap_invalidate_area(unsigned type)
+{
+       if (frontswap_enabled)
+               __frontswap_invalidate_area(type);
+}
+
+static inline void frontswap_init(unsigned type)
+{
+       if (frontswap_enabled)
+               __frontswap_init(type);
+}
+
+#endif /* _LINUX_FRONTSWAP_H */
index 51978ed..17fd887 100644 (file)
@@ -802,13 +802,14 @@ struct inode {
                unsigned int __i_nlink;
        };
        dev_t                   i_rdev;
+       loff_t                  i_size;
        struct timespec         i_atime;
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        unsigned short          i_bytes;
+       unsigned int            i_blkbits;
        blkcnt_t                i_blocks;
-       loff_t                  i_size;
 
 #ifdef __NEED_I_SIZE_ORDERED
        seqcount_t              i_size_seqcount;
@@ -828,9 +829,8 @@ struct inode {
                struct list_head        i_dentry;
                struct rcu_head         i_rcu;
        };
-       atomic_t                i_count;
-       unsigned int            i_blkbits;
        u64                     i_version;
+       atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
        const struct file_operations    *i_fop; /* former ->i_op->default_file_ops */
index 8f2ab8f..9303348 100644 (file)
@@ -54,6 +54,9 @@
  * 7.18
  *  - add FUSE_IOCTL_DIR flag
  *  - add FUSE_NOTIFY_DELETE
+ *
+ * 7.19
+ *  - add FUSE_FALLOCATE
  */
 
 #ifndef _LINUX_FUSE_H
@@ -85,7 +88,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 18
+#define FUSE_KERNEL_MINOR_VERSION 19
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -278,6 +281,7 @@ enum fuse_opcode {
        FUSE_POLL          = 40,
        FUSE_NOTIFY_REPLY  = 41,
        FUSE_BATCH_FORGET  = 42,
+       FUSE_FALLOCATE     = 43,
 
        /* CUSE specific operations */
        CUSE_INIT          = 4096,
@@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out {
        __u64   kh;
 };
 
+struct fuse_fallocate_in {
+       __u64   fh;
+       __u64   offset;
+       __u64   length;
+       __u32   mode;
+       __u32   padding;
+};
+
 struct fuse_in_header {
        __u32   len;
        __u32   opcode;
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
new file mode 100644 (file)
index 0000000..a65c864
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * i2c-mux-pinctrl platform data
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_I2C_MUX_PINCTRL_H
+#define _LINUX_I2C_MUX_PINCTRL_H
+
+/**
+ * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
+ * @parent_bus_num: Parent I2C bus number
+ * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
+ * @bus_count: Number of child busses. Also the number of elements in
+ *     @pinctrl_states
+ * @pinctrl_states: The names of the pinctrl states to select for each child bus
+ * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
+ *     accessed. If NULL, the most recently used pinctrl state will be left
+ *     selected.
+ */
+struct i2c_mux_pinctrl_platform_data {
+       int parent_bus_num;
+       int base_bus_num;
+       int bus_count;
+       const char **pinctrl_states;
+       const char *pinctrl_state_idle;
+};
+
+#endif
index e4baff5..9e65eff 100644 (file)
@@ -149,6 +149,7 @@ extern struct cred init_cred;
        .normal_prio    = MAX_PRIO-20,                                  \
        .policy         = SCHED_NORMAL,                                 \
        .cpus_allowed   = CPU_MASK_ALL,                                 \
+       .nr_cpus_allowed= NR_CPUS,                                      \
        .mm             = NULL,                                         \
        .active_mm      = &init_mm,                                     \
        .se             = {                                             \
@@ -157,7 +158,6 @@ extern struct cred init_cred;
        .rt             = {                                             \
                .run_list       = LIST_HEAD_INIT(tsk.rt.run_list),      \
                .time_slice     = RR_TIMESLICE,                         \
-               .nr_cpus_allowed = NR_CPUS,                             \
        },                                                              \
        .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
        INIT_PUSHABLE_TASKS(tsk)                                        \
index 0d04cd6..ffc444c 100644 (file)
@@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
                        iter->index++;
                        if (likely(*slot))
                                return slot;
-                       if (flags & RADIX_TREE_ITER_CONTIG)
+                       if (flags & RADIX_TREE_ITER_CONTIG) {
+                               /* forbid switching to the next chunk */
+                               iter->next_index = 0;
                                break;
+                       }
                }
        }
        return NULL;
index f34437e..6029d8c 100644 (file)
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -1187,7 +1188,6 @@ struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned int time_slice;
-       int nr_cpus_allowed;
 
        struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1252,6 +1252,7 @@ struct task_struct {
 #endif
 
        unsigned int policy;
+       int nr_cpus_allowed;
        cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
index b666193..c84ec68 100644 (file)
@@ -197,6 +197,10 @@ struct swap_info_struct {
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
+#ifdef CONFIG_FRONTSWAP
+       unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
+       atomic_t frontswap_pages;       /* frontswap pages in-use counter */
+#endif
 };
 
 struct swap_list_t {
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
new file mode 100644 (file)
index 0000000..e282624
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _LINUX_SWAPFILE_H
+#define _LINUX_SWAPFILE_H
+
+/*
+ * these were static in swapfile.c but frontswap.c needs them and we don't
+ * want to expose them to the dozens of source files that include swap.h
+ */
+extern spinlock_t swap_lock;
+extern struct swap_list_t swap_list;
+extern struct swap_info_struct *swap_info[];
+extern int try_to_unuse(unsigned int, bool, unsigned long);
+
+#endif /* _LINUX_SWAPFILE_H */
index 0f3527d..72fcd30 100644 (file)
@@ -896,10 +896,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                mutex_unlock(&cgroup_mutex);
 
                /*
-                * Drop the active superblock reference that we took when we
-                * created the cgroup
+                * We want to drop the active superblock reference from the
+                * cgroup creation after all the dentry refs are gone -
+                * kill_sb gets mighty unhappy otherwise.  Mark
+                * dentry->d_fsdata with cgroup_diput() to tell
+                * cgroup_d_release() to call deactivate_super().
                 */
-               deactivate_super(cgrp->root->sb);
+               dentry->d_fsdata = cgroup_diput;
 
                /*
                 * if we're getting rid of the cgroup, refcount should ensure
@@ -925,6 +928,13 @@ static int cgroup_delete(const struct dentry *d)
        return 1;
 }
 
+static void cgroup_d_release(struct dentry *dentry)
+{
+       /* did cgroup_diput() tell me to deactivate super? */
+       if (dentry->d_fsdata == cgroup_diput)
+               deactivate_super(dentry->d_sb);
+}
+
 static void remove_dir(struct dentry *d)
 {
        struct dentry *parent = dget(d->d_parent);
@@ -1532,6 +1542,7 @@ static int cgroup_get_rootdir(struct super_block *sb)
        static const struct dentry_operations cgroup_dops = {
                .d_iput = cgroup_diput,
                .d_delete = cgroup_delete,
+               .d_release = cgroup_d_release,
        };
 
        struct inode *inode =
index fc275e4..eebd6d5 100644 (file)
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
        kstat_incr_irqs_this_cpu(irq, desc);
 
        action = desc->action;
-       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
                goto out_unlock;
+       }
 
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
                goto out_unlock;
+       }
 
        handle_irq_event(desc);
 
index 8e5c56b..001fa5b 100644 (file)
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+                              const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
index ea0c6c2..8c54823 100644 (file)
@@ -142,6 +142,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                       bool force)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+       int ret;
+
+       ret = chip->irq_set_affinity(data, mask, false);
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               cpumask_copy(data->affinity, mask);
+       case IRQ_SET_MASK_OK_NOCOPY:
+               irq_set_thread_affinity(desc);
+               ret = 0;
+       }
+
+       return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = chip->irq_set_affinity(data, mask, false);
-               switch (ret) {
-               case IRQ_SET_MASK_OK:
-                       cpumask_copy(data->affinity, mask);
-               case IRQ_SET_MASK_OK_NOCOPY:
-                       irq_set_thread_affinity(desc);
-                       ret = 0;
-               }
+               ret = irq_do_set_affinity(data, mask, false);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-       struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
-       int ret, node = desc->irq_data.node;
+       int node = desc->irq_data.node;
 
        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
@@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
                if (cpumask_intersects(mask, nodemask))
                        cpumask_and(mask, mask, nodemask);
        }
-       ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-       switch (ret) {
-       case IRQ_SET_MASK_OK:
-               cpumask_copy(desc->irq_data.affinity, mask);
-       case IRQ_SET_MASK_OK_NOCOPY:
-               irq_set_thread_affinity(desc);
-       }
+       irq_do_set_affinity(&desc->irq_data, mask, false);
        return 0;
 }
 #else
index c3c8975..ca3f4aa 100644 (file)
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-                  < nr_cpu_ids)) {
-               int ret = chip->irq_set_affinity(&desc->irq_data,
-                                                desc->pending_mask, false);
-               switch (ret) {
-               case IRQ_SET_MASK_OK:
-                       cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-               case IRQ_SET_MASK_OK_NOCOPY:
-                       irq_set_thread_affinity(desc);
-               }
-       }
+       if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+               irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
        cpumask_clear(desc->pending_mask);
 }
index 8ed89a1..d2a5f4e 100644 (file)
@@ -27,7 +27,7 @@
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
 
-int panic_on_oops;
+int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...)
         */
        crash_kexec(NULL);
 
-       kmsg_dump(KMSG_DUMP_PANIC);
-
        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...)
         */
        smp_send_stop();
 
+       kmsg_dump(KMSG_DUMP_PANIC);
+
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
        bust_spinlocks(0);
index 39eb601..c46958e 100644 (file)
@@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features =
 #define SCHED_FEAT(name, enabled)      \
        #name ,
 
-static __read_mostly char *sched_feat_names[] = {
+static const char * const sched_feat_names[] = {
 #include "features.h"
-       NULL
 };
 
 #undef SCHED_FEAT
@@ -2517,25 +2516,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
        sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_NO_HZ
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we cannot use the delta approach from the regular tick since that
+ * would seriously skew the load calculation. However we'll make do for those
+ * updates happening while idle (nohz_idle_balance) or coming out of idle
+ * (tick_nohz_idle_exit).
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
 /*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
 void update_idle_cpu_load(struct rq *this_rq)
 {
-       unsigned long curr_jiffies = jiffies;
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
        unsigned long load = this_rq->load.weight;
        unsigned long pending_updates;
 
        /*
-        * Bloody broken means of dealing with nohz, but better than nothing..
-        * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
-        * update and see 0 difference the one time and 2 the next, even though
-        * we ticked at roughtly the same rate.
-        *
-        * Hence we only use this from nohz_idle_balance() and skip this
-        * nonsense when called from the scheduler_tick() since that's
-        * guaranteed a stable rate.
+        * bail if there's load or we're actually up-to-date.
         */
        if (load || curr_jiffies == this_rq->last_load_update_tick)
                return;
@@ -2546,13 +2552,39 @@ void update_idle_cpu_load(struct rq *this_rq)
        __update_cpu_load(this_rq, load, pending_updates);
 }
 
+/*
+ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ */
+void update_cpu_load_nohz(void)
+{
+       struct rq *this_rq = this_rq();
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long pending_updates;
+
+       if (curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       raw_spin_lock(&this_rq->lock);
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       if (pending_updates) {
+               this_rq->last_load_update_tick = curr_jiffies;
+               /*
+                * We were idle, this means load 0, the current load might be
+                * !0 due to remote wakeups and the sort.
+                */
+               __update_cpu_load(this_rq, 0, pending_updates);
+       }
+       raw_spin_unlock(&this_rq->lock);
+}
+#endif /* CONFIG_NO_HZ */
+
 /*
  * Called from scheduler_tick()
  */
 static void update_cpu_load_active(struct rq *this_rq)
 {
        /*
-        * See the mess in update_idle_cpu_load().
+        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
         */
        this_rq->last_load_update_tick = jiffies;
        __update_cpu_load(this_rq, this_rq->load.weight, 1);
@@ -4982,7 +5014,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                p->sched_class->set_cpus_allowed(p, new_mask);
 
        cpumask_copy(&p->cpus_allowed, new_mask);
-       p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+       p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -5997,11 +6029,14 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
                cpumask_or(covered, covered, sg_span);
 
-               sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+               sg->sgp = *per_cpu_ptr(sdd->sgp, i);
                atomic_inc(&sg->sgp->ref);
 
-               if (cpumask_test_cpu(cpu, sg_span))
+               if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
+                              cpumask_first(sg_span) == cpu) {
+                       WARN_ON_ONCE(!cpumask_test_cpu(cpu, sg_span));
                        groups = sg;
+               }
 
                if (!first)
                        first = sg;
@@ -6403,7 +6438,7 @@ static void sched_init_numa(void)
                        return;
 
                for (j = 0; j < nr_node_ids; j++) {
-                       struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
+                       struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
                        if (!mask)
                                return;
 
@@ -6691,7 +6726,6 @@ static int init_sched_domains(const struct cpumask *cpu_map)
        if (!doms_cur)
                doms_cur = &fallback_doms;
        cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
-       dattr_cur = NULL;
        err = build_sched_domains(doms_cur[0], NULL);
        register_sched_domain_sysctl();
 
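The catch-up logic above folds every missed tick into the load average at once. As a rough illustration (not the kernel's actual math, which keeps several averages with precomputed per-index degrade factors in __update_cpu_load()), a toy model of decaying an idle CPU's load over pending_updates missed ticks:

    /*
     * Toy model, plain C: halve the old contribution once per missed tick,
     * since an idle CPU contributes a new per-tick sample of 0.
     */
    static unsigned long fold_idle_ticks(unsigned long load,
                                         unsigned long pending_updates)
    {
            while (pending_updates--)
                    load /= 2;
            return load;
    }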
index 940e6d1..b2a2d23 100644 (file)
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
        int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                return prev_cpu;
 
        if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       u64 total, available;
+       u64 total, available, age_stamp, avg;
 
-       total = sched_avg_period() + (rq->clock - rq->age_stamp);
+       /*
+        * Since we're reading these variables without serialization, make
+        * sure we read them once before doing sanity checks on them.
+        */
+       age_stamp = ACCESS_ONCE(rq->age_stamp);
+       avg = ACCESS_ONCE(rq->rt_avg);
+
+       total = sched_avg_period() + (rq->clock - age_stamp);
 
-       if (unlikely(total < rq->rt_avg)) {
+       if (unlikely(total < avg)) {
                /* Ensures that power won't end up being negative */
                available = 0;
        } else {
-               available = total - rq->rt_avg;
+               available = total - avg;
        }
 
        if (unlikely((s64)total < SCHED_POWER_SCALE))
@@ -3574,11 +3581,26 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
        power = 0;
 
-       group = child->groups;
-       do {
-               power += group->sgp->power;
-               group = group->next;
-       } while (group != child->groups);
+       if (child->flags & SD_OVERLAP) {
+               /*
+                * SD_OVERLAP domains cannot assume that child groups
+                * span the current group.
+                */
+
+               for_each_cpu(cpu, sched_group_cpus(sdg))
+                       power += power_of(cpu);
+       } else  {
+               /*
+                * !SD_OVERLAP domains can assume that child groups
+                * span the current group.
+                */ 
+
+               group = child->groups;
+               do {
+                       power += group->sgp->power;
+                       group = group->next;
+               } while (group != child->groups);
+       }
 
        sdg->sgp->power = power;
 }
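The scale_rt_power() hunk above is an instance of a general lockless-snapshot pattern: read each unserialized variable exactly once, then clamp rather than trust that the pair is still consistent. Condensed into one hypothetical helper (a sketch only, restating the hunk):

    static u64 available_power(struct rq *rq)
    {
            u64 age_stamp = ACCESS_ONCE(rq->age_stamp);
            u64 avg = ACCESS_ONCE(rq->rt_avg);
            u64 total = sched_avg_period() + (rq->clock - age_stamp);

            /* clamp: 'avg' may have advanced past our 'total' snapshot */
            return total < avg ? 0 : total - avg;
    }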
index c5565c3..2a4e8df 100644 (file)
@@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;
 
+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total++;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;
 
        update_rt_migration(rt_rq);
@@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;
 
+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total--;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;
 
        update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-       if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+       if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 
        inc_nr_running(rq);
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
        cpu = task_cpu(p);
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                goto out;
 
        /* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
-           (curr->rt.nr_cpus_allowed < 2 ||
+           (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
-           (p->rt.nr_cpus_allowed > 1)) {
+           (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);
 
                if (target != -1)
@@ -1276,10 +1282,10 @@ out:
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-       if (rq->curr->rt.nr_cpus_allowed == 1)
+       if (rq->curr->nr_cpus_allowed == 1)
                return;
 
-       if (p->rt.nr_cpus_allowed != 1
+       if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;
 
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
-       if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+       if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
 
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-           (p->rt.nr_cpus_allowed > 1))
+           (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
 }
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task)
        if (unlikely(!lowest_mask))
                return -1;
 
-       if (task->rt.nr_cpus_allowed == 1)
+       if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
-       BUG_ON(p->rt.nr_cpus_allowed <= 1);
+       BUG_ON(p->nr_cpus_allowed <= 1);
 
        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
-           p->rt.nr_cpus_allowed > 1 &&
+           p->nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
-           (rq->curr->rt.nr_cpus_allowed < 2 ||
+           (rq->curr->nr_cpus_allowed < 2 ||
             rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
 }
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
         * Only update if the process changes its state from whether it
         * can migrate or not.
         */
-       if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+       if ((p->nr_cpus_allowed > 1) == (weight > 1))
                return;
 
        rq = task_rq(p);
@@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
+       struct sched_rt_entity *rt_se = &p->rt;
+
        update_curr_rt(rq);
 
        watchdog(rq, p);
@@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        p->rt.time_slice = RR_TIMESLICE;
 
        /*
-        * Requeue to the end of queue if we are not the only element
-        * on the queue:
+        * Requeue to the end of queue if we (and all of our ancestors) are not
+        * the only element on the queue
         */
-       if (p->rt.run_list.prev != p->rt.run_list.next) {
-               requeue_task_rt(rq, p, 0);
-               set_tsk_need_resched(p);
+       for_each_sched_rt_entity(rt_se) {
+               if (rt_se->run_list.prev != rt_se->run_list.next) {
+                       requeue_task_rt(rq, p, 0);
+                       set_tsk_need_resched(p);
+                       return;
+               }
        }
 }
 
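The task_tick_rt() change matters under group scheduling: a task can be alone on its own runqueue while a parent entity still shares a queue with sibling groups, so round-robin requeueing has to consider every level. A hypothetical predicate capturing the walk (needs_rr_requeue() is not a real kernel function):

    static bool needs_rr_requeue(struct sched_rt_entity *rt_se)
    {
            /* walk task entity -> group entity -> ... up to the root */
            for_each_sched_rt_entity(rt_se) {
                    /* a list with more than one element means we have company */
                    if (rt_se->run_list.prev != rt_se->run_list.next)
                            return true;
            }
            return false;
    }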
index e1a797e..98f60c5 100644 (file)
@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void)
        per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
+/**
+ * idle_init - Initialize the idle thread for a cpu
+ * @cpu:       The cpu for which the idle thread should be initialized
+ *
+ * Creates the thread if it does not exist.
+ */
 static inline void idle_init(unsigned int cpu)
 {
        struct task_struct *tsk = per_cpu(idle_threads, cpu);
@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu)
 }
 
 /**
- * idle_thread_init - Initialize the idle thread for a cpu
- * @cpu:       The cpu for which the idle thread should be initialized
- *
- * Creates the thread if it does not exist.
+ * idle_threads_init - Initialize idle threads for all cpus
  */
 void __init idle_threads_init(void)
 {
-       unsigned int cpu;
+       unsigned int cpu, boot_cpu;
+
+       boot_cpu = smp_processor_id();
 
        for_each_possible_cpu(cpu) {
-               if (cpu != smp_processor_id())
+               if (cpu != boot_cpu)
                        idle_init(cpu);
        }
 }
index 9cd928f..7e1ce01 100644 (file)
@@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev)
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
-static void clockevents_config(struct clock_event_device *dev,
-                              u32 freq)
+void clockevents_config(struct clock_event_device *dev, u32 freq)
 {
        u64 sec;
 
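Dropping the static lets clocksource drivers whose input rate is only known at runtime configure the device themselves. A hedged sketch of the consumer side (driver and function names hypothetical; the new em_sti driver in this merge is the real user):

    static void mydrv_rate_known(struct clock_event_device *ced, u32 rate)
    {
            /* recompute the min/max delta limits from the measured frequency */
            clockevents_config(ced, rate);
    }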
index 52f5ebb..8699978 100644 (file)
@@ -581,6 +581,7 @@ void tick_nohz_idle_exit(void)
        /* Update jiffies first */
        select_nohz_load_balancer(0);
        tick_do_update_jiffies64(now);
+       update_cpu_load_nohz();
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
@@ -819,6 +820,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
        return HRTIMER_RESTART;
 }
 
+static int sched_skew_tick;
+
+static int __init skew_tick(char *str)
+{
+       get_option(&str, &sched_skew_tick);
+
+       return 0;
+}
+early_param("skew_tick", skew_tick);
+
 /**
  * tick_setup_sched_timer - setup the tick emulation timer
  */
@@ -836,6 +847,14 @@ void tick_setup_sched_timer(void)
        /* Get the next period (per cpu) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
+       /* Offset the tick to avert xtime_lock contention. */
+       if (sched_skew_tick) {
+               u64 offset = ktime_to_ns(tick_period) >> 1;
+               do_div(offset, num_possible_cpus());
+               offset *= smp_processor_id();
+               hrtimer_add_expires_ns(&ts->sched_timer, offset);
+       }
+
        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
                hrtimer_start_expires(&ts->sched_timer,
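To make the offset arithmetic concrete: with HZ=250 (a 4 ms tick) and 8 possible CPUs, half a period is 2,000,000 ns, so CPU n fires n * 250,000 ns late. The same computation as a plain-C helper, for illustration only:

    static unsigned long long skew_ns(unsigned int cpu, unsigned int ncpus,
                                      unsigned long long period_ns)
    {
            /* spread all CPUs evenly across half a tick period */
            return period_ns / 2 / ncpus * cpu;
    }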
index a42d3ae..ff5bdee 100644 (file)
@@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
        default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
        default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
 
+config PANIC_ON_OOPS
+       bool "Panic on Oops" if EXPERT
+       default n
+       help
+         Say Y here to enable the kernel to panic when it oopses. This
+         has the same effect as setting oops=panic on the kernel command
+         line.
+
+         This feature is useful to ensure that the kernel does not do
+         anything erroneous after an oops which could result in data
+         corruption or other issues.
+
+         Say N if unsure.
+
+config PANIC_ON_OOPS_VALUE
+       int
+       range 0 1
+       default 0 if !PANIC_ON_OOPS
+       default 1 if PANIC_ON_OOPS
+
 config DETECT_HUNG_TASK
        bool "Detect Hung Tasks"
        depends on DEBUG_KERNEL
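For reference, kernel/panic.c (also touched in this merge) is where the resulting panic_on_oops value is consumed; a minimal sketch of that consumer, with the surrounding oops machinery elided:

    /* sketch: the oops exit path honors the config/sysctl value */
    if (panic_on_oops)
            panic("Fatal exception");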
index d7c878c..e796429 100644 (file)
@@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
         * during iterating; it can be zero only at the beginning.
         * And we cannot overflow iter->next_index in a single step,
         * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
+        *
+        * This condition is also used by radix_tree_next_slot() to stop
+        * contiguous iterating, and to forbid switching to the next chunk.
         */
        index = iter->next_index;
        if (!index && iter->index)
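In practice the iterator this comment refers to is consumed through the radix_tree_for_each_slot() family, where radix_tree_next_slot() advances within a chunk and radix_tree_next_chunk() is asked for the next one. A hedged usage sketch ('my_tree' is a hypothetical RADIX_TREE-initialized root):

    void **slot;
    struct radix_tree_iter iter;

    radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
            pr_info("populated index %lu\n", iter.index);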
index d0ec4f3..e91fbc2 100644 (file)
@@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
-                       spin_dump(lock, "lockup");
+                       spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
 #endif
index b217637..82fed4e 100644 (file)
@@ -389,3 +389,20 @@ config CLEANCACHE
          in a negligible performance hit.
 
          If unsure, say Y to enable cleancache
+
+config FRONTSWAP
+       bool "Enable frontswap to cache swap pages if tmem is present"
+       depends on SWAP
+       default n
+       help
+         Frontswap is so named because it can be thought of as the opposite
+         of a "backing" store for a swap device.  The data is stored into
+         "transcendent memory", memory that is not directly accessible or
+         addressable by the kernel and is of unknown and possibly
+         time-varying size.  When space in transcendent memory is available,
+         a significant swap I/O reduction may be achieved.  When none is
+         available, all frontswap calls are reduced to a single pointer-
+         compare-against-NULL, resulting in a negligible performance hit,
+         and swap data is stored as normal on the matching swap device.
+
+         If unsure, say Y to enable frontswap.
index a156285..2e2fbbe 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
 obj-$(CONFIG_BOUNCE)   += bounce.o
 obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_FRONTSWAP)        += frontswap.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
 obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
 obj-$(CONFIG_NUMA)     += mempolicy.o
index 4ac338a..7ea259d 100644 (file)
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (cc->mode != COMPACT_SYNC &&
-                   last_pageblock_nr != pageblock_nr &&
+               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
                lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-/*
- * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
- * converted to MIGRATE_MOVABLE type, false otherwise.
- */
-static bool rescue_unmovable_pageblock(struct page *page)
-{
-       unsigned long pfn, start_pfn, end_pfn;
-       struct page *start_page, *end_page;
-
-       pfn = page_to_pfn(page);
-       start_pfn = pfn & ~(pageblock_nr_pages - 1);
-       end_pfn = start_pfn + pageblock_nr_pages;
-
-       start_page = pfn_to_page(start_pfn);
-       end_page = pfn_to_page(end_pfn);
-
-       /* Do not deal with pageblocks that overlap zones */
-       if (page_zone(start_page) != page_zone(end_page))
-               return false;
-
-       for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
-                                                                 page++) {
-               if (!pfn_valid_within(pfn))
-                       continue;
-
-               if (PageBuddy(page)) {
-                       int order = page_order(page);
-
-                       pfn += (1 << order) - 1;
-                       page += (1 << order) - 1;
-
-                       continue;
-               } else if (page_count(page) == 0 || PageLRU(page))
-                       continue;
-
-               return false;
-       }
-
-       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-       move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
-       return true;
-}
 
-enum smt_result {
-       GOOD_AS_MIGRATION_TARGET,
-       FAIL_UNMOVABLE_TARGET,
-       FAIL_BAD_TARGET,
-};
-
-/*
- * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
- * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
- * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
- */
-static enum smt_result suitable_migration_target(struct page *page,
-                                     struct compact_control *cc)
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
 {
 
        int migratetype = get_pageblock_migratetype(page);
 
        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return FAIL_BAD_TARGET;
+               return false;
 
        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return GOOD_AS_MIGRATION_TARGET;
+               return true;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
-           migrate_async_suitable(migratetype))
-               return GOOD_AS_MIGRATION_TARGET;
-
-       if (cc->mode == COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE)
-               return FAIL_UNMOVABLE_TARGET;
-
-       if (cc->mode != COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE &&
-           rescue_unmovable_pageblock(page))
-               return GOOD_AS_MIGRATION_TARGET;
+       if (migrate_async_suitable(migratetype))
+               return true;
 
        /* Otherwise skip the block */
-       return FAIL_BAD_TARGET;
+       return false;
 }
 
 /*
@@ -477,13 +413,6 @@ static void isolate_freepages(struct zone *zone,
 
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
-       /*
-        * isolate_freepages() may be called more than once during
-        * compact_zone_order() run and we want only the most recent
-        * count.
-        */
-       cc->nr_pageblocks_skipped = 0;
-
        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
-               enum smt_result ret;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Check the block is suitable for migration */
-               ret = suitable_migration_target(page, cc);
-               if (ret != GOOD_AS_MIGRATION_TARGET) {
-                       if (ret == FAIL_UNMOVABLE_TARGET)
-                               cc->nr_pageblocks_skipped++;
+               if (!suitable_migration_target(page))
                        continue;
-               }
+
                /*
                 * Found a block suitable for isolating free pages from. Now
                 * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
-               ret = suitable_migration_target(page, cc);
-               if (ret == GOOD_AS_MIGRATION_TARGET) {
+               if (suitable_migration_target(page)) {
                        end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                        isolated = isolate_freepages_block(pfn, end_pfn,
                                                           freelist, false);
                        nr_freepages += isolated;
-               } else if (ret == FAIL_UNMOVABLE_TARGET)
-                       cc->nr_pageblocks_skipped++;
+               }
                spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
-                       (unsigned long)&cc->freepages, false,
-                       (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
-                                                     : MIGRATE_ASYNC);
+                               (unsigned long)cc, false,
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -793,8 +715,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                enum compact_mode mode,
-                                unsigned long *nr_pageblocks_skipped)
+                                bool sync)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
@@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
-               .mode = mode,
+               .sync = sync,
        };
-       unsigned long rc;
-
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
 
-       rc = compact_zone(zone, &cc);
-       *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
-
-       return rc;
+       return compact_zone(zone, &cc);
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
-       unsigned long nr_pageblocks_skipped;
-       enum compact_mode mode;
 
        /*
         * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
-retry:
-               status = compact_zone_order(zone, order, gfp_mask, mode,
-                                               &nr_pageblocks_skipped);
+               status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
-
-               if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
-                       if (nr_pageblocks_skipped) {
-                               mode = COMPACT_ASYNC_UNMOVABLE;
-                               goto retry;
-                       }
-               }
        }
 
        return rc;
@@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        if (ok && cc->order > zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
-                       else if (!ok && cc->mode == COMPACT_SYNC)
+                       else if (!ok && cc->sync)
                                defer_compaction(zone, cc->order);
                }
 
@@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
        struct compact_control cc = {
                .order = order,
-               .mode = COMPACT_ASYNC_MOVABLE,
+               .sync = false,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -929,7 +833,7 @@ static int compact_node(int nid)
 {
        struct compact_control cc = {
                .order = -1,
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
diff --git a/mm/frontswap.c b/mm/frontswap.c
new file mode 100644 (file)
index 0000000..e250255
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * Frontswap frontend
+ *
+ * This code provides the generic "frontend" layer to call a matching
+ * "backend" driver implementation of frontswap.  See
+ * Documentation/vm/frontswap.txt for more information.
+ *
+ * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
+ * Author: Dan Magenheimer
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
+
+/*
+ * frontswap_ops is set by frontswap_register_ops to contain the pointers
+ * to the frontswap "backend" implementation functions.
+ */
+static struct frontswap_ops frontswap_ops __read_mostly;
+
+/*
+ * This global enablement flag reduces overhead on systems where frontswap_ops
+ * has not been registered, so is preferred to the slower alternative: a
+ * function call that checks a non-global.
+ */
+bool frontswap_enabled __read_mostly;
+EXPORT_SYMBOL(frontswap_enabled);
+
+/*
+ * If enabled, frontswap_store will return failure even on success.  As
+ * a result, the swap subsystem will always write the page to swap, in
+ * effect converting frontswap into a writethrough cache.  In this mode,
+ * there is no direct reduction in swap writes, but a frontswap backend
+ * can unilaterally "reclaim" any pages in use with no data loss, thus
+ * providing increased control over maximum memory usage due to frontswap.
+ */
+static bool frontswap_writethrough_enabled __read_mostly;
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Counters available via /sys/kernel/debug/frontswap (if debugfs is
+ * properly configured).  These are for information only so are not protected
+ * against increment races.
+ */
+static u64 frontswap_loads;
+static u64 frontswap_succ_stores;
+static u64 frontswap_failed_stores;
+static u64 frontswap_invalidates;
+
+static inline void inc_frontswap_loads(void) {
+       frontswap_loads++;
+}
+static inline void inc_frontswap_succ_stores(void) {
+       frontswap_succ_stores++;
+}
+static inline void inc_frontswap_failed_stores(void) {
+       frontswap_failed_stores++;
+}
+static inline void inc_frontswap_invalidates(void) {
+       frontswap_invalidates++;
+}
+#else
+static inline void inc_frontswap_loads(void) { }
+static inline void inc_frontswap_succ_stores(void) { }
+static inline void inc_frontswap_failed_stores(void) { }
+static inline void inc_frontswap_invalidates(void) { }
+#endif
+
+/*
+ * Register operations for frontswap, returning the previous ops, thus
+ * allowing detection of multiple backends and possible nesting.
+ */
+struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
+{
+       struct frontswap_ops old = frontswap_ops;
+
+       frontswap_ops = *ops;
+       frontswap_enabled = true;
+       return old;
+}
+EXPORT_SYMBOL(frontswap_register_ops);
+
+/*
+ * Enable/disable frontswap writethrough (see above).
+ */
+void frontswap_writethrough(bool enable)
+{
+       frontswap_writethrough_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_writethrough);
+
+/*
+ * Called when a swap device is swapon'd.
+ */
+void __frontswap_init(unsigned type)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (sis->frontswap_map == NULL)
+               return;
+       if (frontswap_enabled)
+               (*frontswap_ops.init)(type);
+}
+EXPORT_SYMBOL(__frontswap_init);
+
+/*
+ * "Store" data from a page to frontswap and associate it with the page's
+ * swaptype and offset.  Page must be locked and in the swap cache.
+ * If frontswap already contains a page with matching swaptype and
+ * offset, the frontswap implementation may either overwrite the data and
+ * return success or invalidate the page from frontswap and return failure.
+ */
+int __frontswap_store(struct page *page)
+{
+       int ret = -1, dup = 0;
+       swp_entry_t entry = { .val = page_private(page), };
+       int type = swp_type(entry);
+       struct swap_info_struct *sis = swap_info[type];
+       pgoff_t offset = swp_offset(entry);
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset))
+               dup = 1;
+       ret = (*frontswap_ops.store)(type, offset, page);
+       if (ret == 0) {
+               frontswap_set(sis, offset);
+               inc_frontswap_succ_stores();
+               if (!dup)
+                       atomic_inc(&sis->frontswap_pages);
+       } else if (dup) {
+               /*
+                * failed dup always results in automatic invalidate of
+                * the (older) page from frontswap
+                */
+               frontswap_clear(sis, offset);
+               atomic_dec(&sis->frontswap_pages);
+               inc_frontswap_failed_stores();
+       } else
+               inc_frontswap_failed_stores();
+       if (frontswap_writethrough_enabled)
+               /* report failure so swap also writes to swap device */
+               ret = -1;
+       return ret;
+}
+EXPORT_SYMBOL(__frontswap_store);
+
+/*
+ * "Get" data from frontswap associated with swaptype and offset that were
+ * specified when the data was put to frontswap and use it to fill the
+ * specified page with data. Page must be locked and in the swap cache.
+ */
+int __frontswap_load(struct page *page)
+{
+       int ret = -1;
+       swp_entry_t entry = { .val = page_private(page), };
+       int type = swp_type(entry);
+       struct swap_info_struct *sis = swap_info[type];
+       pgoff_t offset = swp_offset(entry);
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset))
+               ret = (*frontswap_ops.load)(type, offset, page);
+       if (ret == 0)
+               inc_frontswap_loads();
+       return ret;
+}
+EXPORT_SYMBOL(__frontswap_load);
+
+/*
+ * Invalidate any data from frontswap associated with the specified swaptype
+ * and offset so that a subsequent "get" will fail.
+ */
+void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset)) {
+               (*frontswap_ops.invalidate_page)(type, offset);
+               atomic_dec(&sis->frontswap_pages);
+               frontswap_clear(sis, offset);
+               inc_frontswap_invalidates();
+       }
+}
+EXPORT_SYMBOL(__frontswap_invalidate_page);
+
+/*
+ * Invalidate all data from frontswap associated with all offsets for the
+ * specified swaptype.
+ */
+void __frontswap_invalidate_area(unsigned type)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (sis->frontswap_map == NULL)
+               return;
+       (*frontswap_ops.invalidate_area)(type);
+       atomic_set(&sis->frontswap_pages, 0);
+       memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+}
+EXPORT_SYMBOL(__frontswap_invalidate_area);
+
+/*
+ * Frontswap, like a true swap device, may unnecessarily retain pages
+ * under certain circumstances; "shrink" frontswap is essentially a
+ * "partial swapoff": it calls try_to_unuse to unuse enough frontswap
+ * pages to reduce -- subject to memory constraints -- the number of
+ * pages in frontswap to the number given in the parameter target_pages.
+ */
+void frontswap_shrink(unsigned long target_pages)
+{
+       struct swap_info_struct *si = NULL;
+       int si_frontswap_pages;
+       unsigned long total_pages = 0, total_pages_to_unuse;
+       unsigned long pages = 0, pages_to_unuse = 0;
+       int type;
+       bool locked = false;
+
+       /*
+        * we don't want to hold swap_lock while doing a very
+        * lengthy try_to_unuse, but swap_list may change,
+        * so restart the scan from swap_list.head each time
+        */
+       spin_lock(&swap_lock);
+       locked = true;
+       total_pages = 0;
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               total_pages += atomic_read(&si->frontswap_pages);
+       }
+       if (total_pages <= target_pages)
+               goto out;
+       total_pages_to_unuse = total_pages - target_pages;
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               si_frontswap_pages = atomic_read(&si->frontswap_pages);
+               if (total_pages_to_unuse < si_frontswap_pages)
+                       pages = pages_to_unuse = total_pages_to_unuse;
+               else {
+                       pages = si_frontswap_pages;
+                       pages_to_unuse = 0; /* unuse all */
+               }
+               /* ensure there is enough RAM to fetch pages from frontswap */
+               if (security_vm_enough_memory_mm(current->mm, pages))
+                       continue;
+               vm_unacct_memory(pages);
+               break;
+       }
+       if (type < 0)
+               goto out;
+       locked = false;
+       spin_unlock(&swap_lock);
+       try_to_unuse(type, true, pages_to_unuse);
+out:
+       if (locked)
+               spin_unlock(&swap_lock);
+       return;
+}
+EXPORT_SYMBOL(frontswap_shrink);
+
+/*
+ * Count and return the number of frontswap pages across all
+ * swap devices.  This is exported so that backend drivers can
+ * determine current usage without reading debugfs.
+ */
+unsigned long frontswap_curr_pages(void)
+{
+       int type;
+       unsigned long totalpages = 0;
+       struct swap_info_struct *si = NULL;
+
+       spin_lock(&swap_lock);
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               totalpages += atomic_read(&si->frontswap_pages);
+       }
+       spin_unlock(&swap_lock);
+       return totalpages;
+}
+EXPORT_SYMBOL(frontswap_curr_pages);
+
+static int __init init_frontswap(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *root = debugfs_create_dir("frontswap", NULL);
+       if (root == NULL)
+               return -ENXIO;
+       debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
+       debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
+       debugfs_create_u64("failed_stores", S_IRUGO, root,
+                               &frontswap_failed_stores);
+       debugfs_create_u64("invalidates", S_IRUGO,
+                               root, &frontswap_invalidates);
+#endif
+       return 0;
+}
+
+module_init(init_frontswap);
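For orientation, a hedged sketch of the backend side of this API (all mybackend_* names are hypothetical; zcache, ramster and the Xen tmem driver elsewhere in this merge are the real in-tree users). A store callback may simply return nonzero to reject a page, in which case it falls through to the real swap device:

    #include <linux/module.h>
    #include <linux/frontswap.h>

    static void mybackend_init(unsigned type) { }
    static int mybackend_store(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* reject: page goes to the swap device instead */
    }
    static int mybackend_load(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* nothing stored, so nothing to load */
    }
    static void mybackend_invalidate_page(unsigned type, pgoff_t offset) { }
    static void mybackend_invalidate_area(unsigned type) { }

    static struct frontswap_ops mybackend_ops = {
            .init            = mybackend_init,
            .store           = mybackend_store,
            .load            = mybackend_load,
            .invalidate_page = mybackend_invalidate_page,
            .invalidate_area = mybackend_invalidate_area,
    };

    static int __init mybackend_setup(void)
    {
            /* frontswap_register_ops() hands back the previous ops by value */
            struct frontswap_ops old = frontswap_register_ops(&mybackend_ops);

            if (old.init != NULL)
                    pr_warn("mybackend: replacing an existing frontswap backend\n");
            return 0;
    }
    module_init(mybackend_setup);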
index 5cbb781..2ba87fb 100644 (file)
@@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page);
 /*
  * in mm/page_alloc.c
  */
-extern void set_pageblock_migratetype(struct page *page, int migratetype);
-extern int move_freepages_block(struct zone *zone, struct page *page,
-                               int migratetype);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
@@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page);
 #endif
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-#include <linux/compaction.h>
 
 /*
  * in mm/compaction.c
@@ -123,14 +119,11 @@ struct compact_control {
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
-       enum compact_mode mode;         /* Compaction mode */
+       bool sync;                      /* Synchronous migration */
 
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
-
-       /* Number of UNMOVABLE destination pageblocks skipped during scan */
-       unsigned long nr_pageblocks_skipped;
 };
 
 unsigned long
index 952123e..32a0a5e 100644 (file)
@@ -867,6 +867,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
        return memblock_search(&memblock.memory, addr) != -1;
 }
 
+/**
+ * memblock_is_region_memory - check if a region is a subset of memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) is a subset of a memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
        int idx = memblock_search(&memblock.memory, base);
@@ -879,6 +889,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
                 memblock.memory.regions[idx].size) >= end;
 }
 
+/**
+ * memblock_is_region_reserved - check if a region intersects reserved memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
        memblock_cap_size(base, &size);
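A hedged usage sketch for the two predicates documented above, e.g. validating a candidate region during early boot (the base address and size are made up):

    phys_addr_t base = 0x80000000;
    phys_addr_t size = 16UL << 20;  /* 16 MiB */

    if (memblock_is_region_memory(base, size) &&
        !memblock_is_region_reserved(base, size))
            pr_info("region is RAM and carries no reservations\n");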
index ab81d48..be26d5c 100644 (file)
@@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
-               __set_page_dirty_nobuffers(newpage);
+               if (PageSwapBacked(page))
+                       SetPageDirty(newpage);
+               else
+                       __set_page_dirty_nobuffers(newpage);
        }
 
        mlock_migrate_page(newpage, page);
index c4acfbc..d4b0c10 100644 (file)
@@ -1486,7 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 
        if (file)
                fput(file);
index 6092f33..4403009 100644 (file)
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-void set_pageblock_migratetype(struct page *page, int migratetype)
+static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
        if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
        return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page,
-                        int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+                               int migratetype)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
@@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index dc76b4d..34f0292 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/swapops.h>
 #include <linux/writeback.h>
+#include <linux/frontswap.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                unlock_page(page);
                goto out;
        }
+       if (frontswap_store(page) == 0) {
+               set_page_writeback(page);
+               unlock_page(page);
+               end_page_writeback(page);
+               goto out;
+       }
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
@@ -122,6 +129,11 @@ int swap_readpage(struct page *page)
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageUptodate(page));
+       if (frontswap_load(page) == 0) {
+               SetPageUptodate(page);
+               unlock_page(page);
+               goto out;
+       }
        bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
index 457b10b..de5bc51 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/memcontrol.h>
 #include <linux/poll.h>
 #include <linux/oom.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 static void free_swap_count_continuations(struct swap_info_struct *);
 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
 
-static DEFINE_SPINLOCK(swap_lock);
+DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 long nr_swap_pages;
 long total_swap_pages;
@@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
 static const char Unused_offset[] = "Unused swap offset entry ";
 
-static struct swap_list_t swap_list = {-1, -1};
+struct swap_list_t swap_list = {-1, -1};
 
-static struct swap_info_struct *swap_info[MAX_SWAPFILES];
+struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
@@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
                        swap_list.next = p->type;
                nr_swap_pages++;
                p->inuse_pages--;
+               frontswap_invalidate_page(p->type, offset);
                if ((p->flags & SWP_BLKDEV) &&
                                disk->fops->swap_slot_free_notify)
                        disk->fops->swap_slot_free_notify(p->bdev, offset);
@@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm,
 }
 
 /*
- * Scan swap_map from current position to next entry still in use.
+ * Scan swap_map (or frontswap_map if frontswap parameter is true)
+ * from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
  */
 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
-                                       unsigned int prev)
+                                       unsigned int prev, bool frontswap)
 {
        unsigned int max = si->max;
        unsigned int i = prev;
@@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
                        prev = 0;
                        i = 1;
                }
+               if (frontswap) {
+                       if (frontswap_test(si, i))
+                               break;
+                       else
+                               continue;
+               }
                count = si->swap_map[i];
                if (count && swap_count(count) != SWAP_MAP_BAD)
                        break;
@@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  * We completely avoid races by reading each swap page in advance,
  * and then search for the process using it.  All the necessary
  * page table adjustments can then be made atomically.
+ *
+ * if the boolean frontswap is true, only unuse pages_to_unuse pages;
+ * pages_to_unuse==0 means all pages; ignored if frontswap is false
  */
-static int try_to_unuse(unsigned int type)
+int try_to_unuse(unsigned int type, bool frontswap,
+                unsigned long pages_to_unuse)
 {
        struct swap_info_struct *si = swap_info[type];
        struct mm_struct *start_mm;
@@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type)
         * one pass through swap_map is enough, but not necessarily:
         * there are races when an instance of an entry might be missed.
         */
-       while ((i = find_next_to_unuse(si, i)) != 0) {
+       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
                if (signal_pending(current)) {
                        retval = -EINTR;
                        break;
@@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type)
                 * interactive performance.
                 */
                cond_resched();
+               if (frontswap && pages_to_unuse > 0) {
+                       if (!--pages_to_unuse)
+                               break;
+               }
        }
 
        mmput(start_mm);
@@ -1486,7 +1504,8 @@ bad_bmap:
 }
 
 static void enable_swap_info(struct swap_info_struct *p, int prio,
-                               unsigned char *swap_map)
+                               unsigned char *swap_map,
+                               unsigned long *frontswap_map)
 {
        int i, prev;
 
@@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
        else
                p->prio = --least_priority;
        p->swap_map = swap_map;
+       frontswap_map_set(p, frontswap_map);
        p->flags |= SWP_WRITEOK;
        nr_swap_pages += p->pages;
        total_swap_pages += p->pages;
@@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
                swap_list.head = swap_list.next = p->type;
        else
                swap_info[prev]->next = p->type;
+       frontswap_init(p->type);
        spin_unlock(&swap_lock);
 }
 
@@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        spin_unlock(&swap_lock);
 
        oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
-       err = try_to_unuse(type);
+       err = try_to_unuse(type, false, 0); /* force all pages to be unused */
        compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
 
        if (err) {
@@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
                 * sys_swapoff for this swap_info_struct at this point.
                 */
                /* re-insert swap space back into swap_list */
-               enable_swap_info(p, p->prio, p->swap_map);
+               enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
                goto out_dput;
        }
 
@@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        swap_map = p->swap_map;
        p->swap_map = NULL;
        p->flags = 0;
+       frontswap_invalidate_area(type);
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
        vfree(swap_map);
+       vfree(frontswap_map_get(p));
        /* Destroy swap account information */
        swap_cgroup_swapoff(type);
 
@@ -1988,6 +2011,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        sector_t span;
        unsigned long maxpages;
        unsigned char *swap_map = NULL;
+       unsigned long *frontswap_map = NULL;
        struct page *page = NULL;
        struct inode *inode = NULL;
 
@@ -2071,6 +2095,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                error = nr_extents;
                goto bad_swap;
        }
+       /* frontswap enabled? set up bit-per-page map for frontswap */
+       if (frontswap_enabled)
+               frontswap_map = vzalloc(maxpages / sizeof(long));
 
        if (p->bdev) {
                if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
@@ -2086,14 +2113,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        if (swap_flags & SWAP_FLAG_PREFER)
                prio =
                  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
-       enable_swap_info(p, prio, swap_map);
+       enable_swap_info(p, prio, swap_map, frontswap_map);
 
        printk(KERN_INFO "Adding %uk swap on %s.  "
-                       "Priority:%d extents:%d across:%lluk %s%s\n",
+                       "Priority:%d extents:%d across:%lluk %s%s%s\n",
                p->pages<<(PAGE_SHIFT-10), name, p->prio,
                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
                (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
-               (p->flags & SWP_DISCARDABLE) ? "D" : "");
+               (p->flags & SWP_DISCARDABLE) ? "D" : "",
+               (frontswap_map) ? "FS" : "");
 
        mutex_unlock(&swapon_mutex);
        atomic_inc(&proc_poll_event);
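The frontswap_map wired through enable_swap_info() above is a bit-per-page bitmap; the frontswap_test()/frontswap_set()/frontswap_clear() helpers used in mm/frontswap.c reduce to ordinary bitops against it. A condensed sketch in the spirit of include/linux/frontswap.h (a hypothetical helper, not copied verbatim):

    static inline int frontswap_page_present(struct swap_info_struct *sis,
                                             pgoff_t offset)
    {
            unsigned long *map = frontswap_map_get(sis);

            /* one bit per swap slot; no bit set means no frontswap copy */
            return map && test_bit(offset, map);
    }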
index a6a0365..5afb431 100644 (file)
@@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
         */
        hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
+                   ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;