Merge branches 'acpi-tables' and 'acpi-general'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 3 Jun 2014 21:10:06 +0000 (23:10 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 3 Jun 2014 21:10:06 +0000 (23:10 +0200)
* acpi-tables:
  ACPI: Fix conflict between customized DSDT and DSDT local copy

* acpi-general:
  ACPI: Add acpi_bus_attach_private_data() to attach data to ACPI handle
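
A minimal sketch of the interface added on the acpi-general branch, assuming
the acpi_bus_attach_private_data()/acpi_bus_get_private_data() pair named in
the subject above; the surrounding driver code (struct my_data, my_probe,
my_lookup) is purely illustrative:

#include <linux/acpi.h>
#include <linux/slab.h>

struct my_data { int state; };          /* hypothetical driver state */

static int my_probe(acpi_handle handle)
{
        struct my_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return -ENOMEM;
        /* Tie the allocation to the ACPI handle for later retrieval. */
        if (ACPI_FAILURE(acpi_bus_attach_private_data(handle, d))) {
                kfree(d);
                return -ENODEV;
        }
        return 0;
}

static struct my_data *my_lookup(acpi_handle handle)
{
        void *d = NULL;

        /* Look the pointer back up, e.g. from a notify handler. */
        if (ACPI_FAILURE(acpi_bus_get_private_data(handle, &d)))
                return NULL;
        return d;
}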

118 files changed:
Documentation/debugging-via-ohci1394.txt
Documentation/device-mapper/thin-provisioning.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-380.dtsi
arch/arm/boot/dts/armada-385.dtsi
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos5250-arndale.dts
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/common/bL_switcher.c
arch/arm/configs/exynos_defconfig
arch/arm/include/asm/trusted_foundations.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/entry-header.S
arch/arm/kernel/unwind.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-exynos/firmware.c
arch/arm/mach-imx/devices/platform-ipu-core.c
arch/arm/mach-mvebu/mvebu-soc-id.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/cclock3xxx_data.c
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mm/proc-v7m.S
arch/arm/plat-omap/dma.c
arch/arm64/include/asm/pgtable.h
arch/ia64/include/asm/acpi.h
arch/ia64/kernel/acpi.c
arch/mips/Makefile
arch/mips/include/asm/cpu-info.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/branch.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/traps.c
arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
arch/mips/mm/page.c
arch/mips/mti-malta/malta-memory.c
arch/mips/pci/pci-rc32434.c
arch/powerpc/Makefile
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/sections.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/mm/hash_utils_64.c
arch/s390/kvm/kvm-s390.c
arch/x86/include/asm/page_64_types.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpi_processor.c
drivers/acpi/bus.c
drivers/acpi/processor_driver.c
drivers/acpi/scan.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/block/virtio_blk.c
drivers/clk/clk-divider.c
drivers/clk/st/clkgen-pll.c
drivers/clk/tegra/clk-pll.c
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/timer-marco.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/intel_pstate.c
drivers/dma/dw/core.c
drivers/dma/sa11x0-dma.c
drivers/firewire/core.h
drivers/firewire/ohci.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/mouse/Kconfig
drivers/input/mouse/synaptics.c
drivers/input/serio/ambakmi.c
drivers/input/touchscreen/Kconfig
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-thin.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
fs/dcache.c
fs/splice.c
include/acpi/acpi_drivers.h
include/linux/acpi.h
include/linux/amba/bus.h
include/linux/omap-dma.h
include/uapi/linux/audit.h
kernel/cpu.c
kernel/futex.c
kernel/kexec.c
kernel/locking/rtmutex.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpudeadline.h
kernel/sched/cpupri.c
kernel/sched/cpupri.h
sound/core/pcm_dmaengine.c
sound/pci/hda/hda_intel.c

index fa0151a..5c9a567 100644 (file)
@@ -25,9 +25,11 @@ using data transfer rates in the order of 10MB/s or more.
 With most FireWire controllers, memory access is limited to the low 4 GB
 of physical address space.  This can be a problem on IA64 machines where
 memory is located mostly above that limit, but it is rarely a problem on
-more common hardware such as x86, x86-64 and PowerPC.  However, at least
-Agere/LSI FW643e and FW643e2 controllers are known to support access to
-physical addresses above 4 GB.
+more common hardware such as x86, x86-64 and PowerPC.
+
+At least LSI FW643e and FW643e2 controllers are known to support access to
+physical addresses above 4 GB, but this feature is currently not enabled by
+Linux.
 
 Together with a early initialization of the OHCI-1394 controller for debugging,
 this facility proved most useful for examining long debugs logs in the printk
@@ -101,8 +103,9 @@ Step-by-step instructions for using firescope with early OHCI initialization:
    compliant, they are based on TI PCILynx chips and require drivers for Win-
    dows operating systems.
 
-   The mentioned kernel log message contains ">4 GB phys DMA" in case of
-   OHCI-1394 controllers which support accesses above this limit.
+   The mentioned kernel log message contains the string "physUB" if the
+   controller implements a writable Physical Upper Bound register.  This is
+   required for physical DMA above 4 GB (but not utilized by Linux yet).
 
 2) Establish a working FireWire cable connection:
 
index 05a27e9..2f51735 100644 (file)
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
        If the pool runs out of data or metadata space, the pool will
        either queue or error the IO destined to the data device.  The
-       default is to queue the IO until more space is added.
+       default is to queue the IO until more space is added or the
+       'no_space_timeout' expires.  The 'no_space_timeout' dm-thin-pool
+       module parameter can be used to change this timeout -- it
+       defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 
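For reference, a module parameter such as 'no_space_timeout' above is declared
along these lines in C (a sketch only, not the exact dm-thin-pool source; the
variable name and permissions are assumptions):

#include <linux/module.h>

/* 60 s default; 0 means queue forever, matching the text above. */
static unsigned no_space_timeout_secs = 60;
module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout,
                 "Out of data space timeout in seconds (0 disables it)");
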
index a9380ba..b4f5365 100644 (file)
@@ -2126,7 +2126,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
index cc45111..8ccf31c 100644 (file)
@@ -7405,6 +7405,14 @@ F:       drivers/rpmsg/
 F:     Documentation/rpmsg.txt
 F:     include/linux/rpmsg.h
 
+RESET CONTROLLER FRAMEWORK
+M:     Philipp Zabel <p.zabel@pengutronix.de>
+S:     Maintained
+F:     drivers/reset/
+F:     Documentation/devicetree/bindings/reset/
+F:     include/linux/reset.h
+F:     include/linux/reset-controller.h
+
 RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
index cf3412d..cdaa5b6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 068031f..6d0f03c 100644 (file)
@@ -99,7 +99,7 @@
                        pcie@3,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x1800 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
index e2919f0..da80196 100644 (file)
                        pcie@3,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x1800 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
                        pcie@4,0 {
                                device_type = "pci";
                                assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-                               reg = <0x1000 0 0 0 0>;
+                               reg = <0x2000 0 0 0 0>;
                                #address-cells = <3>;
                                #size-cells = <2>;
                                #interrupt-cells = <1>;
index 366fc2c..c0e0eae 100644 (file)
                                trigger@3 {
                                        reg = <3>;
                                        trigger-name = "external";
-                                       trigger-value = <0x13>;
+                                       trigger-value = <0xd>;
                                        trigger-external;
                                };
                        };
index 9583563..8a558b7 100644 (file)
                status = "okay";
 
                ak8975@0c {
-                       compatible = "ak,ak8975";
+                       compatible = "asahi-kasei,ak8975";
                        reg = <0x0c>;
                        gpios = <&gpj0 7 0>;
                };
index 090f983..cde19c8 100644 (file)
                                        regulator-name = "VDD_IOPERI_1.8V";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
+                                       regulator-always-on;
                                        op_mode = <1>;
                                };
 
index 80a3bf4..896a2a6 100644 (file)
                        gpio-key,wakeup;
                };
        };
-
-       amba {
-               mdma1: mdma@11C10000 {
-                       /*
-                        * MDMA1 can support both secure and non-secure
-                        * AXI transactions. When this is enabled in the kernel
-                        * for boards that run in secure mode, we are getting
-                        * imprecise external aborts causing the kernel to oops.
-                        */
-                       status = "disabled";
-               };
-       };
 };
index c3a9a66..b69fbcb 100644 (file)
                reg = <0x100440C0 0x20>;
        };
 
-       mau_pd: power-domain@100440E0 {
-               compatible = "samsung,exynos4210-pd";
-               reg = <0x100440E0 0x20>;
-       };
-
-       g2d_pd: power-domain@10044100 {
-               compatible = "samsung,exynos4210-pd";
-               reg = <0x10044100 0x20>;
-       };
-
        msc_pd: power-domain@10044120 {
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044120 0x20>;
                        #dma-cells = <1>;
                        #dma-channels = <8>;
                        #dma-requests = <1>;
+                       /*
+                        * MDMA1 can support both secure and non-secure
+                        * AXI transactions. When this is enabled in the kernel
+                        * for boards that run in secure mode, we are getting
+                        * imprecise external aborts causing the kernel to oops.
+                        */
+                       status = "disabled";
                };
        };
 
        spi_0: spi@12d20000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d20000 0x100>;
-               interrupts = <0 66 0>;
+               interrupts = <0 68 0>;
                dmas = <&pdma0 5
                        &pdma0 4>;
                dma-names = "tx", "rx";
        spi_1: spi@12d30000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d30000 0x100>;
-               interrupts = <0 67 0>;
+               interrupts = <0 69 0>;
                dmas = <&pdma1 5
                        &pdma1 4>;
                dma-names = "tx", "rx";
        spi_2: spi@12d40000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x12d40000 0x100>;
-               interrupts = <0 68 0>;
+               interrupts = <0 70 0>;
                dmas = <&pdma0 7
                        &pdma0 6>;
                dma-names = "tx", "rx";
                interrupts = <0 112 0>;
                clocks = <&clock 471>;
                clock-names = "secss";
-               samsung,power-domain = <&g2d_pd>;
        };
 };
index f01c0ee..490f3dc 100644 (file)
@@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void)
 {
        int i;
 
-       for_each_cpu(i, &bL_switcher_removed_logical_cpus)
-               cpu_up(i);
+       for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
+               struct device *cpu_dev = get_cpu_device(i);
+               int ret = device_online(cpu_dev);
+               if (ret)
+                       dev_err(cpu_dev, "switcher: unable to restore CPU\n");
+       }
 }
 
 static int bL_switcher_halve_cpus(void)
@@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void)
                        continue;
                }
 
-               ret = cpu_down(i);
+               ret = device_offline(get_cpu_device(i));
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
index 4ce7b70..e07a227 100644 (file)
@@ -65,6 +65,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_S3C2410=y
 CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
index b5f7705..624e1d4 100644 (file)
@@ -54,7 +54,9 @@ static inline void register_trusted_foundations(
         */
        pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
        pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
        setup_max_cpus = 0;
+#endif
        cpu_idle_poll_ctrl(true);
 }
 
index 12c3a5d..75d9579 100644 (file)
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p)                                                  \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
+               const typeof(*(p)) __user *__tmp_p = (p);               \
                register const typeof(*(p)) __r2 asm("r2") = (x);       \
-               register const typeof(*(p)) __user *__p asm("r0") = (p);\
+               register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
index 1420725..efb208d 100644 (file)
        orrne   r5, V7M_xPSR_FRAMEPTRALIGN
        biceq   r5, V7M_xPSR_FRAMEPTRALIGN
 
+       @ ensure bit 0 is cleared in the PC, otherwise behaviour is
+       @ unpredictable
+       bic     r4, #1
+
        @ write basic exception frame
        stmdb   r2!, {r1, r3-r5}
        ldmia   sp, {r1, r3-r5}
index 3c21769..cb791ac 100644 (file)
@@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
                if (unwind_pop_register(ctrl, &vsp, reg))
                                return -URC_FAILURE;
 
-       if (insn & 0x80)
+       if (insn & 0x8)
                if (unwind_pop_register(ctrl, &vsp, 14))
                                return -URC_FAILURE;
 
index a028292..7cd6f19 100644 (file)
@@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = {
 static struct at91_adc_trigger at91_adc_triggers[] = {
        [0] = {
                .name = "timer-counter-0",
-               .value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN,
+               .value = 0x1,
        },
        [1] = {
                .name = "timer-counter-1",
-               .value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN,
+               .value = 0x3,
        },
        [2] = {
                .name = "timer-counter-2",
-               .value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN,
+               .value = 0x5,
        },
        [3] = {
                .name = "external",
-               .value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN,
+               .value = 0xd,
                .is_external = true,
        },
 };
index 932129e..aa01c42 100644 (file)
@@ -18,6 +18,8 @@
 
 #include <mach/map.h>
 
+#include <plat/cpu.h>
+
 #include "smc.h"
 
 static int exynos_do_idle(void)
@@ -28,13 +30,24 @@ static int exynos_do_idle(void)
 
 static int exynos_cpu_boot(int cpu)
 {
+       /*
+        * The second parameter of SMC_CMD_CPU1BOOT command means CPU id.
+        * But, Exynos4212 has only one secondary CPU so second parameter
+        * isn't used for informing secure firmware about CPU id.
+        */
+       if (soc_is_exynos4212())
+               cpu = 0;
+
        exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
        return 0;
 }
 
 static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 {
-       void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c + 4*cpu;
+       void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c;
+
+       if (!soc_is_exynos4212())
+               boot_reg += 4*cpu;
 
        __raw_writel(boot_addr, boot_reg);
        return 0;
index fc4dd7c..6bd7c3f 100644 (file)
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
 
        pdev = platform_device_alloc("mx3-camera", 0);
        if (!pdev)
-               goto err;
+               return ERR_PTR(-ENOMEM);
 
        pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
        if (!pdev->dev.dma_mask)
index f3d4cf5..09520e1 100644 (file)
@@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void)
        iounmap(pci_base);
 
 res_ioremap:
-       clk_disable_unprepare(clk);
+       /*
+        * If the PCIe unit is actually enabled and we have PCI
+        * support in the kernel, we intentionally do not release the
+        * reference to the clock. We want to keep it running since
+        * the bootloader does some PCIe link configuration that the
+        * kernel is for now unable to do, and gating the clock would
+        * make us loose this precious configuration.
+        */
+       if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+               clk_disable_unprepare(clk);
+               clk_put(clk);
+       }
 
 clk_err:
        of_node_put(child);
index ac82512..b6885e4 100644 (file)
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
        board_nand_data.nr_parts        = nr_parts;
        board_nand_data.devsize         = nand_type;
 
-       board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+       board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
        gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
index 8f5121b..eb8c75e 100644 (file)
@@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
        .clkdm_name     = "dpll4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+                       dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
 
 static struct clk dpll4_m5x2_ck_3630 = {
        .name           = "dpll4_m5x2_ck",
index 01fc710..2498ab0 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
+#include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
        struct idle_statedata *cx = state_ptr + index;
        u32 mpuss_can_lose_context = 0;
+       int cpu_id = smp_processor_id();
 
        /*
         * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
                                 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP and per CPU interrupt context is saved.
@@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        if (dev->cpu == 0 && mpuss_can_lose_context)
                cpu_cluster_pm_exit();
 
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
 fail:
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
        cpu_done[dev->cpu] = false;
@@ -172,6 +178,16 @@ fail:
        return index;
 }
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+       int cpu = smp_processor_id();
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 static struct cpuidle_driver omap4_idle_driver = {
        .name                           = "omap4_idle",
        .owner                          = THIS_MODULE,
@@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = {
                        /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                        .exit_latency = 328 + 440,
                        .target_residency = 960,
-                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-                                CPUIDLE_FLAG_TIMER_STOP,
+                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap_enter_idle_coupled,
                        .name = "C2",
                        .desc = "CPUx OFF, MPUSS CSWR",
@@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = {
                        /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
                        .exit_latency = 460 + 518,
                        .target_residency = 1100,
-                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-                                CPUIDLE_FLAG_TIMER_STOP,
+                       .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap_enter_idle_coupled,
                        .name = "C3",
                        .desc = "CPUx OFF, MPUSS OSWR",
@@ -231,5 +245,8 @@ int __init omap4_idle_init(void)
        if (!cpu_clkdm[0] || !cpu_clkdm[1])
                return -ENODEV;
 
+       /* Configure the broadcast timer on each cpu */
+       on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
        return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
index 8923172..e829664 100644 (file)
@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
         * current exception.
         */
 
-       .flags          = HWMOD_EXT_OPT_MAIN_CLK,
+       .flags          = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
        .main_clk       = "pad_clks_ck",
        .prcm = {
                .omap4 = {
index 0c93588..1ca37c7 100644 (file)
@@ -123,6 +123,11 @@ __v7m_setup:
        mov     pc, lr
 ENDPROC(__v7m_setup)
 
+       .align 2
+__v7m_setup_stack:
+       .space  4 * 8                           @ 8 registers
+__v7m_setup_stack_top:
+
        define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
 
        .section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
        .long   nop_cache_fns           @ proc_info_list.cache
        .size   __v7m_proc_info, . - __v7m_proc_info
 
-__v7m_setup_stack:
-       .space  4 * 8                           @ 8 registers
-__v7m_setup_stack_top:
index 5f5b975..b5608b1 100644 (file)
@@ -70,6 +70,7 @@ static u32 errata;
 
 static struct omap_dma_global_context_registers {
        u32 dma_irqenable_l0;
+       u32 dma_irqenable_l1;
        u32 dma_ocp_sysconfig;
        u32 dma_gcr;
 } omap_dma_global_context;
@@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq;
 
 /*----------------------------------------------------------------------------*/
 
+/*
+ * Note that we are currently using only IRQENABLE_L0 and L1.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now.
+ */
 void omap_dma_global_context_save(void)
 {
        omap_dma_global_context.dma_irqenable_l0 =
                p->dma_read(IRQENABLE_L0, 0);
+       omap_dma_global_context.dma_irqenable_l1 =
+               p->dma_read(IRQENABLE_L1, 0);
        omap_dma_global_context.dma_ocp_sysconfig =
                p->dma_read(OCP_SYSCONFIG, 0);
        omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
@@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void)
                OCP_SYSCONFIG, 0);
        p->dma_write(omap_dma_global_context.dma_irqenable_l0,
                IRQENABLE_L0, 0);
+       p->dma_write(omap_dma_global_context.dma_irqenable_l1,
+               IRQENABLE_L1, 0);
 
        if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
                p->dma_write(0x3 , IRQSTATUS_L0, 0);
index 90c811f..7b1c67a 100644 (file)
@@ -266,7 +266,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 
 #define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-#define set_pmd_at(mm, addr, pmdp, pmd)        set_pmd(pmdp, pmd)
+#define set_pmd_at(mm, addr, pmdp, pmd)        set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
 static inline int has_transparent_hugepage(void)
 {
index d651102..2e73dff 100644 (file)
@@ -85,6 +85,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
        ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
 
 #ifdef CONFIG_ACPI
+extern int acpi_lapic;
 #define acpi_disabled 0        /* ACPI always enabled on IA64 */
 #define acpi_noirq 0   /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
@@ -92,7 +93,6 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
-static inline void pci_acpi_crs_quirks(void) { }
 
 #ifdef CONFIG_IA64_GENERIC
 const char *acpi_get_sysname (void);
index 0d407b3..615ef81 100644 (file)
@@ -56,6 +56,7 @@
 
 #define PREFIX                 "ACPI: "
 
+int acpi_lapic;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -676,6 +677,8 @@ int __init early_acpi_boot_init(void)
        if (ret < 1)
                printk(KERN_ERR PREFIX
                       "Error parsing MADT - no LAPIC entries\n");
+       else
+               acpi_lapic = 1;
 
 #ifdef CONFIG_SMP
        if (available_cpus == 0) {
index 1a5b403..60a359c 100644 (file)
@@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
                        -Wa,--trap
 cflags-$(CONFIG_CPU_RM7000)    += $(call cc-option,-march=rm7000,-march=r5000) \
                        -Wa,--trap
-cflags-$(CONFIG_CPU_SB1)       += $(call cc-option,-march=sb1,-march=r5000) \
+cflags-$(CONFIG_CPU_SB1)       += $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \
                        -Wa,--trap
 cflags-$(CONFIG_CPU_R8000)     += -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000)    += $(call cc-option,-march=r10000,-march=r8000) \
index dc2135b..ff2707a 100644 (file)
@@ -39,14 +39,14 @@ struct cache_desc {
 #define MIPS_CACHE_PINDEX      0x00000020      /* Physically indexed cache */
 
 struct cpuinfo_mips {
-       unsigned int            udelay_val;
-       unsigned int            asid_cache;
+       unsigned long           asid_cache;
 
        /*
         * Capability and feature descriptor structure for MIPS CPU
         */
        unsigned long           options;
        unsigned long           ases;
+       unsigned int            udelay_val;
        unsigned int            processor_id;
        unsigned int            fpu_id;
        unsigned int            msa_id;
index 2692abb..5805414 100644 (file)
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                350
+#define __NR_O32_Linux_syscalls                351
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         310
+#define __NR_64_Linux_syscalls         311
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                314
+#define __NR_N32_Linux_syscalls                315
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 4d78bf4..76122ff 100644 (file)
@@ -317,7 +317,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       if (insn.i_format.rt == beql_op)
+                       if (insn.i_format.opcode == beql_op)
                                ret = BRANCH_LIKELY_TAKEN;
                } else
                        epc += 8;
@@ -329,7 +329,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       if (insn.i_format.rt == bnel_op)
+                       if (insn.i_format.opcode == bnel_op)
                                ret = BRANCH_LIKELY_TAKEN;
                } else
                        epc += 8;
@@ -341,7 +341,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] <= 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       if (insn.i_format.rt == bnel_op)
+                       if (insn.i_format.opcode == blezl_op)
                                ret = BRANCH_LIKELY_TAKEN;
                } else
                        epc += 8;
@@ -353,7 +353,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] > 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       if (insn.i_format.rt == bnel_op)
+                       if (insn.i_format.opcode == bgtzl_op)
                                ret = BRANCH_LIKELY_TAKEN;
                } else
                        epc += 8;
index 71f85f4..f639ccd 100644 (file)
@@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child,
        enum pt_watch_style style;
        int i;
 
-       if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+       if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
                return -EIO;
@@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child,
 #endif
 
        __put_user(style, &addr->style);
-       __put_user(current_cpu_data.watch_reg_use_cnt,
+       __put_user(boot_cpu_data.watch_reg_use_cnt,
                   &addr->WATCH_STYLE.num_valid);
-       for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+       for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
                __put_user(child->thread.watch.mips3264.watchlo[i],
                           &addr->WATCH_STYLE.watchlo[i]);
                __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
                           &addr->WATCH_STYLE.watchhi[i]);
-               __put_user(current_cpu_data.watch_reg_masks[i],
+               __put_user(boot_cpu_data.watch_reg_masks[i],
                           &addr->WATCH_STYLE.watch_masks[i]);
        }
        for (; i < 8; i++) {
@@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child,
        unsigned long lt[NUM_WATCH_REGS];
        u16 ht[NUM_WATCH_REGS];
 
-       if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+       if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
                return -EIO;
        /* Check the values. */
-       for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+       for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
                __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 #ifdef CONFIG_32BIT
                if (lt[i] & __UA_LIMIT)
@@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child,
                        return -EINVAL;
        }
        /* Install them. */
-       for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+       for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
                if (lt[i] & 7)
                        watch_active = 1;
                child->thread.watch.mips3264.watchlo[i] = lt[i];
index 074e857..8119ac2 100644 (file)
@@ -1545,7 +1545,7 @@ asmlinkage void cache_parity_error(void)
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        if (cpu_has_mips_r2 &&
-           ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+           ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
                        reg_val & (1<<29) ? "ED " : "",
                        reg_val & (1<<28) ? "ET " : "",
@@ -1585,7 +1585,7 @@ asmlinkage void do_ftlb(void)
 
        /* For the moment, report the problem and hang. */
        if (cpu_has_mips_r2 &&
-           ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+           ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
                       read_c0_ecc());
                pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
index c639b9d..12c75db 100644 (file)
@@ -27,8 +27,7 @@
 
 #include <cs5536/cs5536_mfgpt.h>
 
-DEFINE_SPINLOCK(mfgpt_lock);
-EXPORT_SYMBOL(mfgpt_lock);
+static DEFINE_RAW_SPINLOCK(mfgpt_lock);
 
 static u32 mfgpt_base;
 
@@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter);
 static void init_mfgpt_timer(enum clock_event_mode mode,
                             struct clock_event_device *evt)
 {
-       spin_lock(&mfgpt_lock);
+       raw_spin_lock(&mfgpt_lock);
 
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
@@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode,
                /* Nothing to do here */
                break;
        }
-       spin_unlock(&mfgpt_lock);
+       raw_spin_unlock(&mfgpt_lock);
 }
 
 static struct clock_event_device mfgpt_clockevent = {
@@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
        static int old_count;
        static u32 old_jifs;
 
-       spin_lock_irqsave(&mfgpt_lock, flags);
+       raw_spin_lock_irqsave(&mfgpt_lock, flags);
        /*
         * Although our caller may have the read side of xtime_lock,
         * this is now a seqlock, and we are cheating in this routine
@@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
        old_count = count;
        old_jifs = jifs;
 
-       spin_unlock_irqrestore(&mfgpt_lock, flags);
+       raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
 
        return (cycle_t) (jifs * COMPARE) + count;
 }
index 58033c4..b611102 100644 (file)
@@ -273,7 +273,7 @@ void build_clear_page(void)
                uasm_i_ori(&buf, A2, A0, off);
 
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-               uasm_i_lui(&buf, AT, 0xa000);
+               uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                * cache_line_size : 0;
@@ -424,7 +424,7 @@ void build_copy_page(void)
                uasm_i_ori(&buf, A2, A0, off);
 
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-               uasm_i_lui(&buf, AT, 0xa000);
+               uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                cache_line_size : 0;
index 6d0f4ab..f2364e4 100644 (file)
@@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L;
 fw_memblock_t * __init fw_getmdesc(int eva)
 {
        char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr;
-       unsigned long memsize, ememsize __maybe_unused = 0;
+       unsigned long memsize = 0, ememsize __maybe_unused = 0;
        static char cmdline[COMMAND_LINE_SIZE] __initdata;
        int tmp;
 
index b128cb9..7f6ce6d 100644 (file)
@@ -53,7 +53,6 @@ static struct resource rc32434_res_pci_mem1 = {
        .start = 0x50000000,
        .end = 0x5FFFFFFF,
        .flags = IORESOURCE_MEM,
-       .parent = &rc32434_res_pci_mem1,
        .sibling = NULL,
        .child = &rc32434_res_pci_mem2
 };
index 4c0cedf..ce4c68a 100644 (file)
@@ -150,7 +150,9 @@ endif
 
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
-KBUILD_CPPFLAGS        += -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS        += -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS  += -Iarch/$(ARCH)
 KBUILD_CFLAGS  += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP            = $(CC) -E $(KBUILD_CFLAGS)
index 6586a40..cded7c1 100644 (file)
@@ -318,11 +318,16 @@ n:
        addi    reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)           \
        lis     reg,(expr)@highest;             \
        ori     reg,reg,(expr)@higher;  \
        rldicr  reg,reg,32,31;          \
-       oris    reg,reg,(expr)@h;               \
+       oris    reg,reg,(expr)@__AS_ATHIGH;     \
        ori     reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)                        \
index d0e784e..5217903 100644 (file)
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
                (unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+       extern char kvm_tmp[];
+       return start < (unsigned long)kvm_tmp &&
+               (unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+       return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
index 3ddf702..ea4dc3a 100644 (file)
@@ -361,3 +361,4 @@ SYSCALL(finit_module)
 SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
index 4494f02..9b892bb 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          357
+#define __NR_syscalls          358
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 881bf2e..2d526f7 100644 (file)
 #define __NR_kcmp              354
 #define __NR_sched_setattr     355
 #define __NR_sched_getattr     356
+#define __NR_renameat2         357
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 6a01752..dd8695f 100644 (file)
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN                0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
index 59d229a..879b3aa 100644 (file)
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
                if (!cpu_online(cpu)) {
                        printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
                               cpu);
-                       cpu_up(cpu);
+                       WARN_ON(cpu_up(cpu));
                }
        }
 }
index 94e597e..7af190a 100644 (file)
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        r = kvmppc_book3s_init_pr();
 #endif
        return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kvmppc_book3s_exit_pr();
 #endif
        kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
index 1d6c56a..8fcc363 100644 (file)
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                pte_size = psize;
                pte = lookup_linux_pte_and_update(pgdir, hva, writing,
                                                  &pte_size);
-               if (pte_present(pte)) {
+               if (pte_present(pte) && !pte_numa(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
index b031f93..07c8b5b 100644 (file)
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        mr      r3, r9
        bl      kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+       b       2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+       /* Turn on TM. */
+       mfmsr   r8
+       li      r0, 1
+       rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+       mtmsrd  r8
+
+       ld      r5, VCPU_MSR(r9)
+       rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+       beq     1f      /* TM not active in guest. */
+
+       li      r3, TM_CAUSE_KVM_RESCHED
+
+       /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+       li      r5, 0
+       mtmsrd  r5, 1
+
+       /* All GPRs are volatile at this point. */
+       TRECLAIM(R3)
+
+       /* Temporarily store r13 and r9 so we have some regs to play with */
+       SET_SCRATCH0(r13)
+       GET_PACA(r13)
+       std     r9, PACATMSCRATCH(r13)
+       ld      r9, HSTATE_KVM_VCPU(r13)
+
+       /* Get a few more GPRs free. */
+       std     r29, VCPU_GPRS_TM(29)(r9)
+       std     r30, VCPU_GPRS_TM(30)(r9)
+       std     r31, VCPU_GPRS_TM(31)(r9)
+
+       /* Save away PPR and DSCR soon so don't run with user values. */
+       mfspr   r31, SPRN_PPR
+       HMT_MEDIUM
+       mfspr   r30, SPRN_DSCR
+       ld      r29, HSTATE_DSCR(r13)
+       mtspr   SPRN_DSCR, r29
+
+       /* Save all but r9, r13 & r29-r31 */
+       reg = 0
+       .rept   29
+       .if (reg != 9) && (reg != 13)
+       std     reg, VCPU_GPRS_TM(reg)(r9)
+       .endif
+       reg = reg + 1
+       .endr
+       /* ... now save r13 */
+       GET_SCRATCH0(r4)
+       std     r4, VCPU_GPRS_TM(13)(r9)
+       /* ... and save r9 */
+       ld      r4, PACATMSCRATCH(r13)
+       std     r4, VCPU_GPRS_TM(9)(r9)
+
+       /* Reload stack pointer and TOC. */
+       ld      r1, HSTATE_HOST_R1(r13)
+       ld      r2, PACATOC(r13)
+
+       /* Set MSR RI now we have r1 and r13 back. */
+       li      r5, MSR_RI
+       mtmsrd  r5, 1
+
+       /* Save away checkpinted SPRs. */
+       std     r31, VCPU_PPR_TM(r9)
+       std     r30, VCPU_DSCR_TM(r9)
+       mflr    r5
+       mfcr    r6
+       mfctr   r7
+       mfspr   r8, SPRN_AMR
+       mfspr   r10, SPRN_TAR
+       std     r5, VCPU_LR_TM(r9)
+       stw     r6, VCPU_CR_TM(r9)
+       std     r7, VCPU_CTR_TM(r9)
+       std     r8, VCPU_AMR_TM(r9)
+       std     r10, VCPU_TAR_TM(r9)
+
+       /* Restore r12 as trap number. */
+       lwz     r12, VCPU_TRAP(r9)
+
+       /* Save FP/VSX. */
+       addi    r3, r9, VCPU_FPRS_TM
+       bl      .store_fp_state
+       addi    r3, r9, VCPU_VRS_TM
+       bl      .store_vr_state
+       mfspr   r6, SPRN_VRSAVE
+       stw     r6, VCPU_VRSAVE_TM(r9)
+1:
+       /*
+        * We need to save these SPRs after the treclaim so that the software
+        * error code is recorded correctly in the TEXASR.  Also the user may
+        * change these outside of a transaction, so they must always be
+        * context switched.
+        */
+       mfspr   r5, SPRN_TFHAR
+       mfspr   r6, SPRN_TFIAR
+       mfspr   r7, SPRN_TEXASR
+       std     r5, VCPU_TFHAR(r9)
+       std     r6, VCPU_TFIAR(r9)
+       std     r7, VCPU_TEXASR(r9)
+2:
+#endif
+
        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        cmpdi   r8, 0
index c5c052a..02f1def 100644 (file)
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
 #endif
        vfree(vcpu_book3s);
index d766d6e..06ba83b 100644 (file)
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;
 
+               /* Make kvm guest trampolines executable */
+               if (overlaps_kvm_tmp(vaddr, vaddr + step))
+                       tprot &= ~HPTE_R_N;
+
                /*
                 * If relocatable, check if it overlaps interrupt vectors that
                 * are copied down to real 0. For relocatable kernel
index b3ecb8f..9ae6664 100644 (file)
@@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
+       case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
index 8de6d9c..6782051 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
-#define THREAD_SIZE_ORDER      1
+#define THREAD_SIZE_ORDER      2
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define CURRENT_MASK (~(THREAD_SIZE - 1))
 
index 33e8c02..138ceff 100644 (file)
@@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        exec_control = vmcs12->pin_based_vm_exec_control;
        exec_control |= vmcs_config.pin_based_exec_ctrl;
-       exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+       exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
+                          PIN_BASED_POSTED_INTR);
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
 
        vmx->nested.preemption_timer_expired = false;
@@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                if (!vmx->rdtscp_enabled)
                        exec_control &= ~SECONDARY_EXEC_RDTSCP;
                /* Take the following fields only from vmcs12 */
-               exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+                                  SECONDARY_EXEC_APIC_REGISTER_VIRT);
                if (nested_cpu_has(vmcs12,
                                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                        exec_control |= vmcs12->secondary_vm_exec_control;
index b6c0bac..20316c6 100644 (file)
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+static bool backwards_tsc_observed = false;
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
                                        &ka->master_kernel_ns,
                                        &ka->master_cycle_now);
 
-       ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+       ka->use_master_clock = host_tsc_clocksource && vcpus_matched
+                               && !backwards_tsc_observed;
 
        if (ka->use_master_clock)
                atomic_set(&kvm_guest_has_master_clock, 1);
@@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
         */
        if (backwards_tsc) {
                u64 delta_cyc = max_tsc - local_tsc;
+               backwards_tsc_observed = true;
                list_for_each_entry(kvm, &vm_list, vm_list) {
                        kvm_for_each_vcpu(i, vcpu, kvm) {
                                vcpu->arch.tsc_offset_adjustment += delta_cyc;
index 37d7302..f148a05 100644 (file)
@@ -156,12 +156,13 @@ static int power_saving_thread(void *data)
 
        while (!kthread_should_stop()) {
                int cpu;
-               u64 expire_time;
+               unsigned long expire_time;
 
                try_to_freeze();
 
                /* round robin to cpus */
-               if (last_jiffies + round_robin_time * HZ < jiffies) {
+               expire_time = last_jiffies + round_robin_time * HZ;
+               if (time_before(expire_time, jiffies)) {
                        last_jiffies = jiffies;
                        round_robin_cpu(tsk_index);
                }
@@ -200,7 +201,7 @@ static int power_saving_thread(void *data)
                                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                        local_irq_enable();
 
-                       if (jiffies > expire_time) {
+                       if (time_before(expire_time, jiffies)) {
                                do_sleep = 1;
                                break;
                        }
@@ -215,8 +216,15 @@ static int power_saving_thread(void *data)
                 * borrow CPU time from this CPU and cause RT task use > 95%
                 * CPU time. To make 'avoid starvation' work, takes a nap here.
                 */
-               if (do_sleep)
+               if (unlikely(do_sleep))
                        schedule_timeout_killable(HZ * idle_pct / 100);
+
+               /* If an external event has set the need_resched flag, then
+                * we need to deal with it, or this loop will continue to
+                * spin without calling __mwait().
+                */
+               if (unlikely(need_resched()))
+                       schedule();
        }
 
        exit_round_robin(tsk_index);
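
The acpi_pad change above replaces open-coded jiffies comparisons with
time_before(), which stays correct when jiffies wraps around. The idiom in
isolation (helper name illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

/* 'deadline < jiffies' misfires once jiffies wraps; time_before()
 * compares via signed subtraction and so stays correct. */
static bool deadline_expired(unsigned long deadline)
{
        return time_before(deadline, jiffies);
}
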
index 52c81c4..1c08574 100644 (file)
@@ -268,7 +268,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
        pr->apic_id = apic_id;
 
        cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
-       if (!cpu0_initialized) {
+       if (!cpu0_initialized && !acpi_lapic) {
                cpu0_initialized = 1;
                /* Handle UP system running SMP kernel, with no LAPIC in MADT */
                if ((cpu_index == -1) && (num_online_cpus() == 1))
index 8445d57..4ce0ea1 100644 (file)
@@ -52,6 +52,12 @@ struct proc_dir_entry *acpi_root_dir;
 EXPORT_SYMBOL(acpi_root_dir);
 
 #ifdef CONFIG_X86
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+static inline int set_copy_dsdt(const struct dmi_system_id *id)
+{
+       return 0;
+}
+#else
 static int set_copy_dsdt(const struct dmi_system_id *id)
 {
        printk(KERN_NOTICE "%s detected - "
@@ -59,6 +65,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
        acpi_gbl_copy_dsdt_locally = 1;
        return 0;
 }
+#endif
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
        /*
@@ -360,16 +367,18 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 {
        struct acpi_device *adev;
        struct acpi_driver *driver;
-       acpi_status status;
        u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+       bool hotplug_event = false;
 
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
                acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
+               hotplug_event = true;
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
+               hotplug_event = true;
                break;
 
        case ACPI_NOTIFY_DEVICE_WAKE:
@@ -378,6 +387,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
+               hotplug_event = true;
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
@@ -413,16 +423,9 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
            (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
                driver->ops.notify(adev, type);
 
-       switch (type) {
-       case ACPI_NOTIFY_BUS_CHECK:
-       case ACPI_NOTIFY_DEVICE_CHECK:
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               status = acpi_hotplug_schedule(adev, type);
-               if (ACPI_SUCCESS(status))
-                       return;
-       default:
-               break;
-       }
+       if (hotplug_event && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+               return;
+
        acpi_bus_put_acpi_device(adev);
        return;
 
index 7f70f31..4fcbd67 100644 (file)
@@ -121,6 +121,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        struct acpi_processor *pr = per_cpu(processors, cpu);
        struct acpi_device *device;
 
+       /*
+        * CPU_STARTING and CPU_DYING must not sleep. Return here since
+        * acpi_bus_get_device() may sleep.
+        */
+       if (action == CPU_STARTING || action == CPU_DYING)
+               return NOTIFY_DONE;
+
        if (!pr || acpi_bus_get_device(pr->handle, &device))
                return NOTIFY_DONE;
 
index 7efe546..db5fc6f 100644 (file)
@@ -2259,12 +2259,16 @@ int __init acpi_scan_init(void)
        if (result)
                goto out;
 
-       result = acpi_bus_scan_fixed();
-       if (result) {
-               acpi_detach_data(acpi_root->handle, acpi_scan_drop_device);
-               acpi_device_del(acpi_root);
-               put_device(&acpi_root->dev);
-               goto out;
+       /* Fixed feature devices do not exist on HW-reduced platform */
+       if (!acpi_gbl_reduced_hardware) {
+               result = acpi_bus_scan_fixed();
+               if (result) {
+                       acpi_detach_data(acpi_root->handle,
+                                        acpi_scan_drop_device);
+                       acpi_device_del(acpi_root);
+                       put_device(&acpi_root->dev);
+                       goto out;
+               }
        }
 
        acpi_update_all_gpes();
index c1e31a4..25bbc55 100644 (file)
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
 
 static void __exit acpi_thermal_exit(void)
 {
-       destroy_workqueue(acpi_thermal_pm_queue);
        acpi_bus_unregister_driver(&acpi_thermal_driver);
+       destroy_workqueue(acpi_thermal_pm_queue);
 
        return;
 }
index bba5261..07c8c5a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/hardirq.h>
 #include <linux/acpi.h>
+#include <linux/dynamic_debug.h>
 
 #include "internal.h"
 
@@ -456,6 +457,24 @@ acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
 }
 EXPORT_SYMBOL(acpi_evaluate_ost);
 
+/**
+ * acpi_handle_path: Return the object path of handle
+ *
+ * Caller must free the returned buffer
+ */
+static char *acpi_handle_path(acpi_handle handle)
+{
+       struct acpi_buffer buffer = {
+               .length = ACPI_ALLOCATE_BUFFER,
+               .pointer = NULL
+       };
+
+       if (in_interrupt() ||
+           acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
+               return NULL;
+       return buffer.pointer;
+}
+
 /**
  * acpi_handle_printk: Print message with ACPI prefix and object path
  *
@@ -469,29 +488,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
-       struct acpi_buffer buffer = {
-               .length = ACPI_ALLOCATE_BUFFER,
-               .pointer = NULL
-       };
        const char *path;
 
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       if (in_interrupt() ||
-           acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
-               path = "<n/a>";
-       else
-               path = buffer.pointer;
-
-       printk("%sACPI: %s: %pV", level, path, &vaf);
+       path = acpi_handle_path(handle);
+       printk("%sACPI: %s: %pV", level, path ? path : "<n/a>", &vaf);
 
        va_end(args);
-       kfree(buffer.pointer);
+       kfree(path);
 }
 EXPORT_SYMBOL(acpi_handle_printk);
 
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/**
+ * __acpi_handle_debug: pr_debug with ACPI prefix and object path
+ *
+ * This function is called through the acpi_handle_debug() macro and
+ * prints a debug message with the ACPI prefix and object path.  It
+ * acquires the global namespace mutex to obtain the object path; in
+ * interrupt context, the object path is shown as <n/a>.
+ */
+void
+__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle,
+                   const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+       const char *path;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       path = acpi_handle_path(handle);
+       __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf);
+
+       va_end(args);
+       kfree(path);
+}
+EXPORT_SYMBOL(__acpi_handle_debug);
+#endif
+
 /**
  * acpi_has_method: Check whether @handle has a method named @name
  * @handle: ACPI device handle
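
The refactor above pulls the name lookup into acpi_handle_path(), whose contract is: either a heap buffer the caller frees, or NULL (printed as "<n/a>"). A userspace sketch of that contract; the path string and the `usable` flag are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns a heap-allocated path the caller must free(), or NULL when
     * the name cannot be looked up (in the driver: interrupt context, or
     * an acpi_get_name() failure). */
    static char *get_object_path(int usable)
    {
        if (!usable)
            return NULL;
        return strdup("\\_SB_.PCI0.LPCB");  /* hypothetical path */
    }

    static void print_with_prefix(const char *msg, int usable)
    {
        char *path = get_object_path(usable);

        printf("ACPI: %s: %s\n", path ? path : "<n/a>", msg);
        free(path);                         /* free(NULL) is a no-op */
    }

    int main(void)
    {
        print_with_prefix("device ready", 1);
        print_with_prefix("from irq context", 0);
        return 0;
    }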
index 6d8a87f..cb9b1f8 100644
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
-       spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+       spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
        err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vq);
-               spin_unlock_irqrestore(&vblk->vq_lock, flags);
                blk_mq_stop_hw_queue(hctx);
+               spin_unlock_irqrestore(&vblk->vq_lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
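
Both hunks above move the queue start/stop calls back under vq_lock, so the "queue stopped" state is tested and changed under one lock. A compressed pthread sketch of that rule, with toy names rather than the driver's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t vq_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool queue_stopped;

    static void completion_path(bool req_done)
    {
        pthread_mutex_lock(&vq_lock);
        /* ... reap completed requests ... */
        if (req_done && queue_stopped)
            queue_stopped = false;  /* restart while still holding the lock */
        pthread_mutex_unlock(&vq_lock);
    }

    static void submit_path(bool ring_full)
    {
        pthread_mutex_lock(&vq_lock);
        if (ring_full)
            queue_stopped = true;   /* stop under the same lock, too */
        pthread_mutex_unlock(&vq_lock);
    }

    int main(void)
    {
        submit_path(true);
        completion_path(true);
        printf("stopped=%d\n", queue_stopped);
        return 0;
    }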
index 4637697..3fbee45 100644
@@ -147,7 +147,7 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
 static int _round_up_table(const struct clk_div_table *table, int div)
 {
        const struct clk_div_table *clkt;
-       int up = _get_table_maxdiv(table);
+       int up = INT_MAX;
 
        for (clkt = table; clkt->div; clkt++) {
                if (clkt->div == div)
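
Starting the search at INT_MAX instead of the table's maximum divider makes "no entry is large enough" detectable by the caller. A standalone sketch of the same loop over a zero-terminated table:

    #include <limits.h>
    #include <stdio.h>

    struct div_table { int div; };  /* zero-terminated, like clk_div_table */

    /* Round a requested divider up to the nearest table entry; INT_MAX as
     * the starting candidate means "nothing found yet", so an impossible
     * request is visible instead of silently becoming the table maximum. */
    static int round_up_table(const struct div_table *t, int div)
    {
        int up = INT_MAX;

        for (; t->div; t++) {
            if (t->div == div)
                return t->div;              /* exact hit */
            if (t->div > div && t->div < up)
                up = t->div;                /* best round-up so far */
        }
        return up;
    }

    int main(void)
    {
        const struct div_table t[] = { {1}, {2}, {4}, {8}, {0} };

        printf("%d %d %d\n",
               round_up_table(t, 3),    /* 4 */
               round_up_table(t, 8),    /* 8 */
               round_up_table(t, 9));   /* INT_MAX: nothing fits */
        return 0;
    }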
index bca0a0b..a886702 100644
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
        gate->lock = odf_lock;
 
        div = kzalloc(sizeof(*div), GFP_KERNEL);
-       if (!div)
+       if (!div) {
+               kfree(gate);
                return ERR_PTR(-ENOMEM);
+       }
 
        div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
        div->reg = reg + pll_data->odf[odf].offset;
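
The added kfree(gate) closes a leak: when the second of two paired allocations fails, the first must be released on the error path. A minimal sketch, with stand-in types for the gate and divider:

    #include <stdlib.h>

    struct gate    { int reg; };
    struct divider { int reg; };

    static int register_odf(struct gate **gp, struct divider **dp)
    {
        struct gate *gate = calloc(1, sizeof(*gate));
        struct divider *div;

        if (!gate)
            return -1;

        div = calloc(1, sizeof(*div));
        if (!div) {
            free(gate);     /* the fix: release the earlier allocation too */
            return -1;
        }

        *gp = gate;
        *dp = div;
        return 0;
    }

    int main(void)
    {
        struct gate *g;
        struct divider *d;

        if (register_odf(&g, &d) == 0) {
            free(d);
            free(g);
        }
        return 0;
    }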
index e1769ad..6aad8ab 100644
@@ -58,9 +58,9 @@
 #define PLLDU_LFCON_SET_DIVN 600
 
 #define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
 #define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
 #define PLLE_BASE_DIVN_SHIFT 8
 #define PLLE_BASE_DIVN_WIDTH 8
 #define PLLE_BASE_DIVM_SHIFT 0
 #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
                      mask(p->params->div_nmp->divp_width))
 
+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
 #define divm_max(p) (divm_mask(p))
 #define divn_max(p) (divn_mask(p))
 #define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
        } else {
                val = pll_readl_base(pll);
 
-               val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
-                (divn_mask(pll) << div_nmp->divn_shift) |
-                (divp_mask(pll) << div_nmp->divp_shift));
+               val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+                        divp_mask_shifted(pll));
 
-               val |= ((cfg->m << div_nmp->divm_shift) |
-                       (cfg->n << div_nmp->divn_shift) |
-                       (cfg->p << div_nmp->divp_shift));
+               val |= (cfg->m << divm_shift(pll)) |
+                      (cfg->n << divn_shift(pll)) |
+                      (cfg->p << divp_shift(pll));
 
                pll_writel_base(val, pll);
        }
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
        if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
                /* configure dividers */
                val = pll_readl_base(pll);
-               val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-               val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-               val |= sel.m << pll->params->div_nmp->divm_shift;
-               val |= sel.n << pll->params->div_nmp->divn_shift;
-               val |= sel.p << pll->params->div_nmp->divp_shift;
+               val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+                        divm_mask_shifted(pll));
+               val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+               val |= sel.m << divm_shift(pll);
+               val |= sel.n << divn_shift(pll);
+               val |= sel.p << divp_shift(pll);
                val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
                pll_writel_base(val, pll);
        }
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
        pll_writel_misc(val, pll);
 
        val = readl(pll->clk_base + PLLE_SS_CTRL);
+       val &= ~PLLE_SS_COEFFICIENTS_MASK;
        val |= PLLE_SS_DISABLE;
        writel(val, pll->clk_base + PLLE_SS_CTRL);
 
-       val |= pll_readl_base(pll);
+       val = pll_readl_base(pll);
        val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
        pll_writel_base(val, pll);
 
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        pll_writel(val, PLLE_SS_CTRL, pll);
 
        val = pll_readl_base(pll);
-       val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-       val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-       val |= sel.m << pll->params->div_nmp->divm_shift;
-       val |= sel.n << pll->params->div_nmp->divn_shift;
+       val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+                divm_mask_shifted(pll));
+       val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+       val |= sel.m << divm_shift(pll);
+       val |= sel.n << divn_shift(pll);
        val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
        pll_writel_base(val, pll);
        udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
        return clk;
 }
 
+static struct div_nmp pll_e_nmp = {
+       .divn_shift = PLLE_BASE_DIVN_SHIFT,
+       .divn_width = PLLE_BASE_DIVN_WIDTH,
+       .divm_shift = PLLE_BASE_DIVM_SHIFT,
+       .divm_width = PLLE_BASE_DIVM_WIDTH,
+       .divp_shift = PLLE_BASE_DIVP_SHIFT,
+       .divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
 struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
                void __iomem *clk_base, void __iomem *pmc,
                unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 
        pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
        pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+       if (!pll_params->div_nmp)
+               pll_params->div_nmp = &pll_e_nmp;
+
        pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
        if (IS_ERR(pll))
                return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
                int m;
 
                m = _pll_fixed_mdiv(pll_params, parent_rate);
-               val = m << PLL_BASE_DIVM_SHIFT;
-               val |= (pll_params->vco_min / parent_rate)
-                               << PLL_BASE_DIVN_SHIFT;
+               val = m << divm_shift(pll);
+               val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
                pll_writel_base(val, pll);
        }
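
Two distinct fixes meet in this file: PLLE_BASE_DIVCML_WIDTH (a field width, 4) was being used where a bit mask (0xf) belongs, and the new *_mask_shifted() helpers keep each mask paired with its shift. A small sketch showing why a width is not a mask:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_SHIFT 24
    #define FIELD_WIDTH 4
    #define FIELD_MASK  ((1u << FIELD_WIDTH) - 1)  /* 0xf for a 4-bit field */
    #define FIELD_MASK_SHIFTED (FIELD_MASK << FIELD_SHIFT) /* cf. divm_mask_shifted() */

    int main(void)
    {
        uint32_t val = 0xffffffffu;

        /* Buggy: a WIDTH (4) is not a MASK (0xf); this clears one bit. */
        uint32_t buggy = val & ~((uint32_t)FIELD_WIDTH << FIELD_SHIFT);

        /* Fixed: clear the whole field, then insert the new value. */
        uint32_t fixed = (val & ~FIELD_MASK_SHIFTED) | (0x5u << FIELD_SHIFT);

        printf("buggy=%08x fixed=%08x\n", (unsigned)buggy, (unsigned)fixed);
        return 0;
    }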
 
index 00fdd11..a8d7ea1 100644
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
                        || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
                __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
                __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-               clk_disable_unprepare(tcd->clk);
+               clk_disable(tcd->clk);
        }
 
        switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
         * of oneshot, we get lower overhead and improved accuracy.
         */
        case CLOCK_EVT_MODE_PERIODIC:
-               clk_prepare_enable(tcd->clk);
+               clk_enable(tcd->clk);
 
                /* slow clock, count up to RC, then irq and restart */
                __raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
                break;
 
        case CLOCK_EVT_MODE_ONESHOT:
-               clk_prepare_enable(tcd->clk);
+               clk_enable(tcd->clk);
 
                /* slow clock, count up to RC, then irq and stop */
                __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
        ret = clk_prepare_enable(t2_clk);
        if (ret)
                return ret;
-       clk_disable_unprepare(t2_clk);
+       clk_disable(t2_clk);
 
        clkevt.regs = tc->regs;
        clkevt.clk = t2_clk;
index b52e1c0..7f5374d 100644
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 
        action->dev_id = ce;
        BUG_ON(setup_irq(ce->irq, action));
-       irq_set_affinity(action->irq, cpumask_of(cpu));
+       irq_force_affinity(action->irq, cpumask_of(cpu));
 
        clockevents_register_device(ce);
        return 0;
index 1bf6bba..09b9129 100644
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
+       cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
        if (IS_ERR(cpu_reg)) {
                /*
                 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                        PTR_ERR(cpu_reg));
        }
 
-       cpu_clk = devm_clk_get(cpu_dev, NULL);
+       cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
                pr_err("failed to get cpu0 clock: %d\n", ret);
-               goto out_put_node;
+               goto out_put_reg;
        }
 
        ret = of_init_opp_table(cpu_dev);
        if (ret) {
                pr_err("failed to init OPP table: %d\n", ret);
-               goto out_put_node;
+               goto out_put_clk;
        }
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table: %d\n", ret);
-               goto out_put_node;
+               goto out_put_clk;
        }
 
        of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
 out_free_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_clk:
+       if (!IS_ERR(cpu_clk))
+               clk_put(cpu_clk);
+out_put_reg:
+       if (!IS_ERR(cpu_reg))
+               regulator_put(cpu_reg);
 out_put_node:
        of_node_put(np);
        return ret;
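
Dropping the devm_* helpers means the probe path needs explicit unwinding; the new out_put_clk/out_put_reg labels release resources in reverse order of acquisition. A sketch of that ladder with stub get/put functions:

    #include <stdio.h>

    static int get_reg(void)  { return 0; }
    static void put_reg(void) { puts("put reg"); }
    static int get_clk(void)  { return 0; }
    static void put_clk(void) { puts("put clk"); }
    static int init_table(void) { return -1; }  /* pretend this step fails */

    static int probe(void)
    {
        int ret;

        ret = get_reg();
        if (ret)
            goto out;
        ret = get_clk();
        if (ret)
            goto out_put_reg;
        ret = init_table();
        if (ret)
            goto out_put_clk;   /* unwinds both clk and reg */
        return 0;

    out_put_clk:
        put_clk();
    out_put_reg:
        put_reg();
    out:
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }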
index ba43991..e1c6433 100644
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                break;
 
        case CPUFREQ_GOV_LIMITS:
+               mutex_lock(&dbs_data->mutex);
+               if (!cpu_cdbs->cur_policy) {
+                       mutex_unlock(&dbs_data->mutex);
+                       break;
+               }
                mutex_lock(&cpu_cdbs->timer_mutex);
                if (policy->max < cpu_cdbs->cur_policy->cur)
                        __cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                        policy->min, CPUFREQ_RELATION_L);
                dbs_check_cpu(dbs_data, cpu);
                mutex_unlock(&cpu_cdbs->timer_mutex);
+               mutex_unlock(&dbs_data->mutex);
                break;
        }
        return 0;
index eab8ccf..db2e45b 100644
 #define BYT_TURBO_VIDS         0x66d
 
 
-#define FRAC_BITS 6
+#define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
-#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
+
 
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
@@ -59,8 +59,8 @@ struct sample {
        int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
-       unsigned long long tsc;
        int freq;
+       ktime_t time;
 };
 
 struct pstate_data {
@@ -98,9 +98,9 @@ struct cpudata {
        struct vid_data vid;
        struct _pid pid;
 
+       ktime_t last_sample_time;
        u64     prev_aperf;
        u64     prev_mperf;
-       unsigned long long prev_tsc;
        struct sample sample;
 };
 
@@ -200,7 +200,10 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
        pid->last_err = fp_error;
 
        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
-
+       if (result >= 0)
+               result = result + (1 << (FRAC_BITS-1));
+       else
+               result = result - (1 << (FRAC_BITS-1));
        return (signed int)fp_toint(result);
 }
 
@@ -560,47 +563,42 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
-       int32_t core_pct;
-       int32_t c0_pct;
+       int64_t core_pct;
+       int32_t rem;
 
-       core_pct = div_fp(int_tofp((sample->aperf)),
-                       int_tofp((sample->mperf)));
-       core_pct = mul_fp(core_pct, int_tofp(100));
-       FP_ROUNDUP(core_pct);
+       core_pct = int_tofp(sample->aperf) * int_tofp(100);
+       core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);
 
-       c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
+       if ((rem << 1) >= int_tofp(sample->mperf))
+               core_pct += 1;
 
        sample->freq = fp_toint(
                mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
 
-       sample->core_pct_busy = mul_fp(core_pct, c0_pct);
+       sample->core_pct_busy = (int32_t)core_pct;
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
        u64 aperf, mperf;
-       unsigned long long tsc;
 
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
-       tsc = native_read_tsc();
 
        aperf = aperf >> FRAC_BITS;
        mperf = mperf >> FRAC_BITS;
-       tsc = tsc >> FRAC_BITS;
 
+       cpu->last_sample_time = cpu->sample.time;
+       cpu->sample.time = ktime_get();
        cpu->sample.aperf = aperf;
        cpu->sample.mperf = mperf;
-       cpu->sample.tsc = tsc;
        cpu->sample.aperf -= cpu->prev_aperf;
        cpu->sample.mperf -= cpu->prev_mperf;
-       cpu->sample.tsc -= cpu->prev_tsc;
 
        intel_pstate_calc_busy(cpu, &cpu->sample);
 
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
-       cpu->prev_tsc = tsc;
 }
 
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -614,13 +612,25 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 
 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-       int32_t core_busy, max_pstate, current_pstate;
+       int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+       u32 duration_us;
+       u32 sample_time;
 
        core_busy = cpu->sample.core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-       return FP_ROUNDUP(core_busy);
+
+       sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
+       duration_us = (u32) ktime_us_delta(cpu->sample.time,
+                                       cpu->last_sample_time);
+       if (duration_us > sample_time * 3) {
+               sample_ratio = div_fp(int_tofp(sample_time),
+                               int_tofp(duration_us));
+               core_busy = mul_fp(core_busy, sample_ratio);
+       }
+
+       return core_busy;
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
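
Two fixed-point changes land above: FRAC_BITS grows from 6 to 8 (finer resolution), and pid_calc() now rounds to nearest by adding or subtracting half an integer step before dropping the fraction. A standalone sketch of that rounding; note the driver's fp_toint() is an arithmetic right shift, while this sketch divides, which truncates toward zero and so rounds both signs half away from zero symmetrically:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8
    #define int_tofp(x) ((int64_t)(x) << FRAC_BITS)

    static int round_fp(int64_t v)
    {
        /* Add (or subtract) half of one integer step, then drop the
         * fraction; division truncates toward zero. */
        if (v >= 0)
            v += 1 << (FRAC_BITS - 1);
        else
            v -= 1 << (FRAC_BITS - 1);
        return (int)(v / (1 << FRAC_BITS));
    }

    int main(void)
    {
        int64_t a = int_tofp(9) / 4;    /*  2.25 in 24.8 fixed point */
        int64_t b = -int_tofp(11) / 4;  /* -2.75 */

        printf("%d %d\n", round_fp(a), round_fp(b));    /* prints: 2 -3 */
        return 0;
    }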
index cfdbb92..7a74076 100644
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        /* Disable BLOCK interrupts as well */
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-       err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
-                              IRQF_SHARED, "dw_dmac", dw);
-       if (err)
-               return err;
-
        /* Create a pool of consistent memory blocks for hardware descriptors */
        dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
                                         sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
+       err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+                         "dw_dmac", dw);
+       if (err)
+               return err;
+
        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);
 
+       free_irq(chip->irq, dw);
        tasklet_kill(&dw->tasklet);
 
        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
index ab26d46..5ebdfbc 100644
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
        struct sa11x0_dma_desc  *txd_load;
        unsigned                sg_done;
        struct sa11x0_dma_desc  *txd_done;
-#ifdef CONFIG_PM_SLEEP
        u32                     dbs[2];
        u32                     dbt[2];
        u32                     dcsr;
-#endif
 };
 
 struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int sa11x0_dma_suspend(struct device *dev)
 {
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
        .suspend_noirq = sa11x0_dma_suspend,
index c98764a..f477308 100644
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
 
 #define LOCAL_BUS 0xffc0
 
-/* arbitrarily chosen maximum range for physical DMA: 128 TB */
-#define FW_MAX_PHYSICAL_RANGE          (128ULL << 40)
+/* OHCI-1394's default upper bound for physical DMA: 4 GB */
+#define FW_MAX_PHYSICAL_RANGE          (1ULL << 32)
 
 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
index 8db6632..586f2f7 100644
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
                    version >> 16, version & 0xff, ohci->card.index,
                    ohci->n_ir, ohci->n_it, ohci->quirks,
                    reg_read(ohci, OHCI1394_PhyUpperBound) ?
-                       ", >4 GB phys DMA" : "");
+                       ", physUB" : "");
 
        return 0;
 
index 96177ee..eedb023 100644
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
                flush_workqueue(dev_priv->wq);
 
                mutex_lock(&dev->struct_mutex);
-               i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                i915_gem_context_fini(dev);
                WARN_ON(dev_priv->mm.aliasing_ppgtt);
index 108e1ec..388c028 100644
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS    0
 #define WATCH_GTT      0
 
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-       int id;
-       struct page **page_list;
-       drm_dma_handle_t *handle;
-       struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
        /** Bit 6 swizzling required for Y tiling */
        uint32_t bit_6_swizzle_y;
 
-       /* storage for physical objects */
-       struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
        /* accounting, useful for userland debugging */
        spinlock_t object_stat_lock;
        size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
        struct drm_file *pin_filp;
 
        /** for phy allocated objects */
-       struct drm_i915_gem_phys_object *phys_obj;
+       drm_dma_handle_t *phys_handle;
 };
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
-                                    unsigned flags);
+                                    uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-                               struct drm_i915_gem_object *obj,
-                               int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                                int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
                                          int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
+                                         unsigned long start,
+                                         unsigned long end,
                                          unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
index 2871ce7..3326770 100644
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-                               struct drm_i915_gem_object *obj,
-                               struct drm_i915_gem_pwrite *args,
-                               struct drm_file *file);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+       drm_dma_handle_t *phys = obj->phys_handle;
+
+       if (!phys)
+               return;
+
+       if (obj->madv == I915_MADV_WILLNEED) {
+               struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+               char *vaddr = phys->vaddr;
+               int i;
+
+               for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+                       struct page *page = shmem_read_mapping_page(mapping, i);
+                       if (!IS_ERR(page)) {
+                               char *dst = kmap_atomic(page);
+                               memcpy(dst, vaddr, PAGE_SIZE);
+                               drm_clflush_virt_range(dst, PAGE_SIZE);
+                               kunmap_atomic(dst);
+
+                               set_page_dirty(page);
+                               mark_page_accessed(page);
+                               page_cache_release(page);
+                       }
+                       vaddr += PAGE_SIZE;
+               }
+               i915_gem_chipset_flush(obj->base.dev);
+       }
+
+#ifdef CONFIG_X86
+       set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+       drm_pci_free(obj->base.dev, phys);
+       obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+                           int align)
+{
+       drm_dma_handle_t *phys;
+       struct address_space *mapping;
+       char *vaddr;
+       int i;
+
+       if (obj->phys_handle) {
+               if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+                       return -EBUSY;
+
+               return 0;
+       }
+
+       if (obj->madv != I915_MADV_WILLNEED)
+               return -EFAULT;
+
+       if (obj->base.filp == NULL)
+               return -EINVAL;
+
+       /* create a new object */
+       phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+       if (!phys)
+               return -ENOMEM;
+
+       vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+       set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+       mapping = file_inode(obj->base.filp)->i_mapping;
+       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+               struct page *page;
+               char *src;
+
+               page = shmem_read_mapping_page(mapping, i);
+               if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+                       set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+                       drm_pci_free(obj->base.dev, phys);
+                       return PTR_ERR(page);
+               }
+
+               src = kmap_atomic(page);
+               memcpy(vaddr, src, PAGE_SIZE);
+               kunmap_atomic(src);
+
+               mark_page_accessed(page);
+               page_cache_release(page);
+
+               vaddr += PAGE_SIZE;
+       }
+
+       obj->phys_handle = phys;
+       return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+                    struct drm_i915_gem_pwrite *args,
+                    struct drm_file *file_priv)
+{
+       struct drm_device *dev = obj->base.dev;
+       void *vaddr = obj->phys_handle->vaddr + args->offset;
+       char __user *user_data = to_user_ptr(args->data_ptr);
+
+       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+               unsigned long unwritten;
+
+               /* The physical object once assigned is fixed for the lifetime
+                * of the obj, so we can safely drop the lock and continue
+                * to access vaddr.
+                */
+               mutex_unlock(&dev->struct_mutex);
+               unwritten = copy_from_user(vaddr, user_data, args->size);
+               mutex_lock(&dev->struct_mutex);
+               if (unwritten)
+                       return -EFAULT;
+       }
+
+       i915_gem_chipset_flush(dev);
+       return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->phys_obj) {
-               ret = i915_gem_phys_pwrite(dev, obj, args, file);
+       if (obj->phys_handle) {
+               ret = i915_gem_phys_pwrite(obj, args, file);
                goto out;
        }
 
@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
-                          unsigned flags)
+                          uint64_t flags)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
-       size_t gtt_max =
+       unsigned long start =
+               flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+       unsigned long end =
                flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
        struct i915_vma *vma;
        int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->base.size > gtt_max) {
-               DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+       if (obj->base.size > end) {
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
                          obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
-                         gtt_max);
+                         end);
                return ERR_PTR(-E2BIG);
        }
 
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
        ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
-                                                 obj->cache_level, 0, gtt_max,
+                                                 obj->cache_level,
+                                                 start, end,
                                                  DRM_MM_SEARCH_DEFAULT,
                                                  DRM_MM_CREATE_DEFAULT);
        if (ret) {
                ret = i915_gem_evict_something(dev, vm, size, alignment,
-                                              obj->cache_level, flags);
+                                              obj->cache_level,
+                                              start, end,
+                                              flags);
                if (ret == 0)
                        goto search_free;
 
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       if (alignment &&
+           vma->node.start & (alignment - 1))
+               return true;
+
+       if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+               return true;
+
+       if (flags & PIN_OFFSET_BIAS &&
+           vma->node.start < (flags & PIN_OFFSET_MASK))
+               return true;
+
+       return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm,
                    uint32_t alignment,
-                   unsigned flags)
+                   uint64_t flags)
 {
        struct i915_vma *vma;
        int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
 
-               if ((alignment &&
-                    vma->node.start & (alignment - 1)) ||
-                   (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+               if (i915_vma_misplaced(vma, alignment, flags)) {
                        WARN(vma->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
                             i915_gem_obj_offset(obj, vm), alignment,
-                            flags & PIN_MAPPABLE,
+                            !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
                        if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        trace_i915_gem_object_destroy(obj);
 
-       if (obj->phys_obj)
-               i915_gem_detach_phys_object(dev, obj);
-
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                int ret;
 
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                }
        }
 
+       i915_gem_object_detach_phys(obj);
+
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
        if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
        register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-                                    int id, int size, int align)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_phys_object *phys_obj;
-       int ret;
-
-       if (dev_priv->mm.phys_objs[id - 1] || !size)
-               return 0;
-
-       phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-       if (!phys_obj)
-               return -ENOMEM;
-
-       phys_obj->id = id;
-
-       phys_obj->handle = drm_pci_alloc(dev, size, align);
-       if (!phys_obj->handle) {
-               ret = -ENOMEM;
-               goto kfree_obj;
-       }
-#ifdef CONFIG_X86
-       set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-       dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-       return 0;
-kfree_obj:
-       kfree(phys_obj);
-       return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_phys_object *phys_obj;
-
-       if (!dev_priv->mm.phys_objs[id - 1])
-               return;
-
-       phys_obj = dev_priv->mm.phys_objs[id - 1];
-       if (phys_obj->cur_obj) {
-               i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-       }
-
-#ifdef CONFIG_X86
-       set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-       drm_pci_free(dev, phys_obj->handle);
-       kfree(phys_obj);
-       dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-       int i;
-
-       for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-               i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_i915_gem_object *obj)
-{
-       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-       char *vaddr;
-       int i;
-       int page_count;
-
-       if (!obj->phys_obj)
-               return;
-       vaddr = obj->phys_obj->handle->vaddr;
-
-       page_count = obj->base.size / PAGE_SIZE;
-       for (i = 0; i < page_count; i++) {
-               struct page *page = shmem_read_mapping_page(mapping, i);
-               if (!IS_ERR(page)) {
-                       char *dst = kmap_atomic(page);
-                       memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-                       kunmap_atomic(dst);
-
-                       drm_clflush_pages(&page, 1);
-
-                       set_page_dirty(page);
-                       mark_page_accessed(page);
-                       page_cache_release(page);
-               }
-       }
-       i915_gem_chipset_flush(dev);
-
-       obj->phys_obj->cur_obj = NULL;
-       obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-                           struct drm_i915_gem_object *obj,
-                           int id,
-                           int align)
-{
-       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
-       int page_count;
-       int i;
-
-       if (id > I915_MAX_PHYS_OBJECT)
-               return -EINVAL;
-
-       if (obj->phys_obj) {
-               if (obj->phys_obj->id == id)
-                       return 0;
-               i915_gem_detach_phys_object(dev, obj);
-       }
-
-       /* create a new object */
-       if (!dev_priv->mm.phys_objs[id - 1]) {
-               ret = i915_gem_init_phys_object(dev, id,
-                                               obj->base.size, align);
-               if (ret) {
-                       DRM_ERROR("failed to init phys object %d size: %zu\n",
-                                 id, obj->base.size);
-                       return ret;
-               }
-       }
-
-       /* bind to the object */
-       obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-       obj->phys_obj->cur_obj = obj;
-
-       page_count = obj->base.size / PAGE_SIZE;
-
-       for (i = 0; i < page_count; i++) {
-               struct page *page;
-               char *dst, *src;
-
-               page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page))
-                       return PTR_ERR(page);
-
-               src = kmap_atomic(page);
-               dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-               memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(src);
-
-               mark_page_accessed(page);
-               page_cache_release(page);
-       }
-
-       return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-                    struct drm_i915_gem_object *obj,
-                    struct drm_i915_gem_pwrite *args,
-                    struct drm_file *file_priv)
-{
-       void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-       char __user *user_data = to_user_ptr(args->data_ptr);
-
-       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-               unsigned long unwritten;
-
-               /* The physical object once assigned is fixed for the lifetime
-                * of the obj, so we can safely drop the lock and continue
-                * to access vaddr.
-                */
-               mutex_unlock(&dev->struct_mutex);
-               unwritten = copy_from_user(vaddr, user_data, args->size);
-               mutex_lock(&dev->struct_mutex);
-               if (unwritten)
-                       return -EFAULT;
-       }
-
-       i915_gem_chipset_flush(dev);
-       return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
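
The pin-flags word grows to 64 bits in this series because it now carries data as well as booleans: PIN_OFFSET_BIAS in the low bits, plus a page-aligned minimum offset in the bits PIN_OFFSET_MASK selects. A sketch of packing and unpacking such a word:

    #include <stdint.h>
    #include <stdio.h>

    #define PIN_MAPPABLE    0x1ull
    #define PIN_OFFSET_BIAS 0x8ull
    #define PIN_OFFSET_MASK (~4095ull)  /* page-aligned payload bits */

    int main(void)
    {
        uint64_t bias  = 256 * 1024;    /* cf. BATCH_OFFSET_BIAS above */
        uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS |
                         (bias & PIN_OFFSET_MASK);

        /* Booleans live in the low bits; the page-aligned offset rides
         * in the masked high bits, so one 64-bit word carries both. */
        if (flags & PIN_OFFSET_BIAS)
            printf("min offset: 0x%llx\n",
                   (unsigned long long)(flags & PIN_OFFSET_MASK));
        return 0;
    }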
index 75fca63..bbf4b12 100644
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
+                        unsigned long start, unsigned long end,
                         unsigned flags)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
         */
 
        INIT_LIST_HEAD(&unwind_list);
-       if (flags & PIN_MAPPABLE) {
-               BUG_ON(!i915_is_ggtt(vm));
+       if (start != 0 || end != vm->total) {
                drm_mm_init_scan_with_range(&vm->mm, min_size,
-                                           alignment, cache_level, 0,
-                                           dev_priv->gtt.mappable_end);
+                                           alignment, cache_level,
+                                           start, end);
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
index 2c9d9cb..20fef6c 100644
@@ -35,6 +35,9 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
        struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence;
-       unsigned flags;
+       uint64_t flags;
        int ret;
 
        flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
                flags |= PIN_GLOBAL;
+       if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+               flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
        if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+       struct drm_i915_gem_object *obj = vma->obj;
+       bool need_fence, need_mappable;
+
+       need_fence =
+               has_fenced_gpu_access &&
+               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+               obj->tiling_mode != I915_TILING_NONE;
+       need_mappable = need_fence || need_reloc_mappable(vma);
+
+       WARN_ON((need_mappable || need_fence) &&
+              !i915_is_ggtt(vma->vm));
+
+       if (entry->alignment &&
+           vma->node.start & (entry->alignment - 1))
+               return true;
+
+       if (need_mappable && !obj->map_and_fenceable)
+               return true;
+
+       if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+           vma->node.start < BATCH_OFFSET_BIAS)
+               return true;
+
+       return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
                /* Unbind any ill-fitting objects or pin. */
                list_for_each_entry(vma, vmas, exec_list) {
-                       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-                       bool need_fence, need_mappable;
-
-                       obj = vma->obj;
-
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       need_fence =
-                               has_fenced_gpu_access &&
-                               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-                               obj->tiling_mode != I915_TILING_NONE;
-                       need_mappable = need_fence || need_reloc_mappable(vma);
-
-                       WARN_ON((need_mappable || need_fence) &&
-                              !i915_is_ggtt(vma->vm));
-
-                       if ((entry->alignment &&
-                            vma->node.start & (entry->alignment - 1)) ||
-                           (need_mappable && !obj->map_and_fenceable))
+                       if (eb_vma_misplaced(vma, has_fenced_gpu_access))
                                ret = i915_vma_unbind(vma);
                        else
                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                 * relocations were valid.
                 */
                for (j = 0; j < exec[i].relocation_count; j++) {
-                       if (copy_to_user(&user_relocs[j].presumed_offset,
-                                        &invalid_offset,
-                                        sizeof(invalid_offset))) {
+                       if (__copy_to_user(&user_relocs[j].presumed_offset,
+                                          &invalid_offset,
+                                          sizeof(invalid_offset))) {
                                ret = -EFAULT;
                                mutex_lock(&dev->struct_mutex);
                                goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
        return 0;
 }
 
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+       struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocate address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+       return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
 
        /* take note of the batch buffer before we might reorder the lists */
-       batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+       batch_obj = eb_get_batch(eb);
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
+               struct drm_i915_gem_exec_object __user *user_exec_list =
+                       to_user_ptr(args->buffers_ptr);
+
                /* Copy the new buffer offsets back to the user's exec list. */
-               for (i = 0; i < args->buffer_count; i++)
-                       exec_list[i].offset = exec2_list[i].offset;
-               /* ... and back out to userspace */
-               ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-                                  exec_list,
-                                  sizeof(*exec_list) * args->buffer_count);
-               if (ret) {
-                       ret = -EFAULT;
-                       DRM_DEBUG("failed to copy %d exec entries "
-                                 "back to user (%d)\n",
-                                 args->buffer_count, ret);
+               for (i = 0; i < args->buffer_count; i++) {
+                       ret = __copy_to_user(&user_exec_list[i].offset,
+                                            &exec2_list[i].offset,
+                                            sizeof(user_exec_list[i].offset));
+                       if (ret) {
+                               ret = -EFAULT;
+                               DRM_DEBUG("failed to copy %d exec entries "
+                                         "back to user (%d)\n",
+                                         args->buffer_count, ret);
+                               break;
+                       }
                }
        }
 
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
-               ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-                                  exec2_list,
-                                  sizeof(*exec2_list) * args->buffer_count);
-               if (ret) {
-                       ret = -EFAULT;
-                       DRM_DEBUG("failed to copy %d exec entries "
-                                 "back to user (%d)\n",
-                                 args->buffer_count, ret);
+               struct drm_i915_gem_exec_object2 *user_exec_list =
+                                  to_user_ptr(args->buffers_ptr);
+               int i;
+
+               for (i = 0; i < args->buffer_count; i++) {
+                       ret = __copy_to_user(&user_exec_list[i].offset,
+                                            &exec2_list[i].offset,
+                                            sizeof(user_exec_list[i].offset));
+                       if (ret) {
+                               ret = -EFAULT;
+                               DRM_DEBUG("failed to copy %d exec entries "
+                                         "back to user\n",
+                                         args->buffer_count);
+                               break;
+                       }
                }
        }
 
index 154b0f8..5deb228 100644
@@ -1089,7 +1089,9 @@ alloc:
        if (ret == -ENOSPC && !retried) {
                ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
                                               GEN6_PD_SIZE, GEN6_PD_ALIGN,
-                                              I915_CACHE_NONE, 0);
+                                              I915_CACHE_NONE,
+                                              0, dev_priv->gtt.base.total,
+                                              0);
                if (ret)
                        return ret;
 
index 48aa516..5b60e25 100644
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                addr = i915_gem_obj_ggtt_offset(obj);
        } else {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
-               ret = i915_gem_attach_phys_object(dev, obj,
-                                                 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-                                                 align);
+               ret = i915_gem_object_attach_phys(obj, align);
                if (ret) {
                        DRM_DEBUG_KMS("failed to attach phys object\n");
                        goto fail_locked;
                }
-               addr = obj->phys_obj->handle->busaddr;
+               addr = obj->phys_handle->busaddr;
        }
 
        if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
  finish:
        if (intel_crtc->cursor_bo) {
-               if (INTEL_INFO(dev)->cursor_needs_physical) {
-                       if (intel_crtc->cursor_bo != obj)
-                               i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-               } else
+               if (!INTEL_INFO(dev)->cursor_needs_physical)
                        i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
                drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
        }
index d8adc91..129db0c 100644
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
        struct overlay_registers __iomem *regs;
 
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-               regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+               regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
                regs = io_mapping_map_wc(dev_priv->gtt.mappable,
                                         i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
        overlay->reg_bo = reg_bo;
 
        if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-               ret = i915_gem_attach_phys_object(dev, reg_bo,
-                                                 I915_GEM_PHYS_OVERLAY_REGS,
-                                                 PAGE_SIZE);
+               ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
                if (ret) {
                        DRM_ERROR("failed to attach phys overlay regs\n");
                        goto out_free_bo;
                }
-               overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+               overlay->flip_addr = reg_bo->phys_handle->busaddr;
        } else {
                ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
                if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                /* Cast to make sparse happy, but it's wc memory anyway, so
                 * equivalent to the wc io mapping on X86. */
                regs = (struct overlay_registers __iomem *)
-                       overlay->reg_bo->phys_obj->handle->vaddr;
+                       overlay->reg_bo->phys_handle->vaddr;
        else
                regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                                i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
        error->dovsta = I915_READ(DOVSTA);
        error->isr = I915_READ(ISR);
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-               error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+               error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
        else
                error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
index 2b6e0eb..41ecf8a 100644
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;
 
+                       if (domain & RADEON_GEM_DOMAIN_CPU) {
+                               DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+                                         "for command submission\n");
+                               return -EINVAL;
+                       }
+
                        p->relocs[i].domain = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        return -EINVAL;
 
                /* we only support VM on some SI+ rings */
-               if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
-                  ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-                       DRM_ERROR("Ring %d requires VM!\n", p->ring);
-                       return -EINVAL;
+               if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+                       if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+                               DRM_ERROR("Ring %d requires VM!\n", p->ring);
+                               return -EINVAL;
+                       }
+               } else {
+                       if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+                               DRM_ERROR("VM not supported on ring %d!\n",
+                                         p->ring);
+                               return -EINVAL;
+                       }
                }
        }
 
index 0e770bb..1467140 100644 (file)
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
        radeon_restore_bios_scratch_regs(rdev);
 
-       if (fbcon) {
-               radeon_fbdev_set_suspend(rdev, 0);
-               console_unlock();
-       }
-
        /* init dig PHYs, disp eng pll */
        if (rdev->is_atom_bios) {
                radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
+
+       if (fbcon) {
+               radeon_fbdev_set_suspend(rdev, 0);
+               console_unlock();
+       }
+
        return 0;
 }
 
index f00dbbf..356b733 100644 (file)
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
                                 unsigned *fb_div, unsigned *ref_div)
 {
        /* limit reference * post divider to a maximum */
-       ref_div_max = min(128 / post_div, ref_div_max);
+       ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
        /* get matching reference and feedback divider */
        *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
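
One worked case for the clamp above: the intent is to keep ref_div * post_div at or below 100, but integer division makes 100 / post_div collapse to 0 once post_div exceeds 100, and a zero ref_div_max would force *ref_div to 0 in the min()/max() chain on the next line. The outer max(..., 1u) pins the limit at 1 in that corner instead.
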
index d9ab99f..1f42669 100644 (file)
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
                                          struct list_head *head)
 {
        struct radeon_cs_reloc *list;
-       unsigned i, idx, size;
+       unsigned i, idx;
 
-       size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
-       list = kmalloc(size, GFP_KERNEL);
+       list = kmalloc_array(vm->max_pde_used + 1,
+                            sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (!list)
                return NULL;
 
index 76842d7..ffc7ad3 100644 (file)
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86
+       select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
index d8241ba..a15063b 100644 (file)
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
        unsigned short keycodes[MAX_KEYPAD_KEYS];
        int rotary_rel_code[2];
 
+       unsigned int row_shift;
+
        /* state row bits of each column scan */
        uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
        uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
                        if ((bits_changed & (1 << row)) == 0)
                                continue;
 
-                       code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+                       code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+
                        input_event(input_dev, EV_MSC, MSC_SCAN, code);
                        input_report_key(input_dev, keypad->keycodes[code],
                                         new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
                goto failed_put_clk;
        }
 
+       keypad->row_shift = get_count_order(pdata->matrix_key_cols);
+
        if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
            (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
                input_dev->evbit[0] |= BIT_MASK(EV_REL);
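
A worked case for the row_shift change: MATRIX_SCAN_CODE(row, col, shift) packs a key as (row << shift) + col, so the shift must cover the board's real column count. With matrix_key_cols = 5, get_count_order(5) = 3 and row 2, col 4 becomes (2 << 3) + 4 = 20. Deriving the shift from the platform data keeps scanning consistent with however the keymap was built, where a fixed compile-time MATRIX_ROW_SHIFT could disagree with it.
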
index effa9c5..6b8441f 100644 (file)
@@ -17,7 +17,7 @@ config MOUSE_PS2
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86
+       select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you have a PS/2 mouse connected to your system. This
index d68d33f..c5ec703 100644 (file)
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
 }
 
 #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+struct min_max_quirk {
+       const char * const *pnp_ids;
+       int x_min, x_max, y_min, y_max;
+};
+
+static const struct min_max_quirk min_max_pnpid_table[] = {
+       {
+               (const char * const []){"LEN0033", NULL},
+               1024, 5052, 2258, 4832
+       },
+       {
+               (const char * const []){"LEN0035", "LEN0042", NULL},
+               1232, 5710, 1156, 4696
+       },
+       {
+               (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+               1024, 5112, 2024, 4832
+       },
+       {
+               (const char * const []){"LEN2001", NULL},
+               1024, 5022, 2508, 4832
+       },
+       { }
+};
+
 /* This list has been kindly provided by Synaptics. */
 static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN002D",
        "LEN002E",
        "LEN0033", /* Helix */
-       "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+       "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
        "LEN0035", /* X240 */
        "LEN0036", /* T440 */
        "LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0048",
        "LEN0049",
        "LEN2000",
-       "LEN2001",
+       "LEN2001", /* Edge E431 */
        "LEN2002",
        "LEN2003",
        "LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
        NULL
 };
 
+static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
+{
+       int i;
+
+       if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
+               for (i = 0; ids[i]; i++)
+                       if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
+                               return true;
+
+       return false;
+}
+
 /*****************************************************************************
  *     Synaptics communications functions
  ****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
  * Resolution is left zero if touchpad does not support the query
  */
 
-static const int *quirk_min_max;
-
 static int synaptics_resolution(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        unsigned char resp[3];
+       int i;
 
-       if (quirk_min_max) {
-               priv->x_min = quirk_min_max[0];
-               priv->x_max = quirk_min_max[1];
-               priv->y_min = quirk_min_max[2];
-               priv->y_max = quirk_min_max[3];
-               return 0;
-       }
+       for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
+               if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+                       priv->x_min = min_max_pnpid_table[i].x_min;
+                       priv->x_max = min_max_pnpid_table[i].x_max;
+                       priv->y_min = min_max_pnpid_table[i].y_min;
+                       priv->y_max = min_max_pnpid_table[i].y_max;
+                       return 0;
+               }
 
        if (SYN_ID_MAJOR(priv->identity) < 4)
                return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
 
        if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
-               /* See if this buttonpad has a top button area */
-               if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
-                       for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
-                               if (strstr(psmouse->ps2dev.serio->firmware_id,
-                                          topbuttonpad_pnp_ids[i])) {
-                                       __set_bit(INPUT_PROP_TOPBUTTONPAD,
-                                                 dev->propbit);
-                                       break;
-                               }
-                       }
-               }
+               if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+                       __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
                /* Clickpads report only left button */
                __clear_bit(BTN_RIGHT, dev->keybit);
                __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
        { }
 };
 
-static const struct dmi_system_id min_max_dmi_table[] __initconst = {
-#if defined(CONFIG_DMI)
-       {
-               /* Lenovo ThinkPad Helix */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
-               },
-               .driver_data = (int []){1024, 5052, 2258, 4832},
-       },
-       {
-               /* Lenovo ThinkPad X240 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
-               },
-               .driver_data = (int []){1232, 5710, 1156, 4696},
-       },
-       {
-               /* Lenovo ThinkPad Edge E431 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
-               },
-               .driver_data = (int []){1024, 5022, 2508, 4832},
-       },
-       {
-               /* Lenovo ThinkPad T431s */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo ThinkPad T440s */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo ThinkPad L440 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo ThinkPad T540p */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
-               },
-               .driver_data = (int []){1024, 5056, 2058, 4832},
-       },
-       {
-               /* Lenovo ThinkPad L540 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-       {
-               /* Lenovo Yoga S1 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
-                                       "ThinkPad S1 Yoga"),
-               },
-               .driver_data = (int []){1232, 5710, 1156, 4696},
-       },
-       {
-               /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION,
-                                       "ThinkPad X1 Carbon 2nd"),
-               },
-               .driver_data = (int []){1024, 5112, 2024, 4832},
-       },
-#endif
-       { }
-};
-
 void __init synaptics_module_init(void)
 {
-       const struct dmi_system_id *min_max_dmi;
-
        impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
        broken_olpc_ec = dmi_check_system(olpc_dmi_table);
-
-       min_max_dmi = dmi_first_match(min_max_dmi_table);
-       if (min_max_dmi)
-               quirk_min_max = min_max_dmi->driver_data;
 }
 
 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
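
Each table entry above carries a NULL-terminated list of PNP IDs, and the table itself ends in an empty sentinel, so lookup is a two-level scan. A standalone sketch of that shape (types and names invented for illustration):

    #include <stddef.h>
    #include <string.h>

    struct quirk {
            const char * const *ids;        /* NULL-terminated ID list */
            int x_min, x_max, y_min, y_max;
    };

    /* Walk a sentinel-terminated quirk table, substring-matching each ID
     * against the reported firmware ID, as matches_pnp_id() does. */
    static const struct quirk *find_quirk(const struct quirk *table,
                                          const char *firmware_id)
    {
            for (; table->ids; table++)
                    for (const char * const *id = table->ids; *id; id++)
                            if (strstr(firmware_id, *id))
                                    return table;
            return NULL;
    }
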
index 762b084..8b748d9 100644 (file)
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
        writeb(divisor, KMICLKDIV);
        writeb(KMICR_EN, KMICR);
 
-       ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
+       ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
+                         kmi);
        if (ret) {
                printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
                writeb(0, KMICR);
index 68edc9d..b845e93 100644 (file)
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
 
 config TOUCHSCREEN_WM97XX_ATMEL
        tristate "WM97xx Atmel accelerated touch"
-       depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
+       depends on TOUCHSCREEN_WM97XX && AVR32
        help
          Say Y here for support for streaming mode with WM97xx touchscreens
          on Atmel AT91 or AVR32 systems with an AC97C module.
index 9380be7..5f054c4 100644 (file)
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        ti->num_discard_bios = 1;
        ti->discards_supported = true;
        ti->discard_zeroes_data_unsupported = true;
+       /* Discard bios must be split on a block boundary */
+       ti->split_discard_bios = true;
 
        cache->features = ca->features;
        ti->per_bio_data_size = get_per_bio_data_size(cache);
index fa0f6cb..ebfa411 100644 (file)
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
        else
                m->saved_queue_if_no_path = queue_if_no_path;
        m->queue_if_no_path = queue_if_no_path;
-       if (!m->queue_if_no_path)
-               dm_table_run_md_queue_async(m->ti->table);
-
        spin_unlock_irqrestore(&m->lock, flags);
 
+       if (!queue_if_no_path)
+               dm_table_run_md_queue_async(m->ti->table);
+
        return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
 static int reinstate_path(struct pgpath *pgpath)
 {
-       int r = 0;
+       int r = 0, run_queue = 0;
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
        if (!m->nr_valid_paths++) {
                m->current_pgpath = NULL;
-               dm_table_run_md_queue_async(m->ti->table);
+               run_queue = 1;
        } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
                if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
                        m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);
+       if (run_queue)
+               dm_table_run_md_queue_async(m->ti->table);
 
        return r;
 }
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                }
                if (m->pg_init_required)
                        __pg_init_all_paths(m);
-               dm_table_run_md_queue_async(m->ti->table);
                spin_unlock_irqrestore(&m->lock, flags);
+               dm_table_run_md_queue_async(m->ti->table);
        }
 
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
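
All three hunks above apply the same rule: dm_table_run_md_queue_async() must not run under m->lock, so the decision is recorded while the spinlock is held and acted on only after it is dropped. A small pthread sketch of the record-then-act shape (illustrative only):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_valid_paths;

    static void kick_queue(void)
    {
            /* stand-in for dm_table_run_md_queue_async(); must be
             * called without 'lock' held */
    }

    void reinstate(void)
    {
            int run_queue = 0;

            pthread_mutex_lock(&lock);
            if (!nr_valid_paths++)
                    run_queue = 1;          /* record under the lock */
            pthread_mutex_unlock(&lock);

            if (run_queue)
                    kick_queue();           /* act after dropping it */
    }
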
index 2e71de8..242ac2e 100644 (file)
@@ -27,7 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
        struct pool_c *pt = pool->ti->private;
        bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
        enum pool_mode old_mode = get_pool_mode(pool);
+       unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
        /*
         * Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                pool->process_prepared_mapping = process_prepared_mapping;
                pool->process_prepared_discard = process_prepared_discard_passdown;
 
-               if (!pool->pf.error_if_no_space)
-                       queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+               if (!pool->pf.error_if_no_space && no_space_timeout)
+                       queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
                break;
 
        case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
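
With the parameter in place, the 60-second default becomes an administrator tunable, and reading it through ACCESS_ONCE() into a local gives each mode transition one consistent snapshot even if the value changes concurrently. Assuming the target is built as the usual dm_thin_pool module, it can be set at load time (modprobe dm_thin_pool no_space_timeout=120) or at runtime through /sys/module/dm_thin_pool/parameters/no_space_timeout; the queue_delayed_work() guard above means a value of 0 disables the timeout entirely, so IO queues indefinitely while out of space.
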
index 9802b67..2c61281 100644 (file)
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
                return GPIOF_DIR_IN;
 }
 
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-                                    int value)
-{
-       return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
 static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
 {
        struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
                wmt_clearbits(data, reg_data_out, BIT(bit));
 }
 
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                    int value)
+{
+       wmt_gpio_set_value(chip, offset, value);
+       return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
 static struct gpio_chip wmt_gpio_chip = {
        .label = "gpio-wmt",
        .owner = THIS_MODULE,
index 42ae01e..be2bea8 100644 (file)
@@ -441,42 +441,12 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
-/*
- * Finish off a dentry we've decided to kill.
- * dentry->d_lock must be held, returns with it unlocked.
- * If ref is non-zero, then decrement the refcount too.
- * Returns dentry requiring refcount drop, or NULL if we're done.
- */
-static struct dentry *
-dentry_kill(struct dentry *dentry, int unlock_on_failure)
-       __releases(dentry->d_lock)
+static void __dentry_kill(struct dentry *dentry)
 {
-       struct inode *inode;
        struct dentry *parent = NULL;
        bool can_free = true;
-
-       if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
-               can_free = dentry->d_flags & DCACHE_MAY_FREE;
-               spin_unlock(&dentry->d_lock);
-               goto out;
-       }
-
-       inode = dentry->d_inode;
-       if (inode && !spin_trylock(&inode->i_lock)) {
-relock:
-               if (unlock_on_failure) {
-                       spin_unlock(&dentry->d_lock);
-                       cpu_relax();
-               }
-               return dentry; /* try again with same dentry */
-       }
        if (!IS_ROOT(dentry))
                parent = dentry->d_parent;
-       if (parent && !spin_trylock(&parent->d_lock)) {
-               if (inode)
-                       spin_unlock(&inode->i_lock);
-               goto relock;
-       }
 
        /*
         * The dentry is now unrecoverably dead to the world.
@@ -520,9 +490,72 @@ relock:
                can_free = false;
        }
        spin_unlock(&dentry->d_lock);
-out:
        if (likely(can_free))
                dentry_free(dentry);
+}
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static struct dentry *dentry_kill(struct dentry *dentry)
+       __releases(dentry->d_lock)
+{
+       struct inode *inode = dentry->d_inode;
+       struct dentry *parent = NULL;
+
+       if (inode && unlikely(!spin_trylock(&inode->i_lock)))
+               goto failed;
+
+       if (!IS_ROOT(dentry)) {
+               parent = dentry->d_parent;
+               if (unlikely(!spin_trylock(&parent->d_lock))) {
+                       if (inode)
+                               spin_unlock(&inode->i_lock);
+                       goto failed;
+               }
+       }
+
+       __dentry_kill(dentry);
+       return parent;
+
+failed:
+       spin_unlock(&dentry->d_lock);
+       cpu_relax();
+       return dentry; /* try again with same dentry */
+}
+
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+       struct dentry *parent = dentry->d_parent;
+       if (IS_ROOT(dentry))
+               return NULL;
+       if (likely(spin_trylock(&parent->d_lock)))
+               return parent;
+       spin_unlock(&dentry->d_lock);
+       rcu_read_lock();
+again:
+       parent = ACCESS_ONCE(dentry->d_parent);
+       spin_lock(&parent->d_lock);
+       /*
+        * We can't blindly lock dentry until we are sure
+        * that we won't violate the locking order.
+        * Any changes of dentry->d_parent must have
+        * been done with parent->d_lock held, so
+        * spin_lock() above is enough of a barrier
+        * for checking if it's still our child.
+        */
+       if (unlikely(parent != dentry->d_parent)) {
+               spin_unlock(&parent->d_lock);
+               goto again;
+       }
+       rcu_read_unlock();
+       if (parent != dentry)
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+       else
+               parent = NULL;
        return parent;
 }
 
@@ -579,7 +612,7 @@ repeat:
        return;
 
 kill_it:
-       dentry = dentry_kill(dentry, 1);
+       dentry = dentry_kill(dentry);
        if (dentry)
                goto repeat;
 }
@@ -797,8 +830,11 @@ static void shrink_dentry_list(struct list_head *list)
        struct dentry *dentry, *parent;
 
        while (!list_empty(list)) {
+               struct inode *inode;
                dentry = list_entry(list->prev, struct dentry, d_lru);
                spin_lock(&dentry->d_lock);
+               parent = lock_parent(dentry);
+
                /*
                 * The dispose list is isolated and dentries are not accounted
                 * to the LRU here, so we can simply remove it from the list
@@ -812,26 +848,33 @@ static void shrink_dentry_list(struct list_head *list)
                 */
                if ((int)dentry->d_lockref.count > 0) {
                        spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
                        continue;
                }
 
-               parent = dentry_kill(dentry, 0);
-               /*
-                * If dentry_kill returns NULL, we have nothing more to do.
-                */
-               if (!parent)
+               if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+                       bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
+                       spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
+                       if (can_free)
+                               dentry_free(dentry);
                        continue;
+               }
 
-               if (unlikely(parent == dentry)) {
-                       /*
-                        * trylocks have failed and d_lock has been held the
-                        * whole time, so it could not have been added to any
-                        * other lists. Just add it back to the shrink list.
-                        */
+               inode = dentry->d_inode;
+               if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
                        d_shrink_add(dentry, list);
                        spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
                        continue;
                }
+
+               __dentry_kill(dentry);
+
                /*
                 * We need to prune ancestors too. This is necessary to prevent
                 * quadratic behavior of shrink_dcache_parent(), but is also
@@ -839,8 +882,26 @@ static void shrink_dentry_list(struct list_head *list)
                 * fragmentation.
                 */
                dentry = parent;
-               while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-                       dentry = dentry_kill(dentry, 1);
+               while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
+                       parent = lock_parent(dentry);
+                       if (dentry->d_lockref.count != 1) {
+                               dentry->d_lockref.count--;
+                               spin_unlock(&dentry->d_lock);
+                               if (parent)
+                                       spin_unlock(&parent->d_lock);
+                               break;
+                       }
+                       inode = dentry->d_inode;        /* can't be NULL */
+                       if (unlikely(!spin_trylock(&inode->i_lock))) {
+                               spin_unlock(&dentry->d_lock);
+                               if (parent)
+                                       spin_unlock(&parent->d_lock);
+                               cpu_relax();
+                               continue;
+                       }
+                       __dentry_kill(dentry);
+                       dentry = parent;
+               }
        }
 }
 
index 9bc07d2..e246954 100644 (file)
@@ -1537,7 +1537,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
-       ssize_t count = 0;
+       ssize_t count;
 
        pipe = get_pipe_info(file);
        if (!pipe)
@@ -1546,8 +1546,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        ret = rw_copy_check_uvector(READ, uiov, nr_segs,
                                    ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret <= 0)
-               return ret;
+               goto out;
 
+       count = ret;
        iov_iter_init(&iter, iov, nr_segs, count, 0);
 
        sd.len = 0;
@@ -1560,6 +1561,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
        pipe_unlock(pipe);
 
+out:
        if (iov != iovstack)
                kfree(iov);
 
index d504613..ea6428b 100644 (file)
@@ -96,7 +96,12 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle);
 /* Arch-defined function to add a bus to the system */
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root);
+
+#ifdef CONFIG_X86
 void pci_acpi_crs_quirks(void);
+#else
+static inline void pci_acpi_crs_quirks(void) { }
+#endif
 
 /* --------------------------------------------------------------------------
                                     Processor
index 7a8f2cd..0e25690 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/list.h>
 #include <linux/mod_devicetable.h>
+#include <linux/dynamic_debug.h>
 
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
@@ -589,6 +590,14 @@ static inline __printf(3, 4) void
 acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
 #endif /* !CONFIG_ACPI */
 
+#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
+__printf(3, 4)
+void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
+#else
+#define __acpi_handle_debug(descriptor, handle, fmt, ...)              \
+       acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
+#endif
+
 /*
  * acpi_handle_<level>: Print message with ACPI prefix and object path
  *
@@ -610,11 +619,19 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
 #define acpi_handle_info(handle, fmt, ...)                             \
        acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__)
 
-/* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(DEBUG)
 #define acpi_handle_debug(handle, fmt, ...)                            \
        acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
 #else
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define acpi_handle_debug(handle, fmt, ...)                            \
+do {                                                                   \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))          \
+               __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt),   \
+                               ##__VA_ARGS__);                         \
+} while (0)
+#else
 #define acpi_handle_debug(handle, fmt, ...)                            \
 ({                                                                     \
        if (0)                                                          \
@@ -622,5 +639,6 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
        0;                                                              \
 })
 #endif
+#endif
 
 #endif /*_LINUX_ACPI_H*/
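
With the dynamic-debug branch above, every acpi_handle_debug() site compiles to a dormant callsite that can be switched on at runtime instead of needing DEBUG at build time. A hypothetical call site:

    /* Emits only when this callsite is enabled through dynamic debug
     * (or unconditionally in a DEBUG build). */
    static void report_power_state(acpi_handle handle, int state)
    {
            acpi_handle_debug(handle, "power state now D%d\n", state);
    }

It would then be enabled through the usual control file, e.g. echo 'module mydriver +p' > /sys/kernel/debug/dynamic_debug/control.
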
index 63b5eff..fdd7e1b 100644 (file)
@@ -47,6 +47,7 @@ struct amba_driver {
 enum amba_vendor {
        AMBA_VENDOR_ARM = 0x41,
        AMBA_VENDOR_ST = 0x80,
+       AMBA_VENDOR_QCOM = 0x51,
 };
 
 extern struct bus_type amba_bustype;
index 41a13e7..7944cdc 100644 (file)
@@ -10,7 +10,7 @@
 
 struct dma_chan;
 
-#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
 bool omap_dma_filter_fn(struct dma_chan *, void *);
 #else
 static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
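
The tightened guard reads: expose the real omap_dma_filter_fn() when the driver is built in (CONFIG_DMA_OMAP), or when it is modular and the including file is itself being compiled for a module (CONFIG_DMA_OMAP_MODULE && MODULE). Built-in code that includes this header while the driver is modular now gets the inline stub, since the real symbol would not be reachable at link time.
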
index 1b1efdd..4c31a36 100644 (file)
@@ -357,7 +357,7 @@ enum {
 #define AUDIT_ARCH_MIPS64N32   (EM_MIPS|__AUDIT_ARCH_64BIT|\
                                 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_MIPSEL64    (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
-#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\
+#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
                                 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_OPENRISC    (EM_OPENRISC)
 #define AUDIT_ARCH_PARISC      (EM_PARISC)
index a9e710e..247979a 100644 (file)
@@ -726,10 +726,12 @@ void set_cpu_present(unsigned int cpu, bool present)
 
 void set_cpu_online(unsigned int cpu, bool online)
 {
-       if (online)
+       if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
-       else
+               cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+       } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+       }
 }
 
 void set_cpu_active(unsigned int cpu, bool active)
index 5f58927..81dbe77 100644 (file)
@@ -745,7 +745,8 @@ void exit_pi_state_list(struct task_struct *curr)
 
 static int
 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
-               union futex_key *key, struct futex_pi_state **ps)
+               union futex_key *key, struct futex_pi_state **ps,
+               struct task_struct *task)
 {
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
@@ -786,6 +787,16 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                                        return -EINVAL;
                        }
 
+                       /*
+                        * Protect against a corrupted uval. If uval
+                        * is 0x80000000 then pid is 0 and the waiter
+                        * bit is set. So the deadlock check in the
+                        * calling code has failed and we did not fall
+                        * into the check above due to !pid.
+                        */
+                       if (task && pi_state->owner == task)
+                               return -EDEADLK;
+
                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;
 
@@ -803,6 +814,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        if (!p)
                return -ESRCH;
 
+       if (!p->mm) {
+               put_task_struct(p);
+               return -EPERM;
+       }
+
        /*
         * We need to look at the task state flags to figure out,
         * whether the task is exiting. To protect against the do_exit
@@ -935,7 +951,7 @@ retry:
         * We dont have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
-       ret = lookup_pi_state(uval, hb, key, ps);
+       ret = lookup_pi_state(uval, hb, key, ps, task);
 
        if (unlikely(ret)) {
                switch (ret) {
@@ -1347,7 +1363,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  *
  * Return:
  *  0 - failed to acquire the lock atomically;
- *  1 - acquired the lock;
+ * >0 - acquired the lock, return value is vpid of the top_waiter
  * <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1358,7 +1374,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 {
        struct futex_q *top_waiter = NULL;
        u32 curval;
-       int ret;
+       int ret, vpid;
 
        if (get_futex_value_locked(&curval, pifutex))
                return -EFAULT;
@@ -1386,11 +1402,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
         * the contended case or if set_waiters is 1.  The pi_state is returned
         * in ps in contended cases.
         */
+       vpid = task_pid_vnr(top_waiter->task);
        ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                                   set_waiters);
-       if (ret == 1)
+       if (ret == 1) {
                requeue_pi_wake_futex(top_waiter, key2, hb2);
-
+               return vpid;
+       }
        return ret;
 }
 
@@ -1421,7 +1439,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
        struct futex_pi_state *pi_state = NULL;
        struct futex_hash_bucket *hb1, *hb2;
        struct futex_q *this, *next;
-       u32 curval2;
 
        if (requeue_pi) {
                /*
@@ -1509,16 +1526,25 @@ retry_private:
                 * At this point the top_waiter has either taken uaddr2 or is
                 * waiting on it.  If the former, then the pi_state will not
                 * exist yet, look it up one more time to ensure we have a
-                * reference to it.
+                * reference to it. If the lock was taken, ret contains the
+                * vpid of the top waiter task.
                 */
-               if (ret == 1) {
+               if (ret > 0) {
                        WARN_ON(pi_state);
                        drop_count++;
                        task_count++;
-                       ret = get_futex_value_locked(&curval2, uaddr2);
-                       if (!ret)
-                               ret = lookup_pi_state(curval2, hb2, &key2,
-                                                     &pi_state);
+                       /*
+                        * If we acquired the lock, then the user
+                        * space value of uaddr2 should be vpid. It
+                        * cannot be changed by the top waiter as it
+                        * is blocked on hb2 lock if it tries to do
+                        * so. If something fiddled with it behind our
+                        * back the pi state lookup might unearth
+                        * it. So we rather use the known value than
+                        * rereading and handing potential crap to
+                        * lookup_pi_state.
+                        */
+                       ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
                }
 
                switch (ret) {
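
The requeue change encodes a general rule for userspace-shared state: once a value has been validated — here the vpid that the atomic trylock itself established in uaddr2 — pass that value on rather than rereading memory another task may have scribbled over in the meantime. The rule in miniature, names invented:

    /* Validate a shared value once, then use only the validated copy. */
    static int do_work(int v)
    {
            return v * 2;
    }

    int apply_update(const volatile int *shared)
    {
            int snap = *shared;     /* one read ...                  */

            if (snap < 0)
                    return -1;      /* ... one validation ...        */
            return do_work(snap);   /* ... and no rereads afterwards */
    }
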
index c8380ad..28c5706 100644 (file)
@@ -1683,6 +1683,14 @@ int kernel_kexec(void)
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
                migrate_to_reboot_cpu();
+
+               /*
+                * migrate_to_reboot_cpu() disables CPU hotplug assuming that
+                * no further code needs to use CPU hotplug (which is true in
+                * the reboot case). However, the kexec path depends on using
+                * CPU hotplug again; so re-enable it here.
+                */
+               cpu_hotplug_enable();
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }
index aa4dff0..a620d4d 100644 (file)
@@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
-       if (top_waiter && (!task_has_pi_waiters(task) ||
-                          top_waiter != task_top_pi_waiter(task)))
-               goto out_unlock_pi;
+       if (top_waiter) {
+               if (!task_has_pi_waiters(task))
+                       goto out_unlock_pi;
+               /*
+                * If deadlock detection is off, we stop here if we
+                * are not the top pi waiter of the task.
+                */
+               if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+                       goto out_unlock_pi;
+       }
 
        /*
         * When deadlock detection is off then we check, if further
@@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                goto retry;
        }
 
-       /* Deadlock detection */
+       /*
+        * Deadlock detection. If the lock is the same as the original
+        * lock which caused us to walk the lock chain or if the
+        * current lock is owned by the task which initiated the chain
+        * walk, we detected a deadlock.
+        */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
@@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        unsigned long flags;
        int chain_walk = 0, res;
 
+       /*
+        * Early deadlock detection. We really don't want the task to
+        * enqueue on itself just to untangle the mess later. It's not
+        * only an optimization. We drop the locks, so another waiter
+        * can come in before the chain walk detects the deadlock. So
+        * the other will detect the deadlock and return -EDEADLOCK,
+        * which is wrong, as the other waiter is not in a deadlock
+        * situation.
+        */
+       if (detect_deadlock && owner == task)
+               return -EDEADLK;
+
        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
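
The early check catches a task about to block on a lock it already owns, before any enqueueing can tangle the PI chain. POSIX error-checking mutexes expose the same idea to userspace, returning EDEADLK instead of hanging — a small runnable illustration:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
            pthread_mutexattr_t attr;
            pthread_mutex_t m;

            pthread_mutexattr_init(&attr);
            pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
            pthread_mutex_init(&m, &attr);

            pthread_mutex_lock(&m);
            if (pthread_mutex_lock(&m) == EDEADLK)  /* owner relocking */
                    puts("self-deadlock detected, not hung");
            pthread_mutex_unlock(&m);
            return 0;
    }
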
index 204d3d2..0a72516 100644 (file)
@@ -3195,17 +3195,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr)
  * We ask for the deadline not being zero, and greater or equal
  * than the runtime, as well as the period of being zero or
  * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution (1us); we
- * check sched_runtime only since it is always the smaller one.
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
  */
 static bool
 __checkparam_dl(const struct sched_attr *attr)
 {
-       return attr && attr->sched_deadline != 0 &&
-               (attr->sched_period == 0 ||
-               (s64)(attr->sched_period   - attr->sched_deadline) >= 0) &&
-               (s64)(attr->sched_deadline - attr->sched_runtime ) >= 0  &&
-               attr->sched_runtime >= (2 << (DL_SCALE - 1));
+       /* deadline != 0 */
+       if (attr->sched_deadline == 0)
+               return false;
+
+       /*
+        * Since we truncate DL_SCALE bits, make sure we're at least
+        * that big.
+        */
+       if (attr->sched_runtime < (1ULL << DL_SCALE))
+               return false;
+
+       /*
+        * Since we use the MSB for wrap-around and sign issues, make
+        * sure it's not set (mind that period can be equal to zero).
+        */
+       if (attr->sched_deadline & (1ULL << 63) ||
+           attr->sched_period & (1ULL << 63))
+               return false;
+
+       /* runtime <= deadline <= period (if period != 0) */
+       if ((attr->sched_period != 0 &&
+            attr->sched_period < attr->sched_deadline) ||
+           attr->sched_deadline < attr->sched_runtime)
+               return false;
+
+       return true;
 }
 
 /*
@@ -3658,8 +3681,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
        if (!uattr || pid < 0 || flags)
                return -EINVAL;
 
-       if (sched_copy_attr(uattr, &attr))
-               return -EFAULT;
+       retval = sched_copy_attr(uattr, &attr);
+       if (retval)
+               return retval;
+
+       if (attr.sched_policy < 0)
+               return -EINVAL;
 
        rcu_read_lock();
        retval = -ESRCH;
@@ -3709,7 +3736,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-       struct sched_param lp;
+       struct sched_param lp = { .sched_priority = 0 };
        struct task_struct *p;
        int retval;
 
@@ -3726,11 +3753,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        if (retval)
                goto out_unlock;
 
-       if (task_has_dl_policy(p)) {
-               retval = -EINVAL;
-               goto out_unlock;
-       }
-       lp.sched_priority = p->rt_priority;
+       if (task_has_rt_policy(p))
+               lp.sched_priority = p->rt_priority;
        rcu_read_unlock();
 
        /*
@@ -5052,7 +5076,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_STARTING:
        case CPU_DOWN_FAILED:
                set_cpu_active((long)hcpu, true);
                return NOTIFY_OK;
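
Plugging numbers into the rewritten __checkparam_dl() (DL_SCALE is 10 in this kernel, so the minimum runtime is 1 << 10 = 1024 ns, about 1 us): runtime = 10 ms, deadline = 11 ms, period = 100 ms passes every test; runtime = 500 ns fails the resolution check; and a deadline or period with bit 63 set fails the wrap/sign check before the subtractions could misbehave. The old bound 2 << (DL_SCALE - 1) equals the new 1ULL << DL_SCALE; only the spelling and the previously missing upper-bound checks changed.
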
index ab001b5..bd95963 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include "cpudeadline.h"
 
 static inline int parent(int i)
@@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b)
 {
        int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
 
-       swap(cp->elements[a], cp->elements[b]);
-       swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+       swap(cp->elements[a].cpu, cp->elements[b].cpu);
+       swap(cp->elements[a].dl, cp->elements[b].dl);
+
+       swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
 }
 
 static void cpudl_heapify(struct cpudl *cp, int idx)
@@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
        WARN_ON(!cpu_present(cpu));
 
        raw_spin_lock_irqsave(&cp->lock, flags);
-       old_idx = cp->cpu_to_idx[cpu];
+       old_idx = cp->elements[cpu].idx;
        if (!is_valid) {
                /* remove item */
                if (old_idx == IDX_INVALID) {
@@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
                cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
                cp->elements[old_idx].cpu = new_cpu;
                cp->size--;
-               cp->cpu_to_idx[new_cpu] = old_idx;
-               cp->cpu_to_idx[cpu] = IDX_INVALID;
+               cp->elements[new_cpu].idx = old_idx;
+               cp->elements[cpu].idx = IDX_INVALID;
                while (old_idx > 0 && dl_time_before(
                                cp->elements[parent(old_idx)].dl,
                                cp->elements[old_idx].dl)) {
@@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
                cp->size++;
                cp->elements[cp->size - 1].dl = 0;
                cp->elements[cp->size - 1].cpu = cpu;
-               cp->cpu_to_idx[cpu] = cp->size - 1;
+               cp->elements[cpu].idx = cp->size - 1;
                cpudl_change_key(cp, cp->size - 1, dl);
                cpumask_clear_cpu(cpu, cp->free_cpus);
        } else {
@@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp)
        memset(cp, 0, sizeof(*cp));
        raw_spin_lock_init(&cp->lock);
        cp->size = 0;
-       for (i = 0; i < NR_CPUS; i++)
-               cp->cpu_to_idx[i] = IDX_INVALID;
-       if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+
+       cp->elements = kcalloc(nr_cpu_ids,
+                              sizeof(struct cpudl_item),
+                              GFP_KERNEL);
+       if (!cp->elements)
+               return -ENOMEM;
+
+       if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+               kfree(cp->elements);
                return -ENOMEM;
+       }
+
+       for_each_possible_cpu(i)
+               cp->elements[i].idx = IDX_INVALID;
+
        cpumask_setall(cp->free_cpus);
 
        return 0;
@@ -211,4 +225,5 @@ int cpudl_init(struct cpudl *cp)
 void cpudl_cleanup(struct cpudl *cp)
 {
        free_cpumask_var(cp->free_cpus);
+       kfree(cp->elements);
 }
index a202789..538c979 100644 (file)
@@ -5,17 +5,17 @@
 
 #define IDX_INVALID     -1
 
-struct array_item {
+struct cpudl_item {
        u64 dl;
        int cpu;
+       int idx;
 };
 
 struct cpudl {
        raw_spinlock_t lock;
        int size;
-       int cpu_to_idx[NR_CPUS];
-       struct array_item elements[NR_CPUS];
        cpumask_var_t free_cpus;
+       struct cpudl_item *elements;
 };
 
 
index 3031bac..8834243 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/slab.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -218,8 +219,13 @@ int cpupri_init(struct cpupri *cp)
                        goto cleanup;
        }
 
+       cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
+       if (!cp->cpu_to_pri)
+               goto cleanup;
+
        for_each_possible_cpu(i)
                cp->cpu_to_pri[i] = CPUPRI_INVALID;
+
        return 0;
 
 cleanup:
@@ -236,6 +242,7 @@ void cpupri_cleanup(struct cpupri *cp)
 {
        int i;
 
+       kfree(cp->cpu_to_pri);
        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
index f6d7561..6b03334 100644 (file)
@@ -17,7 +17,7 @@ struct cpupri_vec {
 
 struct cpupri {
        struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
-       int               cpu_to_pri[NR_CPUS];
+       int *cpu_to_pri;
 };
 
 #ifdef CONFIG_SMP
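
Both cpudl and cpupri trade NR_CPUS-sized static arrays, which pay for the compiled-in worst case (commonly thousands of entries), for nr_cpu_ids-sized allocations made once at init. The shape as a standalone sketch, names invented:

    #include <stdlib.h>

    struct item {
            unsigned long long dl;
            int cpu;
            int idx;
    };

    struct heap {
            struct item *elements;  /* nr_cpu_ids entries, not NR_CPUS */
            int size;
    };

    static int heap_init(struct heap *h, int nr_cpu_ids)
    {
            h->elements = calloc(nr_cpu_ids, sizeof(*h->elements));
            if (!h->elements)
                    return -1;              /* kernel: -ENOMEM */
            for (int i = 0; i < nr_cpu_ids; i++)
                    h->elements[i].idx = -1;        /* IDX_INVALID */
            h->size = 0;
            return 0;
    }

    static void heap_cleanup(struct heap *h)
    {
            free(h->elements);      /* mirrors cpudl_cleanup()/cpupri_cleanup() */
    }
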
index 94d0873..76cbb9e 100644 (file)
@@ -182,6 +182,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
        struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+       struct snd_pcm_runtime *runtime = substream->runtime;
        int ret;
 
        switch (cmd) {
@@ -196,6 +197,11 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                dmaengine_resume(prtd->dma_chan);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+                       dmaengine_pause(prtd->dma_chan);
+               else
+                       dmaengine_terminate_all(prtd->dma_chan);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                dmaengine_pause(prtd->dma_chan);
                break;
index 2c54629..6cc3cf2 100644 (file)
@@ -1743,6 +1743,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* 9 Series */
+       { PCI_DEVICE(0x8086, 0x8ca0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Wellsburg */
        { PCI_DEVICE(0x8086, 0x8d20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },