Merge branches 'devel-stable', 'fixes' and 'mmci' into for-linus
author	Russell King <rmk+kernel@arm.linux.org.uk>
Sun, 3 Mar 2013 00:32:50 +0000 (00:32 +0000)
committer	Russell King <rmk+kernel@arm.linux.org.uk>
Sun, 3 Mar 2013 00:32:50 +0000 (00:32 +0000)
63 files changed:
Documentation/devicetree/bindings/arm/psci.txt [new file with mode: 0644]
Documentation/virtual/kvm/api.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/include/asm/assembler.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/cti.h
arch/arm/include/asm/hardware/coresight.h
arch/arm/include/asm/hw_breakpoint.h
arch/arm/include/asm/idmap.h
arch/arm/include/asm/kvm_arm.h [new file with mode: 0644]
arch/arm/include/asm/kvm_asm.h [new file with mode: 0644]
arch/arm/include/asm/kvm_coproc.h [new file with mode: 0644]
arch/arm/include/asm/kvm_emulate.h [new file with mode: 0644]
arch/arm/include/asm/kvm_host.h [new file with mode: 0644]
arch/arm/include/asm/kvm_mmio.h [new file with mode: 0644]
arch/arm/include/asm/kvm_mmu.h [new file with mode: 0644]
arch/arm/include/asm/kvm_psci.h [new file with mode: 0644]
arch/arm/include/asm/opcodes-sec.h [new file with mode: 0644]
arch/arm/include/asm/opcodes.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/psci.h [new file with mode: 0644]
arch/arm/include/asm/virt.h
arch/arm/include/uapi/asm/kvm.h [new file with mode: 0644]
arch/arm/kernel/Makefile
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
arch/arm/kernel/psci.c [new file with mode: 0644]
arch/arm/kernel/smp.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/kvm/Kconfig [new file with mode: 0644]
arch/arm/kvm/Makefile [new file with mode: 0644]
arch/arm/kvm/arm.c [new file with mode: 0644]
arch/arm/kvm/coproc.c [new file with mode: 0644]
arch/arm/kvm/coproc.h [new file with mode: 0644]
arch/arm/kvm/coproc_a15.c [new file with mode: 0644]
arch/arm/kvm/emulate.c [new file with mode: 0644]
arch/arm/kvm/guest.c [new file with mode: 0644]
arch/arm/kvm/init.S [new file with mode: 0644]
arch/arm/kvm/interrupts.S [new file with mode: 0644]
arch/arm/kvm/interrupts_head.S [new file with mode: 0644]
arch/arm/kvm/mmio.c [new file with mode: 0644]
arch/arm/kvm/mmu.c [new file with mode: 0644]
arch/arm/kvm/psci.c [new file with mode: 0644]
arch/arm/kvm/reset.c [new file with mode: 0644]
arch/arm/kvm/trace.h [new file with mode: 0644]
arch/arm/mm/Kconfig
arch/arm/mm/idmap.c
arch/arm/mm/mmu.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
include/linux/clockchips.h
include/uapi/linux/kvm.h
kernel/time/Kconfig
kernel/time/tick-broadcast.c

diff --git a/Documentation/devicetree/bindings/arm/psci.txt b/Documentation/devicetree/bindings/arm/psci.txt
new file mode 100644 (file)
index 0000000..433afe9
--- /dev/null
@@ -0,0 +1,55 @@
+* Power State Coordination Interface (PSCI)
+
+Firmware implementing the PSCI functions described in ARM document number
+ARM DEN 0022A ("Power State Coordination Interface System Software on ARM
+processors") can be used by Linux to initiate various CPU-centric power
+operations.
+
+Issue A of the specification describes functions for CPU suspend, hotplug
+and migration of secure software.
+
+Functions are invoked by trapping to the privilege level of the PSCI
+firmware (specified as part of the binding below) and passing arguments
+in a manner similar to that specified by AAPCS:
+
+        r0             => 32-bit Function ID / return value
+       {r1 - r3}       => Parameters
+
+Note that the immediate field of the trapping instruction must be set
+to #0.
+
+
+Main node required properties:
+
+ - compatible    : Must be "arm,psci"
+
+ - method        : The method of calling the PSCI firmware. Permitted
+                   values are:
+
+                   "smc" : SMC #0, with the register assignments specified
+                          in this binding.
+
+                   "hvc" : HVC #0, with the register assignments specified
+                          in this binding.
+
+Main node optional properties:
+
+ - cpu_suspend   : Function ID for CPU_SUSPEND operation
+
+ - cpu_off       : Function ID for CPU_OFF operation
+
+ - cpu_on        : Function ID for CPU_ON operation
+
+ - migrate       : Function ID for MIGRATE operation
+
+
+Example:
+
+       psci {
+               compatible      = "arm,psci";
+               method          = "smc";
+               cpu_suspend     = <0x95c10000>;
+               cpu_off         = <0x95c10001>;
+               cpu_on          = <0x95c10002>;
+               migrate         = <0x95c10003>;
+       };
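As an editorial illustration of the calling convention described above (r0 = function ID, r1-r3 = parameters, immediate #0), here is a minimal hedged sketch of an SMC-based PSCI invocation. The helper name and clobber list are assumptions for illustration only, not the implementation this merge adds in arch/arm/kernel/psci.c:

	/* r0 = 32-bit function ID, r1-r3 = parameters, result comes back in r0 */
	static u32 psci_smc_call(u32 fn, u32 arg0, u32 arg1, u32 arg2)
	{
		register u32 r0 asm("r0") = fn;
		register u32 r1 asm("r1") = arg0;
		register u32 r2 asm("r2") = arg1;
		register u32 r3 asm("r3") = arg2;

		asm volatile(
			".arch_extension sec\n"
			"	smc	#0\n"		/* immediate field must be #0 */
			: "+r" (r0)
			: "r" (r1), "r" (r2), "r" (r3)
			: "memory");

		return r0;
	}

The "hvc" method would use HVC #0 instead of SMC #0, with the same register assignments.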
index a4df553..c25439a 100644 (file)
@@ -293,7 +293,7 @@ kvm_run' (see below).
 4.11 KVM_GET_REGS
 
 Capability: basic
-Architectures: all
+Architectures: all except ARM
 Type: vcpu ioctl
 Parameters: struct kvm_regs (out)
 Returns: 0 on success, -1 on error
@@ -314,7 +314,7 @@ struct kvm_regs {
 4.12 KVM_SET_REGS
 
 Capability: basic
-Architectures: all
+Architectures: all except ARM
 Type: vcpu ioctl
 Parameters: struct kvm_regs (in)
 Returns: 0 on success, -1 on error
@@ -600,7 +600,7 @@ struct kvm_fpu {
 4.24 KVM_CREATE_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86, ia64, ARM
 Type: vm ioctl
 Parameters: none
 Returns: 0 on success, -1 on error
@@ -608,21 +608,39 @@ Returns: 0 on success, -1 on error
 Creates an interrupt controller model in the kernel.  On x86, creates a virtual
 ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC.  On ia64, a IOSAPIC is created.
+only go to the IOAPIC.  On ia64, a IOSAPIC is created. On ARM, a GIC is
+created.
 
 
 4.25 KVM_IRQ_LINE
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86, ia64, arm
 Type: vm ioctl
 Parameters: struct kvm_irq_level
 Returns: 0 on success, -1 on error
 
 Sets the level of a GSI input to the interrupt controller model in the kernel.
-Requires that an interrupt controller model has been previously created with
-KVM_CREATE_IRQCHIP.  Note that edge-triggered interrupts require the level
-to be set to 1 and then back to 0.
+On some architectures it is required that an interrupt controller model has
+been previously created with KVM_CREATE_IRQCHIP.  Note that edge-triggered
+interrupts require the level to be set to 1 and then back to 0.
+
+ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip
+(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for
+specific cpus.  The irq field is interpreted like this:
+
+  bits:  | 31 ... 24 | 23  ... 16 | 15    ...    0 |
+  field: | irq_type  | vcpu_index |     irq_id     |
+
+The irq_type field has the following values:
+- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
+- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
+               (the vcpu_index field is ignored)
+- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
+
+(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs)
+
+In both cases, level is used to raise/lower the line.
 
 struct kvm_irq_level {
        union {
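For illustration, a hedged userspace sketch of packing the ARM irq field described above, using the KVM_ARM_IRQ_* constants this merge adds in arch/arm/include/uapi/asm/kvm.h. The helper name and the choice of PPI 30 on vcpu 0 are assumptions, not part of the merge:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Pulse PPI 30 on vcpu 0 through the in-kernel GIC (edge: 1 then 0). */
	static int pulse_ppi30(int vm_fd)
	{
		struct kvm_irq_level irq_level = {
			.irq   = (KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT) |
				 (0 << KVM_ARM_IRQ_VCPU_SHIFT) |	/* vcpu_index */
				 30,					/* irq_id (PPI) */
			.level = 1,
		};

		if (ioctl(vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
			return -1;
		irq_level.level = 0;
		return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
	}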
@@ -1775,6 +1793,27 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_VPA_DTL   | 128
   PPC   | KVM_REG_PPC_EPCR     | 32
 
+ARM registers are mapped using the lower 32 bits.  The upper 16 of that
+is the register group type, or coprocessor number:
+
+ARM core registers have the following id bit patterns:
+  0x4020 0000 0010 <index into the kvm_regs struct:16>
+
+ARM 32-bit CP15 registers have the following id bit patterns:
+  0x4020 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
+
+ARM 64-bit CP15 registers have the following id bit patterns:
+  0x4030 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
+
+ARM CCSIDR registers are demultiplexed by CSSELR value:
+  0x4020 0000 0011 00 <csselr:8>
+
+ARM 32-bit VFP control registers have the following id bit patterns:
+  0x4020 0000 0012 1 <regno:12>
+
+ARM 64-bit FP registers have the following id bit patterns:
+  0x4030 0000 0012 0 <regno:12>
+
 4.69 KVM_GET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
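A hedged sketch of building one of the ARM register ids listed above and reading it with KVM_GET_ONE_REG. The id is assembled from KVM_REG_ARM and KVM_REG_SIZE_U32 in <linux/kvm.h> plus the KVM_REG_ARM_CORE helpers this merge adds; the function name is illustrative only:

	#include <stddef.h>		/* offsetof(), used by KVM_REG_ARM_CORE_REG() */
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the guest's usr-mode PC as a 32-bit ARM core register. */
	static int read_guest_pc(int vcpu_fd, __u32 *pc)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
				KVM_REG_ARM_CORE_REG(usr_regs.ARM_pc),
			.addr = (__u64)(unsigned long)pc,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}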
@@ -2127,6 +2166,50 @@ written, then `n_invalid' invalid entries, invalidating any previously
 valid entries found.
 
 
+4.77 KVM_ARM_VCPU_INIT
+
+Capability: basic
+Architectures: arm
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_init (in)
+Returns: 0 on success; -1 on error
+Errors:
+  EINVAL:    the target is unknown, or the combination of features is invalid.
+  ENOENT:    a features bit specified is unknown.
+
+This tells KVM what type of CPU to present to the guest, and what
+optional features it should have.  This will cause a reset of the cpu
+registers to their initial values.  If this is not called, KVM_RUN will
+return ENOEXEC for that vcpu.
+
+Note that because some registers reflect machine topology, all vcpus
+should be created before this ioctl is invoked.
+
+Possible features:
+       - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
+         Depends on KVM_CAP_ARM_PSCI.
+
+
+4.78 KVM_GET_REG_LIST
+
+Capability: basic
+Architectures: arm
+Type: vcpu ioctl
+Parameters: struct kvm_reg_list (in/out)
+Returns: 0 on success; -1 on error
+Errors:
+  E2BIG:     the reg index list is too big to fit in the array specified by
+             the user (the number required will be written into n).
+
+struct kvm_reg_list {
+       __u64 n; /* number of registers in reg[] */
+       __u64 reg[0];
+};
+
+This ioctl returns the guest registers that are supported for the
+KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
+
+
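A hedged userspace sketch tying together the two new ARM ioctls documented above (4.77 KVM_ARM_VCPU_INIT and 4.78 KVM_GET_REG_LIST). The function name, the Cortex-A15 target choice and the error handling are assumptions, not part of this merge:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Initialise a Cortex-A15 vcpu, then enumerate its ONE_REG ids. */
	static struct kvm_reg_list *init_and_list_regs(int vcpu_fd)
	{
		struct kvm_vcpu_init init = { .target = KVM_ARM_TARGET_CORTEX_A15 };
		struct kvm_reg_list probe = { .n = 0 };
		struct kvm_reg_list *list;

		if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
			return NULL;

		/* First call fails with E2BIG but writes the required count to n. */
		ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

		list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
		if (!list)
			return NULL;
		list->n = probe.n;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
			free(list);
			return NULL;
		}
		return list;	/* list->reg[] now holds ids for KVM_GET_ONE_REG */
	}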
 5. The kvm_run structure
 ------------------------
 
index 35a56bc..cfceb75 100644 (file)
@@ -4481,6 +4481,15 @@ F:       arch/s390/include/asm/kvm*
 F:     arch/s390/kvm/
 F:     drivers/s390/kvm/
 
+KERNEL VIRTUAL MACHINE (KVM) FOR ARM
+M:     Christoffer Dall <cdall@cs.columbia.edu>
+L:     kvmarm@lists.cs.columbia.edu
+W:     http://systems.cs.columbia.edu/projects/kvm-arm
+S:     Maintained
+F:     arch/arm/include/uapi/asm/kvm*
+F:     arch/arm/include/asm/kvm*
+F:     arch/arm/kvm/
+
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
 W:     http://kernel.org/pub/linux/utils/kernel/kexec/
index 67874b8..56e1b06 100644 (file)
@@ -4,6 +4,7 @@ config ARM
        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_HAVE_CUSTOM_GPIO_H
+       select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
        select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -1620,6 +1621,16 @@ config HOTPLUG_CPU
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
 
+config ARM_PSCI
+       bool "Support for the ARM Power State Coordination Interface (PSCI)"
+       depends on CPU_V7
+       help
+         Say Y here if you want Linux to communicate with system firmware
+         implementing the PSCI specification for CPU-centric power
+         management operations described in ARM document number ARM DEN
+         0022A ("Power State Coordination Interface System Software on
+         ARM processors").
+
 config LOCAL_TIMERS
        bool "Use local timer interrupts"
        depends on SMP
@@ -2322,3 +2333,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/arm/kvm/Kconfig"
index 30c443c..4bcd2d6 100644 (file)
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE)    += arch/arm/nwfpe/
 core-$(CONFIG_FPE_FASTFPE)     += $(FASTFPE_OBJ)
 core-$(CONFIG_VFP)             += arch/arm/vfp/
 core-$(CONFIG_XEN)             += arch/arm/xen/
+core-$(CONFIG_KVM_ARM_HOST)    += arch/arm/kvm/
 
 # If we have a machine-specific directory, then include it in the build.
 core-y                         += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
index eb87200..05ee9ee 100644 (file)
  *
  * This macro is intended for forcing the CPU into SVC mode at boot time.
  * you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
  */
 .macro safe_svcmode_maskall reg:req
 #if __LINUX_ARM_ARCH__ >= 6
        mrs     \reg , cpsr
-       mov     lr , \reg
-       and     lr , lr , #MODE_MASK
-       cmp     lr , #HYP_MODE
-       orr     \reg , \reg , #PSR_I_BIT | PSR_F_BIT
+       eor     \reg, \reg, #HYP_MODE
+       tst     \reg, #MODE_MASK
        bic     \reg , \reg , #MODE_MASK
-       orr     \reg , \reg , #SVC_MODE
+       orr     \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
 THUMB( orr     \reg , \reg , #PSR_T_BIT        )
        bne     1f
        orr     \reg, \reg, #PSR_A_BIT
index a59dcb5..ad41ec2 100644 (file)
@@ -64,6 +64,24 @@ extern unsigned int processor_id;
 #define read_cpuid_ext(reg) 0
 #endif
 
+#define ARM_CPU_IMP_ARM                        0x41
+#define ARM_CPU_IMP_INTEL              0x69
+
+#define ARM_CPU_PART_ARM1136           0xB360
+#define ARM_CPU_PART_ARM1156           0xB560
+#define ARM_CPU_PART_ARM1176           0xB760
+#define ARM_CPU_PART_ARM11MPCORE       0xB020
+#define ARM_CPU_PART_CORTEX_A8         0xC080
+#define ARM_CPU_PART_CORTEX_A9         0xC090
+#define ARM_CPU_PART_CORTEX_A5         0xC050
+#define ARM_CPU_PART_CORTEX_A15                0xC0F0
+#define ARM_CPU_PART_CORTEX_A7         0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK       0xe000
+#define ARM_CPU_XSCALE_ARCH_V1         0x2000
+#define ARM_CPU_XSCALE_ARCH_V2         0x4000
+#define ARM_CPU_XSCALE_ARCH_V3         0x6000
+
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
        return read_cpuid(CPUID_ID);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+       return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+       return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+       return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 {
        return read_cpuid(CPUID_CACHETYPE);
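For illustration, a brief hedged sketch combining the helpers and ARM_CPU_* constants added above to identify a Cortex-A15; the function name is an assumption, not something this merge adds:

	static inline bool cpu_is_cortex_a15(void)
	{
		return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
		       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
	}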
index f2e5cad..2381199 100644 (file)
@@ -2,6 +2,7 @@
 #define __ASMARM_CTI_H
 
 #include       <asm/io.h>
+#include       <asm/hardware/coresight.h>
 
 /* The registers' definition is from section 3.2 of
  * Embedded Cross Trigger Revision: r0p0
 #define                LOCKACCESS              0xFB0
 #define                LOCKSTATUS              0xFB4
 
-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define                LOCKCODE                0xC5ACCE55
-
 /**
  * struct cti - cross trigger interface struct
  * @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-       __raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+       __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
 }
 
 /**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-       __raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+       __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
 }
 #endif
index 7ecd793..0cf7a6b 100644 (file)
@@ -36,7 +36,7 @@
 /* CoreSight Component Registers */
 #define CSCR_CLASS     0xff4
 
-#define UNLOCK_MAGIC   0xc5acce55
+#define CS_LAR_KEY     0xc5acce55
 
 /* ETM control register, "ETM Architecture", 3.3.1 */
 #define ETMR_CTRL              0
 
 #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etm_unlock(t) \
-       do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+       do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
-       do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+       do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
 
 #endif /* __ASM_HARDWARE_CORESIGHT_H */
 
index 01169dd..eef55ea 100644 (file)
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DSCR_HDBGEN                (1 << 14)
 #define ARM_DSCR_MDBGEN                (1 << 15)
 
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0                (1 << 0)
+
 /* opcode2 numbers for the co-processor instructions. */
 #define ARM_OP2_BVR            4
 #define ARM_OP2_BCR            5
index bf863ed..1a66f90 100644 (file)
@@ -8,6 +8,7 @@
 #define __idmap __section(.idmap.text) noinline notrace
 
 extern pgd_t *idmap_pgd;
+extern pgd_t *hyp_pgd;
 
 void setup_mm_for_reboot(void);
 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644 (file)
index 0000000..7c3d813
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ARM_H__
+#define __ARM_KVM_ARM_H__
+
+#include <linux/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE                (1 << 27)
+#define HCR_TVM                (1 << 26)
+#define HCR_TTLB       (1 << 25)
+#define HCR_TPU                (1 << 24)
+#define HCR_TPC                (1 << 23)
+#define HCR_TSW                (1 << 22)
+#define HCR_TAC                (1 << 21)
+#define HCR_TIDCP      (1 << 20)
+#define HCR_TSC                (1 << 19)
+#define HCR_TID3       (1 << 18)
+#define HCR_TID2       (1 << 17)
+#define HCR_TID1       (1 << 16)
+#define HCR_TID0       (1 << 15)
+#define HCR_TWE                (1 << 14)
+#define HCR_TWI                (1 << 13)
+#define HCR_DC         (1 << 12)
+#define HCR_BSU                (3 << 10)
+#define HCR_BSU_IS     (1 << 10)
+#define HCR_FB         (1 << 9)
+#define HCR_VA         (1 << 8)
+#define HCR_VI         (1 << 7)
+#define HCR_VF         (1 << 6)
+#define HCR_AMO                (1 << 5)
+#define HCR_IMO                (1 << 4)
+#define HCR_FMO                (1 << 3)
+#define HCR_PTW                (1 << 2)
+#define HCR_SWIO       (1 << 1)
+#define HCR_VM         1
+
+/*
+ * The bits we set in HCR:
+ * TAC:                Trap ACTLR
+ * TSC:                Trap SMC
+ * TSW:                Trap cache operations by set/way
+ * TWI:                Trap WFI
+ * TIDCP:      Trap L2CTLR/L2ECTLR
+ * BSU_IS:     Upgrade barriers to the inner shareable domain
+ * FB:         Force broadcast of all maintenance operations
+ * AMO:                Override CPSR.A and enable signaling with VA
+ * IMO:                Override CPSR.I and enable signaling with VI
+ * FMO:                Override CPSR.F and enable signaling with VF
+ * SWIO:       Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+                       HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+                       HCR_SWIO | HCR_TIDCP)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE       (1 << 30)
+#define SCTLR_EE       (1 << 25)
+#define SCTLR_V                (1 << 13)
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE      (1 << 30)
+#define HSCTLR_EE      (1 << 25)
+#define HSCTLR_FI      (1 << 21)
+#define HSCTLR_WXN     (1 << 19)
+#define HSCTLR_I       (1 << 12)
+#define HSCTLR_C       (1 << 2)
+#define HSCTLR_A       (1 << 1)
+#define HSCTLR_M       1
+#define HSCTLR_MASK    (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+                        HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE      (1 << 31)
+#define TTBCR_IMP      (1 << 30)
+#define TTBCR_SH1      (3 << 28)
+#define TTBCR_ORGN1    (3 << 26)
+#define TTBCR_IRGN1    (3 << 24)
+#define TTBCR_EPD1     (1 << 23)
+#define TTBCR_A1       (1 << 22)
+#define TTBCR_T1SZ     (3 << 16)
+#define TTBCR_SH0      (3 << 12)
+#define TTBCR_ORGN0    (3 << 10)
+#define TTBCR_IRGN0    (3 << 8)
+#define TTBCR_EPD0     (1 << 7)
+#define TTBCR_T0SZ     3
+#define HTCR_MASK      (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+/* Hyp System Trap Register */
+#define HSTR_T(x)      (1 << x)
+#define HSTR_TTEE      (1 << 16)
+#define HSTR_TJDBX     (1 << 17)
+
+/* Hyp Coprocessor Trap Register */
+#define HCPTR_TCP(x)   (1 << x)
+#define HCPTR_TCP_MASK (0x3fff)
+#define HCPTR_TASE     (1 << 15)
+#define HCPTR_TTA      (1 << 20)
+#define HCPTR_TCPAC    (1 << 31)
+
+/* Hyp Debug Configuration Register bits */
+#define HDCR_TDRA      (1 << 11)
+#define HDCR_TDOSA     (1 << 10)
+#define HDCR_TDA       (1 << 9)
+#define HDCR_TDE       (1 << 8)
+#define HDCR_HPME      (1 << 7)
+#define HDCR_TPM       (1 << 6)
+#define HDCR_TPMCR     (1 << 5)
+#define HDCR_HPMN_MASK (0x1F)
+
+/*
+ * The architecture supports 40-bit IPA as input to the 2nd stage translations
+ * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
+ * space.
+ */
+#define KVM_PHYS_SHIFT (40)
+#define KVM_PHYS_SIZE  (1ULL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1ULL)
+#define PTRS_PER_S2_PGD        (1ULL << (KVM_PHYS_SHIFT - 30))
+#define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define S2_PGD_SIZE    (1 << S2_PGD_ORDER)
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0       (3 << 12)
+#define VTCR_ORGN0     (3 << 10)
+#define VTCR_IRGN0     (3 << 8)
+#define VTCR_SL0       (3 << 6)
+#define VTCR_S         (1 << 4)
+#define VTCR_T0SZ      (0xf)
+#define VTCR_MASK      (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
+                        VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH   (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2     (0 << 6)        /* Starting-level: 2 */
+#define VTCR_SL_L1     (1 << 6)        /* Starting-level: 1 */
+#define KVM_VTCR_SL0   VTCR_SL_L1
+/* stage-2 input address range defined as 2^(32-T0SZ) */
+#define KVM_T0SZ       (32 - KVM_PHYS_SHIFT)
+#define KVM_VTCR_T0SZ  (KVM_T0SZ & VTCR_T0SZ)
+#define KVM_VTCR_S     ((KVM_VTCR_T0SZ << 1) & VTCR_S)
+
+/* Virtualization Translation Table Base Register (VTTBR) bits */
+#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
+#define VTTBR_X                (14 - KVM_T0SZ)
+#else
+#define VTTBR_X                (5 - KVM_T0SZ)
+#endif
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  (48LLU)
+#define VTTBR_VMID_MASK          (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp Syndrome Register (HSR) bits */
+#define HSR_EC_SHIFT   (26)
+#define HSR_EC         (0x3fU << HSR_EC_SHIFT)
+#define HSR_IL         (1U << 25)
+#define HSR_ISS                (HSR_IL - 1)
+#define HSR_ISV_SHIFT  (24)
+#define HSR_ISV                (1U << HSR_ISV_SHIFT)
+#define HSR_SRT_SHIFT  (16)
+#define HSR_SRT_MASK   (0xf << HSR_SRT_SHIFT)
+#define HSR_FSC                (0x3f)
+#define HSR_FSC_TYPE   (0x3c)
+#define HSR_SSE                (1 << 21)
+#define HSR_WNR                (1 << 6)
+#define HSR_CV_SHIFT   (24)
+#define HSR_CV         (1U << HSR_CV_SHIFT)
+#define HSR_COND_SHIFT (20)
+#define HSR_COND       (0xfU << HSR_COND_SHIFT)
+
+#define FSC_FAULT      (0x04)
+#define FSC_PERM       (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK     (~0xf)
+
+#define HSR_EC_UNKNOWN (0x00)
+#define HSR_EC_WFI     (0x01)
+#define HSR_EC_CP15_32 (0x03)
+#define HSR_EC_CP15_64 (0x04)
+#define HSR_EC_CP14_MR (0x05)
+#define HSR_EC_CP14_LS (0x06)
+#define HSR_EC_CP_0_13 (0x07)
+#define HSR_EC_CP10_ID (0x08)
+#define HSR_EC_JAZELLE (0x09)
+#define HSR_EC_BXJ     (0x0A)
+#define HSR_EC_CP14_64 (0x0C)
+#define HSR_EC_SVC_HYP (0x11)
+#define HSR_EC_HVC     (0x12)
+#define HSR_EC_SMC     (0x13)
+#define HSR_EC_IABT    (0x20)
+#define HSR_EC_IABT_HYP        (0x21)
+#define HSR_EC_DABT    (0x24)
+#define HSR_EC_DABT_HYP        (0x25)
+
+#define HSR_HVC_IMM_MASK       ((1UL << 16) - 1)
+
+#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644 (file)
index 0000000..5e06e81
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/* 0 is reserved as an invalid value. */
+#define c0_MPIDR       1       /* MultiProcessor ID Register */
+#define c0_CSSELR      2       /* Cache Size Selection Register */
+#define c1_SCTLR       3       /* System Control Register */
+#define c1_ACTLR       4       /* Auxiliary Control Register */
+#define c1_CPACR       5       /* Coprocessor Access Control */
+#define c2_TTBR0       6       /* Translation Table Base Register 0 */
+#define c2_TTBR0_high  7       /* TTBR0 top 32 bits */
+#define c2_TTBR1       8       /* Translation Table Base Register 1 */
+#define c2_TTBR1_high  9       /* TTBR1 top 32 bits */
+#define c2_TTBCR       10      /* Translation Table Base Control R. */
+#define c3_DACR                11      /* Domain Access Control Register */
+#define c5_DFSR                12      /* Data Fault Status Register */
+#define c5_IFSR                13      /* Instruction Fault Status Register */
+#define c5_ADFSR       14      /* Auxiliary Data Fault Status R */
+#define c5_AIFSR       15      /* Auxiliary Instruction Fault Status R */
+#define c6_DFAR                16      /* Data Fault Address Register */
+#define c6_IFAR                17      /* Instruction Fault Address Register */
+#define c9_L2CTLR      18      /* Cortex A15 L2 Control Register */
+#define c10_PRRR       19      /* Primary Region Remap Register */
+#define c10_NMRR       20      /* Normal Memory Remap Register */
+#define c12_VBAR       21      /* Vector Base Address Register */
+#define c13_CID                22      /* Context ID Register */
+#define c13_TID_URW    23      /* Thread ID, User R/W */
+#define c13_TID_URO    24      /* Thread ID, User R/O */
+#define c13_TID_PRIV   25      /* Thread ID, Privileged */
+#define NR_CP15_REGS   26      /* Number of regs (incl. invalid) */
+
+#define ARM_EXCEPTION_RESET      0
+#define ARM_EXCEPTION_UNDEFINED   1
+#define ARM_EXCEPTION_SOFTWARE    2
+#define ARM_EXCEPTION_PREF_ABORT  3
+#define ARM_EXCEPTION_DATA_ABORT  4
+#define ARM_EXCEPTION_IRQ        5
+#define ARM_EXCEPTION_FIQ        6
+#define ARM_EXCEPTION_HVC        7
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_exit[];
+extern char __kvm_hyp_exit_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644 (file)
index 0000000..4917c2f
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_H__
+#define __ARM_KVM_COPROC_H__
+#include <linux/kvm_host.h>
+
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_coproc_target_table {
+       unsigned target;
+       const struct coproc_reg *table;
+       size_t num;
+};
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+void kvm_coproc_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644 (file)
index 0000000..fd61199
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_EMULATE_H__
+#define __ARM_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+
+u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+{
+       return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+}
+
+static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+{
+       return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+       *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+}
+
+static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
+{
+       unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+       return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
+}
+
+static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
+{
+       unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+       return cpsr_mode > USR_MODE;
+}
+
+static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
+{
+       return reg == 15;
+}
+
+#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..98b4d1a
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_HOST_H__
+#define __ARM_KVM_HOST_H__
+
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/fpstate.h>
+
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#define KVM_MEMORY_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HAVE_ONE_REG
+
+#define KVM_VCPU_MAX_FEATURES 1
+
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+#define KVM_NR_PAGE_SIZES      1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
+struct kvm_vcpu;
+u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+int kvm_target_cpu(void);
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_arch {
+       /* VTTBR value associated with below pgd and vmid */
+       u64    vttbr;
+
+       /*
+        * Anything that is not used directly from assembly code goes
+        * here.
+        */
+
+       /* The VMID generation used for the virt. memory system */
+       u64    vmid_gen;
+       u32    vmid;
+
+       /* Stage-2 page table */
+       pgd_t *pgd;
+};
+
+#define KVM_NR_MEM_OBJS     40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+       int nobjs;
+       void *objects[KVM_NR_MEM_OBJS];
+};
+
+struct kvm_vcpu_arch {
+       struct kvm_regs regs;
+
+       int target; /* Processor target */
+       DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
+
+       /* System control coprocessor (cp15) */
+       u32 cp15[NR_CP15_REGS];
+
+       /* The CPU type we expose to the VM */
+       u32 midr;
+
+       /* Exception Information */
+       u32 hsr;                /* Hyp Syndrome Register */
+       u32 hxfar;              /* Hyp Data/Inst Fault Address Register */
+       u32 hpfar;              /* Hyp IPA Fault Address Register */
+
+       /* Floating point registers (VFP and Advanced SIMD/NEON) */
+       struct vfp_hard_struct vfp_guest;
+       struct vfp_hard_struct *vfp_host;
+
+       /*
+        * Anything that is not used directly from assembly code goes
+        * here.
+        */
+       /* dcache set/way operation pending */
+       int last_pcpu;
+       cpumask_t require_dcache_flush;
+
+       /* Don't run the guest on this vcpu */
+       bool pause;
+
+       /* IO related fields */
+       struct kvm_decode mmio_decode;
+
+       /* Interrupt related fields */
+       u32 irq_lines;          /* IRQ and FIQ levels */
+
+       /* Hyp exception information */
+       u32 hyp_pc;             /* PC when exception was taken from Hyp mode */
+
+       /* Cache some mmu pages needed inside spinlock regions */
+       struct kvm_mmu_memory_cache mmu_page_cache;
+
+       /* Detect first run of a vcpu */
+       bool has_run_once;
+};
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 halt_wakeup;
+};
+
+struct kvm_vcpu_init;
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+                       const struct kvm_vcpu_init *init);
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+struct kvm_one_reg;
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm;
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+                       unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       return 0;
+}
+#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644 (file)
index 0000000..adcc0d7
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMIO_H__
+#define __ARM_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+struct kvm_decode {
+       unsigned long rt;
+       bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+       phys_addr_t     phys_addr;
+       u8              data[8];
+       u32             len;
+       bool            is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+                                   struct kvm_exit_mmio *mmio)
+{
+       run->mmio.phys_addr     = mmio->phys_addr;
+       run->mmio.len           = mmio->len;
+       run->mmio.is_write      = mmio->is_write;
+       memcpy(run->mmio.data, mmio->data, mmio->len);
+       run->exit_reason        = KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                phys_addr_t fault_ipa);
+
+#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644 (file)
index 0000000..421a20b
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMU_H__
+#define __ARM_KVM_MMU_H__
+
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_hyp_pmds(void);
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+                         phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
+phys_addr_t kvm_mmu_get_httbr(void);
+int kvm_mmu_init(void);
+void kvm_clear_hyp_idmap(void);
+
+static inline bool kvm_is_write_fault(unsigned long hsr)
+{
+       unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
+       if (hsr_ec == HSR_EC_IABT)
+               return false;
+       else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
+               return false;
+       else
+               return true;
+}
+
+#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644 (file)
index 0000000..9a83d98
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_PSCI_H__
+#define __ARM_KVM_PSCI_H__
+
+bool kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644 (file)
index 0000000..bc3a917
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_OPCODES_SEC_H
+#define __ASM_ARM_OPCODES_SEC_H
+
+#include <asm/opcodes.h>
+
+#define __SMC(imm4) __inst_arm_thumb32(                                        \
+       0xE1600070 | (((imm4) & 0xF) << 0),                             \
+       0xF7F08000 | (((imm4) & 0xF) << 16)                             \
+)
+
+#endif /* __ASM_ARM_OPCODES_SEC_H */
index 74e211a..e796c59 100644 (file)
@@ -10,6 +10,7 @@
 #define __ASM_ARM_OPCODES_H
 
 #ifndef __ASSEMBLY__
+#include <linux/linkage.h>
 extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
 #endif
 
index d795282..18f5cef 100644 (file)
@@ -32,6 +32,9 @@
 #define PMD_TYPE_SECT          (_AT(pmdval_t, 1) << 0)
 #define PMD_BIT4               (_AT(pmdval_t, 0))
 #define PMD_DOMAIN(x)          (_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT      (61)
+#define PMD_APTABLE            (_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
+#define PMD_PXNTABLE           (_AT(pgdval_t, 1) << 59)
 
 /*
  *   - section
 #define PMD_SECT_S             (_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF            (_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_nG            (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN           (_AT(pmdval_t, 1) << 53)
 #define PMD_SECT_XN            (_AT(pmdval_t, 1) << 54)
 #define PMD_SECT_AP_WRITE      (_AT(pmdval_t, 0))
 #define PMD_SECT_AP_READ       (_AT(pmdval_t, 0))
+#define PMD_SECT_AP1           (_AT(pmdval_t, 1) << 6)
 #define PMD_SECT_TEX(x)                (_AT(pmdval_t, 0))
 
 /*
index a3f3792..6ef8afd 100644 (file)
  */
 #define L_PGD_SWAPPER          (_AT(pgdval_t, 1) << 55)        /* swapper_pg_dir entry */
 
+/*
+ * 2nd stage PTE definitions for LPAE.
+ */
+#define L_PTE_S2_MT_UNCACHED    (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITEBACK   (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_RDONLY                 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
+#define L_PTE_S2_RDWR           (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+
+/*
+ * Hyp-mode PL2 PTE definitions for LPAE.
+ */
+#define L_PTE_HYP              L_PTE_USER
+
 #ifndef __ASSEMBLY__
 
 #define pud_none(pud)          (!pud_val(pud))
 #define pud_bad(pud)           (!(pud_val(pud) & 2))
 #define pud_present(pud)       (pud_val(pud))
+#define pmd_table(pmd)         ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+                                                PMD_TYPE_TABLE)
+#define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+                                                PMD_TYPE_SECT)
 
 #define pud_clear(pudp)                        \
        do {                            \
index c094749..80d6fc4 100644 (file)
@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 
 extern pgprot_t                pgprot_user;
 extern pgprot_t                pgprot_kernel;
+extern pgprot_t                pgprot_hyp_device;
+extern pgprot_t                pgprot_s2;
+extern pgprot_t                pgprot_s2_device;
 
 #define _MOD_PROT(p, b)        __pgprot(pgprot_val(p) | (b))
 
@@ -82,6 +85,10 @@ extern pgprot_t              pgprot_kernel;
 #define PAGE_READONLY_EXEC     _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
 #define PAGE_KERNEL            _MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC       pgprot_kernel
+#define PAGE_HYP               _MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP_DEVICE                _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+#define PAGE_S2                        _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
 
 #define __PAGE_NONE            __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED          __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644 (file)
index 0000000..ce0dbe7
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_PSCI_H
+#define __ASM_ARM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY          0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN       1
+
+struct psci_power_state {
+       u16     id;
+       u8      type;
+       u8      affinity_level;
+};
+
+struct psci_operations {
+       int (*cpu_suspend)(struct psci_power_state state,
+                          unsigned long entry_point);
+       int (*cpu_off)(struct psci_power_state state);
+       int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+       int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
+#endif /* __ASM_ARM_PSCI_H */
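A hedged sketch of how platform SMP code might use the psci_ops hooks declared above to start a secondary CPU. The function name, the secondary_startup entry point and the error handling are illustrative assumptions, not code added by this merge:

	#include <linux/errno.h>
	#include <asm/memory.h>		/* virt_to_phys() */
	#include <asm/psci.h>
	#include <asm/smp_plat.h>	/* cpu_logical_map() */

	extern void secondary_startup(void);	/* assumed SMP entry point */

	static int boot_secondary_via_psci(unsigned int cpu)
	{
		if (!psci_ops.cpu_on)
			return -ENODEV;

		/* cpu_on() takes a target CPU id and a physical entry point */
		return psci_ops.cpu_on(cpu_logical_map(cpu),
				       virt_to_phys(secondary_startup));
	}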
index 86164df..50af92b 100644 (file)
@@ -24,9 +24,9 @@
 /*
  * Flag indicating that the kernel was not entered in the same mode on every
  * CPU.  The zImage loader stashes this value in an SPSR, so we need an
- * architecturally defined flag bit here (the N flag, as it happens)
+ * architecturally defined flag bit here.
  */
-#define BOOT_CPU_MODE_MISMATCH (1<<31)
+#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..3303ff5
--- /dev/null
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+
+#define KVM_REG_SIZE(id)                                               \
+       (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
+#define KVM_ARM_SVC_sp         svc_regs[0]
+#define KVM_ARM_SVC_lr         svc_regs[1]
+#define KVM_ARM_SVC_spsr       svc_regs[2]
+#define KVM_ARM_ABT_sp         abt_regs[0]
+#define KVM_ARM_ABT_lr         abt_regs[1]
+#define KVM_ARM_ABT_spsr       abt_regs[2]
+#define KVM_ARM_UND_sp         und_regs[0]
+#define KVM_ARM_UND_lr         und_regs[1]
+#define KVM_ARM_UND_spsr       und_regs[2]
+#define KVM_ARM_IRQ_sp         irq_regs[0]
+#define KVM_ARM_IRQ_lr         irq_regs[1]
+#define KVM_ARM_IRQ_spsr       irq_regs[2]
+
+/* Valid only for fiq_regs in struct kvm_regs */
+#define KVM_ARM_FIQ_r8         fiq_regs[0]
+#define KVM_ARM_FIQ_r9         fiq_regs[1]
+#define KVM_ARM_FIQ_r10                fiq_regs[2]
+#define KVM_ARM_FIQ_fp         fiq_regs[3]
+#define KVM_ARM_FIQ_ip         fiq_regs[4]
+#define KVM_ARM_FIQ_sp         fiq_regs[5]
+#define KVM_ARM_FIQ_lr         fiq_regs[6]
+#define KVM_ARM_FIQ_spsr       fiq_regs[7]
+
+struct kvm_regs {
+       struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
+       __u32 svc_regs[3];      /* SP_svc, LR_svc, SPSR_svc */
+       __u32 abt_regs[3];      /* SP_abt, LR_abt, SPSR_abt */
+       __u32 und_regs[3];      /* SP_und, LR_und, SPSR_und */
+       __u32 irq_regs[3];      /* SP_irq, LR_irq, SPSR_irq */
+       __u32 fiq_regs[8];      /* R8_fiq - R14_fiq, SPSR_fiq */
+};
+
+/* Supported Processor Types */
+#define KVM_ARM_TARGET_CORTEX_A15      0
+#define KVM_ARM_NUM_TARGETS            1
+
+#define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
+
+struct kvm_vcpu_init {
+       __u32 target;
+       __u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT       16
+#define KVM_REG_ARM_32_OPC2_MASK       0x0000000000000007
+#define KVM_REG_ARM_32_OPC2_SHIFT      0
+#define KVM_REG_ARM_OPC1_MASK          0x0000000000000078
+#define KVM_REG_ARM_OPC1_SHIFT         3
+#define KVM_REG_ARM_CRM_MASK           0x0000000000000780
+#define KVM_REG_ARM_CRM_SHIFT          7
+#define KVM_REG_ARM_32_CRN_MASK                0x0000000000007800
+#define KVM_REG_ARM_32_CRN_SHIFT       11
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE               (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name)     (offsetof(struct kvm_regs, name) / 4)
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX              (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK      0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT     8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR    (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK     0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT    0
+
+/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
+#define KVM_REG_ARM_VFP                        (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_VFP_MASK           0x000000000000FFFF
+#define KVM_REG_ARM_VFP_BASE_REG       0x0
+#define KVM_REG_ARM_VFP_FPSID          0x1000
+#define KVM_REG_ARM_VFP_FPSCR          0x1001
+#define KVM_REG_ARM_VFP_MVFR1          0x1006
+#define KVM_REG_ARM_VFP_MVFR0          0x1007
+#define KVM_REG_ARM_VFP_FPEXC          0x1008
+#define KVM_REG_ARM_VFP_FPINST         0x1009
+#define KVM_REG_ARM_VFP_FPINST2                0x100A
+
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT         24
+#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT         16
+#define KVM_ARM_IRQ_VCPU_MASK          0xff
+#define KVM_ARM_IRQ_NUM_SHIFT          0
+#define KVM_ARM_IRQ_NUM_MASK           0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU           0
+#define KVM_ARM_IRQ_TYPE_SPI           1
+#define KVM_ARM_IRQ_TYPE_PPI           2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ            0
+#define KVM_ARM_IRQ_CPU_FIQ            1
+
+/* Highest supported SPI, from VGIC_NR_IRQS */
+#define KVM_ARM_IRQ_GIC_MAX            127
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE               0x95c1ba5e
+#define KVM_PSCI_FN(n)                 (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND                KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF            KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON             KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE            KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS           0
+#define KVM_PSCI_RET_NI                        ((unsigned long)-1)
+#define KVM_PSCI_RET_INVAL             ((unsigned long)-2)
+#define KVM_PSCI_RET_DENIED            ((unsigned long)-3)
+
+#endif /* __ARM_KVM_H__ */
index 5bbec7b..5f3338e 100644 (file)
@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL)        += debug.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)     += hyp-stub.o
+obj-$(CONFIG_ARM_PSCI)         += psci.o
 
 extra-y := $(head-y) vmlinux.lds
index c985b48..c8b3272 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#ifdef CONFIG_KVM_ARM_HOST
+#include <linux/kvm_host.h>
+#endif
 #include <asm/cacheflush.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
@@ -146,5 +149,27 @@ int main(void)
   DEFINE(DMA_BIDIRECTIONAL,    DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,                DMA_TO_DEVICE);
   DEFINE(DMA_FROM_DEVICE,      DMA_FROM_DEVICE);
+#ifdef CONFIG_KVM_ARM_HOST
+  DEFINE(VCPU_KVM,             offsetof(struct kvm_vcpu, kvm));
+  DEFINE(VCPU_MIDR,            offsetof(struct kvm_vcpu, arch.midr));
+  DEFINE(VCPU_CP15,            offsetof(struct kvm_vcpu, arch.cp15));
+  DEFINE(VCPU_VFP_GUEST,       offsetof(struct kvm_vcpu, arch.vfp_guest));
+  DEFINE(VCPU_VFP_HOST,                offsetof(struct kvm_vcpu, arch.vfp_host));
+  DEFINE(VCPU_REGS,            offsetof(struct kvm_vcpu, arch.regs));
+  DEFINE(VCPU_USR_REGS,                offsetof(struct kvm_vcpu, arch.regs.usr_regs));
+  DEFINE(VCPU_SVC_REGS,                offsetof(struct kvm_vcpu, arch.regs.svc_regs));
+  DEFINE(VCPU_ABT_REGS,                offsetof(struct kvm_vcpu, arch.regs.abt_regs));
+  DEFINE(VCPU_UND_REGS,                offsetof(struct kvm_vcpu, arch.regs.und_regs));
+  DEFINE(VCPU_IRQ_REGS,                offsetof(struct kvm_vcpu, arch.regs.irq_regs));
+  DEFINE(VCPU_FIQ_REGS,                offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+  DEFINE(VCPU_PC,              offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+  DEFINE(VCPU_CPSR,            offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+  DEFINE(VCPU_IRQ_LINES,       offsetof(struct kvm_vcpu, arch.irq_lines));
+  DEFINE(VCPU_HSR,             offsetof(struct kvm_vcpu, arch.hsr));
+  DEFINE(VCPU_HxFAR,           offsetof(struct kvm_vcpu, arch.hxfar));
+  DEFINE(VCPU_HPFAR,           offsetof(struct kvm_vcpu, arch.hpfar));
+  DEFINE(VCPU_HYP_PC,          offsetof(struct kvm_vcpu, arch.hyp_pc));
+  DEFINE(KVM_VTTBR,            offsetof(struct kvm, arch.vttbr));
+#endif
   return 0; 
 }
index 5ff2e77..5eae53e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/smp.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
@@ -35,6 +36,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/kdebug.h>
 #include <asm/traps.h>
+#include <asm/hardware/coresight.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -49,6 +51,9 @@ static int core_num_wrps;
 /* Debug architecture version. */
 static u8 debug_arch;
 
+/* Does debug architecture support OS Save and Restore? */
+static bool has_ossr;
+
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
@@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {
        .fn             = debug_reg_trap,
 };
 
+/* Does this core support OS Save and Restore? */
+static bool core_has_os_save_restore(void)
+{
+       u32 oslsr;
+
+       switch (get_debug_arch()) {
+       case ARM_DEBUG_ARCH_V7_1:
+               return true;
+       case ARM_DEBUG_ARCH_V7_ECP14:
+               ARM_DBG_READ(c1, c1, 4, oslsr);
+               if (oslsr & ARM_OSLSR_OSLM0)
+                       return true;
+       default:
+               return false;
+       }
+}
+
 static void reset_ctrl_regs(void *unused)
 {
        int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)
                if ((val & 0x1) == 0)
                        err = -EPERM;
 
-               /*
-                * Check whether we implement OS save and restore.
-                */
-               ARM_DBG_READ(c1, c1, 4, val);
-               if ((val & 0x9) == 0)
+               if (!has_ossr)
                        goto clear_vcr;
                break;
        case ARM_DEBUG_ARCH_V7_1:
@@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)
 
        /*
         * Unconditionally clear the OS lock by writing a value
-        * other than 0xC5ACCE55 to the access register.
+        * other than CS_LAR_KEY to the access register.
         */
-       ARM_DBG_WRITE(c1, c0, 4, 0);
+       ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
        isb();
 
        /*
@@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
        .notifier_call = dbg_reset_notify,
 };
 
+#ifdef CONFIG_CPU_PM
+static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
+                            void *v)
+{
+       if (action == CPU_PM_EXIT)
+               reset_ctrl_regs(NULL);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+       .notifier_call = dbg_cpu_pm_notify,
+};
+
+static void __init pm_init(void)
+{
+       cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+}
+#else
+static inline void pm_init(void)
+{
+}
+#endif
+
 static int __init arch_hw_breakpoint_init(void)
 {
        debug_arch = get_debug_arch();
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
                return 0;
        }
 
+       has_ossr = core_has_os_save_restore();
+
        /* Determine how many BRPs/WRPs are available. */
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();
@@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)
        hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
                        TRAP_HWBKPT, "breakpoint debug exception");
 
-       /* Register hotplug notifier. */
+       /* Register hotplug and PM notifiers. */
        register_cpu_notifier(&dbg_reset_nb);
+       pm_init();
        return 0;
 }
 arch_initcall(arch_hw_breakpoint_init);
index f9e8657..31e0eb3 100644 (file)
@@ -149,12 +149,6 @@ again:
 static void
 armpmu_read(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-
-       /* Don't read disabled counters! */
-       if (hwc->idx < 0)
-               return;
-
        armpmu_event_update(event);
 }
 
@@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       WARN_ON(idx < 0);
-
        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);
@@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
-       int mapping, err;
+       int mapping;
 
        mapping = armpmu->map_event(event);
 
@@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)
                local64_set(&hwc->period_left, hwc->sample_period);
        }
 
-       err = 0;
        if (event->group_leader != event) {
-               err = validate_group(event);
-               if (err)
+               if (validate_group(event) != 0)
                        return -EINVAL;
        }
 
-       return err;
+       return 0;
 }
 
 static int armpmu_event_init(struct perf_event *event)
index 5f66206..1f2740e 100644 (file)
@@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->free_irq       = cpu_pmu_free_irq;
 
        /* Ensure the PMU has sane values out of reset. */
-       if (cpu_pmu && cpu_pmu->reset)
+       if (cpu_pmu->reset)
                on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
 }
 
@@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
        int cpu = get_cpu();
-       unsigned long cpuid = read_cpuid_id();
-       unsigned long implementor = (cpuid & 0xFF000000) >> 24;
-       unsigned long part_number = (cpuid & 0xFFF0);
+       unsigned long implementor = read_cpuid_implementor();
+       unsigned long part_number = read_cpuid_part_number();
        int ret = -ENODEV;
 
        pr_info("probing PMU on CPU %d\n", cpu);
 
        /* ARM Ltd CPUs. */
-       if (0x41 == implementor) {
+       if (implementor == ARM_CPU_IMP_ARM) {
                switch (part_number) {
-               case 0xB360:    /* ARM1136 */
-               case 0xB560:    /* ARM1156 */
-               case 0xB760:    /* ARM1176 */
+               case ARM_CPU_PART_ARM1136:
+               case ARM_CPU_PART_ARM1156:
+               case ARM_CPU_PART_ARM1176:
                        ret = armv6pmu_init(pmu);
                        break;
-               case 0xB020:    /* ARM11mpcore */
+               case ARM_CPU_PART_ARM11MPCORE:
                        ret = armv6mpcore_pmu_init(pmu);
                        break;
-               case 0xC080:    /* Cortex-A8 */
+               case ARM_CPU_PART_CORTEX_A8:
                        ret = armv7_a8_pmu_init(pmu);
                        break;
-               case 0xC090:    /* Cortex-A9 */
+               case ARM_CPU_PART_CORTEX_A9:
                        ret = armv7_a9_pmu_init(pmu);
                        break;
-               case 0xC050:    /* Cortex-A5 */
+               case ARM_CPU_PART_CORTEX_A5:
                        ret = armv7_a5_pmu_init(pmu);
                        break;
-               case 0xC0F0:    /* Cortex-A15 */
+               case ARM_CPU_PART_CORTEX_A15:
                        ret = armv7_a15_pmu_init(pmu);
                        break;
-               case 0xC070:    /* Cortex-A7 */
+               case ARM_CPU_PART_CORTEX_A7:
                        ret = armv7_a7_pmu_init(pmu);
                        break;
                }
        /* Intel CPUs [xscale]. */
-       } else if (0x69 == implementor) {
-               part_number = (cpuid >> 13) & 0x7;
-               switch (part_number) {
-               case 1:
+       } else if (implementor == ARM_CPU_IMP_INTEL) {
+               switch (xscale_cpu_arch_version()) {
+               case ARM_CPU_XSCALE_ARCH_V1:
                        ret = xscale1pmu_init(pmu);
                        break;
-               case 2:
+               case ARM_CPU_XSCALE_ARCH_V2:
                        ret = xscale2pmu_init(pmu);
                        break;
                }
@@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
        }
 
        if (ret) {
-               pr_info("failed to register PMU devices!");
-               kfree(pmu);
-               return ret;
+               pr_info("failed to probe PMU!");
+               goto out_free;
        }
 
        cpu_pmu = pmu;
        cpu_pmu->plat_device = pdev;
        cpu_pmu_init(cpu_pmu);
-       armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+       ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
 
-       return 0;
+       if (!ret)
+               return 0;
+
+out_free:
+       pr_info("failed to register PMU devices!");
+       kfree(pmu);
+       return ret;
 }
 
 static struct platform_driver cpu_pmu_driver = {
index 041d052..03664b0 100644 (file)
@@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
index 4fbc757..8c79a9e 100644 (file)
@@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                /*
                 * The prefetch counters don't differentiate between the I
@@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
-                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
index 2b0fe30..63990c4 100644 (file)
@@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
new file mode 100644 (file)
index 0000000..3653164
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <asm/compiler.h>
+#include <asm/errno.h>
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+#include <asm/psci.h>
+
+struct psci_operations psci_ops;
+
+static int (*invoke_psci_fn)(u32, u32, u32, u32);
+
+enum psci_function {
+       PSCI_FN_CPU_SUSPEND,
+       PSCI_FN_CPU_ON,
+       PSCI_FN_CPU_OFF,
+       PSCI_FN_MIGRATE,
+       PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+#define PSCI_RET_SUCCESS               0
+#define PSCI_RET_EOPNOTSUPP            -1
+#define PSCI_RET_EINVAL                        -2
+#define PSCI_RET_EPERM                 -3
+
+static int psci_to_linux_errno(int errno)
+{
+       switch (errno) {
+       case PSCI_RET_SUCCESS:
+               return 0;
+       case PSCI_RET_EOPNOTSUPP:
+               return -EOPNOTSUPP;
+       case PSCI_RET_EINVAL:
+               return -EINVAL;
+       case PSCI_RET_EPERM:
+               return -EPERM;
+       };
+
+       return -EINVAL;
+}
+
+#define PSCI_POWER_STATE_ID_MASK       0xffff
+#define PSCI_POWER_STATE_ID_SHIFT      0
+#define PSCI_POWER_STATE_TYPE_MASK     0x1
+#define PSCI_POWER_STATE_TYPE_SHIFT    16
+#define PSCI_POWER_STATE_AFFL_MASK     0x3
+#define PSCI_POWER_STATE_AFFL_SHIFT    24
+
+static u32 psci_power_state_pack(struct psci_power_state state)
+{
+       return  ((state.id & PSCI_POWER_STATE_ID_MASK)
+                       << PSCI_POWER_STATE_ID_SHIFT)   |
+               ((state.type & PSCI_POWER_STATE_TYPE_MASK)
+                       << PSCI_POWER_STATE_TYPE_SHIFT) |
+               ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
+                       << PSCI_POWER_STATE_AFFL_SHIFT);
+}
+
+/*
+ * The following two functions are invoked via the invoke_psci_fn pointer
+ * and will not be inlined, allowing us to piggyback on the AAPCS.
+ */
+static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
+                                        u32 arg2)
+{
+       asm volatile(
+                       __asmeq("%0", "r0")
+                       __asmeq("%1", "r1")
+                       __asmeq("%2", "r2")
+                       __asmeq("%3", "r3")
+                       __HVC(0)
+               : "+r" (function_id)
+               : "r" (arg0), "r" (arg1), "r" (arg2));
+
+       return function_id;
+}
+
+static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
+                                        u32 arg2)
+{
+       asm volatile(
+                       __asmeq("%0", "r0")
+                       __asmeq("%1", "r1")
+                       __asmeq("%2", "r2")
+                       __asmeq("%3", "r3")
+                       __SMC(0)
+               : "+r" (function_id)
+               : "r" (arg0), "r" (arg1), "r" (arg2));
+
+       return function_id;
+}
+
+static int psci_cpu_suspend(struct psci_power_state state,
+                           unsigned long entry_point)
+{
+       int err;
+       u32 fn, power_state;
+
+       fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+       power_state = psci_power_state_pack(state);
+       err = invoke_psci_fn(fn, power_state, entry_point, 0);
+       return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(struct psci_power_state state)
+{
+       int err;
+       u32 fn, power_state;
+
+       fn = psci_function_id[PSCI_FN_CPU_OFF];
+       power_state = psci_power_state_pack(state);
+       err = invoke_psci_fn(fn, power_state, 0, 0);
+       return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+       int err;
+       u32 fn;
+
+       fn = psci_function_id[PSCI_FN_CPU_ON];
+       err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+       return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+       int err;
+       u32 fn;
+
+       fn = psci_function_id[PSCI_FN_MIGRATE];
+       err = invoke_psci_fn(fn, cpuid, 0, 0);
+       return psci_to_linux_errno(err);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+       { .compatible = "arm,psci",     },
+       {},
+};
+
+static int __init psci_init(void)
+{
+       struct device_node *np;
+       const char *method;
+       u32 id;
+
+       np = of_find_matching_node(NULL, psci_of_match);
+       if (!np)
+               return 0;
+
+       pr_info("probing function IDs from device-tree\n");
+
+       if (of_property_read_string(np, "method", &method)) {
+               pr_warning("missing \"method\" property\n");
+               goto out_put_node;
+       }
+
+       if (!strcmp("hvc", method)) {
+               invoke_psci_fn = __invoke_psci_fn_hvc;
+       } else if (!strcmp("smc", method)) {
+               invoke_psci_fn = __invoke_psci_fn_smc;
+       } else {
+               pr_warning("invalid \"method\" property: %s\n", method);
+               goto out_put_node;
+       }
+
+       if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+               psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+               psci_ops.cpu_suspend = psci_cpu_suspend;
+       }
+
+       if (!of_property_read_u32(np, "cpu_off", &id)) {
+               psci_function_id[PSCI_FN_CPU_OFF] = id;
+               psci_ops.cpu_off = psci_cpu_off;
+       }
+
+       if (!of_property_read_u32(np, "cpu_on", &id)) {
+               psci_function_id[PSCI_FN_CPU_ON] = id;
+               psci_ops.cpu_on = psci_cpu_on;
+       }
+
+       if (!of_property_read_u32(np, "migrate", &id)) {
+               psci_function_id[PSCI_FN_MIGRATE] = id;
+               psci_ops.migrate = psci_migrate;
+       }
+
+out_put_node:
+       of_node_put(np);
+       return 0;
+}
+early_initcall(psci_init);
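
For illustration only (this sketch is not part of the patch, and reading type == 1 as a power-down rather than standby request follows the PSCI specification, not anything in this diff), psci_power_state_pack() above encodes a sample state as follows:

    struct psci_power_state state = {
            .id             = 0,    /* platform-specific state ID */
            .type           = 1,    /* assumed: 1 = power down, 0 = standby */
            .affinity_level = 1,    /* e.g. affect the cluster, not just the core */
    };

    /*
     * psci_power_state_pack(state) == (0 << 0) | (1 << 16) | (1 << 24)
     *                              == 0x01010000
     * psci_cpu_suspend() passes this value as arg0 (register r1) when
     * invoke_psci_fn() issues the HVC or SMC.
     */
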
index 58af91c..87d30e7 100644 (file)
@@ -475,19 +475,11 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-       struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
        smp_cross_call(mask, IPI_TIMER);
 }
-#else
-#define smp_timer_broadcast    NULL
 #endif
 
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
@@ -530,7 +522,6 @@ static void __cpuinit percpu_timer_setup(void)
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
        evt->cpumask = cpumask_of(cpu);
-       evt->broadcast = smp_timer_broadcast;
 
        if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
@@ -596,11 +587,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        case IPI_WAKEUP:
                break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
-               ipi_timer();
+               tick_receive_broadcast();
                irq_exit();
                break;
+#endif
 
        case IPI_RESCHEDULE:
                scheduler_ipi();
index 11c1785..b571484 100644 (file)
        ALIGN_FUNCTION();                                               \
        VMLINUX_SYMBOL(__idmap_text_start) = .;                         \
        *(.idmap.text)                                                  \
-       VMLINUX_SYMBOL(__idmap_text_end) = .;
+       VMLINUX_SYMBOL(__idmap_text_end) = .;                           \
+       ALIGN_FUNCTION();                                               \
+       VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;                     \
+       *(.hyp.idmap.text)                                              \
+       VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
new file mode 100644 (file)
index 0000000..05227cb
--- /dev/null
@@ -0,0 +1,56 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and
+         disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       bool "Kernel-based Virtual Machine (KVM) support"
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select KVM_MMIO
+       select KVM_ARM_HOST
+       depends on ARM_VIRT_EXT && ARM_LPAE
+       ---help---
+         Support hosting virtualized guest machines. You will also
+         need to select one or more of the processor modules below.
+
+         This module provides access to the hardware capabilities through
+         a character device node named /dev/kvm.
+
+         If unsure, say N.
+
+config KVM_ARM_HOST
+       bool "KVM host support for ARM cpus."
+       depends on KVM
+       depends on MMU
+       select  MMU_NOTIFIER
+       ---help---
+         Provides host support for ARM processors.
+
+config KVM_ARM_MAX_VCPUS
+       int "Maximum number of supported virtual CPUs per VM"
+       depends on KVM_ARM_HOST
+       default 4
+       help
+         Static number of max supported virtual CPUs per VM.
+
+         If you choose a high number, the vcpu structures will be quite
+         large, so only choose a reasonable number that you expect to
+         actually use.
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
new file mode 100644 (file)
index 0000000..ea27987
--- /dev/null
@@ -0,0 +1,21 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+plus_virt := $(call as-instr,.arch_extension virt,+virt)
+ifeq ($(plus_virt),+virt)
+       plus_virt_def := -DREQUIRES_VIRT=1
+endif
+
+ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
+CFLAGS_arm.o := -I. $(plus_virt_def)
+CFLAGS_mmu.o := -I.
+
+AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
+AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
+
+kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+obj-y += kvm-arm.o init.o interrupts.o
+obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += coproc.o coproc_a15.o mmio.o psci.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
new file mode 100644 (file)
index 0000000..2d30e3a
--- /dev/null
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/kvm.h>
+#include <trace/events/kvm.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#include <asm/unified.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/mman.h>
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/virt.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_psci.h>
+#include <asm/opcodes.h>
+
+#ifdef REQUIRES_VIRT
+__asm__(".arch_extension       virt");
+#endif
+
+static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static unsigned long hyp_default_vectors;
+
+/* The VMID used in the VTTBR */
+static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+static u8 kvm_next_vmid;
+static DEFINE_SPINLOCK(kvm_vmid_lock);
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       *(int *)rtn = 0;
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+/**
+ * kvm_arch_init_vm - initializes a VM data structure
+ * @kvm:       pointer to the KVM struct
+ */
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       int ret = 0;
+
+       if (type)
+               return -EINVAL;
+
+       ret = kvm_alloc_stage2_pgd(kvm);
+       if (ret)
+               goto out_fail_alloc;
+
+       ret = create_hyp_mappings(kvm, kvm + 1);
+       if (ret)
+               goto out_free_stage2_pgd;
+
+       /* Mark the initial VMID generation invalid */
+       kvm->arch.vmid_gen = 0;
+
+       return ret;
+out_free_stage2_pgd:
+       kvm_free_stage2_pgd(kvm);
+out_fail_alloc:
+       return ret;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
+/**
+ * kvm_arch_destroy_vm - destroy the VM data structure
+ * @kvm:       pointer to the KVM struct
+ */
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       int i;
+
+       kvm_free_stage2_pgd(kvm);
+
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm_arch_vcpu_free(kvm->vcpus[i]);
+                       kvm->vcpus[i] = NULL;
+               }
+       }
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+       switch (ext) {
+       case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_SYNC_MMU:
+       case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+       case KVM_CAP_ONE_REG:
+       case KVM_CAP_ARM_PSCI:
+               r = 1;
+               break;
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       case KVM_CAP_NR_VCPUS:
+               r = num_online_cpus();
+               break;
+       case KVM_CAP_MAX_VCPUS:
+               r = KVM_MAX_VCPUS;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+                              struct kvm_userspace_memory_region *mem,
+                              struct kvm_memory_slot old,
+                              int user_alloc)
+{
+       return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_memory_slot old,
+                                  struct kvm_userspace_memory_region *mem,
+                                  int user_alloc)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  struct kvm_memory_slot old,
+                                  int user_alloc)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       int err;
+       struct kvm_vcpu *vcpu;
+
+       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+       if (err)
+               goto free_vcpu;
+
+       err = create_hyp_mappings(vcpu, vcpu + 1);
+       if (err)
+               goto vcpu_uninit;
+
+       return vcpu;
+vcpu_uninit:
+       kvm_vcpu_uninit(vcpu);
+free_vcpu:
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+       return ERR_PTR(err);
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       kvm_mmu_free_memory_caches(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int __attribute_const__ kvm_target_cpu(void)
+{
+       unsigned long implementor = read_cpuid_implementor();
+       unsigned long part_number = read_cpuid_part_number();
+
+       if (implementor != ARM_CPU_IMP_ARM)
+               return -EINVAL;
+
+       switch (part_number) {
+       case ARM_CPU_PART_CORTEX_A15:
+               return KVM_ARM_TARGET_CORTEX_A15;
+       default:
+               return -EINVAL;
+       }
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       /* Force users to call KVM_ARM_VCPU_INIT */
+       vcpu->arch.target = -1;
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       vcpu->cpu = cpu;
+       vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
+
+       /*
+        * Check whether this vcpu requires the cache to be flushed on
+        * this physical CPU. This is a consequence of doing dcache
+        * operations by set/way on this vcpu. We do it here to be in
+        * a non-preemptible section.
+        */
+       if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
+               flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg)
+{
+       return -EINVAL;
+}
+
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+/**
+ * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
+ * @v:         The VCPU pointer
+ *
+ * If the guest CPU is not waiting for interrupts or an interrupt line is
+ * asserted, the CPU is by definition runnable.
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+       return !!v->arch.irq_lines;
+}
+
+/* Just ensure a guest exit from a particular CPU */
+static void exit_vm_noop(void *info)
+{
+}
+
+void force_vm_exit(const cpumask_t *mask)
+{
+       smp_call_function_many(mask, exit_vm_noop, NULL, true);
+}
+
+/**
+ * need_new_vmid_gen - check that the VMID is still valid
+ * @kvm: The VM's VMID to check
+ *
+ * return true if there is a new generation of VMIDs being used
+ *
+ * The hardware supports only 256 values with the value zero reserved for the
+ * host, so we check if an assigned value belongs to a previous generation,
+ * which requires us to assign a new value. If we're the first to use a
+ * VMID for the new generation, we must flush necessary caches and TLBs on all
+ * CPUs.
+ */
+static bool need_new_vmid_gen(struct kvm *kvm)
+{
+       return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+}
+
+/**
+ * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
+ * @kvm:       The guest that we are about to run
+ *
+ * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
+ * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
+ * caches and TLBs.
+ */
+static void update_vttbr(struct kvm *kvm)
+{
+       phys_addr_t pgd_phys;
+       u64 vmid;
+
+       if (!need_new_vmid_gen(kvm))
+               return;
+
+       spin_lock(&kvm_vmid_lock);
+
+       /*
+        * We need to re-check the vmid_gen here to ensure that if another vcpu
+        * already allocated a valid vmid for this vm, then this vcpu should
+        * use the same vmid.
+        */
+       if (!need_new_vmid_gen(kvm)) {
+               spin_unlock(&kvm_vmid_lock);
+               return;
+       }
+
+       /* First user of a new VMID generation? */
+       if (unlikely(kvm_next_vmid == 0)) {
+               atomic64_inc(&kvm_vmid_gen);
+               kvm_next_vmid = 1;
+
+               /*
+                * On SMP we know no other CPUs can use this CPU's or each
+                * other's VMID after force_vm_exit returns since the
+                * kvm_vmid_lock blocks them from reentry to the guest.
+                */
+               force_vm_exit(cpu_all_mask);
+               /*
+                * Now broadcast TLB + ICACHE invalidation over the inner
+                * shareable domain to make sure all data structures are
+                * clean.
+                */
+               kvm_call_hyp(__kvm_flush_vm_context);
+       }
+
+       kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
+       kvm->arch.vmid = kvm_next_vmid;
+       kvm_next_vmid++;
+
+       /* update vttbr to be used with the new vmid */
+       pgd_phys = virt_to_phys(kvm->arch.pgd);
+       vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+       kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
+       kvm->arch.vttbr |= vmid;
+
+       spin_unlock(&kvm_vmid_lock);
+}
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* SVC called from Hyp mode should never get here */
+       kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+       BUG();
+       return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+                     vcpu->arch.hsr & HSR_HVC_IMM_MASK);
+
+       if (kvm_psci_call(vcpu))
+               return 1;
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       if (kvm_psci_call(vcpu))
+               return 1;
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* The hypervisor should never cause aborts */
+       kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
+               vcpu->arch.hxfar, vcpu->arch.hsr);
+       return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /* Either an error in the world-switch code or an external abort */
+       kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
+               vcpu->arch.hxfar, vcpu->arch.hsr);
+       return -EFAULT;
+}
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+static exit_handle_fn arm_exit_handlers[] = {
+       [HSR_EC_WFI]            = kvm_handle_wfi,
+       [HSR_EC_CP15_32]        = kvm_handle_cp15_32,
+       [HSR_EC_CP15_64]        = kvm_handle_cp15_64,
+       [HSR_EC_CP14_MR]        = kvm_handle_cp14_access,
+       [HSR_EC_CP14_LS]        = kvm_handle_cp14_load_store,
+       [HSR_EC_CP14_64]        = kvm_handle_cp14_access,
+       [HSR_EC_CP_0_13]        = kvm_handle_cp_0_13_access,
+       [HSR_EC_CP10_ID]        = kvm_handle_cp10_id,
+       [HSR_EC_SVC_HYP]        = handle_svc_hyp,
+       [HSR_EC_HVC]            = handle_hvc,
+       [HSR_EC_SMC]            = handle_smc,
+       [HSR_EC_IABT]           = kvm_handle_guest_abort,
+       [HSR_EC_IABT_HYP]       = handle_pabt_hyp,
+       [HSR_EC_DABT]           = kvm_handle_guest_abort,
+       [HSR_EC_DABT_HYP]       = handle_dabt_hyp,
+};
+
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed.  So let's re-implement the hardware, in
+ * software!
+ */
+static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
+{
+       unsigned long cpsr, cond, insn;
+
+       /*
+        * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+        * catch undefined instructions, and then we won't get past
+        * the arm_exit_handlers test anyway.
+        */
+       BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
+
+       /* Top two bits non-zero?  Unconditional. */
+       if (vcpu->arch.hsr >> 30)
+               return true;
+
+       cpsr = *vcpu_cpsr(vcpu);
+
+       /* Is condition field valid? */
+       if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
+               cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
+       else {
+               /* This can happen in Thumb mode: examine IT state. */
+               unsigned long it;
+
+               it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+               /* it == 0 => unconditional. */
+               if (it == 0)
+                       return true;
+
+               /* The cond for this insn works out as the top 4 bits. */
+               cond = (it >> 4);
+       }
+
+       /* Shift makes it look like an ARM-mode instruction */
+       insn = cond << 28;
+       return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to QEMU.
+ */
+static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                      int exception_index)
+{
+       unsigned long hsr_ec;
+
+       switch (exception_index) {
+       case ARM_EXCEPTION_IRQ:
+               return 1;
+       case ARM_EXCEPTION_UNDEFINED:
+               kvm_err("Undefined exception in Hyp mode at: %#08x\n",
+                       vcpu->arch.hyp_pc);
+               BUG();
+               panic("KVM: Hypervisor undefined exception!\n");
+       case ARM_EXCEPTION_DATA_ABORT:
+       case ARM_EXCEPTION_PREF_ABORT:
+       case ARM_EXCEPTION_HVC:
+               hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+
+               if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
+                   || !arm_exit_handlers[hsr_ec]) {
+                       kvm_err("Unknown exception class: %#08lx, "
+                               "hsr: %#08x\n", hsr_ec,
+                               (unsigned int)vcpu->arch.hsr);
+                       BUG();
+               }
+
+               /*
+                * See ARM ARM B1.14.1: "Hyp traps on instructions
+                * that fail their condition code check"
+                */
+               if (!kvm_condition_valid(vcpu)) {
+                       bool is_wide = vcpu->arch.hsr & HSR_IL;
+                       kvm_skip_instr(vcpu, is_wide);
+                       return 1;
+               }
+
+               return arm_exit_handlers[hsr_ec](vcpu, run);
+       default:
+               kvm_pr_unimpl("Unsupported exception type: %d",
+                             exception_index);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               return 0;
+       }
+}
+
+static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+{
+       if (likely(vcpu->arch.has_run_once))
+               return 0;
+
+       vcpu->arch.has_run_once = true;
+
+       /*
+        * Handle the "start in power-off" case by calling into the
+        * PSCI code.
+        */
+       if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+               *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
+               kvm_psci_call(vcpu);
+       }
+
+       return 0;
+}
+
+static void vcpu_pause(struct kvm_vcpu *vcpu)
+{
+       wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+       wait_event_interruptible(*wq, !vcpu->arch.pause);
+}
+
+/**
+ * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
+ * @vcpu:      The VCPU pointer
+ * @run:       The kvm_run structure pointer used for userspace state exchange
+ *
+ * This function is called through the VCPU_RUN ioctl called from user space. It
+ * will execute VM code in a loop until the time slice for the process is used
+ * or some emulation is needed from user space in which case the function will
+ * return with return value 0 and with the kvm_run structure filled in with the
+ * required data for the requested emulation.
+ */
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int ret;
+       sigset_t sigsaved;
+
+       /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
+       if (unlikely(vcpu->arch.target < 0))
+               return -ENOEXEC;
+
+       ret = kvm_vcpu_first_run_init(vcpu);
+       if (ret)
+               return ret;
+
+       if (run->exit_reason == KVM_EXIT_MMIO) {
+               ret = kvm_handle_mmio_return(vcpu, vcpu->run);
+               if (ret)
+                       return ret;
+       }
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       ret = 1;
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       while (ret > 0) {
+               /*
+                * Check conditions before entering the guest
+                */
+               cond_resched();
+
+               update_vttbr(vcpu->kvm);
+
+               if (vcpu->arch.pause)
+                       vcpu_pause(vcpu);
+
+               local_irq_disable();
+
+               /*
+                * Re-check atomic conditions
+                */
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       run->exit_reason = KVM_EXIT_INTR;
+               }
+
+               if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
+                       local_irq_enable();
+                       continue;
+               }
+
+               /**************************************************************
+                * Enter the guest
+                */
+               trace_kvm_entry(*vcpu_pc(vcpu));
+               kvm_guest_enter();
+               vcpu->mode = IN_GUEST_MODE;
+
+               ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+
+               vcpu->mode = OUTSIDE_GUEST_MODE;
+               vcpu->arch.last_pcpu = smp_processor_id();
+               kvm_guest_exit();
+               trace_kvm_exit(*vcpu_pc(vcpu));
+               /*
+                * We may have taken a host interrupt in HYP mode (ie
+                * while executing the guest). This interrupt is still
+                * pending, as we haven't serviced it yet!
+                *
+                * We're now back in SVC mode, with interrupts
+                * disabled.  Enabling the interrupts now will have
+                * the effect of taking the interrupt again, in SVC
+                * mode this time.
+                */
+               local_irq_enable();
+
+               /*
+                * Back from guest
+                *************************************************************/
+
+               ret = handle_exit(vcpu, run, ret);
+       }
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       return ret;
+}
+
+static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
+{
+       int bit_index;
+       bool set;
+       unsigned long *ptr;
+
+       if (number == KVM_ARM_IRQ_CPU_IRQ)
+               bit_index = __ffs(HCR_VI);
+       else /* KVM_ARM_IRQ_CPU_FIQ */
+               bit_index = __ffs(HCR_VF);
+
+       ptr = (unsigned long *)&vcpu->arch.irq_lines;
+       if (level)
+               set = test_and_set_bit(bit_index, ptr);
+       else
+               set = test_and_clear_bit(bit_index, ptr);
+
+       /*
+        * If we didn't change anything, no need to wake up or kick other CPUs
+        */
+       if (set == level)
+               return 0;
+
+       /*
+        * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
+        * trigger a world-switch round on the running physical CPU to set the
+        * virtual IRQ/FIQ fields in the HCR appropriately.
+        */
+       kvm_vcpu_kick(vcpu);
+
+       return 0;
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
+{
+       u32 irq = irq_level->irq;
+       unsigned int irq_type, vcpu_idx, irq_num;
+       int nrcpus = atomic_read(&kvm->online_vcpus);
+       struct kvm_vcpu *vcpu = NULL;
+       bool level = irq_level->level;
+
+       irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
+       vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+       irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
+
+       trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+
+       if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
+               return -EINVAL;
+
+       if (vcpu_idx >= nrcpus)
+               return -EINVAL;
+
+       vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+       if (!vcpu)
+               return -EINVAL;
+
+       if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
+               return -EINVAL;
+
+       return vcpu_interrupt_line(vcpu, irq_num, level);
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+
+       switch (ioctl) {
+       case KVM_ARM_VCPU_INIT: {
+               struct kvm_vcpu_init init;
+
+               if (copy_from_user(&init, argp, sizeof(init)))
+                       return -EFAULT;
+
+               return kvm_vcpu_set_target(vcpu, &init);
+
+       }
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       return -EFAULT;
+               if (ioctl == KVM_SET_ONE_REG)
+                       return kvm_arm_set_reg(vcpu, &reg);
+               else
+                       return kvm_arm_get_reg(vcpu, &reg);
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       return -EFAULT;
+               n = reg_list.n;
+               reg_list.n = kvm_arm_num_regs(vcpu);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       return -EFAULT;
+               if (n < reg_list.n)
+                       return -E2BIG;
+               return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+       }
+       default:
+               return -EINVAL;
+       }
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       return -EINVAL;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+                      unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+static void cpu_init_hyp_mode(void *vector)
+{
+       unsigned long long pgd_ptr;
+       unsigned long pgd_low, pgd_high;
+       unsigned long hyp_stack_ptr;
+       unsigned long stack_page;
+       unsigned long vector_ptr;
+
+       /* Switch from the HYP stub to our own HYP init vector */
+       __hyp_set_vectors((unsigned long)vector);
+
+       pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
+       pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+       pgd_high = (pgd_ptr >> 32ULL);
+       stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+       hyp_stack_ptr = stack_page + PAGE_SIZE;
+       vector_ptr = (unsigned long)__kvm_hyp_vector;
+
+       /*
+        * Call initialization code, and switch to the full blown
+        * HYP code. The init code doesn't need to preserve these registers as
+        * r1-r3 and r12 are already callee save according to the AAPCS.
+        * Note that we slightly misuse the prototype by casting the pgd_low to
+        * a void *.
+        */
+       kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
+/**
+ * Inits Hyp-mode on all online CPUs
+ */
+static int init_hyp_mode(void)
+{
+       phys_addr_t init_phys_addr;
+       int cpu;
+       int err = 0;
+
+       /*
+        * Allocate Hyp PGD and setup Hyp identity mapping
+        */
+       err = kvm_mmu_init();
+       if (err)
+               goto out_err;
+
+       /*
+        * It is probably enough to obtain the default on one
+        * CPU. It's unlikely to be different on the others.
+        */
+       hyp_default_vectors = __hyp_get_vectors();
+
+       /*
+        * Allocate stack pages for Hypervisor-mode
+        */
+       for_each_possible_cpu(cpu) {
+               unsigned long stack_page;
+
+               stack_page = __get_free_page(GFP_KERNEL);
+               if (!stack_page) {
+                       err = -ENOMEM;
+                       goto out_free_stack_pages;
+               }
+
+               per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+       }
+
+       /*
+        * Execute the init code on each CPU.
+        *
+        * Note: The stack is not mapped yet, so don't do anything else than
+        * initializing the hypervisor mode on each CPU using a local stack
+        * space for temporary storage.
+        */
+       init_phys_addr = virt_to_phys(__kvm_hyp_init);
+       for_each_online_cpu(cpu) {
+               smp_call_function_single(cpu, cpu_init_hyp_mode,
+                                        (void *)(long)init_phys_addr, 1);
+       }
+
+       /*
+        * Unmap the identity mapping
+        */
+       kvm_clear_hyp_idmap();
+
+       /*
+        * Map the Hyp-code called directly from the host
+        */
+       err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+       if (err) {
+               kvm_err("Cannot map world-switch code\n");
+               goto out_free_mappings;
+       }
+
+       /*
+        * Map the Hyp stack pages
+        */
+       for_each_possible_cpu(cpu) {
+               char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+               err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
+
+               if (err) {
+                       kvm_err("Cannot map hyp stack\n");
+                       goto out_free_mappings;
+               }
+       }
+
+       /*
+        * Map the host VFP structures
+        */
+       kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+       if (!kvm_host_vfp_state) {
+               err = -ENOMEM;
+               kvm_err("Cannot allocate host VFP state\n");
+               goto out_free_mappings;
+       }
+
+       for_each_possible_cpu(cpu) {
+               struct vfp_hard_struct *vfp;
+
+               vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
+               err = create_hyp_mappings(vfp, vfp + 1);
+
+               if (err) {
+                       kvm_err("Cannot map host VFP state: %d\n", err);
+                       goto out_free_vfp;
+               }
+       }
+
+       kvm_info("Hyp mode initialized successfully\n");
+       return 0;
+out_free_vfp:
+       free_percpu(kvm_host_vfp_state);
+out_free_mappings:
+       free_hyp_pmds();
+out_free_stack_pages:
+       for_each_possible_cpu(cpu)
+               free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+out_err:
+       kvm_err("error initializing Hyp mode: %d\n", err);
+       return err;
+}
+
+/**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
+int kvm_arch_init(void *opaque)
+{
+       int err;
+
+       if (!is_hyp_mode_available()) {
+               kvm_err("HYP mode not available\n");
+               return -ENODEV;
+       }
+
+       if (kvm_target_cpu() < 0) {
+               kvm_err("Target CPU not supported!\n");
+               return -ENODEV;
+       }
+
+       err = init_hyp_mode();
+       if (err)
+               goto out_err;
+
+       kvm_coproc_table_init();
+       return 0;
+out_err:
+       return err;
+}
+
+/* NOP: Compiling as a module not supported */
+void kvm_arch_exit(void)
+{
+}
+
+static int arm_init(void)
+{
+       int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+       return rc;
+}
+
+module_init(arm_init);
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
new file mode 100644 (file)
index 0000000..d782638
--- /dev/null
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.com.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <trace/events/kvm.h>
+#include <asm/vfp.h>
+#include "../vfp/vfpinstr.h"
+
+#include "trace.h"
+#include "coproc.h"
+
+
+/******************************************************************************
+ * Co-processor emulation
+ *****************************************************************************/
+
+/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
+static u32 cache_levels;
+
+/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
+#define CSSELR_MAX 12
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       /*
+        * We can get here, if the host has been built without VFPv3 support,
+        * but the guest attempted a floating point operation.
+        */
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+/* See note at ARM ARM B1.14.4 */
+static bool access_dcsw(struct kvm_vcpu *vcpu,
+                       const struct coproc_params *p,
+                       const struct coproc_reg *r)
+{
+       u32 val;
+       int cpu;
+
+       if (!p->is_write)
+               return read_from_write_only(vcpu, p);
+
+       cpu = get_cpu();
+
+       cpumask_setall(&vcpu->arch.require_dcache_flush);
+       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
+
+       /* If we were already preempted, take the long way around */
+       if (cpu != vcpu->arch.last_pcpu) {
+               flush_cache_all();
+               goto done;
+       }
+
+       val = *vcpu_reg(vcpu, p->Rt1);
+
+       switch (p->CRm) {
+       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
+       case 14:                /* DCCISW */
+               asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
+               break;
+
+       case 10:                /* DCCSW */
+               asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
+               break;
+       }
+
+done:
+       put_cpu();
+
+       return true;
+}
+
+/*
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters.  Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ */
+static bool pm_fake(struct kvm_vcpu *vcpu,
+                   const struct coproc_params *p,
+                   const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+       else
+               return read_zero(vcpu, p);
+}
+
+#define access_pmcr pm_fake
+#define access_pmcntenset pm_fake
+#define access_pmcntenclr pm_fake
+#define access_pmovsr pm_fake
+#define access_pmselr pm_fake
+#define access_pmceid0 pm_fake
+#define access_pmceid1 pm_fake
+#define access_pmccntr pm_fake
+#define access_pmxevtyper pm_fake
+#define access_pmxevcntr pm_fake
+#define access_pmuserenr pm_fake
+#define access_pmintenset pm_fake
+#define access_pmintenclr pm_fake
+
+/* Architected CP15 registers.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
+ */
+static const struct coproc_reg cp15_regs[] = {
+       /* CSSELR: swapped by interrupt.S. */
+       { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
+                       NULL, reset_unknown, c0_CSSELR },
+
+       /* TTBR0/TTBR1: swapped by interrupt.S. */
+       { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+       { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+
+       /* TTBCR: swapped by interrupt.S. */
+       { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_val, c2_TTBCR, 0x00000000 },
+
+       /* DACR: swapped by interrupt.S. */
+       { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
+                       NULL, reset_unknown, c3_DACR },
+
+       /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
+       { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
+                       NULL, reset_unknown, c5_DFSR },
+       { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
+                       NULL, reset_unknown, c5_IFSR },
+       { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
+                       NULL, reset_unknown, c5_ADFSR },
+       { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
+                       NULL, reset_unknown, c5_AIFSR },
+
+       /* DFAR/IFAR: swapped by interrupt.S. */
+       { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
+                       NULL, reset_unknown, c6_DFAR },
+       { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_unknown, c6_IFAR },
+       /*
+        * DC{C,I,CI}SW operations:
+        */
+       { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
+       { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
+       { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
+       /*
+        * Dummy performance monitor implementation.
+        */
+       { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
+       { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
+       { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
+       { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
+       { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
+       { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
+       { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
+       { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
+       { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
+       { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
+       { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
+       { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
+       { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
+
+       /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
+       { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
+                       NULL, reset_unknown, c10_PRRR},
+       { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+                       NULL, reset_unknown, c10_NMRR},
+
+       /* VBAR: swapped by interrupt.S. */
+       { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
+                       NULL, reset_val, c12_VBAR, 0x00000000 },
+
+       /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
+       { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
+                       NULL, reset_val, c13_CID, 0x00000000 },
+       { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_unknown, c13_TID_URW },
+       { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
+                       NULL, reset_unknown, c13_TID_URO },
+       { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
+                       NULL, reset_unknown, c13_TID_PRIV },
+};
+
+/* Target specific emulation tables */
+static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
+
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
+{
+       target_tables[table->target] = table;
+}
+
+/* Get specific register table for this target. */
+static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
+{
+       struct kvm_coproc_target_table *table;
+
+       table = target_tables[target];
+       *num = table->num;
+       return table->table;
+}
+
+static const struct coproc_reg *find_reg(const struct coproc_params *params,
+                                        const struct coproc_reg table[],
+                                        unsigned int num)
+{
+       unsigned int i;
+
+       for (i = 0; i < num; i++) {
+               const struct coproc_reg *r = &table[i];
+
+               if (params->is_64bit != r->is_64)
+                       continue;
+               if (params->CRn != r->CRn)
+                       continue;
+               if (params->CRm != r->CRm)
+                       continue;
+               if (params->Op1 != r->Op1)
+                       continue;
+               if (params->Op2 != r->Op2)
+                       continue;
+
+               return r;
+       }
+       return NULL;
+}
+
+static int emulate_cp15(struct kvm_vcpu *vcpu,
+                       const struct coproc_params *params)
+{
+       size_t num;
+       const struct coproc_reg *table, *r;
+
+       trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
+                                  params->CRm, params->Op2, params->is_write);
+
+       table = get_target_table(vcpu->arch.target, &num);
+
+       /* Search target-specific then generic table. */
+       r = find_reg(params, table, num);
+       if (!r)
+               r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+       if (likely(r)) {
+               /* If we don't have an accessor, we should never get here! */
+               BUG_ON(!r->access);
+
+               if (likely(r->access(vcpu, params, r))) {
+                       /* Skip instruction, since it was emulated */
+                       kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+                       return 1;
+               }
+               /* If access function fails, it should complain. */
+       } else {
+               kvm_err("Unsupported guest CP15 access at: %08x\n",
+                       *vcpu_pc(vcpu));
+               print_cp_instr(params);
+       }
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       struct coproc_params params;
+
+       params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+       params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+       params.is_write = ((vcpu->arch.hsr & 1) == 0);
+       params.is_64bit = true;
+
+       params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+       params.Op2 = 0;
+       params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+       params.CRn = 0;
+
+       return emulate_cp15(vcpu, &params);
+}
+
+static void reset_coproc_regs(struct kvm_vcpu *vcpu,
+                             const struct coproc_reg *table, size_t num)
+{
+       unsigned long i;
+
+       for (i = 0; i < num; i++)
+               if (table[i].reset)
+                       table[i].reset(vcpu, &table[i]);
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       struct coproc_params params;
+
+       params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+       params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+       params.is_write = ((vcpu->arch.hsr & 1) == 0);
+       params.is_64bit = false;
+
+       params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
+       params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
+       params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+       params.Rt2 = 0;
+
+       return emulate_cp15(vcpu, &params);
+}
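
The two handlers above pull the coprocessor access parameters straight out of the HSR bit fields. A minimal user-space sketch, using the same shifts and masks as kvm_handle_cp15_32() and a hypothetical sample HSR value of 0x461 (which would correspond to a guest mrc p15, 0, r3, c1, c0, 0, i.e. an SCTLR read), decodes to CRn=1, CRm=0, Op1=0, Op2=0, Rt=3, read:

        #include <stdbool.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned long hsr = 0x461; /* hypothetical trapped SCTLR read */
                unsigned long CRm = (hsr >> 1) & 0xf;
                unsigned long Rt1 = (hsr >> 5) & 0xf;
                unsigned long CRn = (hsr >> 10) & 0xf;
                unsigned long Op1 = (hsr >> 14) & 0x7;
                unsigned long Op2 = (hsr >> 17) & 0x7;
                bool is_write = ((hsr & 1) == 0);

                /* Prints: CRn=1 CRm=0 Op1=0 Op2=0 Rt=3 write=0 */
                printf("CRn=%lu CRm=%lu Op1=%lu Op2=%lu Rt=%lu write=%d\n",
                       CRn, CRm, Op1, Op2, Rt1, is_write);
                return 0;
        }
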
+
+/******************************************************************************
+ * Userspace API
+ *****************************************************************************/
+
+static bool index_to_params(u64 id, struct coproc_params *params)
+{
+       switch (id & KVM_REG_SIZE_MASK) {
+       case KVM_REG_SIZE_U32:
+               /* Any unused index bits means it's not valid. */
+               if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+                          | KVM_REG_ARM_COPROC_MASK
+                          | KVM_REG_ARM_32_CRN_MASK
+                          | KVM_REG_ARM_CRM_MASK
+                          | KVM_REG_ARM_OPC1_MASK
+                          | KVM_REG_ARM_32_OPC2_MASK))
+                       return false;
+
+               params->is_64bit = false;
+               params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
+                              >> KVM_REG_ARM_32_CRN_SHIFT);
+               params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+                              >> KVM_REG_ARM_CRM_SHIFT);
+               params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
+                              >> KVM_REG_ARM_OPC1_SHIFT);
+               params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
+                              >> KVM_REG_ARM_32_OPC2_SHIFT);
+               return true;
+       case KVM_REG_SIZE_U64:
+               /* Any unused index bits means it's not valid. */
+               if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+                             | KVM_REG_ARM_COPROC_MASK
+                             | KVM_REG_ARM_CRM_MASK
+                             | KVM_REG_ARM_OPC1_MASK))
+                       return false;
+               params->is_64bit = true;
+               params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+                              >> KVM_REG_ARM_CRM_SHIFT);
+               params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
+                              >> KVM_REG_ARM_OPC1_SHIFT);
+               params->Op2 = 0;
+               params->CRn = 0;
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* Decode an index value, and find the cp15 coproc_reg entry. */
+static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
+                                                   u64 id)
+{
+       size_t num;
+       const struct coproc_reg *table, *r;
+       struct coproc_params params;
+
+       /* We only do cp15 for now. */
+       if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
+               return NULL;
+
+       if (!index_to_params(id, &params))
+               return NULL;
+
+       table = get_target_table(vcpu->arch.target, &num);
+       r = find_reg(&params, table, num);
+       if (!r)
+               r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+       /* Not saved in the cp15 array? */
+       if (r && !r->reg)
+               r = NULL;
+
+       return r;
+}
+
+/*
+ * These are the invariant cp15 registers: we let the guest see the host
+ * versions of these, so they're part of the guest state.
+ *
+ * A future CPU may provide a mechanism to present different values to
+ * the guest, or a future kvm may trap them.
+ */
+/* Unfortunately, there's no register-argument for mrc, so generate. */
+#define FUNCTION_FOR32(crn, crm, op1, op2, name)                       \
+       static void get_##name(struct kvm_vcpu *v,                      \
+                              const struct coproc_reg *r)              \
+       {                                                               \
+               u32 val;                                                \
+                                                                       \
+               asm volatile("mrc p15, " __stringify(op1)               \
+                            ", %0, c" __stringify(crn)                 \
+                            ", c" __stringify(crm)                     \
+                            ", " __stringify(op2) "\n" : "=r" (val));  \
+               ((struct coproc_reg *)r)->val = val;                    \
+       }
+
+FUNCTION_FOR32(0, 0, 0, 0, MIDR)
+FUNCTION_FOR32(0, 0, 0, 1, CTR)
+FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
+FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
+FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
+FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
+FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
+FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
+FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
+FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
+FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
+FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
+FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
+FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
+FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
+FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
+FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
+FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
+FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
+FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
+FUNCTION_FOR32(0, 0, 1, 7, AIDR)
+
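
For illustration, the first invocation above, FUNCTION_FOR32(0, 0, 0, 0, MIDR), expands to roughly the following accessor (a sketch of the preprocessor output, not additional code in the patch):

        static void get_MIDR(struct kvm_vcpu *v, const struct coproc_reg *r)
        {
                u32 val;

                /* Read the host MIDR: CRn=c0, CRm=c0, Op1=0, Op2=0 */
                asm volatile("mrc p15, 0, %0, c0, c0, 0\n" : "=r" (val));
                ((struct coproc_reg *)r)->val = val; /* record the host value */
        }
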
+/* ->val is filled in by kvm_invariant_coproc_table_init() */
+static struct coproc_reg invariant_cp15[] = {
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
+
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
+       { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
+
+       { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
+       { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
+       { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
+       { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
+       { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
+       { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
+
+       { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+       { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+};
+
+static int reg_from_user(void *val, const void __user *uaddr, u64 id)
+{
+       /* This Just Works because we are little endian. */
+       if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int reg_to_user(void __user *uaddr, const void *val, u64 id)
+{
+       /* This Just Works because we are little endian. */
+       if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int get_invariant_cp15(u64 id, void __user *uaddr)
+{
+       struct coproc_params params;
+       const struct coproc_reg *r;
+
+       if (!index_to_params(id, &params))
+               return -ENOENT;
+
+       r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
+       if (!r)
+               return -ENOENT;
+
+       return reg_to_user(uaddr, &r->val, id);
+}
+
+static int set_invariant_cp15(u64 id, void __user *uaddr)
+{
+       struct coproc_params params;
+       const struct coproc_reg *r;
+       int err;
+       u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+
+       if (!index_to_params(id, &params))
+               return -ENOENT;
+       r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
+       if (!r)
+               return -ENOENT;
+
+       err = reg_from_user(&val, uaddr, id);
+       if (err)
+               return err;
+
+       /* This is what we mean by invariant: you can't change it. */
+       if (r->val != val)
+               return -EINVAL;
+
+       return 0;
+}
+
+static bool is_valid_cache(u32 val)
+{
+       u32 level, ctype;
+
+       if (val >= CSSELR_MAX)
+               return false;
+
+       /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
+       level = (val >> 1);
+       ctype = (cache_levels >> (level * 3)) & 7;
+
+       switch (ctype) {
+       case 0: /* No cache */
+               return false;
+       case 1: /* Instruction cache only */
+               return (val & 1);
+       case 2: /* Data cache only */
+       case 4: /* Unified cache */
+               return !(val & 1);
+       case 3: /* Separate instruction and data caches */
+               return true;
+       default: /* Reserved: we can't know instruction or data. */
+               return false;
+       }
+}
+
+/* Which cache CCSIDR represents depends on CSSELR value. */
+static u32 get_ccsidr(u32 csselr)
+{
+       u32 ccsidr;
+
+       /* Make sure no one else changes CSSELR during this! */
+       local_irq_disable();
+       /* Put value into CSSELR */
+       asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
+       isb();
+       /* Read result out of CCSIDR */
+       asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
+       local_irq_enable();
+
+       return ccsidr;
+}
+
+static int demux_c15_get(u64 id, void __user *uaddr)
+{
+       u32 val;
+       u32 __user *uval = uaddr;
+
+       /* Fail if we have unknown bits set. */
+       if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+                  | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+               return -ENOENT;
+
+       switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+       case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+               if (KVM_REG_SIZE(id) != 4)
+                       return -ENOENT;
+               val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+                       >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+               if (!is_valid_cache(val))
+                       return -ENOENT;
+
+               return put_user(get_ccsidr(val), uval);
+       default:
+               return -ENOENT;
+       }
+}
+
+static int demux_c15_set(u64 id, void __user *uaddr)
+{
+       u32 val, newval;
+       u32 __user *uval = uaddr;
+
+       /* Fail if we have unknown bits set. */
+       if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+                  | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+               return -ENOENT;
+
+       switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+       case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+               if (KVM_REG_SIZE(id) != 4)
+                       return -ENOENT;
+               val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+                       >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+               if (!is_valid_cache(val))
+                       return -ENOENT;
+
+               if (get_user(newval, uval))
+                       return -EFAULT;
+
+               /* This is also invariant: you can't change it. */
+               if (newval != get_ccsidr(val))
+                       return -EINVAL;
+               return 0;
+       default:
+               return -ENOENT;
+       }
+}
+
+#ifdef CONFIG_VFPv3
+static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
+                                  KVM_REG_ARM_VFP_FPSCR,
+                                  KVM_REG_ARM_VFP_FPINST,
+                                  KVM_REG_ARM_VFP_FPINST2,
+                                  KVM_REG_ARM_VFP_MVFR0,
+                                  KVM_REG_ARM_VFP_MVFR1,
+                                  KVM_REG_ARM_VFP_FPSID };
+
+static unsigned int num_fp_regs(void)
+{
+       if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
+               return 32;
+       else
+               return 16;
+}
+
+static unsigned int num_vfp_regs(void)
+{
+       /* Normal FP regs + control regs. */
+       return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
+}
+
+static int copy_vfp_regids(u64 __user *uindices)
+{
+       unsigned int i;
+       const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
+       const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
+
+       for (i = 0; i < num_fp_regs(); i++) {
+               if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
+                            uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
+               if (put_user(u32reg | vfp_sysregs[i], uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       return num_vfp_regs();
+}
+
+static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
+{
+       u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
+       u32 val;
+
+       /* Fail if we have unknown bits set. */
+       if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+                  | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+               return -ENOENT;
+
+       if (vfpid < num_fp_regs()) {
+               if (KVM_REG_SIZE(id) != 8)
+                       return -ENOENT;
+               return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
+                                  id);
+       }
+
+       /* FP control registers are all 32 bit. */
+       if (KVM_REG_SIZE(id) != 4)
+               return -ENOENT;
+
+       switch (vfpid) {
+       case KVM_REG_ARM_VFP_FPEXC:
+               return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
+       case KVM_REG_ARM_VFP_FPSCR:
+               return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
+       case KVM_REG_ARM_VFP_FPINST:
+               return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
+       case KVM_REG_ARM_VFP_FPINST2:
+               return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
+       case KVM_REG_ARM_VFP_MVFR0:
+               val = fmrx(MVFR0);
+               return reg_to_user(uaddr, &val, id);
+       case KVM_REG_ARM_VFP_MVFR1:
+               val = fmrx(MVFR1);
+               return reg_to_user(uaddr, &val, id);
+       case KVM_REG_ARM_VFP_FPSID:
+               val = fmrx(FPSID);
+               return reg_to_user(uaddr, &val, id);
+       default:
+               return -ENOENT;
+       }
+}
+
+static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
+{
+       u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
+       u32 val;
+
+       /* Fail if we have unknown bits set. */
+       if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+                  | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+               return -ENOENT;
+
+       if (vfpid < num_fp_regs()) {
+               if (KVM_REG_SIZE(id) != 8)
+                       return -ENOENT;
+               return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
+                                    uaddr, id);
+       }
+
+       /* FP control registers are all 32 bit. */
+       if (KVM_REG_SIZE(id) != 4)
+               return -ENOENT;
+
+       switch (vfpid) {
+       case KVM_REG_ARM_VFP_FPEXC:
+               return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
+       case KVM_REG_ARM_VFP_FPSCR:
+               return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
+       case KVM_REG_ARM_VFP_FPINST:
+               return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
+       case KVM_REG_ARM_VFP_FPINST2:
+               return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
+       /* These are invariant. */
+       case KVM_REG_ARM_VFP_MVFR0:
+               if (reg_from_user(&val, uaddr, id))
+                       return -EFAULT;
+               if (val != fmrx(MVFR0))
+                       return -EINVAL;
+               return 0;
+       case KVM_REG_ARM_VFP_MVFR1:
+               if (reg_from_user(&val, uaddr, id))
+                       return -EFAULT;
+               if (val != fmrx(MVFR1))
+                       return -EINVAL;
+               return 0;
+       case KVM_REG_ARM_VFP_FPSID:
+               if (reg_from_user(&val, uaddr, id))
+                       return -EFAULT;
+               if (val != fmrx(FPSID))
+                       return -EINVAL;
+               return 0;
+       default:
+               return -ENOENT;
+       }
+}
+#else /* !CONFIG_VFPv3 */
+static unsigned int num_vfp_regs(void)
+{
+       return 0;
+}
+
+static int copy_vfp_regids(u64 __user *uindices)
+{
+       return 0;
+}
+
+static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
+{
+       return -ENOENT;
+}
+
+static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
+{
+       return -ENOENT;
+}
+#endif /* !CONFIG_VFPv3 */
+
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       const struct coproc_reg *r;
+       void __user *uaddr = (void __user *)(long)reg->addr;
+
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+               return demux_c15_get(reg->id, uaddr);
+
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
+               return vfp_get_reg(vcpu, reg->id, uaddr);
+
+       r = index_to_coproc_reg(vcpu, reg->id);
+       if (!r)
+               return get_invariant_cp15(reg->id, uaddr);
+
+       /* Note: copies two regs if size is 64 bit. */
+       return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+}
+
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       const struct coproc_reg *r;
+       void __user *uaddr = (void __user *)(long)reg->addr;
+
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+               return demux_c15_set(reg->id, uaddr);
+
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
+               return vfp_set_reg(vcpu, reg->id, uaddr);
+
+       r = index_to_coproc_reg(vcpu, reg->id);
+       if (!r)
+               return set_invariant_cp15(reg->id, uaddr);
+
+       /* Note: copies two regs if size is 64 bit */
+       return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+}
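
These two entry points back the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. A minimal, hypothetical user-space sketch (assuming an ARM host where the KVM_REG_ARM_* definitions used above are exported through <linux/kvm.h>, and a vcpu fd already obtained through /dev/kvm) builds a 32-bit cp15 register id exactly the way index_to_params() decodes it:

        #include <linux/kvm.h>
        #include <stdint.h>
        #include <sys/ioctl.h>

        /* Read one 32-bit cp15 register from a vcpu; returns 0 on success. */
        static int get_cp15_u32(int vcpu_fd, uint64_t crn, uint64_t crm,
                                uint64_t op1, uint64_t op2, uint32_t *out)
        {
                struct kvm_one_reg reg = {
                        .id   = KVM_REG_ARM | KVM_REG_SIZE_U32 |
                                (15ULL << KVM_REG_ARM_COPROC_SHIFT) |
                                (crn << KVM_REG_ARM_32_CRN_SHIFT) |
                                (crm << KVM_REG_ARM_CRM_SHIFT) |
                                (op1 << KVM_REG_ARM_OPC1_SHIFT) |
                                (op2 << KVM_REG_ARM_32_OPC2_SHIFT),
                        .addr = (uintptr_t)out,
                };

                return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        }

A CSSELR read, for instance, would pass crn=0, crm=0, op1=2, op2=0, matching the first cp15_regs[] entry.
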
+
+static unsigned int num_demux_regs(void)
+{
+       unsigned int i, count = 0;
+
+       for (i = 0; i < CSSELR_MAX; i++)
+               if (is_valid_cache(i))
+                       count++;
+
+       return count;
+}
+
+static int write_demux_regids(u64 __user *uindices)
+{
+       u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+       unsigned int i;
+
+       val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
+       for (i = 0; i < CSSELR_MAX; i++) {
+               if (!is_valid_cache(i))
+                       continue;
+               if (put_user(val | i, uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+       return 0;
+}
+
+static u64 cp15_to_index(const struct coproc_reg *reg)
+{
+       u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
+       if (reg->is_64) {
+               val |= KVM_REG_SIZE_U64;
+               val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
+               val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+       } else {
+               val |= KVM_REG_SIZE_U32;
+               val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
+               val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
+               val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+               val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
+       }
+       return val;
+}
+
+static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
+{
+       if (!*uind)
+               return true;
+
+       if (put_user(cp15_to_index(reg), *uind))
+               return false;
+
+       (*uind)++;
+       return true;
+}
+
+/* Assumed ordered tables, see kvm_coproc_table_init. */
+static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
+{
+       const struct coproc_reg *i1, *i2, *end1, *end2;
+       unsigned int total = 0;
+       size_t num;
+
+       /* We check for duplicates here, to allow arch-specific overrides. */
+       i1 = get_target_table(vcpu->arch.target, &num);
+       end1 = i1 + num;
+       i2 = cp15_regs;
+       end2 = cp15_regs + ARRAY_SIZE(cp15_regs);
+
+       BUG_ON(i1 == end1 || i2 == end2);
+
+       /* Walk carefully, as both tables may refer to the same register. */
+       while (i1 || i2) {
+               int cmp = cmp_reg(i1, i2);
+               /* target-specific overrides generic entry. */
+               if (cmp <= 0) {
+                       /* Ignore registers we trap but don't save. */
+                       if (i1->reg) {
+                               if (!copy_reg_to_user(i1, &uind))
+                                       return -EFAULT;
+                               total++;
+                       }
+               } else {
+                       /* Ignore registers we trap but don't save. */
+                       if (i2->reg) {
+                               if (!copy_reg_to_user(i2, &uind))
+                                       return -EFAULT;
+                               total++;
+                       }
+               }
+
+               if (cmp <= 0 && ++i1 == end1)
+                       i1 = NULL;
+               if (cmp >= 0 && ++i2 == end2)
+                       i2 = NULL;
+       }
+       return total;
+}
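
walk_cp15() relies on both tables being sorted in cmp_reg() order so that a single merge pass can enumerate every register exactly once, with a target-specific entry shadowing a generic one when the keys collide. A stand-alone sketch of the same two-pointer pattern over plain integer keys:

        /* Count a merged view of two sorted key arrays; 'specific' wins ties. */
        static int merge_count(const int *specific, int ns, const int *generic, int ng)
        {
                int i = 0, j = 0, total = 0;

                while (i < ns || j < ng) {
                        int cmp;

                        if (i == ns)
                                cmp = 1;        /* only generic keys left */
                        else if (j == ng)
                                cmp = -1;       /* only specific keys left */
                        else
                                cmp = specific[i] - generic[j];

                        total++;                /* one entry per distinct key */
                        if (cmp <= 0)
                                i++;
                        if (cmp >= 0)
                                j++;            /* skip the shadowed generic key */
                }
                return total;
        }

        int main(void)
        {
                const int a15[] = { 1, 5, 9 };          /* target-specific keys */
                const int generic[] = { 1, 2, 9, 12 };  /* generic keys */

                /* 1 and 9 exist in both tables, so the merged count is 5. */
                return merge_count(a15, 3, generic, 4) == 5 ? 0 : 1;
        }
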
+
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
+{
+       return ARRAY_SIZE(invariant_cp15)
+               + num_demux_regs()
+               + num_vfp_regs()
+               + walk_cp15(vcpu, (u64 __user *)NULL);
+}
+
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       unsigned int i;
+       int err;
+
+       /* Then give them all the invariant registers' indices. */
+       for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
+               if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       err = walk_cp15(vcpu, uindices);
+       if (err < 0)
+               return err;
+       uindices += err;
+
+       err = copy_vfp_regids(uindices);
+       if (err < 0)
+               return err;
+       uindices += err;
+
+       return write_demux_regids(uindices);
+}
+
+void kvm_coproc_table_init(void)
+{
+       unsigned int i;
+
+       /* Make sure tables are unique and in order. */
+       for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
+               BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+
+       /* We abuse the reset function to overwrite the table itself. */
+       for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
+               invariant_cp15[i].reset(NULL, &invariant_cp15[i]);
+
+       /*
+        * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
+        *
+        *   If software reads the Cache Type fields from Ctype1
+        *   upwards, once it has seen a value of 0b000, no caches
+        *   exist at further-out levels of the hierarchy. So, for
+        *   example, if Ctype3 is the first Cache Type field with a
+        *   value of 0b000, the values of Ctype4 to Ctype7 must be
+        *   ignored.
+        */
+       asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
+       for (i = 0; i < 7; i++)
+               if (((cache_levels >> (i*3)) & 7) == 0)
+                       break;
+       /* Clear all higher bits. */
+       cache_levels &= (1 << (i*3))-1;
+}
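
A worked example of the CLIDR clean-up, using a hypothetical cache layout with separate L1 instruction and data caches (Ctype1 = 0b011) and a unified L2 (Ctype2 = 0b100): the loop stops at i = 2, the mask becomes (1 << 6) - 1, and cache_levels keeps exactly those two fields (0x23). is_valid_cache() then accepts CSSELR values 0, 1 and 2 and rejects everything above.

        #include <assert.h>

        int main(void)
        {
                /* Hypothetical CLIDR: Ctype1 = 0b011, Ctype2 = 0b100, rest 0 */
                unsigned int clidr = (4u << 3) | 3u;    /* 0x23 */
                unsigned int cache_levels, i;

                for (i = 0; i < 7; i++)
                        if (((clidr >> (i * 3)) & 7) == 0)
                                break;
                cache_levels = clidr & ((1u << (i * 3)) - 1);

                assert(i == 2);                 /* Ctype3 is the first zero field */
                assert(cache_levels == 0x23);   /* L1 separate I/D + L2 unified */
                return 0;
        }
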
+
+/**
+ * kvm_reset_coprocs - sets cp15 registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
+{
+       size_t num;
+       const struct coproc_reg *table;
+
+       /* Catch someone adding a register without putting in reset entry. */
+       memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+
+       /* Generic chip reset first (so target could override). */
+       reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+       table = get_target_table(vcpu->arch.target, &num);
+       reset_coproc_regs(vcpu, table, num);
+
+       for (num = 1; num < NR_CP15_REGS; num++)
+               if (vcpu->arch.cp15[num] == 0x42424242)
+                       panic("Didn't reset vcpu->arch.cp15[%zi]", num);
+}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
new file mode 100644 (file)
index 0000000..992adfa
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_LOCAL_H__
+#define __ARM_KVM_COPROC_LOCAL_H__
+
+struct coproc_params {
+       unsigned long CRn;
+       unsigned long CRm;
+       unsigned long Op1;
+       unsigned long Op2;
+       unsigned long Rt1;
+       unsigned long Rt2;
+       bool is_64bit;
+       bool is_write;
+};
+
+struct coproc_reg {
+       /* MRC/MCR/MRRC/MCRR instruction which accesses it. */
+       unsigned long CRn;
+       unsigned long CRm;
+       unsigned long Op1;
+       unsigned long Op2;
+
+       bool is_64;
+
+       /* Trapped access from guest, if non-NULL. */
+       bool (*access)(struct kvm_vcpu *,
+                      const struct coproc_params *,
+                      const struct coproc_reg *);
+
+       /* Initialization for vcpu. */
+       void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
+
+       /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+       unsigned long reg;
+
+       /* Value (usually reset value) */
+       u64 val;
+};
+
+static inline void print_cp_instr(const struct coproc_params *p)
+{
+       /* Look, we even formatted it for you to paste into the table! */
+       if (p->is_64bit) {
+               kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
+                             p->CRm, p->Op1, p->is_write ? "write" : "read");
+       } else {
+               kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
+                             " func_%s },\n",
+                             p->CRn, p->CRm, p->Op1, p->Op2,
+                             p->is_write ? "write" : "read");
+       }
+}
+
+static inline bool ignore_write(struct kvm_vcpu *vcpu,
+                               const struct coproc_params *p)
+{
+       return true;
+}
+
+static inline bool read_zero(struct kvm_vcpu *vcpu,
+                            const struct coproc_params *p)
+{
+       *vcpu_reg(vcpu, p->Rt1) = 0;
+       return true;
+}
+
+static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
+                                     const struct coproc_params *params)
+{
+       kvm_debug("CP15 write to read-only register at: %08x\n",
+                 *vcpu_pc(vcpu));
+       print_cp_instr(params);
+       return false;
+}
+
+static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
+                                       const struct coproc_params *params)
+{
+       kvm_debug("CP15 read to write-only register at: %08x\n",
+                 *vcpu_pc(vcpu));
+       print_cp_instr(params);
+       return false;
+}
+
+/* Reset functions */
+static inline void reset_unknown(struct kvm_vcpu *vcpu,
+                                const struct coproc_reg *r)
+{
+       BUG_ON(!r->reg);
+       BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
+       vcpu->arch.cp15[r->reg] = 0xdecafbad;
+}
+
+static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       BUG_ON(!r->reg);
+       BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
+       vcpu->arch.cp15[r->reg] = r->val;
+}
+
+static inline void reset_unknown64(struct kvm_vcpu *vcpu,
+                                  const struct coproc_reg *r)
+{
+       BUG_ON(!r->reg);
+       BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+
+       vcpu->arch.cp15[r->reg] = 0xdecafbad;
+       vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+}
+
+static inline int cmp_reg(const struct coproc_reg *i1,
+                         const struct coproc_reg *i2)
+{
+       BUG_ON(i1 == i2);
+       if (!i1)
+               return 1;
+       else if (!i2)
+               return -1;
+       if (i1->CRn != i2->CRn)
+               return i1->CRn - i2->CRn;
+       if (i1->CRm != i2->CRm)
+               return i1->CRm - i2->CRm;
+       if (i1->Op1 != i2->Op1)
+               return i1->Op1 - i2->Op1;
+       return i1->Op2 - i2->Op2;
+}
+
+
+#define CRn(_x)                .CRn = _x
+#define CRm(_x)        .CRm = _x
+#define Op1(_x)        .Op1 = _x
+#define Op2(_x)        .Op2 = _x
+#define is64           .is_64 = true
+#define is32           .is_64 = false
+
+#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
new file mode 100644 (file)
index 0000000..685063a
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.com.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <linux/init.h>
+
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       /*
+        * Compute guest MPIDR:
+        * (Even if we present only one VCPU to the guest on an SMP
+        * host, we don't set the U bit in the MPIDR, nor do we clear
+        * it in the opposite case: revealing the underlying hardware
+        * properties is likely to be the best choice).
+        */
+       vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
+               | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
+}
+
+#include "coproc.h"
+
+/* A15 TRM 4.3.28: RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+                        const struct coproc_params *p,
+                        const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+       return true;
+}
+
+/* A15 TRM 4.3.60: R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+                       const struct coproc_params *p,
+                       const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return write_to_read_only(vcpu, p);
+       return read_zero(vcpu, p);
+}
+
+/* A15 TRM 4.3.48: R/O WI. */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+                         const struct coproc_params *p,
+                         const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+       return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       u32 l2ctlr, ncores;
+
+       asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+       l2ctlr &= ~(3 << 24);
+       ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+       l2ctlr |= (ncores & 3) << 24;
+
+       vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+       u32 actlr;
+
+       /* ACTLR contains SMP bit: make sure you create all cpus first! */
+       asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+       /* Make the SMP bit consistent with the guest configuration */
+       if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+               actlr |= 1U << 6;
+       else
+               actlr &= ~(1U << 6);
+
+       vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+                          const struct coproc_params *p,
+                          const struct coproc_reg *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       *vcpu_reg(vcpu, p->Rt1) = 0;
+       return true;
+}
+
+/*
+ * A15-specific CP15 registers.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
+ */
+static const struct coproc_reg a15_regs[] = {
+       /* MPIDR: we use VMPIDR for guest access. */
+       { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+                       NULL, reset_mpidr, c0_MPIDR },
+
+       /* SCTLR: swapped by interrupt.S. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+                       NULL, reset_val, c1_SCTLR, 0x00C50078 },
+       /* ACTLR: trapped by HCR.TAC bit. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+                       access_actlr, reset_actlr, c1_ACTLR },
+       /* CPACR: swapped by interrupt.S. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_val, c1_CPACR, 0x00000000 },
+
+       /*
+        * L2CTLR access (guest wants to know #CPUs).
+        */
+       { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+                       access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+       { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
+       /* The Configuration Base Address Register. */
+       { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
+};
+
+static struct kvm_coproc_target_table a15_target_table = {
+       .target = KVM_ARM_TARGET_CORTEX_A15,
+       .table = a15_regs,
+       .num = ARRAY_SIZE(a15_regs),
+};
+
+static int __init coproc_a15_init(void)
+{
+       unsigned int i;
+
+       for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
+               BUG_ON(cmp_reg(&a15_regs[i-1],
+                              &a15_regs[i]) >= 0);
+
+       kvm_register_target_coproc_table(&a15_target_table);
+       return 0;
+}
+late_initcall(coproc_a15_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
new file mode 100644 (file)
index 0000000..d61450a
--- /dev/null
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+#define VCPU_NR_MODES          6
+#define VCPU_REG_OFFSET_USR    0
+#define VCPU_REG_OFFSET_FIQ    1
+#define VCPU_REG_OFFSET_IRQ    2
+#define VCPU_REG_OFFSET_SVC    3
+#define VCPU_REG_OFFSET_ABT    4
+#define VCPU_REG_OFFSET_UND    5
+#define REG_OFFSET(_reg) \
+       (offsetof(struct kvm_regs, _reg) / sizeof(u32))
+
+#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
+
+static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
+       /* USR/SYS Registers */
+       [VCPU_REG_OFFSET_USR] = {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
+       },
+
+       /* FIQ Registers */
+       [VCPU_REG_OFFSET_FIQ] = {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7),
+               REG_OFFSET(fiq_regs[0]), /* r8 */
+               REG_OFFSET(fiq_regs[1]), /* r9 */
+               REG_OFFSET(fiq_regs[2]), /* r10 */
+               REG_OFFSET(fiq_regs[3]), /* r11 */
+               REG_OFFSET(fiq_regs[4]), /* r12 */
+               REG_OFFSET(fiq_regs[5]), /* r13 */
+               REG_OFFSET(fiq_regs[6]), /* r14 */
+       },
+
+       /* IRQ Registers */
+       [VCPU_REG_OFFSET_IRQ] = {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(irq_regs[0]), /* r13 */
+               REG_OFFSET(irq_regs[1]), /* r14 */
+       },
+
+       /* SVC Registers */
+       [VCPU_REG_OFFSET_SVC] = {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(svc_regs[0]), /* r13 */
+               REG_OFFSET(svc_regs[1]), /* r14 */
+       },
+
+       /* ABT Registers */
+       [VCPU_REG_OFFSET_ABT] = {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(abt_regs[0]), /* r13 */
+               REG_OFFSET(abt_regs[1]), /* r14 */
+       },
+
+       /* UND Registers */
+       [VCPU_REG_OFFSET_UND] = {
+               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+               USR_REG_OFFSET(12),
+               REG_OFFSET(und_regs[0]), /* r13 */
+               REG_OFFSET(und_regs[1]), /* r14 */
+       },
+};
+
+/*
+ * Return a pointer to the register number valid in the current mode of
+ * the virtual CPU.
+ */
+u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+{
+       u32 *reg_array = (u32 *)&vcpu->arch.regs;
+       u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+
+       switch (mode) {
+       case USR_MODE...SVC_MODE:
+               mode &= ~MODE32_BIT; /* 0 ... 3 */
+               break;
+
+       case ABT_MODE:
+               mode = VCPU_REG_OFFSET_ABT;
+               break;
+
+       case UND_MODE:
+               mode = VCPU_REG_OFFSET_UND;
+               break;
+
+       case SYSTEM_MODE:
+               mode = VCPU_REG_OFFSET_USR;
+               break;
+
+       default:
+               BUG();
+       }
+
+       return reg_array + vcpu_reg_offsets[mode][reg_num];
+}
+
+/*
+ * Return the SPSR for the current mode of the virtual CPU.
+ */
+u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+{
+       u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+       switch (mode) {
+       case SVC_MODE:
+               return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+       case ABT_MODE:
+               return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+       case UND_MODE:
+               return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+       case IRQ_MODE:
+               return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+       case FIQ_MODE:
+               return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+       default:
+               BUG();
+       }
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:      the vcpu pointer
+ * @run:       the kvm_run structure pointer
+ *
+ * Simply blocks the VCPU via kvm_vcpu_block(), which halts execution of
+ * world-switches and schedules other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       trace_kvm_wfi(*vcpu_pc(vcpu));
+       kvm_vcpu_block(vcpu);
+       return 1;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu:      The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+       unsigned long itbits, cond;
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
+       bool is_arm = !(cpsr & PSR_T_BIT);
+
+       BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
+
+       if (!(cpsr & PSR_IT_MASK))
+               return;
+
+       cond = (cpsr & 0xe000) >> 13;
+       itbits = (cpsr & 0x1c00) >> (10 - 2);
+       itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+       /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
+       if ((itbits & 0x7) == 0)
+               itbits = cond = 0;
+       else
+               itbits = (itbits << 1) & 0x1f;
+
+       cpsr &= ~PSR_IT_MASK;
+       cpsr |= cond << 13;
+       cpsr |= (itbits & 0x1c) << (10 - 2);
+       cpsr |= (itbits & 0x3) << 25;
+       *vcpu_cpsr(vcpu) = cpsr;
+}
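
For example, a two-instruction ITT EQ block starts with IT[4:0] = 0b00100 (the EQ condition base sits in IT[7:5]). One advance shifts it to 0b01000; the next advance finds IT[2:0] == 0 and clears ITSTATE, leaving the block. A small stand-alone sketch of just that advance over IT[4:0]:

        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical ITT EQ block: IT[4:0] = 0b00100 */
                unsigned int itbits = 0x04;
                int insns = 0;

                while (itbits) {
                        insns++;
                        if ((itbits & 0x7) == 0)
                                itbits = 0;     /* last instruction in the block */
                        else
                                itbits = (itbits << 1) & 0x1f;
                }

                /* Prints: block covered 2 instructions */
                printf("block covered %d instructions\n", insns);
                return 0;
        }
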
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+       bool is_thumb;
+
+       is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
+       if (is_thumb && !is_wide_instr)
+               *vcpu_pc(vcpu) += 2;
+       else
+               *vcpu_pc(vcpu) += 4;
+       kvm_adjust_itstate(vcpu);
+}
+
+
+/******************************************************************************
+ * Inject exceptions into the guest
+ */
+
+static u32 exc_vector_base(struct kvm_vcpu *vcpu)
+{
+       u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+       u32 vbar = vcpu->arch.cp15[c12_VBAR];
+
+       if (sctlr & SCTLR_V)
+               return 0xffff0000;
+       else /* always have security exceptions */
+               return vbar;
+}
+
+/**
+ * kvm_inject_undefined - inject an undefined exception into the guest
+ * @vcpu: The VCPU to receive the undefined exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ *
+ * Modelled after TakeUndefInstrException() pseudocode.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+       u32 new_lr_value;
+       u32 new_spsr_value;
+       u32 cpsr = *vcpu_cpsr(vcpu);
+       u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+       bool is_thumb = (cpsr & PSR_T_BIT);
+       u32 vect_offset = 4;
+       u32 return_offset = (is_thumb) ? 2 : 4;
+
+       new_spsr_value = cpsr;
+       new_lr_value = *vcpu_pc(vcpu) - return_offset;
+
+       *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
+       *vcpu_cpsr(vcpu) |= PSR_I_BIT;
+       *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+       if (sctlr & SCTLR_TE)
+               *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+       if (sctlr & SCTLR_EE)
+               *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+       /* Note: These now point to UND banked copies */
+       *vcpu_spsr(vcpu) = cpsr;
+       *vcpu_reg(vcpu, 14) = new_lr_value;
+
+       /* Branch to exception vector */
+       *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
+ * pseudocode.
+ */
+static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+{
+       u32 new_lr_value;
+       u32 new_spsr_value;
+       u32 cpsr = *vcpu_cpsr(vcpu);
+       u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+       bool is_thumb = (cpsr & PSR_T_BIT);
+       u32 vect_offset;
+       u32 return_offset = (is_thumb) ? 4 : 0;
+       bool is_lpae;
+
+       new_spsr_value = cpsr;
+       new_lr_value = *vcpu_pc(vcpu) + return_offset;
+
+       *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
+       *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
+       *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+       if (sctlr & SCTLR_TE)
+               *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+       if (sctlr & SCTLR_EE)
+               *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+       /* Note: These now point to ABT banked copies */
+       *vcpu_spsr(vcpu) = cpsr;
+       *vcpu_reg(vcpu, 14) = new_lr_value;
+
+       if (is_pabt)
+               vect_offset = 12;
+       else
+               vect_offset = 16;
+
+       /* Branch to exception vector */
+       *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+
+       if (is_pabt) {
+               /* Set IFAR and IFSR */
+               vcpu->arch.cp15[c6_IFAR] = addr;
+               is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+               /* Always give debug fault for now - should give guest a clue */
+               if (is_lpae)
+                       vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+               else
+                       vcpu->arch.cp15[c5_IFSR] = 2;
+       } else { /* !iabt */
+               /* Set DFAR and DFSR */
+               vcpu->arch.cp15[c6_DFAR] = addr;
+               is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+               /* Always give debug fault for now - should give guest a clue */
+               if (is_lpae)
+                       vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+               else
+                       vcpu->arch.cp15[c5_DFSR] = 2;
+       }
+
+}
+
+/**
+ * kvm_inject_dabt - inject a data abort into the guest
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+       inject_abt(vcpu, false, addr);
+}
+
+/**
+ * kvm_inject_pabt - inject a prefetch abort into the guest
+ * @vcpu: The VCPU to receive the prefetch abort
+ * @addr: The address to report in the IFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+       inject_abt(vcpu, true, addr);
+}
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
new file mode 100644 (file)
index 0000000..2339d96
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+
+#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
+#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { NULL }
+};
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static u64 core_reg_offset_from_id(u64 id)
+{
+       return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
+}
+
+static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       u32 __user *uaddr = (u32 __user *)(long)reg->addr;
+       struct kvm_regs *regs = &vcpu->arch.regs;
+       u64 off;
+
+       if (KVM_REG_SIZE(reg->id) != 4)
+               return -ENOENT;
+
+       /* Our ID is an index into the kvm_regs struct. */
+       off = core_reg_offset_from_id(reg->id);
+       if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
+               return -ENOENT;
+
+       return put_user(((u32 *)regs)[off], uaddr);
+}
+
+static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       u32 __user *uaddr = (u32 __user *)(long)reg->addr;
+       struct kvm_regs *regs = &vcpu->arch.regs;
+       u64 off, val;
+
+       if (KVM_REG_SIZE(reg->id) != 4)
+               return -ENOENT;
+
+       /* Our ID is an index into the kvm_regs struct. */
+       off = core_reg_offset_from_id(reg->id);
+       if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
+               return -ENOENT;
+
+       if (get_user(val, uaddr) != 0)
+               return -EFAULT;
+
+       if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
+               unsigned long mode = val & MODE_MASK;
+               switch (mode) {
+               case USR_MODE:
+               case FIQ_MODE:
+               case IRQ_MODE:
+               case SVC_MODE:
+               case ABT_MODE:
+               case UND_MODE:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       ((u32 *)regs)[off] = val;
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       return -EINVAL;
+}
+
+static unsigned long num_core_regs(void)
+{
+       return sizeof(struct kvm_regs) / sizeof(u32);
+}
+
+/**
+ * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+{
+       return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
+}
+
+/**
+ * kvm_arm_copy_reg_indices - get indices of all registers.
+ *
+ * We do core registers right here, then we append coproc regs.
+ */
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       unsigned int i;
+       const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
+
+       for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
+               if (put_user(core_reg | i, uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       return kvm_arm_copy_coproc_indices(vcpu, uindices);
+}
+
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       /* We currently use nothing arch-specific in upper 32 bits */
+       if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
+               return -EINVAL;
+
+       /* Register group 16 means we want a core register. */
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+               return get_core_reg(vcpu, reg);
+
+       return kvm_arm_coproc_get_reg(vcpu, reg);
+}
+
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       /* We currently use nothing arch-specific in upper 32 bits */
+       if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
+               return -EINVAL;
+
+       /* Register group 16 means we set a core register. */
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+               return set_core_reg(vcpu, reg);
+
+       return kvm_arm_coproc_set_reg(vcpu, reg);
+}
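
From user space, a core register is then read with the generic KVM_GET_ONE_REG ioctl using an index built exactly as checked above; a minimal sketch (the vcpu_fd descriptor and the use of index 0 for usr_regs.ARM_r0 are assumptions for illustration):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: read the guest's usr r0. Index 0 is the first u32 of struct
     * kvm_regs, i.e. usr_regs.ARM_r0 (assumption for illustration). */
    int read_guest_r0(int vcpu_fd, uint32_t *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | 0,
                    .addr = (uint64_t)(uintptr_t)val,
            };
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }
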
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -EINVAL;
+}
+
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+                       const struct kvm_vcpu_init *init)
+{
+       unsigned int i;
+
+       /* We can only do a Cortex-A15 for now. */
+       if (init->target != kvm_target_cpu())
+               return -EINVAL;
+
+       vcpu->arch.target = init->target;
+       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+
+       /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+       for (i = 0; i < sizeof(init->features) * 8; i++) {
+               if (test_bit(i, (void *)init->features)) {
+                       if (i >= KVM_VCPU_MAX_FEATURES)
+                               return -ENOENT;
+                       set_bit(i, vcpu->arch.features);
+               }
+       }
+
+       /* Now we know what it is, we can reset it. */
+       return kvm_reset_vcpu(vcpu);
+}
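
For illustration, the matching user-space call is the KVM_ARM_VCPU_INIT ioctl added by this series; a minimal sketch (assuming an already-created vcpu file descriptor) could be:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: initialise a vcpu as a Cortex-A15 with no optional features. */
    int init_vcpu_a15(int vcpu_fd)
    {
            struct kvm_vcpu_init init;

            memset(&init, 0, sizeof(init));
            init.target = KVM_ARM_TARGET_CORTEX_A15; /* must match kvm_target_cpu() */
            return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }
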
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                 struct kvm_translation *tr)
+{
+       return -EINVAL;
+}
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
new file mode 100644 (file)
index 0000000..9f37a79
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unified.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+/********************************************************************
+ * Hypervisor initialization
+ *   - should be called with:
+ *       r0,r1 = Hypervisor pgd pointer
+ *       r2 = top of Hyp stack (kernel VA)
+ *       r3 = pointer to hyp vectors
+ */
+
+       .text
+       .pushsection    .hyp.idmap.text,"ax"
+       .align 5
+__kvm_hyp_init:
+       .globl __kvm_hyp_init
+
+       @ Hyp-mode exception vector
+       W(b)    .
+       W(b)    .
+       W(b)    .
+       W(b)    .
+       W(b)    .
+       W(b)    __do_hyp_init
+       W(b)    .
+       W(b)    .
+
+__do_hyp_init:
+       @ Set the HTTBR to point to the hypervisor PGD pointer passed
+       mcrr    p15, 4, r0, r1, c2
+
+       @ Set the HTCR and VTCR to the same shareability and cacheability
+       @ settings as the non-secure TTBCR and with T0SZ == 0.
+       mrc     p15, 4, r0, c2, c0, 2   @ HTCR
+       ldr     r12, =HTCR_MASK
+       bic     r0, r0, r12
+       mrc     p15, 0, r1, c2, c0, 2   @ TTBCR
+       and     r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
+       orr     r0, r0, r1
+       mcr     p15, 4, r0, c2, c0, 2   @ HTCR
+
+       mrc     p15, 4, r1, c2, c1, 2   @ VTCR
+       ldr     r12, =VTCR_MASK
+       bic     r1, r1, r12
+       bic     r0, r0, #(~VTCR_HTCR_SH)        @ clear non-reusable HTCR bits
+       orr     r1, r0, r1
+       orr     r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
+       mcr     p15, 4, r1, c2, c1, 2   @ VTCR
+
+       @ Use the same memory attributes for hyp. accesses as the kernel
+       @ (copy MAIRx to HMAIRx).
+       mrc     p15, 0, r0, c10, c2, 0
+       mcr     p15, 4, r0, c10, c2, 0
+       mrc     p15, 0, r0, c10, c2, 1
+       mcr     p15, 4, r0, c10, c2, 1
+
+       @ Set the HSCTLR to:
+       @  - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
+       @  - Endianness: Kernel config
+       @  - Fast Interrupt Features: Kernel config
+       @  - Write permission implies XN: disabled
+       @  - Instruction cache: enabled
+       @  - Data/Unified cache: enabled
+       @  - Memory alignment checks: enabled
+       @  - MMU: enabled (this code must be run from an identity mapping)
+       mrc     p15, 4, r0, c1, c0, 0   @ HSCTLR
+       ldr     r12, =HSCTLR_MASK
+       bic     r0, r0, r12
+       mrc     p15, 0, r1, c1, c0, 0   @ SCTLR
+       ldr     r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
+       and     r1, r1, r12
+ ARM(  ldr     r12, =(HSCTLR_M | HSCTLR_A)                     )
+ THUMB(        ldr     r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)         )
+       orr     r1, r1, r12
+       orr     r0, r0, r1
+       isb
+       mcr     p15, 4, r0, c1, c0, 0   @ HSCTLR
+       isb
+
+       @ Set stack pointer and return to the kernel
+       mov     sp, r2
+
+       @ Set HVBAR to point to the HYP vectors
+       mcr     p15, 4, r3, c12, c0, 0  @ HVBAR
+
+       eret
+
+       .ltorg
+
+       .globl __kvm_hyp_init_end
+__kvm_hyp_init_end:
+
+       .popsection
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
new file mode 100644 (file)
index 0000000..c5400d2
--- /dev/null
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/unified.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/vfpmacros.h>
+#include "interrupts_head.S"
+
+       .text
+
+__kvm_hyp_code_start:
+       .globl __kvm_hyp_code_start
+
+/********************************************************************
+ * Flush per-VMID TLBs
+ *
+ * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ *
+ * We rely on the hardware to broadcast the TLB invalidation to all CPUs
+ * inside the inner-shareable domain (which is the case for all v7
+ * implementations).  If we come across a non-IS SMP implementation, we'll
+ * have to use an IPI based mechanism. Until then, we stick to the simple
+ * hardware assisted version.
+ */
+ENTRY(__kvm_tlb_flush_vmid)
+       push    {r2, r3}
+
+       add     r0, r0, #KVM_VTTBR
+       ldrd    r2, r3, [r0]
+       mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+       isb
+       mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS (rt ignored)
+       dsb
+       isb
+       mov     r2, #0
+       mov     r3, #0
+       mcrr    p15, 6, r2, r3, c2      @ Back to VMID #0
+       isb                             @ Not necessary if followed by eret
+
+       pop     {r2, r3}
+       bx      lr
+ENDPROC(__kvm_tlb_flush_vmid)
+
+/********************************************************************
+ * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
+ * domain, for all VMIDs
+ *
+ * void __kvm_flush_vm_context(void);
+ */
+ENTRY(__kvm_flush_vm_context)
+       mov     r0, #0                  @ rn parameter for c15 flushes is SBZ
+
+       /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
+       mcr     p15, 4, r0, c8, c3, 4
+       /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
+       mcr     p15, 0, r0, c7, c1, 0
+       dsb
+       isb                             @ Not necessary if followed by eret
+
+       bx      lr
+ENDPROC(__kvm_flush_vm_context)
+
+
+/********************************************************************
+ *  Hypervisor world-switch code
+ *
+ *
+ * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ */
+ENTRY(__kvm_vcpu_run)
+       @ Save the vcpu pointer
+       mcr     p15, 4, vcpu, c13, c0, 2        @ HTPIDR
+
+       save_host_regs
+
+       @ Store hardware CP15 state and load guest state
+       read_cp15_state store_to_vcpu = 0
+       write_cp15_state read_from_vcpu = 1
+
+       @ If the host kernel has not been configured with VFPv3 support,
+       @ then it is safer to deny guests the use of it as well.
+#ifdef CONFIG_VFPv3
+       @ Set FPEXC_EN so the guest doesn't trap floating point instructions
+       VFPFMRX r2, FPEXC               @ VMRS
+       push    {r2}
+       orr     r2, r2, #FPEXC_EN
+       VFPFMXR FPEXC, r2               @ VMSR
+#endif
+
+       @ Configure Hyp-role
+       configure_hyp_role vmentry
+
+       @ Trap coprocessor CRx accesses
+       set_hstr vmentry
+       set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+       set_hdcr vmentry
+
+       @ Write configured ID register into MIDR alias
+       ldr     r1, [vcpu, #VCPU_MIDR]
+       mcr     p15, 4, r1, c0, c0, 0
+
+       @ Write guest view of MPIDR into VMPIDR
+       ldr     r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
+       mcr     p15, 4, r1, c0, c0, 5
+
+       @ Set up guest memory translation
+       ldr     r1, [vcpu, #VCPU_KVM]
+       add     r1, r1, #KVM_VTTBR
+       ldrd    r2, r3, [r1]
+       mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+
+       @ We're all done, just restore the GPRs and go to the guest
+       restore_guest_regs
+       clrex                           @ Clear exclusive monitor
+       eret
+
+__kvm_vcpu_return:
+       /*
+        * return convention:
+        * guest r0, r1, r2 saved on the stack
+        * r0: vcpu pointer
+        * r1: exception code
+        */
+       save_guest_regs
+
+       @ Set VMID == 0
+       mov     r2, #0
+       mov     r3, #0
+       mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+
+       @ Don't trap coprocessor accesses for host kernel
+       set_hstr vmexit
+       set_hdcr vmexit
+       set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+
+#ifdef CONFIG_VFPv3
+       @ Save floating point registers if we let the guest use them.
+       tst     r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+       bne     after_vfp_restore
+
+       @ Switch VFP/NEON hardware state to the host's
+       add     r7, vcpu, #VCPU_VFP_GUEST
+       store_vfp_state r7
+       add     r7, vcpu, #VCPU_VFP_HOST
+       ldr     r7, [r7]
+       restore_vfp_state r7
+
+after_vfp_restore:
+       @ Restore FPEXC_EN which we clobbered on entry
+       pop     {r2}
+       VFPFMXR FPEXC, r2
+#endif
+
+       @ Reset Hyp-role
+       configure_hyp_role vmexit
+
+       @ Let host read hardware MIDR
+       mrc     p15, 0, r2, c0, c0, 0
+       mcr     p15, 4, r2, c0, c0, 0
+
+       @ Back to hardware MPIDR
+       mrc     p15, 0, r2, c0, c0, 5
+       mcr     p15, 4, r2, c0, c0, 5
+
+       @ Store guest CP15 state and restore host state
+       read_cp15_state store_to_vcpu = 1
+       write_cp15_state read_from_vcpu = 0
+
+       restore_host_regs
+       clrex                           @ Clear exclusive monitor
+       mov     r0, r1                  @ Return the return code
+       mov     r1, #0                  @ Clear upper bits in return value
+       bx      lr                      @ return to IOCTL
+
+/********************************************************************
+ *  Call function in Hyp mode
+ *
+ *
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C sense and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed).  The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
+ * passed in r0 and r1.
+ *
+ * The calling convention follows the standard AAPCS:
+ *   r0 - r3: caller save
+ *   r12:     caller save
+ *   rest:    callee save
+ */
+ENTRY(kvm_call_hyp)
+       hvc     #0
+       bx      lr
+
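
Later in this series, mmu.c uses exactly this convention for the per-VMID TLB flush defined above:

    /* From arch/arm/kvm/mmu.c below: the hyp function pointer comes first,
     * its single argument (the struct kvm pointer) is passed in r0. */
    static void kvm_tlb_flush_vmid(struct kvm *kvm)
    {
            kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
    }
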
+/********************************************************************
+ * Hypervisor exception vector and handlers
+ *
+ *
+ * The KVM/ARM Hypervisor ABI is defined as follows:
+ *
+ * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
+ * instruction is issued since all traps are disabled when running the host
+ * kernel as per the Hyp-mode initialization at boot time.
+ *
+ * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
+ * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * instructions are called from within Hyp-mode.
+ *
+ * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
+ *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
+ *    exception vector code will check that the HVC comes from VMID==0 and if
+ *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
+ *    - r0 contains a pointer to a HYP function
+ *    - r1, r2, and r3 contain arguments to the above function.
+ *    - The HYP function will be called with its arguments in r0, r1 and r2.
+ *    On HYP function return, we return directly to SVC.
+ *
+ * Note that the above is used to execute code in Hyp-mode from a host-kernel
+ * point of view, and is a different concept from performing a world-switch and
+ * executing guest code in SVC mode (with a VMID != 0).
+ */
+
+/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
+.macro bad_exception exception_code, panic_str
+       push    {r0-r2}
+       mrrc    p15, 6, r0, r1, c2      @ Read VTTBR
+       lsr     r1, r1, #16
+       ands    r1, r1, #0xff
+       beq     99f
+
+       load_vcpu                       @ Load VCPU pointer
+       .if \exception_code == ARM_EXCEPTION_DATA_ABORT
+       mrc     p15, 4, r2, c5, c2, 0   @ HSR
+       mrc     p15, 4, r1, c6, c0, 0   @ HDFAR
+       str     r2, [vcpu, #VCPU_HSR]
+       str     r1, [vcpu, #VCPU_HxFAR]
+       .endif
+       .if \exception_code == ARM_EXCEPTION_PREF_ABORT
+       mrc     p15, 4, r2, c5, c2, 0   @ HSR
+       mrc     p15, 4, r1, c6, c0, 2   @ HIFAR
+       str     r2, [vcpu, #VCPU_HSR]
+       str     r1, [vcpu, #VCPU_HxFAR]
+       .endif
+       mov     r1, #\exception_code
+       b       __kvm_vcpu_return
+
+       @ We were in the host already. Let's craft a panicking return to SVC.
+99:    mrs     r2, cpsr
+       bic     r2, r2, #MODE_MASK
+       orr     r2, r2, #SVC_MODE
+THUMB( orr     r2, r2, #PSR_T_BIT      )
+       msr     spsr_cxsf, r2
+       mrs     r1, ELR_hyp
+       ldr     r2, =BSYM(panic)
+       msr     ELR_hyp, r2
+       ldr     r0, =\panic_str
+       eret
+.endm
+
+       .text
+
+       .align 5
+__kvm_hyp_vector:
+       .globl __kvm_hyp_vector
+
+       @ Hyp-mode exception vector
+       W(b)    hyp_reset
+       W(b)    hyp_undef
+       W(b)    hyp_svc
+       W(b)    hyp_pabt
+       W(b)    hyp_dabt
+       W(b)    hyp_hvc
+       W(b)    hyp_irq
+       W(b)    hyp_fiq
+
+       .align
+hyp_reset:
+       b       hyp_reset
+
+       .align
+hyp_undef:
+       bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
+
+       .align
+hyp_svc:
+       bad_exception ARM_EXCEPTION_HVC, svc_die_str
+
+       .align
+hyp_pabt:
+       bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
+
+       .align
+hyp_dabt:
+       bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
+
+       .align
+hyp_hvc:
+       /*
+        * Getting here is either because of a trap from a guest or from calling
+        * HVC from the host kernel, which means "switch to Hyp mode".
+        */
+       push    {r0, r1, r2}
+
+       @ Check syndrome register
+       mrc     p15, 4, r1, c5, c2, 0   @ HSR
+       lsr     r0, r1, #HSR_EC_SHIFT
+#ifdef CONFIG_VFPv3
+       cmp     r0, #HSR_EC_CP_0_13
+       beq     switch_to_guest_vfp
+#endif
+       cmp     r0, #HSR_EC_HVC
+       bne     guest_trap              @ Not HVC instr.
+
+       /*
+        * Let's check if the HVC came from VMID 0 and allow simple
+        * switch to Hyp mode
+        */
+       mrrc    p15, 6, r0, r2, c2
+       lsr     r2, r2, #16
+       and     r2, r2, #0xff
+       cmp     r2, #0
+       bne     guest_trap              @ Guest called HVC
+
+host_switch_to_hyp:
+       pop     {r0, r1, r2}
+
+       push    {lr}
+       mrs     lr, SPSR
+       push    {lr}
+
+       mov     lr, r0
+       mov     r0, r1
+       mov     r1, r2
+       mov     r2, r3
+
+THUMB( orr     lr, #1)
+       blx     lr                      @ Call the HYP function
+
+       pop     {lr}
+       msr     SPSR_csxf, lr
+       pop     {lr}
+       eret
+
+guest_trap:
+       load_vcpu                       @ Load VCPU pointer to r0
+       str     r1, [vcpu, #VCPU_HSR]
+
+       @ Check if we need the fault information
+       lsr     r1, r1, #HSR_EC_SHIFT
+       cmp     r1, #HSR_EC_IABT
+       mrceq   p15, 4, r2, c6, c0, 2   @ HIFAR
+       beq     2f
+       cmp     r1, #HSR_EC_DABT
+       bne     1f
+       mrc     p15, 4, r2, c6, c0, 0   @ HDFAR
+
+2:     str     r2, [vcpu, #VCPU_HxFAR]
+
+       /*
+        * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
+        *
+        * Abort on the stage 2 translation for a memory access from a
+        * Non-secure PL1 or PL0 mode:
+        *
+        * For any Access flag fault or Translation fault, and also for any
+        * Permission fault on the stage 2 translation of a memory access
+        * made as part of a translation table walk for a stage 1 translation,
+        * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
+        * is UNKNOWN.
+        */
+
+       /* Check for permission fault, and S1PTW */
+       mrc     p15, 4, r1, c5, c2, 0   @ HSR
+       and     r0, r1, #HSR_FSC_TYPE
+       cmp     r0, #FSC_PERM
+       tsteq   r1, #(1 << 7)           @ S1PTW
+       mrcne   p15, 4, r2, c6, c0, 4   @ HPFAR
+       bne     3f
+
+       /* Resolve IPA using the xFAR */
+       mcr     p15, 0, r2, c7, c8, 0   @ ATS1CPR
+       isb
+       mrrc    p15, 0, r0, r1, c7      @ PAR
+       tst     r0, #1
+       bne     4f                      @ Failed translation
+       ubfx    r2, r0, #12, #20
+       lsl     r2, r2, #4
+       orr     r2, r2, r1, lsl #24
+
+3:     load_vcpu                       @ Load VCPU pointer to r0
+       str     r2, [r0, #VCPU_HPFAR]
+
+1:     mov     r1, #ARM_EXCEPTION_HVC
+       b       __kvm_vcpu_return
+
+4:     pop     {r0, r1, r2}            @ Failed translation, return to guest
+       eret
+
+/*
+ * If VFPv3 support is not available, then we will not switch the VFP
+ * registers; however cp10 and cp11 accesses will still trap and fallback
+ * to the regular coprocessor emulation code, which currently will
+ * inject an undefined exception to the guest.
+ */
+#ifdef CONFIG_VFPv3
+switch_to_guest_vfp:
+       load_vcpu                       @ Load VCPU pointer to r0
+       push    {r3-r7}
+
+       @ NEON/VFP used.  Turn on VFP access.
+       set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
+
+       @ Switch VFP/NEON hardware state to the guest's
+       add     r7, r0, #VCPU_VFP_HOST
+       ldr     r7, [r7]
+       store_vfp_state r7
+       add     r7, r0, #VCPU_VFP_GUEST
+       restore_vfp_state r7
+
+       pop     {r3-r7}
+       pop     {r0-r2}
+       eret
+#endif
+
+       .align
+hyp_irq:
+       push    {r0, r1, r2}
+       mov     r1, #ARM_EXCEPTION_IRQ
+       load_vcpu                       @ Load VCPU pointer to r0
+       b       __kvm_vcpu_return
+
+       .align
+hyp_fiq:
+       b       hyp_fiq
+
+       .ltorg
+
+__kvm_hyp_code_end:
+       .globl  __kvm_hyp_code_end
+
+       .section ".rodata"
+
+und_die_str:
+       .ascii  "unexpected undefined exception in Hyp mode at: %#08x"
+pabt_die_str:
+       .ascii  "unexpected prefetch abort in Hyp mode at: %#08x"
+dabt_die_str:
+       .ascii  "unexpected data abort in Hyp mode at: %#08x"
+svc_die_str:
+       .ascii  "unexpected HVC/SVC trap in Hyp mode at: %#08x"
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
new file mode 100644 (file)
index 0000000..6a95d34
--- /dev/null
@@ -0,0 +1,441 @@
+#define VCPU_USR_REG(_reg_nr)  (VCPU_USR_REGS + (_reg_nr * 4))
+#define VCPU_USR_SP            (VCPU_USR_REG(13))
+#define VCPU_USR_LR            (VCPU_USR_REG(14))
+#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
+
+/*
+ * Many of these macros need to access the VCPU structure, which is always
+ * held in r0. These macros should never clobber r1, as it is used to hold the
+ * exception code on the return path (except of course the macro that switches
+ * all the registers before the final jump to the VM).
+ */
+vcpu   .req    r0              @ vcpu pointer always in r0
+
+/* Clobbers {r2-r6} */
+.macro store_vfp_state vfp_base
+       @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
+       VFPFMRX r2, FPEXC
+       @ Make sure VFP is enabled so we can touch the registers.
+       orr     r6, r2, #FPEXC_EN
+       VFPFMXR FPEXC, r6
+
+       VFPFMRX r3, FPSCR
+       tst     r2, #FPEXC_EX           @ Check for VFP Subarchitecture
+       beq     1f
+       @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
+       @ we only need to save them if FPEXC_EX is set.
+       VFPFMRX r4, FPINST
+       tst     r2, #FPEXC_FP2V
+       VFPFMRX r5, FPINST2, ne         @ vmrsne
+       bic     r6, r2, #FPEXC_EX       @ FPEXC_EX disable
+       VFPFMXR FPEXC, r6
+1:
+       VFPFSTMIA \vfp_base, r6         @ Save VFP registers
+       stm     \vfp_base, {r2-r5}      @ Save FPEXC, FPSCR, FPINST, FPINST2
+.endm
+
+/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
+.macro restore_vfp_state vfp_base
+       VFPFLDMIA \vfp_base, r6         @ Load VFP registers
+       ldm     \vfp_base, {r2-r5}      @ Load FPEXC, FPSCR, FPINST, FPINST2
+
+       VFPFMXR FPSCR, r3
+       tst     r2, #FPEXC_EX           @ Check for VFP Subarchitecture
+       beq     1f
+       VFPFMXR FPINST, r4
+       tst     r2, #FPEXC_FP2V
+       VFPFMXR FPINST2, r5, ne
+1:
+       VFPFMXR FPEXC, r2       @ FPEXC (last, in case !EN)
+.endm
+
+/* These are simply for the macros to work - values don't have meaning */
+.equ usr, 0
+.equ svc, 1
+.equ abt, 2
+.equ und, 3
+.equ irq, 4
+.equ fiq, 5
+
+.macro push_host_regs_mode mode
+       mrs     r2, SP_\mode
+       mrs     r3, LR_\mode
+       mrs     r4, SPSR_\mode
+       push    {r2, r3, r4}
+.endm
+
+/*
+ * Store all host persistent registers on the stack.
+ * Clobbers all registers, in all modes, except r0 and r1.
+ */
+.macro save_host_regs
+       /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
+       mrs     r2, ELR_hyp
+       push    {r2}
+
+       /* usr regs */
+       push    {r4-r12}        @ r0-r3 are always clobbered
+       mrs     r2, SP_usr
+       mov     r3, lr
+       push    {r2, r3}
+
+       push_host_regs_mode svc
+       push_host_regs_mode abt
+       push_host_regs_mode und
+       push_host_regs_mode irq
+
+       /* fiq regs */
+       mrs     r2, r8_fiq
+       mrs     r3, r9_fiq
+       mrs     r4, r10_fiq
+       mrs     r5, r11_fiq
+       mrs     r6, r12_fiq
+       mrs     r7, SP_fiq
+       mrs     r8, LR_fiq
+       mrs     r9, SPSR_fiq
+       push    {r2-r9}
+.endm
+
+.macro pop_host_regs_mode mode
+       pop     {r2, r3, r4}
+       msr     SP_\mode, r2
+       msr     LR_\mode, r3
+       msr     SPSR_\mode, r4
+.endm
+
+/*
+ * Restore all host registers from the stack.
+ * Clobbers all registers, in all modes, except r0 and r1.
+ */
+.macro restore_host_regs
+       pop     {r2-r9}
+       msr     r8_fiq, r2
+       msr     r9_fiq, r3
+       msr     r10_fiq, r4
+       msr     r11_fiq, r5
+       msr     r12_fiq, r6
+       msr     SP_fiq, r7
+       msr     LR_fiq, r8
+       msr     SPSR_fiq, r9
+
+       pop_host_regs_mode irq
+       pop_host_regs_mode und
+       pop_host_regs_mode abt
+       pop_host_regs_mode svc
+
+       pop     {r2, r3}
+       msr     SP_usr, r2
+       mov     lr, r3
+       pop     {r4-r12}
+
+       pop     {r2}
+       msr     ELR_hyp, r2
+.endm
+
+/*
+ * Restore SP, LR and SPSR for a given mode. offset is the offset of
+ * this mode's registers from the VCPU base.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r1, r2, r3, r4.
+ */
+.macro restore_guest_regs_mode mode, offset
+       add     r1, vcpu, \offset
+       ldm     r1, {r2, r3, r4}
+       msr     SP_\mode, r2
+       msr     LR_\mode, r3
+       msr     SPSR_\mode, r4
+.endm
+
+/*
+ * Restore all guest registers from the vcpu struct.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers *all* registers.
+ */
+.macro restore_guest_regs
+       restore_guest_regs_mode svc, #VCPU_SVC_REGS
+       restore_guest_regs_mode abt, #VCPU_ABT_REGS
+       restore_guest_regs_mode und, #VCPU_UND_REGS
+       restore_guest_regs_mode irq, #VCPU_IRQ_REGS
+
+       add     r1, vcpu, #VCPU_FIQ_REGS
+       ldm     r1, {r2-r9}
+       msr     r8_fiq, r2
+       msr     r9_fiq, r3
+       msr     r10_fiq, r4
+       msr     r11_fiq, r5
+       msr     r12_fiq, r6
+       msr     SP_fiq, r7
+       msr     LR_fiq, r8
+       msr     SPSR_fiq, r9
+
+       @ Load return state
+       ldr     r2, [vcpu, #VCPU_PC]
+       ldr     r3, [vcpu, #VCPU_CPSR]
+       msr     ELR_hyp, r2
+       msr     SPSR_cxsf, r3
+
+       @ Load user registers
+       ldr     r2, [vcpu, #VCPU_USR_SP]
+       ldr     r3, [vcpu, #VCPU_USR_LR]
+       msr     SP_usr, r2
+       mov     lr, r3
+       add     vcpu, vcpu, #(VCPU_USR_REGS)
+       ldm     vcpu, {r0-r12}
+.endm
+
+/*
+ * Save SP, LR and SPSR for a given mode. offset is the offset of
+ * this mode's registers from the VCPU base.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2, r3, r4, r5.
+ */
+.macro save_guest_regs_mode mode, offset
+       add     r2, vcpu, \offset
+       mrs     r3, SP_\mode
+       mrs     r4, LR_\mode
+       mrs     r5, SPSR_\mode
+       stm     r2, {r3, r4, r5}
+.endm
+
+/*
+ * Save all guest registers to the vcpu struct
+ * Expects guest's r0, r1, r2 on the stack.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2, r3, r4, r5.
+ */
+.macro save_guest_regs
+       @ Store usr registers
+       add     r2, vcpu, #VCPU_USR_REG(3)
+       stm     r2, {r3-r12}
+       add     r2, vcpu, #VCPU_USR_REG(0)
+       pop     {r3, r4, r5}            @ r0, r1, r2
+       stm     r2, {r3, r4, r5}
+       mrs     r2, SP_usr
+       mov     r3, lr
+       str     r2, [vcpu, #VCPU_USR_SP]
+       str     r3, [vcpu, #VCPU_USR_LR]
+
+       @ Store return state
+       mrs     r2, ELR_hyp
+       mrs     r3, spsr
+       str     r2, [vcpu, #VCPU_PC]
+       str     r3, [vcpu, #VCPU_CPSR]
+
+       @ Store other guest registers
+       save_guest_regs_mode svc, #VCPU_SVC_REGS
+       save_guest_regs_mode abt, #VCPU_ABT_REGS
+       save_guest_regs_mode und, #VCPU_UND_REGS
+       save_guest_regs_mode irq, #VCPU_IRQ_REGS
+.endm
+
+/* Reads cp15 registers from hardware and stores them in memory
+ * @store_to_vcpu: If 0, registers are written in-order to the stack,
+ *                otherwise to the VCPU struct pointed to by the vcpu register
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2 - r12
+ */
+.macro read_cp15_state store_to_vcpu
+       mrc     p15, 0, r2, c1, c0, 0   @ SCTLR
+       mrc     p15, 0, r3, c1, c0, 2   @ CPACR
+       mrc     p15, 0, r4, c2, c0, 2   @ TTBCR
+       mrc     p15, 0, r5, c3, c0, 0   @ DACR
+       mrrc    p15, 0, r6, r7, c2      @ TTBR 0
+       mrrc    p15, 1, r8, r9, c2      @ TTBR 1
+       mrc     p15, 0, r10, c10, c2, 0 @ PRRR
+       mrc     p15, 0, r11, c10, c2, 1 @ NMRR
+       mrc     p15, 2, r12, c0, c0, 0  @ CSSELR
+
+       .if \store_to_vcpu == 0
+       push    {r2-r12}                @ Push CP15 registers
+       .else
+       str     r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
+       str     r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
+       str     r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
+       str     r5, [vcpu, #CP15_OFFSET(c3_DACR)]
+       add     r2, vcpu, #CP15_OFFSET(c2_TTBR0)
+       strd    r6, r7, [r2]
+       add     r2, vcpu, #CP15_OFFSET(c2_TTBR1)
+       strd    r8, r9, [r2]
+       str     r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
+       str     r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
+       str     r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
+       .endif
+
+       mrc     p15, 0, r2, c13, c0, 1  @ CID
+       mrc     p15, 0, r3, c13, c0, 2  @ TID_URW
+       mrc     p15, 0, r4, c13, c0, 3  @ TID_URO
+       mrc     p15, 0, r5, c13, c0, 4  @ TID_PRIV
+       mrc     p15, 0, r6, c5, c0, 0   @ DFSR
+       mrc     p15, 0, r7, c5, c0, 1   @ IFSR
+       mrc     p15, 0, r8, c5, c1, 0   @ ADFSR
+       mrc     p15, 0, r9, c5, c1, 1   @ AIFSR
+       mrc     p15, 0, r10, c6, c0, 0  @ DFAR
+       mrc     p15, 0, r11, c6, c0, 2  @ IFAR
+       mrc     p15, 0, r12, c12, c0, 0 @ VBAR
+
+       .if \store_to_vcpu == 0
+       push    {r2-r12}                @ Push CP15 registers
+       .else
+       str     r2, [vcpu, #CP15_OFFSET(c13_CID)]
+       str     r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
+       str     r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
+       str     r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
+       str     r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
+       str     r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
+       str     r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
+       str     r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
+       str     r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
+       str     r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
+       str     r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
+       .endif
+.endm
+
+/*
+ * Reads cp15 registers from memory and writes them to hardware
+ * @read_from_vcpu: If 0, registers are read in-order from the stack,
+ *                 otherwise from the VCPU struct pointed to by the vcpu register
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro write_cp15_state read_from_vcpu
+       .if \read_from_vcpu == 0
+       pop     {r2-r12}
+       .else
+       ldr     r2, [vcpu, #CP15_OFFSET(c13_CID)]
+       ldr     r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
+       ldr     r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
+       ldr     r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
+       ldr     r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
+       ldr     r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
+       ldr     r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
+       ldr     r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
+       ldr     r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
+       ldr     r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
+       ldr     r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
+       .endif
+
+       mcr     p15, 0, r2, c13, c0, 1  @ CID
+       mcr     p15, 0, r3, c13, c0, 2  @ TID_URW
+       mcr     p15, 0, r4, c13, c0, 3  @ TID_URO
+       mcr     p15, 0, r5, c13, c0, 4  @ TID_PRIV
+       mcr     p15, 0, r6, c5, c0, 0   @ DFSR
+       mcr     p15, 0, r7, c5, c0, 1   @ IFSR
+       mcr     p15, 0, r8, c5, c1, 0   @ ADFSR
+       mcr     p15, 0, r9, c5, c1, 1   @ AIFSR
+       mcr     p15, 0, r10, c6, c0, 0  @ DFAR
+       mcr     p15, 0, r11, c6, c0, 2  @ IFAR
+       mcr     p15, 0, r12, c12, c0, 0 @ VBAR
+
+       .if \read_from_vcpu == 0
+       pop     {r2-r12}
+       .else
+       ldr     r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
+       ldr     r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
+       ldr     r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
+       ldr     r5, [vcpu, #CP15_OFFSET(c3_DACR)]
+       add     r12, vcpu, #CP15_OFFSET(c2_TTBR0)
+       ldrd    r6, r7, [r12]
+       add     r12, vcpu, #CP15_OFFSET(c2_TTBR1)
+       ldrd    r8, r9, [r12]
+       ldr     r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
+       ldr     r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
+       ldr     r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
+       .endif
+
+       mcr     p15, 0, r2, c1, c0, 0   @ SCTLR
+       mcr     p15, 0, r3, c1, c0, 2   @ CPACR
+       mcr     p15, 0, r4, c2, c0, 2   @ TTBCR
+       mcr     p15, 0, r5, c3, c0, 0   @ DACR
+       mcrr    p15, 0, r6, r7, c2      @ TTBR 0
+       mcrr    p15, 1, r8, r9, c2      @ TTBR 1
+       mcr     p15, 0, r10, c10, c2, 0 @ PRRR
+       mcr     p15, 0, r11, c10, c2, 1 @ NMRR
+       mcr     p15, 2, r12, c0, c0, 0  @ CSSELR
+.endm
+
+/*
+ * Save the VGIC CPU state into memory
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro save_vgic_state
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro restore_vgic_state
+.endm
+
+.equ vmentry,  0
+.equ vmexit,   1
+
+/* Configures the HSTR (Hyp System Trap Register) on entry/return
+ * (hardware reset value is 0) */
+.macro set_hstr operation
+       mrc     p15, 4, r2, c1, c1, 3
+       ldr     r3, =HSTR_T(15)
+       .if \operation == vmentry
+       orr     r2, r2, r3              @ Trap CR{15}
+       .else
+       bic     r2, r2, r3              @ Don't trap any CRx accesses
+       .endif
+       mcr     p15, 4, r2, c1, c1, 3
+.endm
+
+/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
+ * (hardware reset value is 0). Keep previous value in r2. */
+.macro set_hcptr operation, mask
+       mrc     p15, 4, r2, c1, c1, 2
+       ldr     r3, =\mask
+       .if \operation == vmentry
+       orr     r3, r2, r3              @ Trap coproc-accesses defined in mask
+       .else
+       bic     r3, r2, r3              @ Don't trap defined coproc-accesses
+       .endif
+       mcr     p15, 4, r3, c1, c1, 2
+.endm
+
+/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
+ * (hardware reset value is 0) */
+.macro set_hdcr operation
+       mrc     p15, 4, r2, c1, c1, 1
+       ldr     r3, =(HDCR_TPM|HDCR_TPMCR)
+       .if \operation == vmentry
+       orr     r2, r2, r3              @ Trap some perfmon accesses
+       .else
+       bic     r2, r2, r3              @ Don't trap any perfmon accesses
+       .endif
+       mcr     p15, 4, r2, c1, c1, 1
+.endm
+
+/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
+.macro configure_hyp_role operation
+       mrc     p15, 4, r2, c1, c1, 0   @ HCR
+       bic     r2, r2, #HCR_VIRT_EXCP_MASK
+       ldr     r3, =HCR_GUEST_MASK
+       .if \operation == vmentry
+       orr     r2, r2, r3
+       ldr     r3, [vcpu, #VCPU_IRQ_LINES]
+       orr     r2, r2, r3
+       .else
+       bic     r2, r2, r3
+       .endif
+       mcr     p15, 4, r2, c1, c1, 0
+.endm
+
+.macro load_vcpu
+       mrc     p15, 4, vcpu, c13, c0, 2        @ HTPIDR
+.endm
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
new file mode 100644 (file)
index 0000000..0144baf
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+/**
+ * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ *
+ * This should only be called after returning from userspace for MMIO load
+ * emulation.
+ */
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       __u32 *dest;
+       unsigned int len;
+       int mask;
+
+       if (!run->mmio.is_write) {
+               dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
+               memset(dest, 0, sizeof(int));
+
+               len = run->mmio.len;
+               if (len > 4)
+                       return -EINVAL;
+
+               memcpy(dest, run->mmio.data, len);
+
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+                               *((u64 *)run->mmio.data));
+
+               if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+                       mask = 1U << ((len * 8) - 1);
+                       *dest = (*dest ^ mask) - mask;
+               }
+       }
+
+       return 0;
+}
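
The XOR/subtract above is the usual branch-free sign extension; a worked example (illustrative only) for a one-byte load:

    /* len = 1  =>  mask = 1U << 7 = 0x80
     * data 0xff: (0x000000ff ^ 0x80) - 0x80 = 0x7f - 0x80 = 0xffffffff  (-1)
     * data 0x7f: (0x0000007f ^ 0x80) - 0x80 = 0xff - 0x80 = 0x0000007f  (127)
     */
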
+
+static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+                     struct kvm_exit_mmio *mmio)
+{
+       unsigned long rt, len;
+       bool is_write, sign_extend;
+
+       if ((vcpu->arch.hsr >> 8) & 1) {
+               /* cache operation on I/O addr, tell guest unsupported */
+               kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+               return 1;
+       }
+
+       if ((vcpu->arch.hsr >> 7) & 1) {
+               /* page table accesses IO mem: tell guest to fix its TTBR */
+               kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+               return 1;
+       }
+
+       switch ((vcpu->arch.hsr >> 22) & 0x3) {
+       case 0:
+               len = 1;
+               break;
+       case 1:
+               len = 2;
+               break;
+       case 2:
+               len = 4;
+               break;
+       default:
+               kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+               return -EFAULT;
+       }
+
+       is_write = vcpu->arch.hsr & HSR_WNR;
+       sign_extend = vcpu->arch.hsr & HSR_SSE;
+       rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+
+       if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
+               /* IO memory trying to read/write pc */
+               kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+               return 1;
+       }
+
+       mmio->is_write = is_write;
+       mmio->phys_addr = fault_ipa;
+       mmio->len = len;
+       vcpu->arch.mmio_decode.sign_extend = sign_extend;
+       vcpu->arch.mmio_decode.rt = rt;
+
+       /*
+        * The MMIO instruction is emulated and should not be re-executed
+        * in the guest.
+        */
+       kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+       return 0;
+}
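
For reference, the HSR fields consulted above for a data abort taken to Hyp mode are, per ARM DDI 0406C (the HSR_WNR, HSR_SSE and HSR_SRT_* constants come from kvm_arm.h, added by this series):

    /* HSR[25]     IL    - 0: 16-bit instruction, 1: 32-bit (passed to kvm_skip_instr)
     * HSR[24]     ISV   - syndrome valid (checked by io_mem_abort below)
     * HSR[23:22]  SAS   - access size: 0 = byte, 1 = halfword, 2 = word
     * HSR[21]     SSE   - sign-extend the loaded value
     * HSR[19:16]  SRT   - source/destination register number
     * HSR[8]      CM    - cache maintenance operation
     * HSR[7]      S1PTW - abort on a stage 1 translation table walk
     * HSR[6]      WnR   - 1 for a write, 0 for a read
     */
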
+
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                phys_addr_t fault_ipa)
+{
+       struct kvm_exit_mmio mmio;
+       unsigned long rt;
+       int ret;
+
+       /*
+        * Prepare MMIO operation. First stash it in a private
+        * structure that we can use for in-kernel emulation. If the
+        * kernel can't handle it, copy it into run->mmio and let user
+        * space do its magic.
+        */
+
+       if (vcpu->arch.hsr & HSR_ISV) {
+               ret = decode_hsr(vcpu, fault_ipa, &mmio);
+               if (ret)
+                       return ret;
+       } else {
+               kvm_err("load/store instruction decoding not implemented\n");
+               return -ENOSYS;
+       }
+
+       rt = vcpu->arch.mmio_decode.rt;
+       trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
+                                        KVM_TRACE_MMIO_READ_UNSATISFIED,
+                       mmio.len, fault_ipa,
+                       (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+
+       if (mmio.is_write)
+               memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+
+       kvm_prepare_mmio(run, &mmio);
+       return 0;
+}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
new file mode 100644 (file)
index 0000000..f30e131
--- /dev/null
@@ -0,0 +1,787 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/mman.h>
+#include <linux/kvm_host.h>
+#include <linux/io.h>
+#include <trace/events/kvm.h>
+#include <asm/idmap.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/mach/map.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
+
+static void kvm_tlb_flush_vmid(struct kvm *kvm)
+{
+       kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
+
+static void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+       pte_val(*pte) = new_pte;
+       /*
+        * flush_pmd_entry just takes a void pointer and cleans the necessary
+        * cache entries, so we can reuse the function for ptes.
+        */
+       flush_pmd_entry(pte);
+}
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+                                 int min, int max)
+{
+       void *page;
+
+       BUG_ON(max > KVM_NR_MEM_OBJS);
+       if (cache->nobjs >= min)
+               return 0;
+       while (cache->nobjs < max) {
+               page = (void *)__get_free_page(PGALLOC_GFP);
+               if (!page)
+                       return -ENOMEM;
+               cache->objects[cache->nobjs++] = page;
+       }
+       return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+       while (mc->nobjs)
+               free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+       void *p;
+
+       BUG_ON(!mc || !mc->nobjs);
+       p = mc->objects[--mc->nobjs];
+       return p;
+}
+
+static void free_ptes(pmd_t *pmd, unsigned long addr)
+{
+       pte_t *pte;
+       unsigned int i;
+
+       for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
+               if (!pmd_none(*pmd) && pmd_table(*pmd)) {
+                       pte = pte_offset_kernel(pmd, addr);
+                       pte_free_kernel(NULL, pte);
+               }
+               pmd++;
+       }
+}
+
+/**
+ * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
+ *
+ * Assumes this is a page table used strictly in Hyp-mode and therefore contains
+ * only mappings in the kernel memory area, which is above PAGE_OFFSET.
+ */
+void free_hyp_pmds(void)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       unsigned long addr;
+
+       mutex_lock(&kvm_hyp_pgd_mutex);
+       for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
+               pgd = hyp_pgd + pgd_index(addr);
+               pud = pud_offset(pgd, addr);
+
+               if (pud_none(*pud))
+                       continue;
+               BUG_ON(pud_bad(*pud));
+
+               pmd = pmd_offset(pud, addr);
+               free_ptes(pmd, addr);
+               pmd_free(NULL, pmd);
+               pud_clear(pud);
+       }
+       mutex_unlock(&kvm_hyp_pgd_mutex);
+}
+
+static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
+                                   unsigned long end)
+{
+       pte_t *pte;
+       unsigned long addr;
+       struct page *page;
+
+       for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+               pte = pte_offset_kernel(pmd, addr);
+               BUG_ON(!virt_addr_valid(addr));
+               page = virt_to_page(addr);
+               kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
+       }
+}
+
+static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
+                                      unsigned long end,
+                                      unsigned long *pfn_base)
+{
+       pte_t *pte;
+       unsigned long addr;
+
+       for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+               pte = pte_offset_kernel(pmd, addr);
+               BUG_ON(pfn_valid(*pfn_base));
+               kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
+               (*pfn_base)++;
+       }
+}
+
+static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
+                                  unsigned long end, unsigned long *pfn_base)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long addr, next;
+
+       for (addr = start; addr < end; addr = next) {
+               pmd = pmd_offset(pud, addr);
+
+               BUG_ON(pmd_sect(*pmd));
+
+               if (pmd_none(*pmd)) {
+                       pte = pte_alloc_one_kernel(NULL, addr);
+                       if (!pte) {
+                               kvm_err("Cannot allocate Hyp pte\n");
+                               return -ENOMEM;
+                       }
+                       pmd_populate_kernel(NULL, pmd, pte);
+               }
+
+               next = pmd_addr_end(addr, end);
+
+               /*
+                * If pfn_base is NULL, we map kernel pages into HYP with the
+                * virtual address. Otherwise, this is considered an I/O
+                * mapping and we map the physical region starting at
+                * *pfn_base to [start, end).
+                */
+               if (!pfn_base)
+                       create_hyp_pte_mappings(pmd, addr, next);
+               else
+                       create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
+       }
+
+       return 0;
+}
+
+static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
+{
+       unsigned long start = (unsigned long)from;
+       unsigned long end = (unsigned long)to;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       unsigned long addr, next;
+       int err = 0;
+
+       BUG_ON(start > end);
+       if (start < PAGE_OFFSET)
+               return -EINVAL;
+
+       mutex_lock(&kvm_hyp_pgd_mutex);
+       for (addr = start; addr < end; addr = next) {
+               pgd = hyp_pgd + pgd_index(addr);
+               pud = pud_offset(pgd, addr);
+
+               if (pud_none_or_clear_bad(pud)) {
+                       pmd = pmd_alloc_one(NULL, addr);
+                       if (!pmd) {
+                               kvm_err("Cannot allocate Hyp pmd\n");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+                       pud_populate(NULL, pud, pmd);
+               }
+
+               next = pgd_addr_end(addr, end);
+               err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
+               if (err)
+                       goto out;
+       }
+out:
+       mutex_unlock(&kvm_hyp_pgd_mutex);
+       return err;
+}
+
+/**
+ * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * @from:      The virtual kernel start address of the range
+ * @to:                The virtual kernel end address of the range (exclusive)
+ *
+ * The same virtual address as in the kernel is used for the Hyp-mode
+ * mapping, pointing to the same underlying physical pages.
+ *
+ * Note: Wrapping around zero in the "to" address is not supported.
+ */
+int create_hyp_mappings(void *from, void *to)
+{
+       return __create_hyp_mappings(from, to, NULL);
+}
+
+/**
+ * create_hyp_io_mappings - map a physical IO range in Hyp mode
+ * @from:      The virtual HYP start address of the range
+ * @to:                The virtual HYP end address of the range (exclusive)
+ * @addr:      The physical start address which gets mapped
+ */
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
+{
+       unsigned long pfn = __phys_to_pfn(addr);
+       return __create_hyp_mappings(from, to, &pfn);
+}
+
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm:       The KVM struct pointer for the VM.
+ *
+ * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
+ * (it can support either full 40-bit input addresses or be limited to 32-bit
+ * input addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+       pgd_t *pgd;
+
+       if (kvm->arch.pgd != NULL) {
+               kvm_err("kvm_arch already initialized?\n");
+               return -EINVAL;
+       }
+
+       pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
+       if (!pgd)
+               return -ENOMEM;
+
+       /* stage-2 pgd must be aligned to its size */
+       VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
+
+       memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
+       clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+       kvm->arch.pgd = pgd;
+
+       return 0;
+}
+
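+/*
+ * Stage-2 table pages are reference counted: stage2_set_pte() takes a
+ * reference on the table page into which it installs an entry, and the
+ * clear_*_entry() helpers below drop it again.  A table page whose count
+ * is back to 1 (the bare allocation reference) therefore holds no entries
+ * and can be freed, which is what pte_empty()/pmd_empty() test for.
+ */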
+static void clear_pud_entry(pud_t *pud)
+{
+       pmd_t *pmd_table = pmd_offset(pud, 0);
+       pud_clear(pud);
+       pmd_free(NULL, pmd_table);
+       put_page(virt_to_page(pud));
+}
+
+static void clear_pmd_entry(pmd_t *pmd)
+{
+       pte_t *pte_table = pte_offset_kernel(pmd, 0);
+       pmd_clear(pmd);
+       pte_free_kernel(NULL, pte_table);
+       put_page(virt_to_page(pmd));
+}
+
+static bool pmd_empty(pmd_t *pmd)
+{
+       struct page *pmd_page = virt_to_page(pmd);
+       return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+       if (pte_present(*pte)) {
+               kvm_set_pte(pte, __pte(0));
+               put_page(virt_to_page(pte));
+       }
+}
+
+static bool pte_empty(pte_t *pte)
+{
+       struct page *pte_page = virt_to_page(pte);
+       return page_count(pte_page) == 1;
+}
+
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm:   The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size:  The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       phys_addr_t addr = start, end = start + size;
+       u64 range;
+
+       while (addr < end) {
+               pgd = kvm->arch.pgd + pgd_index(addr);
+               pud = pud_offset(pgd, addr);
+               if (pud_none(*pud)) {
+                       addr += PUD_SIZE;
+                       continue;
+               }
+
+               pmd = pmd_offset(pud, addr);
+               if (pmd_none(*pmd)) {
+                       addr += PMD_SIZE;
+                       continue;
+               }
+
+               pte = pte_offset_kernel(pmd, addr);
+               clear_pte_entry(pte);
+               range = PAGE_SIZE;
+
+               /* If we emptied the pte, walk back up the ladder */
+               if (pte_empty(pte)) {
+                       clear_pmd_entry(pmd);
+                       range = PMD_SIZE;
+                       if (pmd_empty(pmd)) {
+                               clear_pud_entry(pud);
+                               range = PUD_SIZE;
+                       }
+               }
+
+               addr += range;
+       }
+}
+
+/**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm:       The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+       if (kvm->arch.pgd == NULL)
+               return;
+
+       unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+       free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+       kvm->arch.pgd = NULL;
+}
+
+
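+/*
+ * stage2_set_pte - install a pte for @addr in the stage-2 tables, allocating
+ * any missing intermediate tables from @cache.  Callers that pass a NULL
+ * cache (the MMU notifier handlers) only update existing mappings, so
+ * missing intermediate tables are simply skipped.
+ */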
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                         phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte, old_pte;
+
+       /* Create 2nd stage page table mapping - Level 1 */
+       pgd = kvm->arch.pgd + pgd_index(addr);
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud)) {
+               if (!cache)
+                       return 0; /* ignore calls from kvm_set_spte_hva */
+               pmd = mmu_memory_cache_alloc(cache);
+               pud_populate(NULL, pud, pmd);
+               pmd += pmd_index(addr);
+               get_page(virt_to_page(pud));
+       } else
+               pmd = pmd_offset(pud, addr);
+
+       /* Create 2nd stage page table mapping - Level 2 */
+       if (pmd_none(*pmd)) {
+               if (!cache)
+                       return 0; /* ignore calls from kvm_set_spte_hva */
+               pte = mmu_memory_cache_alloc(cache);
+               clean_pte_table(pte);
+               pmd_populate_kernel(NULL, pmd, pte);
+               pte += pte_index(addr);
+               get_page(virt_to_page(pmd));
+       } else
+               pte = pte_offset_kernel(pmd, addr);
+
+       if (iomap && pte_present(*pte))
+               return -EFAULT;
+
+       /* Create 2nd stage page table mapping - Level 3 */
+       old_pte = *pte;
+       kvm_set_pte(pte, *new_pte);
+       if (pte_present(old_pte))
+               kvm_tlb_flush_vmid(kvm);
+       else
+               get_page(virt_to_page(pte));
+
+       return 0;
+}
+
+/**
+ * kvm_phys_addr_ioremap - map a device range to guest IPA
+ *
+ * @kvm:       The KVM pointer
+ * @guest_ipa: The IPA at which to insert the mapping
+ * @pa:                The physical address of the device
+ * @size:      The size of the mapping
+ */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+                         phys_addr_t pa, unsigned long size)
+{
+       phys_addr_t addr, end;
+       int ret = 0;
+       unsigned long pfn;
+       struct kvm_mmu_memory_cache cache = { 0, };
+
+       end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+       pfn = __phys_to_pfn(pa);
+
+       for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+               pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+
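+               /*
+                * Pre-allocate up to a pmd and a pte table page here, since
+                * stage2_set_pte() cannot sleep once the mmu_lock is held.
+                */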
+               ret = mmu_topup_memory_cache(&cache, 2, 2);
+               if (ret)
+                       goto out;
+               spin_lock(&kvm->mmu_lock);
+               ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
+               spin_unlock(&kvm->mmu_lock);
+               if (ret)
+                       goto out;
+
+               pfn++;
+       }
+
+out:
+       mmu_free_memory_cache(&cache);
+       return ret;
+}
+
+static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+       /*
+        * If we are going to insert an instruction page and the icache is
+        * either VIPT or PIPT, there is a potential problem where the host
+        * (or another VM) may have used the same page as this guest, and we
+        * read incorrect data from the icache.  If we're using a PIPT cache,
+        * we can invalidate just that page, but if we are using a VIPT cache
+        * we need to invalidate the entire icache - damn shame - as written
+        * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+        *
+        * VIVT caches are tagged using both the ASID and the VMID and don't
+        * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+        */
+       if (icache_is_pipt()) {
+               unsigned long hva = gfn_to_hva(kvm, gfn);
+               __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+       } else if (!icache_is_vivt_asid_tagged()) {
+               /* any kind of VIPT cache */
+               __flush_icache_all();
+       }
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+                         gfn_t gfn, struct kvm_memory_slot *memslot,
+                         unsigned long fault_status)
+{
+       pte_t new_pte;
+       pfn_t pfn;
+       int ret;
+       bool write_fault, writable;
+       unsigned long mmu_seq;
+       struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+
+       write_fault = kvm_is_write_fault(vcpu->arch.hsr);
+       if (fault_status == FSC_PERM && !write_fault) {
+               kvm_err("Unexpected L2 read permission error\n");
+               return -EFAULT;
+       }
+
+       /* We need minimum second+third level pages */
+       ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
+       if (ret)
+               return ret;
+
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       /*
+        * Ensure the read of mmu_notifier_seq happens before we call
+        * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+        * the page we just got a reference to being unmapped before we have a
+        * chance to grab the mmu_lock, which ensures that if the page gets
+        * unmapped afterwards, the call to kvm_unmap_hva will take it away
+        * from us again properly. This smp_rmb() interacts with the smp_wmb()
+        * in kvm_mmu_notifier_invalidate_<page|range_end>.
+        */
+       smp_rmb();
+
+       pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+       if (is_error_pfn(pfn))
+               return -EFAULT;
+
+       new_pte = pfn_pte(pfn, PAGE_S2);
+       coherent_icache_guest_page(vcpu->kvm, gfn);
+
+       spin_lock(&vcpu->kvm->mmu_lock);
+       if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+               goto out_unlock;
+       if (writable) {
+               pte_val(new_pte) |= L_PTE_S2_RDWR;
+               kvm_set_pfn_dirty(pfn);
+       }
+       stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
+out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+       return 0;
+}
+
+/**
+ * kvm_handle_guest_abort - handles all 2nd stage aborts
+ * @vcpu:      the VCPU pointer
+ * @run:       the kvm_run structure
+ *
+ * Any abort that gets to the host is almost guaranteed to be caused by a
+ * missing second stage translation table entry, which can mean either that the
+ * guest simply needs more memory and we must allocate an appropriate page, or
+ * that the guest tried to access I/O memory, which is emulated by user
+ * space. The distinction is based on the IPA causing the fault and whether this
+ * memory region has been registered as standard RAM by user space.
+ */
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long hsr_ec;
+       unsigned long fault_status;
+       phys_addr_t fault_ipa;
+       struct kvm_memory_slot *memslot;
+       bool is_iabt;
+       gfn_t gfn;
+       int ret, idx;
+
+       hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
+       is_iabt = (hsr_ec == HSR_EC_IABT);
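+       /* HPFAR[31:4] holds bits [39:12] of the faulting IPA, hence the shift */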
+       fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+
+       trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
+                             vcpu->arch.hxfar, fault_ipa);
+
+       /* Check that the stage-2 fault is a translation or permission fault */
+       fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+       if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+               kvm_err("Unsupported fault status: EC=%#lx DFSC=%#lx\n",
+                       hsr_ec, fault_status);
+               return -EFAULT;
+       }
+
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+       gfn = fault_ipa >> PAGE_SHIFT;
+       if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+               if (is_iabt) {
+                       /* Prefetch Abort on I/O address */
+                       kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+                       ret = 1;
+                       goto out_unlock;
+               }
+
+               if (fault_status != FSC_FAULT) {
+                       kvm_err("Unsupported fault status on io memory: %#lx\n",
+                               fault_status);
+                       ret = -EFAULT;
+                       goto out_unlock;
+               }
+
+               /* Adjust page offset */
+               fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+               ret = io_mem_abort(vcpu, run, fault_ipa);
+               goto out_unlock;
+       }
+
+       memslot = gfn_to_memslot(vcpu->kvm, gfn);
+       if (!memslot->user_alloc) {
+               kvm_err("non user-alloc memslots not supported\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+       if (ret == 0)
+               ret = 1;
+out_unlock:
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       return ret;
+}
+
+static void handle_hva_to_gpa(struct kvm *kvm,
+                             unsigned long start,
+                             unsigned long end,
+                             void (*handler)(struct kvm *kvm,
+                                             gpa_t gpa, void *data),
+                             void *data)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+
+       slots = kvm_memslots(kvm);
+
+       /* we only care about the pages that the guest sees */
+       kvm_for_each_memslot(memslot, slots) {
+               unsigned long hva_start, hva_end;
+               gfn_t gfn, gfn_end;
+
+               hva_start = max(start, memslot->userspace_addr);
+               hva_end = min(end, memslot->userspace_addr +
+                                       (memslot->npages << PAGE_SHIFT));
+               if (hva_start >= hva_end)
+                       continue;
+
+               /*
+                * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+                */
+               gfn = hva_to_gfn_memslot(hva_start, memslot);
+               gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+               for (; gfn < gfn_end; ++gfn) {
+                       gpa_t gpa = gfn << PAGE_SHIFT;
+                       handler(kvm, gpa, data);
+               }
+       }
+}
+
+static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+       unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+       kvm_tlb_flush_vmid(kvm);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       unsigned long end = hva + PAGE_SIZE;
+
+       if (!kvm->arch.pgd)
+               return 0;
+
+       trace_kvm_unmap_hva(hva);
+       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+       return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+                       unsigned long start, unsigned long end)
+{
+       if (!kvm->arch.pgd)
+               return 0;
+
+       trace_kvm_unmap_hva_range(start, end);
+       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+       return 0;
+}
+
+static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+       pte_t *pte = (pte_t *)data;
+
+       stage2_set_pte(kvm, NULL, gpa, pte, false);
+}
+
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       unsigned long end = hva + PAGE_SIZE;
+       pte_t stage2_pte;
+
+       if (!kvm->arch.pgd)
+               return;
+
+       trace_kvm_set_spte_hva(hva);
+       stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+       handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+phys_addr_t kvm_mmu_get_httbr(void)
+{
+       VM_BUG_ON(!virt_addr_valid(hyp_pgd));
+       return virt_to_phys(hyp_pgd);
+}
+
+int kvm_mmu_init(void)
+{
+       if (!hyp_pgd) {
+               kvm_err("Hyp mode PGD not allocated\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
+ *
+ * Free the underlying pmds for all pgds in range and clear the pgds (but
+ * don't free them) afterwards.
+ */
+void kvm_clear_hyp_idmap(void)
+{
+       unsigned long addr, end;
+       unsigned long next;
+       pgd_t *pgd = hyp_pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       addr = virt_to_phys(__hyp_idmap_text_start);
+       end = virt_to_phys(__hyp_idmap_text_end);
+
+       pgd += pgd_index(addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               pud = pud_offset(pgd, addr);
+               pmd = pmd_offset(pud, addr);
+
+               pud_clear(pud);
+               clean_pmd_entry(pmd);
+               pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
+       } while (pgd++, addr = next, addr < end);
+}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
new file mode 100644 (file)
index 0000000..7ee5bb7
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/wait.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_psci.h>
+
+/*
+ * This is an implementation of the Power State Coordination Interface
+ * as described in ARM document number ARM DEN 0022A.
+ */
+
+static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pause = true;
+}
+
+static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
+       struct kvm *kvm = source_vcpu->kvm;
+       struct kvm_vcpu *vcpu;
+       wait_queue_head_t *wq;
+       unsigned long cpu_id;
+       phys_addr_t target_pc;
+
+       cpu_id = *vcpu_reg(source_vcpu, 1);
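+       /* Only the lower 32 bits of r1 are significant for a 32-bit guest */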
+       if (vcpu_mode_is_32bit(source_vcpu))
+               cpu_id &= ~((u32) 0);
+
+       if (cpu_id >= atomic_read(&kvm->online_vcpus))
+               return KVM_PSCI_RET_INVAL;
+
+       target_pc = *vcpu_reg(source_vcpu, 2);
+
+       vcpu = kvm_get_vcpu(kvm, cpu_id);
+
+       wq = kvm_arch_vcpu_wq(vcpu);
+       if (!waitqueue_active(wq))
+               return KVM_PSCI_RET_INVAL;
+
+       kvm_reset_vcpu(vcpu);
+
+       /* Gracefully handle Thumb2 entry point */
+       if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+               target_pc &= ~((phys_addr_t) 1);
+               vcpu_set_thumb(vcpu);
+       }
+
+       *vcpu_pc(vcpu) = target_pc;
+       vcpu->arch.pause = false;
+       smp_mb();               /* Make sure the above is visible */
+
+       wake_up_interruptible(wq);
+
+       return KVM_PSCI_RET_SUCCESS;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC or SMC instructions.
+ * The calling convention is similar to SMC calls to the secure world, where
+ * the function number is placed in r0. This function returns true if the
+ * function number specified in r0 is within the PSCI range, and false
+ * otherwise.
+ */
+bool kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+       unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
+
+       switch (psci_fn) {
+       case KVM_PSCI_FN_CPU_OFF:
+               kvm_psci_vcpu_off(vcpu);
+               val = KVM_PSCI_RET_SUCCESS;
+               break;
+       case KVM_PSCI_FN_CPU_ON:
+               val = kvm_psci_vcpu_on(vcpu);
+               break;
+       case KVM_PSCI_FN_CPU_SUSPEND:
+       case KVM_PSCI_FN_MIGRATE:
+               val = KVM_PSCI_RET_NI;
+               break;
+
+       default:
+               return false;
+       }
+
+       *vcpu_reg(vcpu, 0) = val;
+       return true;
+}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
new file mode 100644 (file)
index 0000000..b80256b
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include <asm/unified.h>
+#include <asm/ptrace.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+
+/******************************************************************************
+ * Cortex-A15 Reset Values
+ */
+
+static const int a15_max_cpu_idx = 3;
+
+static struct kvm_regs a15_regs_reset = {
+       .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
+};
+
+
+/*******************************************************************************
+ * Exported reset function
+ */
+
+/**
+ * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       struct kvm_regs *cpu_reset;
+
+       switch (vcpu->arch.target) {
+       case KVM_ARM_TARGET_CORTEX_A15:
+               if (vcpu->vcpu_id > a15_max_cpu_idx)
+                       return -EINVAL;
+               cpu_reset = &a15_regs_reset;
+               vcpu->arch.midr = read_cpuid_id();
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       /* Reset core registers */
+       memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+
+       /* Reset CP15 registers */
+       kvm_reset_coprocs(vcpu);
+
+       return 0;
+}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
new file mode 100644 (file)
index 0000000..a8e73ed
--- /dev/null
@@ -0,0 +1,235 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for entry/exit to guest
+ */
+TRACE_EVENT(kvm_entry,
+       TP_PROTO(unsigned long vcpu_pc),
+       TP_ARGS(vcpu_pc),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+       ),
+
+       TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_exit,
+       TP_PROTO(unsigned long vcpu_pc),
+       TP_ARGS(vcpu_pc),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+       ),
+
+       TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_guest_fault,
+       TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
+                unsigned long hxfar,
+                unsigned long long ipa),
+       TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+               __field(        unsigned long,  hsr             )
+               __field(        unsigned long,  hxfar           )
+               __field(   unsigned long long,  ipa             )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+               __entry->hsr                    = hsr;
+               __entry->hxfar                  = hxfar;
+               __entry->ipa                    = ipa;
+       ),
+
+       TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
+                 "ipa %#16llx, hsr %#08lx)",
+                 __entry->vcpu_pc, __entry->hxfar,
+                 __entry->ipa, __entry->hsr)
+);
+
+TRACE_EVENT(kvm_irq_line,
+       TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
+       TP_ARGS(type, vcpu_idx, irq_num, level),
+
+       TP_STRUCT__entry(
+               __field(        unsigned int,   type            )
+               __field(        int,            vcpu_idx        )
+               __field(        int,            irq_num         )
+               __field(        int,            level           )
+       ),
+
+       TP_fast_assign(
+               __entry->type           = type;
+               __entry->vcpu_idx       = vcpu_idx;
+               __entry->irq_num        = irq_num;
+               __entry->level          = level;
+       ),
+
+       TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
+                 (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
+                 (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
+                 (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
+                 __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
+);
+
+TRACE_EVENT(kvm_mmio_emulate,
+       TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
+                unsigned long cpsr),
+       TP_ARGS(vcpu_pc, instr, cpsr),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+               __field(        unsigned long,  instr           )
+               __field(        unsigned long,  cpsr            )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+               __entry->instr                  = instr;
+               __entry->cpsr                   = cpsr;
+       ),
+
+       TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+                 __entry->vcpu_pc, __entry->instr, __entry->cpsr)
+);
+
+/* Architecturally implementation defined CP15 register access */
+TRACE_EVENT(kvm_emulate_cp15_imp,
+       TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
+                unsigned long CRm, unsigned long Op2, bool is_write),
+       TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
+
+       TP_STRUCT__entry(
+               __field(        unsigned int,   Op1             )
+               __field(        unsigned int,   Rt1             )
+               __field(        unsigned int,   CRn             )
+               __field(        unsigned int,   CRm             )
+               __field(        unsigned int,   Op2             )
+               __field(        bool,           is_write        )
+       ),
+
+       TP_fast_assign(
+               __entry->is_write               = is_write;
+               __entry->Op1                    = Op1;
+               __entry->Rt1                    = Rt1;
+               __entry->CRn                    = CRn;
+               __entry->CRm                    = CRm;
+               __entry->Op2                    = Op2;
+       ),
+
+       TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
+                       (__entry->is_write) ? "mcr" : "mrc",
+                       __entry->Op1, __entry->Rt1, __entry->CRn,
+                       __entry->CRm, __entry->Op2)
+);
+
+TRACE_EVENT(kvm_wfi,
+       TP_PROTO(unsigned long vcpu_pc),
+       TP_ARGS(vcpu_pc),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+       ),
+
+       TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+       TP_PROTO(unsigned long hva),
+       TP_ARGS(hva),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  hva             )
+       ),
+
+       TP_fast_assign(
+               __entry->hva            = hva;
+       ),
+
+       TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_unmap_hva_range,
+       TP_PROTO(unsigned long start, unsigned long end),
+       TP_ARGS(start, end),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  start           )
+               __field(        unsigned long,  end             )
+       ),
+
+       TP_fast_assign(
+               __entry->start          = start;
+               __entry->end            = end;
+       ),
+
+       TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+                 __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_set_spte_hva,
+       TP_PROTO(unsigned long hva),
+       TP_ARGS(hva),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  hva             )
+       ),
+
+       TP_fast_assign(
+               __entry->hva            = hva;
+       ),
+
+       TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_hvc,
+       TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
+       TP_ARGS(vcpu_pc, r0, imm),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+               __field(        unsigned long,  r0              )
+               __field(        unsigned long,  imm             )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+               __entry->r0             = r0;
+               __entry->imm            = imm;
+       ),
+
+       TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+                 __entry->vcpu_pc, __entry->r0, __entry->imm)
+);
+
+#endif /* _TRACE_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH arch/arm/kvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 3fd629d..025d173 100644 (file)
@@ -629,8 +629,9 @@ config ARM_THUMBEE
          make use of it. Say N for code that can run on CPUs without ThumbEE.
 
 config ARM_VIRT_EXT
-       bool "Native support for the ARM Virtualization Extensions"
-       depends on MMU && CPU_V7
+       bool
+       depends on MMU
+       default y if CPU_V7
        help
          Enable the kernel to make use of the ARM Virtualization
          Extensions to install hypervisors without run-time firmware
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT
          use of this feature.  Refer to Documentation/arm/Booting for
          details.
 
-         It is safe to enable this option even if the kernel may not be
-         booted in HYP mode, may not have support for the
-         virtualization extensions, or may be booted with a
-         non-compliant bootloader.
-
 config SWP_EMULATE
        bool "Emulate SWP/SWPB instructions"
        depends on !CPU_USE_DOMAINS && CPU_V7
index 99db769..2dffc01 100644 (file)
@@ -1,4 +1,6 @@
+#include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 
 #include <asm/cputype.h>
 #include <asm/idmap.h>
@@ -6,6 +8,7 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/system_info.h>
+#include <asm/virt.h>
 
 pgd_t *idmap_pgd;
 
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
        } while (pud++, addr = next, addr != end);
 }
 
-static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+                                const char *text_end, unsigned long prot)
 {
-       unsigned long prot, next;
+       unsigned long addr, end;
+       unsigned long next;
+
+       addr = virt_to_phys(text_start);
+       end = virt_to_phys(text_end);
+
+       prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
-       prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                prot |= PMD_BIT4;
 
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
        } while (pgd++, addr = next, addr != end);
 }
 
+#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
+pgd_t *hyp_pgd;
+
+extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static int __init init_static_idmap_hyp(void)
+{
+       hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       if (!hyp_pgd)
+               return -ENOMEM;
+
+       pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
+               __hyp_idmap_text_start, __hyp_idmap_text_end);
+       identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+                            __hyp_idmap_text_end, PMD_SECT_AP1);
+
+       return 0;
+}
+#else
+static int __init init_static_idmap_hyp(void)
+{
+       return 0;
+}
+#endif
+
 extern char  __idmap_text_start[], __idmap_text_end[];
 
 static int __init init_static_idmap(void)
 {
-       phys_addr_t idmap_start, idmap_end;
+       int ret;
 
        idmap_pgd = pgd_alloc(&init_mm);
        if (!idmap_pgd)
                return -ENOMEM;
 
-       /* Add an identity mapping for the physical address of the section. */
-       idmap_start = virt_to_phys((void *)__idmap_text_start);
-       idmap_end = virt_to_phys((void *)__idmap_text_end);
+       pr_info("Setting up static identity map for 0x%p - 0x%p\n",
+               __idmap_text_start, __idmap_text_end);
+       identity_mapping_add(idmap_pgd, __idmap_text_start,
+                            __idmap_text_end, 0);
 
-       pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
-               (long long)idmap_start, (long long)idmap_end);
-       identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+       ret = init_static_idmap_hyp();
 
        /* Flush L1 for the hardware to see this page table content */
        flush_cache_louis();
 
-       return 0;
+       return ret;
 }
 early_initcall(init_static_idmap);
 
index ce328c7..8fcf8bd 100644 (file)
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
        unsigned int    cr_mask;
        pmdval_t        pmd;
        pteval_t        pte;
+       pteval_t        pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)      policy
+#else
+#define s2_policy(policy)      0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
        }
 };
 
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+       pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
        int cpu_arch = cpu_architecture();
        int i;
 
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+       s2_pgprot = cp->pte_s2;
+       hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
        /*
         * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
                        user_pgprot |= L_PTE_SHARED;
                        kern_pgprot |= L_PTE_SHARED;
                        vecs_pgprot |= L_PTE_SHARED;
+                       s2_pgprot |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
                        mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | kern_pgprot);
+       pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+       pgprot_s2_device  = __pgprot(s2_device_pgprot);
+       pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
index 1507723..372e921 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
+#include <linux/mmc/pm.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
  * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  */
 struct variant_data {
        unsigned int            clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
        bool                    blksz_datactrl16;
        u32                     pwrreg_powerup;
        bool                    signal_direction;
+       bool                    pwrreg_clkgate;
 };
 
 static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
        .pwrreg_powerup         = MCI_PWR_UP,
 };
 
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+       .fifosize               = 128 * 4,
+       .fifohalfsize           = 64 * 4,
+       .clkreg_enable          = MCI_ARM_HWFCEN,
+       .datalength_bits        = 16,
+       .pwrreg_powerup         = MCI_PWR_UP,
+};
+
 static struct variant_data variant_u300 = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
        .sdio                   = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
+       .pwrreg_clkgate         = true,
 };
 
 static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
+       .pwrreg_clkgate         = true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
+       .pwrreg_clkgate         = true,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -131,8 +145,27 @@ static struct variant_data variant_ux500v2 = {
        .blksz_datactrl16       = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
+       .pwrreg_clkgate         = true,
 };
 
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+                             struct mmc_data *data)
+{
+       if (!data)
+               return 0;
+
+       if (!is_power_of_2(data->blksz)) {
+               dev_err(mmc_dev(host->mmc),
+                       "unsupported block size (%d bytes)\n", data->blksz);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /*
  * This must be called with host->lock held
  */
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_ST_8BIT_BUS;
 
+       if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+               clk |= MCI_ST_UX500_NEG_EDGE;
+
        mmci_write_clkreg(host, clk);
 }
 
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
        host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+       dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+       dmaengine_terminate_all(host->dma_current);
+       host->dma_current = NULL;
+       host->dma_desc_current = NULL;
+       host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-       struct dma_chan *chan = host->dma_current;
+       struct dma_chan *chan;
        enum dma_data_direction dir;
+
+       if (data->flags & MMC_DATA_READ) {
+               dir = DMA_FROM_DEVICE;
+               chan = host->dma_rx_channel;
+       } else {
+               dir = DMA_TO_DEVICE;
+               chan = host->dma_tx_channel;
+       }
+
+       dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
        u32 status;
        int i;
 
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
         * contiguous buffers.  On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
-               dmaengine_terminate_all(chan);
+               mmci_dma_data_error(host);
                if (!data->error)
                        data->error = -EIO;
        }
 
-       if (data->flags & MMC_DATA_WRITE) {
-               dir = DMA_TO_DEVICE;
-       } else {
-               dir = DMA_FROM_DEVICE;
-       }
-
        if (!data->host_cookie)
-               dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+               mmci_dma_unmap(host, data);
 
        /*
         * Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }
-}
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-       dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-       dmaengine_terminate_all(host->dma_current);
+       host->dma_current = NULL;
+       host->dma_desc_current = NULL;
 }
 
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-                             struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+                               struct dma_chan **dma_chan,
+                               struct dma_async_tx_descriptor **dma_desc)
 {
        struct variant_data *variant = host->variant;
        struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
        enum dma_data_direction buffer_dirn;
        int nr_sg;
 
-       /* Check if next job is already prepared */
-       if (data->host_cookie && !next &&
-           host->dma_current && host->dma_desc_current)
-               return 0;
-
-       if (!next) {
-               host->dma_current = NULL;
-               host->dma_desc_current = NULL;
-       }
-
        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_DEV_TO_MEM;
                buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
        if (!desc)
                goto unmap_exit;
 
-       if (next) {
-               next->dma_chan = chan;
-               next->dma_desc = desc;
-       } else {
-               host->dma_current = chan;
-               host->dma_desc_current = desc;
-       }
+       *dma_chan = chan;
+       *dma_desc = desc;
 
        return 0;
 
  unmap_exit:
-       if (!next)
-               dmaengine_terminate_all(chan);
        dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+                                    struct mmc_data *data)
+{
+       /* Check if next job is already prepared. */
+       if (host->dma_current && host->dma_desc_current)
+               return 0;
+
+       /* No job was prepared, so do it now. */
+       return __mmci_dma_prep_data(host, data, &host->dma_current,
+                                   &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+                                    struct mmc_data *data)
+{
+       struct mmci_host_next *nd = &host->next_data;
+       return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
        int ret;
        struct mmc_data *data = host->data;
 
-       ret = mmci_dma_prep_data(host, host->data, NULL);
+       ret = mmci_dma_prep_data(host, host->data);
        if (ret)
                return ret;
 
@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
        struct mmci_host_next *next = &host->next_data;
 
-       if (data->host_cookie && data->host_cookie != next->cookie) {
-               pr_warning("[%s] invalid cookie: data->host_cookie %d"
-                      " host->next_data.cookie %d\n",
-                      __func__, data->host_cookie, host->next_data.cookie);
-               data->host_cookie = 0;
-       }
-
-       if (!data->host_cookie)
-               return;
+       WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+       WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
 
        host->dma_desc_current = next->dma_desc;
        host->dma_current = next->dma_chan;
-
        next->dma_desc = NULL;
        next->dma_chan = NULL;
 }
@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
        if (!data)
                return;
 
-       if (data->host_cookie) {
-               data->host_cookie = 0;
+       BUG_ON(data->host_cookie);
+
+       if (mmci_validate_data(host, data))
                return;
-       }
 
-       /* if config for dma */
-       if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
-           ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
-               if (mmci_dma_prep_data(host, data, nd))
-                       data->host_cookie = 0;
-               else
-                       data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
-       }
+       if (!mmci_dma_prep_next(host, data))
+               data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 }
 
 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 {
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
-       struct dma_chan *chan;
-       enum dma_data_direction dir;
 
-       if (!data)
+       if (!data || !data->host_cookie)
                return;
 
-       if (data->flags & MMC_DATA_READ) {
-               dir = DMA_FROM_DEVICE;
-               chan = host->dma_rx_channel;
-       } else {
-               dir = DMA_TO_DEVICE;
-               chan = host->dma_tx_channel;
-       }
+       mmci_dma_unmap(host, data);
 
+       if (err) {
+               struct mmci_host_next *next = &host->next_data;
+               struct dma_chan *chan;
+               if (data->flags & MMC_DATA_READ)
+                       chan = host->dma_rx_channel;
+               else
+                       chan = host->dma_tx_channel;
+               dmaengine_terminate_all(chan);
 
-       /* if config for dma */
-       if (chan) {
-               if (err)
-                       dmaengine_terminate_all(chan);
-               if (data->host_cookie)
-                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                                    data->sg_len, dir);
-               mrq->data->host_cookie = 0;
+               next->dma_desc = NULL;
+               next->dma_chan = NULL;
        }
 }
 
@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 }
 
+static inline void mmci_dma_finalize(struct mmci_host *host,
+                                    struct mmc_data *data)
+{
+}
+
 static inline void mmci_dma_data_error(struct mmci_host *host)
 {
 }
@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
                        mmci_write_clkreg(host, clk);
                }
 
+       if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+               datactrl |= MCI_ST_DPSM_DDRMODE;
+
        /*
         * Attempt to use DMA operation mode, if this
         * should fail, fall back to PIO mode
@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                u32 remain, success;
 
                /* Terminate the DMA transfer */
-               if (dma_inprogress(host))
+               if (dma_inprogress(host)) {
                        mmci_dma_data_error(host);
+                       mmci_dma_unmap(host, data);
+               }
 
                /*
                 * Calculate how far we are into the transfer.  Note that
@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
        if (status & MCI_DATAEND || data->error) {
                if (dma_inprogress(host))
-                       mmci_dma_unmap(host, data);
+                       mmci_dma_finalize(host, data);
                mmci_stop_data(host);
 
                if (!data->error)
@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        if (!cmd->data || cmd->error) {
                if (host->data) {
                        /* Terminate the DMA transfer */
-                       if (dma_inprogress(host))
+                       if (dma_inprogress(host)) {
                                mmci_dma_data_error(host);
+                               mmci_dma_unmap(host, host->data);
+                       }
                        mmci_stop_data(host);
                }
                mmci_request_end(host, cmd->mrq);
@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        WARN_ON(host->mrq != NULL);
 
-       if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-               dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-                       mrq->data->blksz);
-               mrq->cmd->error = -EINVAL;
+       mrq->cmd->error = mmci_validate_data(host, mrq->data);
+       if (mrq->cmd->error) {
                mmc_request_done(mmc, mrq);
                return;
        }
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
-       int ret;
 
        pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        switch (ios->power_mode) {
        case MMC_POWER_OFF:
-               if (host->vcc)
-                       ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+               if (!IS_ERR(mmc->supply.vmmc))
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;
        case MMC_POWER_UP:
-               if (host->vcc) {
-                       ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
-                       if (ret) {
-                               dev_err(mmc_dev(mmc), "unable to set OCR\n");
-                               /*
-                                * The .set_ios() function in the mmc_host_ops
-                                * struct return void, and failing to set the
-                                * power should be rare so we print an error
-                                * and return here.
-                                */
-                               goto out;
-                       }
-               }
+               if (!IS_ERR(mmc->supply.vmmc))
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
                /*
                 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
                 * and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                }
        }
 
+       /*
+        * If clock = 0 and the variant requires the MMCIPOWER to be used for
+        * gating the clock, the MCI_PWR_ON bit is cleared.
+        */
+       if (!ios->clock && variant->pwrreg_clkgate)
+               pwr &= ~MCI_PWR_ON;
+
        spin_lock_irqsave(&host->lock, flags);
 
        mmci_set_clkreg(host, ios->clock);
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        spin_unlock_irqrestore(&host->lock, flags);
 
- out:
        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
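
pwrreg_clkgate is a per-variant flag in struct variant_data (introduced elsewhere in this series): on variants where the MMCIPOWER register takes part in clock gating, MCI_PWR_ON has to be dropped whenever the bus clock is 0, which is exactly what the new check does. A hedged illustration of how a variant would opt in (the initializer below is illustrative, not copied from the driver's variant table):

	static struct variant_data variant_ux500 = {
		/* ... other fields as in the real table ... */
		.pwrreg_clkgate		= true,	/* MMCIPOWER is used for clock gating */
	};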
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
        } else
                dev_warn(&dev->dev, "could not get default pinstate\n");
 
-#ifdef CONFIG_REGULATOR
-       /* If we're using the regulator framework, try to fetch a regulator */
-       host->vcc = regulator_get(&dev->dev, "vmmc");
-       if (IS_ERR(host->vcc))
-               host->vcc = NULL;
-       else {
-               int mask = mmc_regulator_get_ocrmask(host->vcc);
-
-               if (mask < 0)
-                       dev_err(&dev->dev, "error getting OCR mask (%d)\n",
-                               mask);
-               else {
-                       host->mmc->ocr_avail = (u32) mask;
-                       if (plat->ocr_mask)
-                               dev_warn(&dev->dev,
-                                "Provided ocr_mask/setpower will not be used "
-                                "(using regulator instead)\n");
-               }
-       }
-#endif
-       /* Fall back to platform data if no regulator is found */
-       if (host->vcc == NULL)
+       /* Get regulators and the supported OCR mask */
+       mmc_regulator_get_supply(mmc);
+       if (!mmc->ocr_avail)
                mmc->ocr_avail = plat->ocr_mask;
+       else if (plat->ocr_mask)
+               dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
        mmc->caps = plat->capabilities;
        mmc->caps2 = plat->capabilities2;
 
+       /* We support these PM capabilities. */
+       mmc->pm_caps = MMC_PM_KEEP_POWER;
+
        /*
         * We can do SGIO
         */
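
mmc_regulator_get_supply() makes the MMC core look up the vmmc/vqmmc supplies, stash them in mmc->supply and derive mmc->ocr_avail, which is why the open-coded regulator_get()/OCR-mask block, the host->vcc field (see the mmci.h hunk below) and the regulator teardown in mmci_remove() can all go; the core-managed supplies need no explicit regulator_put() in the driver. When no regulator is found, mmc->supply.vmmc holds an ERR_PTR, hence the IS_ERR() guards in mmci_set_ios(). A hedged sketch of the consumer side (mmci_set_vmmc is a hypothetical helper name):

	static void mmci_set_vmmc(struct mmc_host *mmc, unsigned short vdd_bit)
	{
		/* No "vmmc" supply: fall back to whatever the platform wired up. */
		if (IS_ERR(mmc->supply.vmmc))
			return;

		/* vdd_bit selects the voltage; 0 powers the card down. */
		if (mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd_bit))
			dev_warn(mmc_dev(mmc), "failed to set OCR\n");
	}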
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
                clk_disable_unprepare(host->clk);
                clk_put(host->clk);
 
-               if (host->vcc)
-                       mmc_regulator_set_ocr(mmc, host->vcc, 0);
-               regulator_put(host->vcc);
-
                mmc_free_host(mmc);
 
                amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+       struct amba_device *adev = to_amba_device(dev);
+       struct mmc_host *mmc = amba_get_drvdata(adev);
+
+       if (mmc) {
+               struct mmci_host *host = mmc_priv(mmc);
+               clk_disable_unprepare(host->clk);
+       }
+
+       return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+       struct amba_device *adev = to_amba_device(dev);
+       struct mmc_host *mmc = amba_get_drvdata(adev);
+
+       if (mmc) {
+               struct mmci_host *host = mmc_priv(mmc);
+               clk_prepare_enable(host->clk);
+       }
+
+       return 0;
+}
+#endif
+
 static const struct dev_pm_ops mmci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+       SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
 };
 
 static struct amba_id mmci_ids[] = {
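
The runtime PM callbacks just gate host->clk; they are only invoked once every pm_runtime_get_sync() taken by the driver has been balanced with pm_runtime_put_autosuspend(), as mmci_set_ios() above already does. A hedged sketch of that calling pattern (the function name is illustrative):

	static void mmci_touch_hardware(struct mmci_host *host)
	{
		struct device *dev = mmc_dev(host->mmc);

		/* Bring the clock back (mmci_runtime_resume) before any MMIO. */
		pm_runtime_get_sync(dev);

		/* ... register accesses via host->base are safe here ... */

		/* Allow the autosuspend timer to gate the clock again later. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}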
@@ -1651,6 +1702,11 @@ static struct amba_id mmci_ids[] = {
                .mask   = 0xff0fffff,
                .data   = &variant_arm_extended_fifo,
        },
+       {
+               .id     = 0x02041180,
+               .mask   = 0xff0fffff,
+               .data   = &variant_arm_extended_fifo_hwfc,
+       },
        {
                .id     = 0x00041181,
                .mask   = 0x000fffff,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d34d8c0..1f33ad5 100644 (file)
@@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE  (1 << 13)
 #define MCI_ST_UX500_HWFCEN    (1 << 14)
 #define MCI_ST_UX500_CLK_INV   (1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN         (1 << 12)
 
 #define MMCIARGUMENT           0x008
 #define MMCICOMMAND            0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
        /* pio stuff */
        struct sg_mapping_iter  sg_miter;
        unsigned int            size;
-       struct regulator        *vcc;
 
        /* pinctrl handles */
        struct pinctrl          *pinctrl;
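
The new amba_id (0x02041180) binds a modified PL180 found on Versatile Express to a variant_arm_extended_fifo_hwfc variant, and MCI_ARM_HWFCEN names its hardware flow-control enable bit in the clock register. A hedged guess at the variant definition (which sits near the top of mmci.c and is not shown in these hunks) is the extended-FIFO ARM variant plus that bit in clkreg_enable:

	/* Illustrative sketch only -- see the real variant table in mmci.c. */
	static struct variant_data variant_arm_extended_fifo_hwfc = {
		.fifosize		= 128 * 4,
		.fifohalfsize		= 64 * 4,
		.clkreg_enable		= MCI_ARM_HWFCEN,	/* hardware flow control */
		.datalength_bits	= 16,
	};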
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8a7096f..6634652 100644 (file)
@@ -161,6 +161,15 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
 extern void clockevents_suspend(void);
 extern void clockevents_resume(void);
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+extern void tick_broadcast(const struct cpumask *mask);
+#else
+#define tick_broadcast NULL
+#endif
+extern int tick_receive_broadcast(void);
+#endif
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
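
These declarations give an architecture two hooks: tick_broadcast() on the sending side (compiled in only when the architecture selects ARCH_HAS_TICK_BROADCAST, otherwise it is NULL and the core falls back to err_broadcast() below) and tick_receive_broadcast() on the receiving side. On ARM, added elsewhere in this merge, the send side boils down to an IPI; a hedged sketch of such an implementation:

	/* Hedged sketch of an architecture's send side (ARM-flavoured names;
	 * smp_cross_call()/IPI_TIMER are not generic kernel symbols). */
	void tick_broadcast(const struct cpumask *mask)
	{
		smp_cross_call(mask, IPI_TIMER);
	}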
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e6e5d4b..7f2360a 100644 (file)
@@ -115,6 +115,7 @@ struct kvm_irq_level {
         * ACPI gsi notion of irq.
         * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
         * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
+        * For ARM: See Documentation/virtual/kvm/api.txt
         */
        union {
                __u32 irq;
@@ -635,6 +636,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_IRQFD_RESAMPLE 82
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84
+#define KVM_CAP_ARM_PSCI 87
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -764,6 +766,11 @@ struct kvm_dirty_tlb {
 #define KVM_REG_SIZE_U512      0x0060000000000000ULL
 #define KVM_REG_SIZE_U1024     0x0070000000000000ULL
 
+struct kvm_reg_list {
+       __u64 n; /* number of regs */
+       __u64 reg[0];
+};
+
 struct kvm_one_reg {
        __u64 id;
        __u64 addr;
@@ -932,6 +939,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_ONE_REG                  _IOW(KVMIO,  0xac, struct kvm_one_reg)
 /* VM is being stopped by host */
 #define KVM_KVMCLOCK_CTRL        _IO(KVMIO,   0xad)
+#define KVM_ARM_VCPU_INIT        _IOW(KVMIO,  0xae, struct kvm_vcpu_init)
+#define KVM_GET_REG_LIST         _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3         (1 << 1)
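
struct kvm_reg_list is a counted, variable-length array: userspace sets n to the number of slots it allocated, and KVM_GET_REG_LIST either fills reg[] with register ids usable with KVM_GET_ONE_REG/KVM_SET_ONE_REG or fails with E2BIG after writing the required count back into n. A hedged userspace sketch of the usual two-call pattern (vcpu_fd is assumed to be an already-created vCPU file descriptor):

	#include <errno.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_reg_list *get_reg_list(int vcpu_fd)
	{
		struct kvm_reg_list probe, *list;

		/* First call with n == 0: expected to fail with E2BIG, with the
		 * required number of registers written back into probe.n. */
		probe.n = 0;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
			return NULL;

		list = malloc(sizeof(*list) + probe.n * sizeof(list->reg[0]));
		if (!list)
			return NULL;

		list->n = probe.n;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
			free(list);
			return NULL;
		}

		return list;	/* list->reg[0 .. n-1] now holds KVM_REG_* ids */
	}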
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8601f0d..b696922 100644 (file)
@@ -38,6 +38,10 @@ config GENERIC_CLOCKEVENTS_BUILD
        default y
        depends on GENERIC_CLOCKEVENTS
 
+# Architecture can handle broadcast in a driver-agnostic way
+config ARCH_HAS_TICK_BROADCAST
+       bool
+
 # Clockevents broadcasting infrastructure
 config GENERIC_CLOCKEVENTS_BROADCAST
        bool
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f113755..f726537 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 
 #include "tick-internal.h"
 
@@ -86,6 +87,11 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
        return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+static void err_broadcast(const struct cpumask *mask)
+{
+       pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
+}
+
 /*
  * Check, if the device is disfunctional and a place holder, which
  * needs to be handled by the broadcast device.
@@ -105,6 +111,13 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
+               if (!dev->broadcast)
+                       dev->broadcast = tick_broadcast;
+               if (!dev->broadcast) {
+                       pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
+                                    dev->name);
+                       dev->broadcast = err_broadcast;
+               }
                cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
@@ -125,6 +138,23 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
        return ret;
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+int tick_receive_broadcast(void)
+{
+       struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+       struct clock_event_device *evt = td->evtdev;
+
+       if (!evt)
+               return -ENODEV;
+
+       if (!evt->event_handler)
+               return -EINVAL;
+
+       evt->event_handler(evt);
+       return 0;
+}
+#endif
+
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
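
tick_receive_broadcast() is the receiving half: the CPU woken by the broadcast runs its own (otherwise idle or stopped) tick device's handler. A hedged sketch of the architecture IPI handler that would call it (on ARM this becomes a case in handle_IPI(); the wrapper below is illustrative, not the actual kernel function):

	/* Hedged sketch of the receive path in an architecture's IPI code. */
	static void handle_timer_broadcast_ipi(void)
	{
		irq_enter();
		tick_receive_broadcast();	/* runs the local tick device's event_handler */
		irq_exit();
	}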