KVM: s390: move vcpu wakeup code to a central point
[pandora-kernel.git] / arch / s390 / kvm / sigp.c
index 26caeb5..c6f1c2b 100644 (file)
@@ -54,33 +54,23 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-       struct kvm_s390_local_interrupt *li;
-       struct kvm_s390_interrupt_info *inti;
+       struct kvm_s390_interrupt s390int = {
+               .type = KVM_S390_INT_EMERGENCY,
+               .parm = vcpu->vcpu_id,
+       };
        struct kvm_vcpu *dst_vcpu = NULL;
+       int rc = 0;
 
        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
 
-       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-       if (!inti)
-               return -ENOMEM;
-
-       inti->type = KVM_S390_INT_EMERGENCY;
-       inti->emerg.code = vcpu->vcpu_id;
+       rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+       if (!rc)
+               VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
 
-       li = &dst_vcpu->arch.local_int;
-       spin_lock_bh(&li->lock);
-       list_add_tail(&inti->list, &li->list);
-       atomic_set(&li->active, 1);
-       atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
-       spin_unlock_bh(&li->lock);
-       VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
-
-       return SIGP_CC_ORDER_CODE_ACCEPTED;
+       return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -116,37 +106,28 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-       struct kvm_s390_local_interrupt *li;
-       struct kvm_s390_interrupt_info *inti;
+       struct kvm_s390_interrupt s390int = {
+               .type = KVM_S390_INT_EXTERNAL_CALL,
+               .parm = vcpu->vcpu_id,
+       };
        struct kvm_vcpu *dst_vcpu = NULL;
+       int rc;
 
        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
 
-       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-       if (!inti)
-               return -ENOMEM;
-
-       inti->type = KVM_S390_INT_EXTERNAL_CALL;
-       inti->extcall.code = vcpu->vcpu_id;
-
-       li = &dst_vcpu->arch.local_int;
-       spin_lock_bh(&li->lock);
-       list_add_tail(&inti->list, &li->list);
-       atomic_set(&li->active, 1);
-       atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
-       spin_unlock_bh(&li->lock);
-       VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+       rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+       if (!rc)
+               VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
 
-       return SIGP_CC_ORDER_CODE_ACCEPTED;
+       return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 {
+       struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
@@ -155,7 +136,12 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;
 
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
+       if (li->action_bits & ACTION_STOP_ON_STOP) {
+               /* another SIGP STOP is pending */
+               rc = SIGP_CC_BUSY;
+               goto out;
+       }
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
@@ -164,19 +150,17 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
-       atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
+       atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+       kvm_s390_vcpu_wakeup(dst_vcpu);
 out:
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
 
        return rc;
 }
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-       struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        int rc;
 
@@ -186,9 +170,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
-       li = &dst_vcpu->arch.local_int;
 
-       rc = __inject_sigp_stop(li, action);
+       rc = __inject_sigp_stop(dst_vcpu, action);
 
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
@@ -235,7 +218,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
        struct kvm_vcpu *dst_vcpu = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;
-       u8 tmp;
 
        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
@@ -243,10 +225,13 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;
 
-       /* make sure that the new value is valid memory */
-       address = address & 0x7fffe000u;
-       if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
-          copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
+       /*
+        * Make sure the new value is valid memory. We only need to check the
+        * first page, since address is 8k aligned and memory pieces are always
+        * at least 1MB aligned and have at least a size of 1MB.
+        */
+       address &= 0x7fffe000u;
+       if (kvm_is_error_gpa(vcpu->kvm, address)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                return SIGP_CC_STATUS_STORED;
@@ -256,7 +241,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
        if (!inti)
                return SIGP_CC_BUSY;
 
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
@@ -271,13 +256,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
+       kvm_s390_vcpu_wakeup(dst_vcpu);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
        return rc;
 }
 
@@ -293,9 +277,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
 
-       spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+       spin_lock(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-       spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+       spin_unlock(&dst_vcpu->arch.local_int.lock);
        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -356,10 +340,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
 
        return rc;
 }
@@ -456,3 +440,33 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
 }
+
+/*
+ * Handle SIGP partial execution interception.
+ *
+ * This interception will occur at the source cpu when a source cpu sends an
+ * external call to a target cpu and the target cpu has the WAIT bit set in
+ * its cpuflags. Interception will occur after the interrupt indicator bits at
+ * the target cpu have been set. All error cases will lead to instruction
+ * interception, therefore nothing is to be checked or prepared.
+ */
+int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
+{
+       int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+       u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
+       struct kvm_vcpu *dest_vcpu;
+       u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+
+       trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
+
+       if (order_code == SIGP_EXTERNAL_CALL) {
+               dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+               BUG_ON(dest_vcpu == NULL);
+
+               kvm_s390_vcpu_wakeup(dest_vcpu);
+               kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
+               return 0;
+       }
+
+       return -EOPNOTSUPP;
+}