KVM: async_pf: Async page fault support on s390
arch/s390/kvm/sigp.c
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

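/*
 * SIGP SENSE: report the status of the addressed CPU. A running CPU
 * with no pending external call yields "order code accepted"; otherwise
 * the ext-call-pending and/or stopped status bits are stored in the
 * caller's register.
 */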
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                        u64 *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = SIGP_CC_NOT_OPERATIONAL;
        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
                   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        else {
                *reg &= 0xffffffff00000000UL;
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_ECALL_PEND)
                        *reg |= SIGP_STATUS_EXT_CALL_PENDING;
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_STOPPED)
                        *reg |= SIGP_STATUS_STOPPED;
                rc = SIGP_CC_STATUS_STORED;
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
}

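/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt,
 * tagged with the sender's CPU address, on the target CPU's local
 * interrupt list and wake the target if it is waiting.
 */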
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EMERGENCY;
        inti->emerg.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}

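/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal if
 * the target CPU is not stopped, is disabled for I/O or external
 * interrupts, waits with a nonzero PSW address, or runs with the given
 * ASN as its primary or secondary ASN; otherwise store "incorrect
 * state".
 */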
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                        u16 asn, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
        u16 p_asn, s_asn;
        psw_t *psw;
        u32 flags;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
        psw = &dst_vcpu->arch.sie_block->gpsw;
        p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
        s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

        /* Deliver the emergency signal? */
        if (!(flags & CPUSTAT_STOPPED)
            || (psw->mask & psw_int_mask) != psw_int_mask
            || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
            || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
                return __sigp_emergency(vcpu, cpu_addr);
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }
}

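/*
 * SIGP EXTERNAL CALL: queue an external-call interrupt carrying the
 * sender's CPU address on the target CPU's local interrupt list.
 */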
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EXTERNAL_CALL;
        inti->extcall.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}

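/*
 * Queue a stop interrupt on the given local-interrupt struct and record
 * the requested action bits. An already-stopped CPU is reported via
 * -ESHUTDOWN when ACTION_STORE_ON_STOP is set, so the caller can still
 * store the status.
 */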
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock_bh(&li->lock);
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
                        rc = -ESHUTDOWN;
                goto out;
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
out:
        spin_unlock_bh(&li->lock);

        return rc;
}

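/*
 * SIGP STOP / STOP AND STORE STATUS: inject a stop request into the
 * target CPU. For stop-and-store on an already-stopped target, the
 * status is stored here, after all spinlocks have been dropped.
 */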
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                goto unlock;
        }

        rc = __inject_sigp_stop(li, action);

unlock:
        spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

        if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
                /* If the CPU has already been stopped, we still have
                 * to save the status when doing stop-and-store. This
                 * has to be done after unlocking all spinlocks. */
                struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                rc = kvm_s390_store_status_unloaded(dst_vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
        }

        return rc;
}

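/*
 * SIGP SET ARCHITECTURE: a switch to ESA/S390 mode (parameter 0) is
 * reported as not operational; switches to z/Architecture mode (1, 2)
 * are accepted and invalidate every VCPU's pfault token, cancelling
 * async page faults that are still in flight.
 */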
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;
        unsigned int i;
        struct kvm_vcpu *v;

        switch (parameter & 0xff) {
        case 0:
                rc = SIGP_CC_NOT_OPERATIONAL;
                break;
        case 1:
        case 2:
                kvm_for_each_vcpu(i, v, vcpu->kvm) {
                        v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
                        kvm_clear_async_pf_completion_queue(v);
                }

                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}

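/*
 * SIGP SET PREFIX: check that both pages of the 8K prefix area at the
 * new address are backed by guest memory, then queue a set-prefix
 * interrupt for the target CPU, which must be in the stopped state.
 */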
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                             u64 *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;
        u8 tmp;

        /* make sure that the new value is valid memory */
        address = address & 0x7fffe000u;
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                return SIGP_CC_STATUS_STORED;
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return SIGP_CC_BUSY;

        spin_lock(&fi->lock);
        if (cpu_addr < KVM_MAX_VCPUS)
                li = fi->local_int[cpu_addr];

        if (li == NULL) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_fi;
        }

        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
        spin_unlock_bh(&li->lock);
out_fi:
        spin_unlock(&fi->lock);
        return rc;
}

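/*
 * SIGP STORE STATUS AT ADDRESS: store the target CPU's status at the
 * given 512-byte-aligned address. The target must be stopped; an
 * inaccessible address is reported as an invalid parameter.
 */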
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
                                        u32 addr, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        int flags;
        int rc;

        if (cpu_id < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock_bh(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
        spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }

        addr &= 0x7ffffe00;
        rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
        if (rc == -EFAULT) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                rc = SIGP_CC_STATUS_STORED;
        }
        return rc;
}

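/*
 * SIGP SENSE RUNNING: report whether the target CPU is currently
 * running; if not, store the not-running status bit.
 */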
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                u64 *reg)
{
        int rc;
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = SIGP_CC_NOT_OPERATIONAL;
        else {
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_RUNNING) {
                        /* running */
                        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                } else {
                        /* not running */
                        *reg &= 0xffffffff00000000UL;
                        *reg |= SIGP_STATUS_NOT_RUNNING;
                        rc = SIGP_CC_STATUS_STORED;
                }
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                   rc);

        return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                goto out;
        }

        spin_lock_bh(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
        spin_unlock_bh(&li->lock);
out:
        spin_unlock(&fi->lock);
        return rc;
}

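/*
 * Intercept handler for the SIGP instruction: decode the order code,
 * CPU address and parameter from the instruction and guest registers,
 * dispatch to the helpers above, and fold the result into the guest's
 * condition code. Orders that must be completed in user space return
 * -EOPNOTSUPP.
 */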
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u32 parameter;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        u8 order_code;
        int rc;

        /* sigp in userspace can exit */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        order_code = kvm_s390_get_base_disp_rs(vcpu);

        if (r1 % 2)
                parameter = vcpu->run->s.regs.gprs[r1];
        else
                parameter = vcpu->run->s.regs.gprs[r1 + 1];

        trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, cpu_addr,
                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, cpu_addr);
                break;
        case SIGP_EMERGENCY_SIGNAL:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, cpu_addr);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_AND_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
                                                 ACTION_STOP_ON_STOP);
                break;
        case SIGP_STORE_STATUS_AT_ADDRESS:
                rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
                                                 &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SET_ARCHITECTURE:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_COND_EMERGENCY_SIGNAL:
                rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
                                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, cpu_addr,
                                          &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_START:
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
                        rc = -EOPNOTSUPP;    /* Handle START in user space */
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
                        VCPU_EVENT(vcpu, 4,
                                   "sigp restart %x to handle userspace",
                                   cpu_addr);
                        /* user space must know about restart */
                        rc = -EOPNOTSUPP;
                }
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (rc < 0)
                return rc;

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}