/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

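/*
 * Map debugfs counter names to the corresponding fields in struct
 * kvm_vcpu's statistics block; the files themselves are created by
 * common KVM code.
 */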
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

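/* Report which optional capabilities are available (KVM_CHECK_EXTENSION). */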
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

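/*
 * Create the architecture-specific parts of a VM: the system control
 * area (SCA), the s390 debug feature and, unless this is a
 * user-controlled VM, the guest address space (gmap).
 */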
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

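/* Detach the vcpu from the SCA and release its SIE control block. */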
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

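/* Destroy all vcpus and drop the VM's references to them. */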
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

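/*
 * Context switch onto this host cpu: save the host's floating point
 * and access registers, install the guest's, and activate the guest
 * address space.
 */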
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

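/* Put the vcpu into the architected initial-cpu-reset state and mark it stopped. */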
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

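/*
 * Allocate a vcpu and its SIE control block, wire it into the SCA
 * (unless this is a user-controlled VM) and set up its local
 * interrupt state.
 */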
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

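/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG: only the TOD programmable register,
 * the epoch difference, the cpu timer and the clock comparator are
 * accessible through this interface.
 */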
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

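/*
 * Run the vcpu once: deliver pending interrupts, enter SIE, and turn a
 * host fault on the SIE instruction into an addressing exception for
 * the guest (or into a ucontrol exit for user-controlled VMs).
 */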
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	preempt_disable();
	kvm_guest_enter();
	preempt_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	kvm_guest_exit();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

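/*
 * The KVM_RUN loop: sync register state in from kvm_run, run and
 * handle intercepts until userspace attention or a signal is required,
 * then sync state back out.
 */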
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

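/*
 * Copy to guest memory either via the logical (prefixed) or the
 * absolute address space, depending on the caller's choice.
 */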
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

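/* Dispatcher for the vcpu-level ioctls. */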
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

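/*
 * For user-controlled VMs the SIE control block is mmap()ed into
 * userspace at KVM_S390_SIE_PAGE_OFFSET; everything else gets SIGBUS.
 */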
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

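/* Module init: register with common KVM code and set up the facility list. */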
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);